vms.go (revision 8e3cd73e68888b0e134085989f7b9eaf0bdbd55b, branch live_exam_os_ubuntu)

    package vms
    
    import (
    	"encoding/base64"
    	"errors"
    	"math"
    	"nexus-common/caps"
    	"nexus-common/params"
    	cu "nexus-common/utils"
    	vmc "nexus-common/vm"
    	"nexus-server/config"
    	"nexus-server/exec"
    	"nexus-server/logger"
    	"nexus-server/qga"
    	"nexus-server/users"
    	"nexus-server/utils"
    	"os"
    	osexec "os/exec"
    	"path/filepath"
    	"sort"
    	"sync"
    	"syscall"
    	"time"
    
    	"github.com/google/uuid"
    )
    
    type (
    	VMKeeperFn func(vm *VM) bool
    
    	VMs struct {
    		m              map[string]*VM       // Map of VM IDs to VMs
    		pwdToVM        map[string]uuid.UUID // Map of VM attach passwords to VM IDs
    		mutexPwdToVM   *sync.Mutex
    		dir            string        // Base directory where VMs are stored
    		rwlock         *sync.RWMutex // RWLock ensuring the coherency of the VMs map (m)
    		usedPorts      [65536]bool   // Ports used by VMs for Spice
    		mutexUsedPorts *sync.Mutex
    		usedRAM        int // RAM used by running VMs (in MB)
    	}
    )
    
    var log = logger.GetInstance()
    var conf = config.GetInstance()
    var vms *VMs
    
    // Returns a VMs singleton to access this module's public functions.
    // IMPORTANT: the InitVMs function must have been previously called!
    // Concurrency: safe
    func GetVMsInstance() *VMs {
    	return vms
    }
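
    // Typical bootstrap order (an illustrative sketch; error handling is left to the caller):
    //
    //	if err := InitVMs(); err != nil {
    //		// the module is unusable without a successful initialization
    //	}
    //	store := GetVMsInstance()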
    
    // Creates all VMs from their files on disk.
    // REMARK: VMs are loaded from conf.VMsDir, the root directory where VMs reside.
    // Concurrency: unsafe!
    func InitVMs() error {
    	vmsDir := conf.VMsDir
    	vms = &VMs{m: make(map[string]*VM), pwdToVM: make(map[string]uuid.UUID), mutexPwdToVM: new(sync.Mutex), dir: vmsDir, rwlock: new(sync.RWMutex), mutexUsedPorts: new(sync.Mutex), usedRAM: 0}
    	for i := range vms.usedPorts {
    		vms.usedPorts[i] = false
    	}
    	vms.usedPorts[conf.Core.APIDefaultPort] = true
    
    	errMsg := "Failed reading VMs directory: "
    	dirs1, err := cu.GetSubDirs(vmsDir)
    	if err != nil {
    		return errors.New(errMsg + err.Error())
    	}
    
    	for d1 := range dirs1 {
    		dirs2, err := cu.GetSubDirs(dirs1[d1])
    		if err != nil {
    			return errors.New(errMsg + err.Error())
    		}
    
    		for d2 := range dirs2 {
    			dirs3, err := cu.GetSubDirs(dirs2[d2])
    			if err != nil {
    				return errors.New(errMsg + err.Error())
    			}
    
    			for i := range dirs3 {
    				vmDir := dirs3[i]
    				filename := filepath.Join(vmDir, vmConfFile)
    				vm, err := newVMFromFile(filename)
    				if err != nil {
    					log.Warn("Skipping VM: failed reading \"" + filename + "\" " + err.Error())
    					continue
    				}
    
    				filename = filepath.Join(vmDir, vmDiskFile)
    				f, err := os.OpenFile(filename, os.O_RDONLY, 0)
    				if err != nil {
    					log.Warn("Skipping VM: failed reading config file:" + err.Error())
    					continue
    				}
    				f.Close()
    
    				vms.m[vm.v.ID.String()] = vm
    			}
    		}
    	}
    
    	return nil
    }
    
    // Returns the list of serialized VMs for which keepFn returns true.
    // Concurrency: safe
    func (vms *VMs) GetNetworkSerializedVMs(keepFn VMKeeperFn) []vmc.VMNetworkSerialized {
    	vms.rwlock.RLock()
    
    	list := []vmc.VMNetworkSerialized{}
    	for _, vm := range vms.m {
    		vm.mutex.Lock()
    		if keepFn(vm) {
    			list = append(list, vm.Serialize())
    		}
    		vm.mutex.Unlock()
    	}
    	vms.rwlock.RUnlock()
    
    	// Sort VMs by names
    	sort.Slice(list, func(i, j int) bool {
    		return list[i].Name < list[j].Name
    	})
    	return list
    }
    
    // Returns the list of serialized "VM attach" credentials for which VMKeeperFn is true.
    // Concurrency: safe
    func (vms *VMs) GetVMAttachCredentialsSerialized(keepFn VMKeeperFn) []vmc.VMAttachCredentialsSerialized {
    	vms.rwlock.RLock()
    
    	list := []vmc.VMAttachCredentialsSerialized{}
    	for _, vm := range vms.m {
    		vm.mutex.Lock()
    		if keepFn(vm) {
    			list = append(list, vm.SerializeAttachCredentials())
    		}
    		vm.mutex.Unlock()
    	}
    	vms.rwlock.RUnlock()
    
    	// Sort VMs by names
    	sort.Slice(list, func(i, j int) bool {
    		return list[i].Name < list[j].Name
    	})
    	return list
    }
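
    // Illustrative VMKeeperFn (a sketch; which VMs are kept is entirely up to the caller):
    //
    //	running := vms.GetNetworkSerializedVMs(func(vm *VM) bool {
    //		return vm.IsRunning()
    //	})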
    
    // Returns a VM by its ID. Concurrency-safe version.
    // Concurrency: safe
    func (vms *VMs) GetVM(vmID uuid.UUID) (*VM, error) {
    	vms.rwlock.RLock()
    	defer vms.rwlock.RUnlock()
    	return vms.getVMUnsafe(vmID)
    }
    
    // Returns the running VM matching an "attach password".
    // Concurrency: safe
    func (vms *VMs) GetVMByPwd(pwd string) (*VM, error) {
    	vms.rwlock.RLock()
    	defer vms.rwlock.RUnlock()
    
    	vmID, exists := vms.pwdToVM[pwd]
    	if !exists {
    		return nil, errors.New("VM not found")
    	}
    	return vms.getVMUnsafe(vmID)
    }
    
    // Returns a VM by its ID. Concurrency-unsafe version.
    // Concurrency: unsafe!
    func (vms *VMs) getVMUnsafe(vmID uuid.UUID) (*VM, error) {
    	vm, exists := vms.m[vmID.String()]
    	if !exists {
    		return nil, errors.New("VM not found")
    	}
    	return vm, nil
    }
    
    // Deletes a VM by its ID and deletes its associated files.
    // Concurrency: safe
    func (vms *VMs) DeleteVM(vmID uuid.UUID) error {
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if vm.IsRunning() {
    		return errors.New("failed deleting VM: VM must be stopped")
    	}
    
    	if vm.IsDiskBusy() {
    		return errors.New("failed deleting VM: disk in use (busy)")
    	}
    
    	// Deletes the VM's files (and directories).
    	if err := vm.delete(); err != nil {
    		return err
    	}
    
    	// Removes the VM from the map.
    	delete(vms.m, vmID.String())
    	return nil
    }
    
    // Adds a new VM and writes its associated files.
    // Concurrency: safe
    func (vms *VMs) AddVM(vm *VM) error {
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	// Writes the VM's files first, since this step can fail.
    	err := vm.writeFiles()
    	if err != nil {
    		return err
    	}
    
    	// Adds VM to the map of VMs.
    	vms.rwlock.Lock()
    	key := vm.v.ID.String()
    	vms.m[key] = vm
    	vms.rwlock.Unlock()
    
    	return nil
    }
    
    // Prepares a VM for starting and launches its QEMU process.
    // If attachPwd is empty, a random unique password is generated; otherwise the provided
    // password is used.
    // Concurrency: safe
    func (vms *VMs) prepareStartVM(vmID uuid.UUID, attachPwd string) (*osexec.Cmd, *VM, string, int, int, error) {
    	prefix := "Failed starting VM: "
    	var err error
    	prepareCompleted := false
    
    	// Randomly generates a unique password used to attach to the VM
    	if attachPwd == "" {
    		attachPwd, err = vms.allocateRandomAttachPwd(vmID)
    		if err != nil {
    			msg := prefix + vmID.String() + ": attach password allocation error: " + err.Error()
    			log.Error(msg)
    			return nil, nil, "", 0, 0, errors.New(msg)
    		}
    	} else {
    		err = vms.allocateAttachPwd(attachPwd, vmID)
    		if err != nil {
    			msg := prefix + vmID.String() + ": attach password allocation error: " + err.Error()
    			log.Error(msg)
    			return nil, nil, "", 0, 0, errors.New(msg)
    		}
    	}
    
    	// Function that frees the allocated "VM attach" password.
    	freeAttachPwdFn := func(attachPwd string) {
    		if !prepareCompleted {
    			vms.freeAttachPwd(attachPwd)
    		}
    	}
    	defer freeAttachPwdFn(attachPwd)
    
    	// Randomly generates a password for Spice
    	spicePwd, err := utils.CustomPwd.Generate(conf.VMSpicePwdLength, conf.VMSpicePwdDigitCount, conf.VMSpicePwdSymbolCount, false, conf.VMSpicePwdRepeatChars)
    	if err != nil {
    		msg := prefix + vmID.String() + ": Spice password generation error: " + err.Error()
    		log.Error(msg)
    		return nil, nil, "", 0, 0, errors.New(msg)
    	}
    
    	// Allocates a free random port for Spice
    	spicePort, err := vms.allocateSpiceRandomPort()
    	if err != nil {
    		msg := prefix + vmID.String() + ": " + err.Error()
    		log.Error(msg)
    		return nil, nil, "", 0, 0, errors.New(msg)
    	}
    
    	// Function that frees the port allocated for Spice.
    	freeSpiceRandomPortFn := func(spicePort int) {
    		if !prepareCompleted {
    			vms.freeSpiceRandomPort(spicePort)
    		}
    	}
    	defer freeSpiceRandomPortFn(spicePort)
    
    	totalRAM, availRAM, err := utils.GetRAM()
    	if err != nil {
    		return nil, nil, "", 0, 0, errors.New(prefix + "failed obtaining memory info: " + err.Error())
    	}
    
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return nil, nil, "", 0, 0, err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if vm.IsRunning() {
    		return nil, nil, "", 0, 0, errors.New(prefix + "already running")
    	}
    
    	if vm.IsDiskBusy() {
    		return nil, nil, "", 0, 0, errors.New(prefix + "disk in use (busy)")
    	}
    
    	// We estimate ~30% of RAM saving thanks to KSM (due to page sharing across VMs).
    	estimatedVmRAM := int(math.Round(float64(vm.v.Ram) * (1. - conf.Limits.KsmRamSaving)))
    
    	vms.usedRAM += estimatedVmRAM
    
    	// Checks that enough available RAM would be left after the VM has started;
    	// otherwise, refuses to run it in order to avoid RAM saturation.
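    	// Worked example (numbers purely illustrative, not taken from the configuration):
    	// with totalRAM=16000 MB, RamUsageLimit=0.9 and availRAM=4000 MB, any start that
    	// would leave availRAM-usedRAM at or below 16000*(1-0.9)=1600 MB is refused.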
    	if availRAM-vms.usedRAM <= int(math.Round(float64(totalRAM)*(1.-conf.Limits.RamUsageLimit))) {
    		vms.usedRAM -= estimatedVmRAM
    		return nil, nil, "", 0, 0, errors.New(prefix + "insufficient free RAM")
    	}
    
    	// Writes the password in Base64 in a secret file inside the VM's directory.
    	pwdBase64 := base64.StdEncoding.EncodeToString([]byte(spicePwd))
    	content := []byte(pwdBase64)
    	spicePwdFile := filepath.Join(vm.dir, vmSecretFile)
    	if err := os.WriteFile(spicePwdFile, content, 0600); err != nil {
    		msg := prefix + "error creating secret file: " + err.Error()
    		log.Error(msg)
    		return nil, nil, "", 0, 0, errors.New(msg)
    	}
    
    	// Function that deletes a VM's secret file.
    	removeSecretFileFn := func(vm *VM) {
    		if !prepareCompleted {
    			spicePwdFile := filepath.Join(vm.dir, vmSecretFile)
    			os.Remove(spicePwdFile)
    		}
    	}
    	defer removeSecretFileFn(vm)
    
    	// Executes the QEMU process.
    	certsDir := conf.CertsDir
    	cmd, err := exec.NewQemuSystem(vm.qgaSock, vm.v.Cpus, vm.v.Ram, string(vm.v.Nic), vm.v.UsbDevs, filepath.Join(vm.dir, vmDiskFile), spicePort, spicePwdFile, certsDir)
    	if err != nil {
    		os.Remove(vm.qgaSock) // If QEMU failed, the Guest Agent socket file it created is likely still there.
    		log.Error(prefix + "NewQemuSystem error: " + err.Error())
    		return nil, nil, "", 0, 0, err
    	}
    	if err := cmd.Start(); err != nil {
    		os.Remove(vm.qgaSock) // If QEMU failed, the Guest Agent socket file it created is likely still there.
    		log.Error(prefix + vm.v.ID.String() + ": exec.Start error: " + err.Error())
    		log.Error("Failed cmd: " + cmd.String())
    		return nil, nil, "", 0, 0, err
    	}
    
    	// Updates the VM's running states.
    	vm.Run = runStates{State: vmc.StateRunning, Pid: cmd.Process.Pid, SpicePort: spicePort, SpicePwd: spicePwd, AttachPwd: attachPwd, StartTime: time.Now()}
    	vm.DiskBusy = true
    
    	prepareCompleted = true
    
    	return cmd, vm, attachPwd, spicePort, estimatedVmRAM, nil
    }
    
    // Starts a VM by its ID.
    // If attachPwd is empty, a random unique password is generated; otherwise the provided
    // password is used.
    // Concurrency: safe
    func (vms *VMs) StartVM(vmID uuid.UUID, attachPwd string) error {
    	cmd, vm, attachPwd, spicePort, estimatedVmRAM, err := vms.prepareStartVM(vmID, attachPwd)
    	if err != nil {
    		return err
    	}

    	go func() {
    		if err := cmd.Wait(); err != nil {
    			log.Error("Failed starting VM: " + vm.v.ID.String() + ": exec.Wait error: " + err.Error())
    			log.Error("Failed cmd: " + cmd.String())
    		}

    		// Resets the VM's running states, frees its Spice port, frees its attach password and deletes its secret file.
    		// Executed only once the VM's execution is over, i.e. when the QEMU process terminates (normal or aborted termination).
    		vms.freeAttachPwd(attachPwd)
    		vms.freeSpiceRandomPort(spicePort)
    		vms.rwlock.Lock()
    		vms.usedRAM -= estimatedVmRAM
    		vms.rwlock.Unlock()
    		vm.mutex.Lock()
    		spicePwdFile := filepath.Join(vm.dir, vmSecretFile)
    		os.Remove(spicePwdFile)
    		// Resets VM states and disk busy flag.
    		zeroTime := time.Time{}
    		vm.Run = runStates{State: vmc.StateStopped, Pid: 0, SpicePort: 0, SpicePwd: "", AttachPwd: "", StartTime: zeroTime}
    		vm.DiskBusy = false
    		vm.mutex.Unlock()
    	}()
    
    	return nil
    }
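
    // Typical call site (an illustrative sketch; passing an empty attach password lets
    // StartVM allocate a random unique one):
    //
    //	if err := GetVMsInstance().StartVM(id, ""); err != nil {
    //		// the VM could not be started (password, port, RAM or QEMU error)
    //	}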
    
    // Allocates and returns a free port for Spice, randomly chosen in the range [VMSpiceMinPort,VMSpiceMaxPort].
    // When the number of free ports becomes very small,
    // randomly picking a port is very time consuming (many attempts).
    // TODO: A better approach would be to do this instead:
    // If more than 20% of the ports are free: pick one randomly in the range.
    // Otherwise: pick the first available one either from the beginning or the end (randomly).
    // REMARK: this function updates the usedPorts array.
    // Concurrency: safe
    func (vms *VMs) allocateSpiceRandomPort() (int, error) {
    	vms.mutexUsedPorts.Lock()
    	defer vms.mutexUsedPorts.Unlock()
    
    	minPort := conf.Core.VMSpiceMinPort
    	maxPort := conf.Core.VMSpiceMaxPort
    
    	isFreePortLeft := func() bool {
    		for i := 0; i < maxPort-minPort+1; i++ {
    			port := minPort + i
    			if !vms.usedPorts[port] && utils.IsPortAvailable(port) {
    				return true
    			}
    		}
    		return false
    	}
    
    	for {
    		if !isFreePortLeft() {
    			return -1, errors.New("no free port left")
    		}
    		port := utils.Rand(minPort, maxPort)
    		if !vms.usedPorts[port] {
    			if utils.IsPortAvailable(port) {
    				vms.usedPorts[port] = true
    				return port, nil
    			}
    		}
    	}
    }
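
    // One possible shape for the TODO above (an untested sketch, not wired into the code):
    //
    //	free := 0
    //	for p := minPort; p <= maxPort; p++ {
    //		if !vms.usedPorts[p] {
    //			free++
    //		}
    //	}
    //	if free*5 > maxPort-minPort+1 {
    //		// more than 20% of the range is free: keep the current random probing
    //	} else {
    //		// ports are scarce: scan linearly from one end (chosen at random) and take
    //		// the first port that is unreserved and actually available
    //	}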
    
    // Frees a port previously allocated with allocateSpiceRandomPort().
    // Concurrency: safe
    func (vms *VMs) freeSpiceRandomPort(port int) {
    	vms.mutexUsedPorts.Lock()
    	defer vms.mutexUsedPorts.Unlock()
    	vms.usedPorts[port] = false
    }
    
    // Randomly generates a unique password and reserves it for the specified VM.
    // This password can be used as credentials to access the associated VM.
    // Returns the generated password.
    // Concurrency: safe
    func (vms *VMs) allocateRandomAttachPwd(vmID uuid.UUID) (string, error) {
    	vms.mutexPwdToVM.Lock()
    	defer vms.mutexPwdToVM.Unlock()
    
    	for {
    		attachPwd, err := utils.CustomPwd.Generate(conf.VMAttachPwdLength, conf.VMAttachPwdDigitCount, conf.VMAttachPwdSymbolCount, false, conf.VMAttachPwdRepeatChars)
    		if err != nil {
    			return "", err
    		}
    		_, exists := vms.pwdToVM[attachPwd]
    		if exists {
    			continue
    		}
    		vms.pwdToVM[attachPwd] = vmID
    		return attachPwd, nil
    	}
    }
    
    // Reserves the specified password as credentials to access the specified VM.
    // Concurrency: safe
    func (vms *VMs) allocateAttachPwd(attachPwd string, vmID uuid.UUID) error {
    	vms.mutexPwdToVM.Lock()
    	defer vms.mutexPwdToVM.Unlock()
    
    	_, exists := vms.pwdToVM[attachPwd]
    	if exists {
    		return errors.New("vms.allocateAttachPwd: password already reserved")
    	}
    	vms.pwdToVM[attachPwd] = vmID
    	return nil
    }
    
    // Frees a password previously allocated with allocateRandomAttachPwd() or allocateAttachPwd().
    // Concurrency: safe
    func (vms *VMs) freeAttachPwd(attachPwd string) {
    	vms.mutexPwdToVM.Lock()
    	defer vms.mutexPwdToVM.Unlock()
    	delete(vms.pwdToVM, attachPwd)
    }
    
    // Kills a VM by its ID.
    // Concurrency: safe
    func (vms *VMs) KillVM(vmID uuid.UUID) error {
    	vms.rwlock.RLock()
    	defer vms.rwlock.RUnlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if !vm.IsRunning() {
    		return errors.New("failed killing VM: VM must be running")
    	}
    
    	// Sends a SIGTERM signal to terminate the QEMU process.
    	// Note that QEMU terminates with status code 0 in this case (i.e. no error).
    	if err := syscall.Kill(vm.Run.Pid, syscall.SIGTERM); err != nil {
    		msg := "Failed killing VM: " + err.Error()
    		log.Error(msg)
    		return errors.New(msg)
    	}
    
    	return nil
    }
    
    // Gracefully stops a VM by its ID.
    // Concurrency: safe
    func (vms *VMs) ShutdownVM(vmID uuid.UUID) error {
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if !vm.IsRunning() {
    		return errors.New("shutdown failed: VM must be running")
    	}
    
    	// Function that gracefully shuts down a running VM.
    	// Uses QGA commands to talk to the guest OS, which means QEMU Guest Agent must be
    	// running in the guest, otherwise nothing will happen. Furthermore, the guest OS must
    	// already be running and ready to accept QGA commands.
    	shutdownFn := func(vm *VM) error {
    		prefix := "Shutdown failed: "
    
    		// Sends a QGA command ordering the VM to shut down.
    		con := qga.New()
    		if err := con.Open(vm.qgaSock); err != nil {
    			log.Error(prefix + "(open): " + err.Error())
    			return errors.New(prefix + "(open): " + err.Error())
    		}
    
    		if err := con.SendShutdown(); err != nil {
    			con.Close()
    			log.Error(prefix + "(send): " + err.Error())
    			return errors.New(prefix + "(send): " + err.Error())
    		}
    
    		if err := con.Close(); err != nil {
    			log.Error(prefix + "(close): " + err.Error())
    			return errors.New(prefix + "(close): " + err.Error())
    		}
    
    		return nil
    	}
    
    	return shutdownFn(vm)
    }
    
    // Reboots a VM by its ID.
    // Concurrency: safe
    func (vms *VMs) RebootVM(vmID uuid.UUID) error {
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if !vm.IsRunning() {
    		return errors.New("reboot failed: VM must be running")
    	}
    
    	// Function that reboots a running VM.
    	// Uses QGA commands to talk to the VM, which means QEMU Guest Agent must be
    	// running in the VM, otherwise it won't work.
    	rebootFn := func(vm *VM) error {
    		prefix := "Reboot failed: "
    
    		// Sends a QGA command ordering the VM to reboot.
    		con := qga.New()
    		if err := con.Open(vm.qgaSock); err != nil {
    			log.Error(prefix + "(open): " + err.Error())
    			return errors.New(prefix + "(open): " + err.Error())
    		}
    
    		if err := con.SendReboot(); err != nil {
    			con.Close()
    			log.Error(prefix + "(send): " + err.Error())
    			return errors.New(prefix + "(send): " + err.Error())
    		}
    
    		if err := con.Close(); err != nil {
    			log.Error(prefix + "(close): " + err.Error())
    			return errors.New(prefix + "(close): " + err.Error())
    		}
    
    		return nil
    	}
    
    	return rebootFn(vm)
    }
    
    // Returns true if the given template is used by a VM.
    // Concurrency: safe
    func (vms *VMs) IsTemplateUsed(templateID string) bool {
    	vms.rwlock.RLock()
    	defer vms.rwlock.RUnlock()
    
    	for _, vm := range vms.m {
    		vm.mutex.Lock()
    		if vm.v.TemplateID.String() == templateID {
    			vm.mutex.Unlock()
    			return true
    		}
    		vm.mutex.Unlock()
    	}
    	return false
    }
    
    // Edits a VM's specs: name, cpus, ram, nic, usb devices
    // Concurrency: safe
    func (vms *VMs) EditVM(vmID uuid.UUID, p *params.VMEdit) error {
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	// Only updates fields that have changed.
    	oriVal := vm.v // Saves original VM values.
    
    	if p.Name != "" {
    		vm.v.Name = p.Name
    	}
    	if p.Cpus > 0 {
    		vm.v.Cpus = p.Cpus
    	}
    	if p.Ram > 0 {
    		vm.v.Ram = p.Ram
    	}
    	if p.Nic != "" {
    		vm.v.Nic = p.Nic
    	}
    	if p.UsbDevs != nil {
    		vm.v.UsbDevs = p.UsbDevs
    	}
    
    	if err = vm.validate(); err != nil {
    		// Restores original VM values.
    		vm.v = oriVal
    		return err
    	}
    
    	if err = vm.writeConfig(); err != nil {
    		return err
    	}
    
    	return nil
    }
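
    // Illustrative call (a sketch; zero-valued fields are left unchanged, as the checks above show):
    //
    //	err := GetVMsInstance().EditVM(id, &params.VMEdit{Name: "exam-vm", Ram: 2048})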
    
    // Sets a VM's access for a given user (email).
    // user is the currently logged-in user.
    // destUserEmail is the email of the user whose access is modified.
    // Concurrency: safe
    func (vms *VMs) SetVMAccess(vmID uuid.UUID, user *users.User, destUserEmail string, newAccess caps.Capabilities) error {
    	if err := caps.ValidateVMAccessCaps(newAccess); err != nil {
    		return err
    	}
    
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	// Retrieves the VM for which the access caps must be changed.
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	// If user has VM_SET_ACCESS_ANY, modify is allowed.
    	if !user.HasCapability(caps.CAP_VM_SET_ACCESS_ANY) {
    		// If user is the VM's owner, modify is allowed.
    		if !vm.IsOwner(user.Email) {
    			// If user has the VM_SET_ACCESS capability on this VM, modify is allowed.
    			userCaps := vm.v.Access[user.Email]
    			_, exists := userCaps[caps.CAP_VM_SET_ACCESS]
    			if !exists {
    				return errors.New("insufficient capability")
    			}
    		}
    	}
    
    	vm.v.Access[destUserEmail] = newAccess
    
    	if err = vm.writeConfig(); err != nil {
    		return err
    	}
    
    	return nil
    }
    
    // Removes a VM's access for a given user (email).
    // user is the currently logged-in user.
    // destUserEmail is the email of the user whose access is modified.
    // Concurrency: safe
    func (vms *VMs) DeleteVMAccess(vmID uuid.UUID, user *users.User, destUserEmail string) error {
    	vms.rwlock.Lock()
    	defer vms.rwlock.Unlock()
    
    	// Retrieves the VM for which the access caps must be changed.
    	vm, err := vms.getVMUnsafe(vmID)
    	if err != nil {
    		return err
    	}
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	// If user has VM_SET_ACCESS_ANY, modify is allowed.
    	if !user.HasCapability(caps.CAP_VM_SET_ACCESS_ANY) {
    		// If user is the VM's owner, modify is allowed.
    		if !vm.IsOwner(user.Email) {
    			// If user has the VM_SET_ACCESS capability on this VM, modify is allowed.
    			userCaps := vm.v.Access[user.Email]
    			_, exists := userCaps[caps.CAP_VM_SET_ACCESS]
    			if !exists {
    				return errors.New("insufficient capability")
    			}
    		}
    	}
    
    	// Only removes the user from the Access map if they actually had access.
    	if _, exists := vm.v.Access[destUserEmail]; exists {
    		delete(vm.v.Access, destUserEmail)
    	} else {
    		return errors.New("User " + destUserEmail + " has no VM access")
    	}
    
    	if err = vm.writeConfig(); err != nil {
    		return err
    	}
    
    	return nil
    }
    
    // Exports a VM's directory and its subdirectories into a tar.gz archive on the host.
    // Technically, extracting files from a running VM should work, but some files might be inconsistent.
    // Consequently, this action is forbidden on a running VM.
    // Concurrency: safe
    func (vms *VMs) ExportFilesFromVM(vm *VM, vmDir, localTarGzFile string) error {
    	prefix := "Failed exporting files from VM: "
    
    	vm.mutex.Lock()
    
    	if vm.IsRunning() {
    		vm.mutex.Unlock()
    		return errors.New(prefix + "VM must be stopped")
    	}
    
    	if vm.IsDiskBusy() {
    		vm.mutex.Unlock()
    		return errors.New(prefix + "disk in use (busy)")
    	}
    
    	vm.DiskBusy = true
    	vm.mutex.Unlock()
    
    	vmDisk := vm.getDiskPath()
    
    	if err := exec.ArchiveFromVM(vmDisk, vmDir, localTarGzFile); err != nil {
    		vm.mutex.Lock()
    		vm.DiskBusy = false
    		vm.mutex.Unlock()
    		msg := prefix + err.Error()
    		log.Error(msg)
    		return errors.New(msg)
    	}
    
    	vm.mutex.Lock()
    	vm.DiskBusy = false
    	vm.mutex.Unlock()
    
    	return nil
    }
    
    // Imports files from a tar.gz archive into a VM disk image (guest's filesystem), in a specified directory.
    // Concurrency: safe
    //func (vms *VMs) ImportFilesToVM(vm *VM, localTarGzFile, vmDir string) error {
    
    type ImportFileFunc func(vmDiskFile, localFile, vmDir string) error
    
    func (vms *VMs) ImportFilesToVM(vm *VM, fn ImportFileFunc, localFile, vmDir string) error {
    	prefix := "Failed importing files into VM: "
    
    	vm.mutex.Lock()
    
    	if vm.IsRunning() {
    		vm.mutex.Unlock()
    		return errors.New(prefix + "VM must be stopped")
    	}
    
    	if vm.IsDiskBusy() {
    		vm.mutex.Unlock()
    		return errors.New(prefix + "disk in use (busy)")
    	}
    
    	vm.DiskBusy = true
    	vm.mutex.Unlock()
    
    	vmDisk := vm.getDiskPath()
    
    	if err := fn(vmDisk, localFile, vmDir); err != nil {
    		vm.mutex.Lock()
    		vm.DiskBusy = false
    		vm.mutex.Unlock()
    		msg := prefix + err.Error()
    		log.Error(msg)
    		return errors.New(msg)
    	}
    
    	vm.mutex.Lock()
    	vm.DiskBusy = false
    	vm.mutex.Unlock()
    
    	return nil
    }
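
    // Illustrative call (a sketch: importTarGz stands for a hypothetical ImportFileFunc, e.g. a
    // thin wrapper around an archive-import helper from the exec package; it is not defined here):
    //
    //	err := GetVMsInstance().ImportFilesToVM(vm, importTarGz, "/tmp/files.tar.gz", "/home/student")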
    
    // Deletes a file from a VM's filesystem.
    // filePath is the full path in the VM to the file to delete.
    func (vms *VMs) DeleteFileFromVM(vm *VM, filePath string) error {
    	prefix := "Failed deleting file from VM: "
    
    	vm.mutex.Lock()
    	defer vm.mutex.Unlock()
    
    	if vm.IsRunning() {
    		return errors.New(prefix + "VM must be stopped")
    	}
    
    	if vm.IsDiskBusy() {
    		return errors.New(prefix + "disk in use (busy)")
    	}
    
    	vm.DiskBusy = true
    	defer func(vm *VM) { vm.DiskBusy = false }(vm)
    
    	if err := exec.DeleteFromVM(vm.getDiskPath(), filePath); err != nil {
    		msg := prefix + err.Error()
    		log.Error(msg)
    		return errors.New(msg)
    	}
    	return nil
    }