Example #1
func (s *btrfsMigrationSourceDriver) send(conn *websocket.Conn, btrfsPath string, btrfsParent string) error {
	args := []string{"send", btrfsPath}
	if btrfsParent != "" {
		args = append(args, "-p", btrfsParent)
	}

	cmd := exec.Command("btrfs", args...)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	<-shared.WebsocketSendStream(conn, stdout, 4*1024*1024)

	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		shared.LogError("problem reading btrfs send stderr", log.Ctx{"err": err})
	}

	err = cmd.Wait()
	if err != nil {
		shared.LogError("problem with btrfs send", log.Ctx{"output": string(output)})
	}
	return err
}
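
Note: the btrfs/zfs send examples on this page share one pattern: pipe the command's stdout into a streaming writer, collect stderr separately, and only surface stderr once Wait() reports failure. Below is a minimal stdlib-only sketch of that pattern, with io.Copy standing in for LXD's shared.WebsocketSendStream helper (the streamCommand name is illustrative, not part of LXD):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
)

// streamCommand runs a command, streams its stdout into dst while it runs
// and, on failure, surfaces whatever the command wrote to stderr.
// io.Copy stands in for shared.WebsocketSendStream in the example above.
func streamCommand(dst io.Writer, name string, args ...string) error {
	cmd := exec.Command(name, args...)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	// Stream stdout while the command runs.
	if _, err := io.Copy(dst, stdout); err != nil {
		return err
	}

	// Read stderr before Wait() closes the pipes.
	output, _ := ioutil.ReadAll(stderr)

	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("%s failed: %v (stderr: %s)", name, err, string(output))
	}

	return nil
}

func main() {
	if err := streamCommand(os.Stdout, "echo", "hello"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
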
Example #2
func containerDeleteSnapshots(d *Daemon, cname string) error {
	shared.LogDebug("containerDeleteSnapshots",
		log.Ctx{"container": cname})

	results, err := dbContainerGetSnapshots(d.db, cname)
	if err != nil {
		return err
	}

	for _, sname := range results {
		sc, err := containerLoadByName(d, sname)
		if err != nil {
			shared.LogError(
				"containerDeleteSnapshots: Failed to load the snapshotcontainer",
				log.Ctx{"container": cname, "snapshot": sname})

			continue
		}

		if err := sc.Delete(); err != nil {
			shared.LogError(
				"containerDeleteSnapshots: Failed to delete a snapshotcontainer",
				log.Ctx{"container": cname, "snapshot": sname, "err": err})
		}
	}

	return nil
}
Example #3
File: devices.go Project: vahe/lxd
func deviceUSBEvent(d *Daemon, usb usbDevice) {
	containers, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		shared.LogError("problem loading containers list", log.Ctx{"err": err})
		return
	}

	for _, name := range containers {
		containerIf, err := containerLoadByName(d, name)
		if err != nil {
			continue
		}

		c, ok := containerIf.(*containerLXC)
		if !ok {
			shared.LogErrorf("got device event on non-LXC container?")
			return
		}

		if !c.IsRunning() {
			continue
		}

		devices := c.ExpandedDevices()
		for _, name := range devices.DeviceNames() {
			m := devices[name]
			if m["type"] != "usb" {
				continue
			}

			if m["vendorid"] != usb.vendor || (m["productid"] != "" && m["productid"] != usb.product) {
				continue
			}

			if usb.action == "add" {
				err := c.insertUnixDeviceNum(m, usb.major, usb.minor, usb.path)
				if err != nil {
					shared.LogError("failed to create usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()})
					return
				}
			} else if usb.action == "remove" {
				err := c.removeUnixDeviceNum(m, usb.major, usb.minor, usb.path)
				if err != nil {
					shared.LogError("failed to remove usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()})
					return
				}
			} else {
				shared.LogError("unknown action for usb device", log.Ctx{"usb": usb})
				continue
			}
		}
	}
}
Example #4
File: profiles.go Project: vahe/lxd
func profilesGet(d *Daemon, r *http.Request) Response {
	results, err := dbProfiles(d.db)
	if err != nil {
		return SmartError(err)
	}

	recursion := d.isRecursionRequest(r)

	resultString := make([]string, len(results))
	resultMap := make([]*shared.ProfileConfig, len(results))
	i := 0
	for _, name := range results {
		if !recursion {
			url := fmt.Sprintf("/%s/profiles/%s", shared.APIVersion, name)
			resultString[i] = url
		} else {
			profile, err := doProfileGet(d, name)
			if err != nil {
				shared.LogError("Failed to get profile", log.Ctx{"profile": name})
				continue
			}
			resultMap[i] = profile
		}
		i++
	}

	if !recursion {
		return SyncResponse(true, resultString)
	}

	return SyncResponse(true, resultMap)
}
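
Note: profilesGet shows the usual LXD listing convention: a plain URL list by default, full objects with ?recursion=1. A compact, self-contained sketch of that branching follows (the profile type and fetch function are illustrative stand-ins for shared.ProfileConfig and doProfileGet); building with append also sidesteps the original's trailing nil entries in resultMap when a profile fails to load:

package main

import "fmt"

type profile struct {
	Name string
}

// fetch stands in for doProfileGet and may fail per item.
func fetch(name string) (*profile, error) {
	return &profile{Name: name}, nil
}

// listProfiles returns URLs by default and full objects when recursive,
// mirroring the resultString/resultMap split in profilesGet above.
func listProfiles(names []string, recursive bool) interface{} {
	if !recursive {
		urls := make([]string, 0, len(names))
		for _, name := range names {
			urls = append(urls, fmt.Sprintf("/1.0/profiles/%s", name))
		}
		return urls
	}

	objects := make([]*profile, 0, len(names))
	for _, name := range names {
		p, err := fetch(name)
		if err != nil {
			// Skip profiles that fail to load, as profilesGet does;
			// append avoids leaving nil entries behind.
			continue
		}
		objects = append(objects, p)
	}
	return objects
}

func main() {
	names := []string{"default", "docker"}
	fmt.Println(listProfiles(names, false))
	fmt.Println(listProfiles(names, true))
}
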
Example #5
File: images.go Project: vahe/lxd
func doDeleteImage(d *Daemon, fingerprint string) error {
	id, imgInfo, err := dbImageGet(d.db, fingerprint, false, false)
	if err != nil {
		return err
	}

	// get storage before deleting images/$fp because we need to
	// look at the path
	s, err := storageForImage(d, imgInfo)
	if err != nil {
		shared.LogError("error detecting image storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err})
	} else {
		// Remove the image from storage backend
		if err = s.ImageDelete(imgInfo.Fingerprint); err != nil {
			shared.LogError("error deleting the image from storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err})
		}
	}

	// Remove main image file
	fname := shared.VarPath("images", imgInfo.Fingerprint)
	if shared.PathExists(fname) {
		err = os.Remove(fname)
		if err != nil {
			shared.LogDebugf("Error deleting image file %s: %s", fname, err)
		}
	}

	// Remove the rootfs file
	fname = shared.VarPath("images", imgInfo.Fingerprint) + ".rootfs"
	if shared.PathExists(fname) {
		err = os.Remove(fname)
		if err != nil {
			shared.LogDebugf("Error deleting image file %s: %s", fname, err)
		}
	}

	// Remove the DB entry
	if err = dbImageDelete(d.db, id); err != nil {
		return err
	}

	return nil
}
Example #6
func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
	fields := strings.SplitN(s.container.Name(), shared.SnapshotDelimiter, 2)
	args := []string{"send", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, fields[0], zfsName)}
	if zfsParent != "" {
		args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, s.container.Name(), zfsParent))
	}

	cmd := exec.Command("zfs", args...)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	readPipe := io.ReadCloser(stdout)
	if readWrapper != nil {
		readPipe = readWrapper(stdout)
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)

	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		shared.LogError("problem reading zfs send stderr", log.Ctx{"err": err})
	}

	err = cmd.Wait()
	if err != nil {
		shared.LogError("problem with zfs send", log.Ctx{"output": string(output)})
	}

	return err
}
Example #7
File: images.go Project: vahe/lxd
func pruneExpiredImages(d *Daemon) {
	shared.LogInfof("Pruning expired images")

	// Get the list of expired images
	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
	images, err := dbImagesGetExpired(d.db, expiry)
	if err != nil {
		shared.LogError("Unable to retrieve the list of expired images", log.Ctx{"err": err})
		return
	}

	// Delete them
	for _, fp := range images {
		if err := doDeleteImage(d, fp); err != nil {
			shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
		}
	}

	shared.LogInfof("Done pruning expired images")
}
Example #8
func daemonConfigInit(db *sql.DB) error {
	// Set all the keys
	daemonConfig = map[string]*daemonConfigKey{
		"core.https_address":             &daemonConfigKey{valueType: "string", setter: daemonConfigSetAddress},
		"core.https_allowed_headers":     &daemonConfigKey{valueType: "string"},
		"core.https_allowed_methods":     &daemonConfigKey{valueType: "string"},
		"core.https_allowed_origin":      &daemonConfigKey{valueType: "string"},
		"core.https_allowed_credentials": &daemonConfigKey{valueType: "bool"},
		"core.proxy_http":                &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
		"core.proxy_https":               &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
		"core.proxy_ignore_hosts":        &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy},
		"core.trust_password":            &daemonConfigKey{valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},

		"images.auto_update_cached":    &daemonConfigKey{valueType: "bool", defaultValue: "true"},
		"images.auto_update_interval":  &daemonConfigKey{valueType: "int", defaultValue: "6"},
		"images.compression_algorithm": &daemonConfigKey{valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"},
		"images.remote_cache_expiry":   &daemonConfigKey{valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},

		"storage.lvm_fstype":           &daemonConfigKey{valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}},
		"storage.lvm_mount_options":    &daemonConfigKey{valueType: "string", defaultValue: "discard"},
		"storage.lvm_thinpool_name":    &daemonConfigKey{valueType: "string", defaultValue: "LXDPool", validator: storageLVMValidateThinPoolName},
		"storage.lvm_vg_name":          &daemonConfigKey{valueType: "string", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage},
		"storage.lvm_volume_size":      &daemonConfigKey{valueType: "string", defaultValue: "10GiB"},
		"storage.zfs_pool_name":        &daemonConfigKey{valueType: "string", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},
		"storage.zfs_remove_snapshots": &daemonConfigKey{valueType: "bool"},
		"storage.zfs_use_refquota":     &daemonConfigKey{valueType: "bool"},
	}

	// Load the values from the DB
	dbValues, err := dbConfigValuesGet(db)
	if err != nil {
		return err
	}

	daemonConfigLock.Lock()
	for k, v := range dbValues {
		_, ok := daemonConfig[k]
		if !ok {
			shared.LogError("Found invalid configuration key in database", log.Ctx{"key": k})
			// Skip unknown keys instead of dereferencing a nil map entry
			continue
		}

		daemonConfig[k].currentValue = v
	}
	daemonConfigLock.Unlock()

	return nil
}
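
Note: the heart of daemonConfigInit is a declarative map of typed keys with defaults, later overlaid with persisted values while skipping unknown keys. A trimmed, self-contained sketch of that shape (configKey and its fields are illustrative reductions, not LXD's actual daemonConfigKey):

package main

import "fmt"

// configKey is a trimmed-down, illustrative stand-in for LXD's daemonConfigKey.
type configKey struct {
	valueType    string
	defaultValue string
	currentValue string
}

// Get returns the stored value, falling back to the declared default.
func (k *configKey) Get() string {
	if k.currentValue == "" {
		return k.defaultValue
	}
	return k.currentValue
}

var config = map[string]*configKey{
	"images.remote_cache_expiry": {valueType: "int", defaultValue: "10"},
	"storage.lvm_fstype":         {valueType: "string", defaultValue: "ext4"},
}

// applyStored overlays persisted values onto the declared keys,
// skipping unknown keys instead of dereferencing a nil entry.
func applyStored(stored map[string]string) {
	for k, v := range stored {
		key, ok := config[k]
		if !ok {
			fmt.Printf("ignoring unknown configuration key %q\n", k)
			continue
		}
		key.currentValue = v
	}
}

func main() {
	applyStored(map[string]string{"storage.lvm_fstype": "xfs", "bogus.key": "x"})
	fmt.Println(config["storage.lvm_fstype"].Get())         // xfs
	fmt.Println(config["images.remote_cache_expiry"].Get()) // 10
}
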
Example #9
File: profiles.go Project: vahe/lxd
func getRunningContainersWithProfile(d *Daemon, profile string) []container {
	results := []container{}

	output, err := dbProfileContainersGet(d.db, profile)
	if err != nil {
		return results
	}

	for _, name := range output {
		c, err := containerLoadByName(d, name)
		if err != nil {
			shared.LogError("Failed opening container", log.Ctx{"container": name})
			continue
		}
		results = append(results, c)
	}
	return results
}
Example #10
func containerSnapRestore(d *Daemon, name string, snap string) error {
	// normalize snapshot name
	if !shared.IsSnapshot(snap) {
		snap = name + shared.SnapshotDelimiter + snap
	}

	shared.LogInfo(
		"RESTORE => Restoring snapshot",
		log.Ctx{
			"snapshot":  snap,
			"container": name})

	c, err := containerLoadByName(d, name)
	if err != nil {
		shared.LogError(
			"RESTORE => loadcontainerLXD() failed",
			log.Ctx{
				"container": name,
				"err":       err})
		return err
	}

	source, err := containerLoadByName(d, snap)
	if err != nil {
		switch err {
		case sql.ErrNoRows:
			return fmt.Errorf("snapshot %s does not exist", snap)
		default:
			return err
		}
	}

	if err := c.Restore(source); err != nil {
		return err
	}

	return nil
}
Example #11
File: networks.go Project: vahe/lxd
func networkStartup(d *Daemon) error {
	// Get a list of managed networks
	networks, err := dbNetworks(d.db)
	if err != nil {
		return err
	}

	// Bring them all up
	for _, name := range networks {
		n, err := networkLoadByName(d, name)
		if err != nil {
			return err
		}

		err = n.Start()
		if err != nil {
			// Don't cause LXD to fail to start entirely on network bring up failure
			shared.LogError("Failed to bring up network", log.Ctx{"err": err, "name": name})
		}
	}

	return nil
}
Example #12
func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
	if runningInUserns {
		return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
	}

	cName := container.Name()

	snapshotsPath := shared.VarPath(fmt.Sprintf("snapshots/%s", cName))
	if !shared.PathExists(snapshotsPath) {
		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700)
		if err != nil {
			return err
		}
	}

	btrfsRecv := func(btrfsPath string, targetPath string, isSnapshot bool) error {
		args := []string{"receive", "-e", btrfsPath}
		cmd := exec.Command("btrfs", args...)

		// Remove the existing pre-created subvolume
		err := s.subvolsDelete(targetPath)
		if err != nil {
			return err
		}

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		<-shared.WebsocketRecvStream(stdin, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebugf("problem reading btrfs receive stderr %s", err)
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with btrfs receive", log.Ctx{"output": string(output)})
			return err
		}

		if !isSnapshot {
			cPath := containerPath(fmt.Sprintf("%s/.root", cName), true)

			err := s.subvolSnapshot(cPath, targetPath, false)
			if err != nil {
				shared.LogError("problem with btrfs snapshot", log.Ctx{"err": err})
				return err
			}

			err = s.subvolsDelete(cPath)
			if err != nil {
				shared.LogError("problem with btrfs delete", log.Ctx{"err": err})
				return err
			}
		}

		return nil
	}

	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		// Use a distinct name to avoid shadowing the storage receiver s
		snapC, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		if err := btrfsRecv(containerPath(cName, true), snapC.Path(), true); err != nil {
			return err
		}
	}

	/* finally, do the real container */
	if err := btrfsRecv(containerPath(cName, true), container.Path(), false); err != nil {
		return err
	}

	if live {
		if err := btrfsRecv(containerPath(cName, true), container.Path(), false); err != nil {
			return err
		}
	}

	// Cleanup
	if ok, _ := shared.PathIsEmpty(snapshotsPath); ok {
		err := os.Remove(snapshotsPath)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #13
func (d *Daemon) Init() error {
	/* Initialize some variables */
	d.imagesDownloading = map[string]chan bool{}

	d.readyChan = make(chan bool)
	d.shutdownChan = make(chan bool)

	/* Set the LVM environment */
	err := os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1")
	if err != nil {
		return err
	}

	/* Setup logging if that wasn't done before */
	if shared.Log == nil {
		shared.Log, err = logging.GetLogger("", "", true, true, nil)
		if err != nil {
			return err
		}
	}

	/* Print welcome message */
	if d.MockMode {
		shared.LogInfo("LXD is starting in mock mode",
			log.Ctx{"path": shared.VarPath("")})
	} else if d.SetupMode {
		shared.LogInfo("LXD is starting in setup mode",
			log.Ctx{"path": shared.VarPath("")})
	} else {
		shared.LogInfo("LXD is starting in normal mode",
			log.Ctx{"path": shared.VarPath("")})
	}

	/* Detect user namespaces */
	runningInUserns = shared.RunningInUserNS()

	/* Detect AppArmor support */
	if aaAvailable && os.Getenv("LXD_SECURITY_APPARMOR") == "false" {
		aaAvailable = false
		aaAdmin = false
		shared.LogWarnf("AppArmor support has been manually disabled")
	}

	if aaAvailable && !shared.IsDir("/sys/kernel/security/apparmor") {
		aaAvailable = false
		aaAdmin = false
		shared.LogWarnf("AppArmor support has been disabled because of lack of kernel support")
	}

	_, err = exec.LookPath("apparmor_parser")
	if aaAvailable && err != nil {
		aaAvailable = false
		aaAdmin = false
		shared.LogWarnf("AppArmor support has been disabled because 'apparmor_parser' couldn't be found")
	}

	/* Detect AppArmor admin support */
	if aaAdmin && !haveMacAdmin() {
		aaAdmin = false
		shared.LogWarnf("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.")
	}

	if aaAdmin && runningInUserns {
		aaAdmin = false
		shared.LogWarnf("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.")
	}

	/* Detect AppArmor confinement */
	if !aaConfined {
		profile := aaProfile()
		if profile != "unconfined" && profile != "" {
			aaConfined = true
			shared.LogWarnf("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.")
		}
	}

	if aaAvailable {
		canStack := func() bool {
			contentBytes, err := ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/stack")
			if err != nil {
				return false
			}

			if string(contentBytes) != "yes\n" {
				return false
			}

			contentBytes, err = ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/version")
			if err != nil {
				return false
			}

			content := string(contentBytes)

			parts := strings.Split(strings.TrimSpace(content), ".")

			if len(parts) == 0 {
				shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content})
				return false
			}

			major, err := strconv.Atoi(parts[0])
			if err != nil {
				shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content})
				return false
			}

			minor := 0
			if len(parts) == 2 {
				minor, err = strconv.Atoi(parts[1])
				if err != nil {
					shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content})
					return false
				}
			}

			// Require stacking domain version >= 1.2
			return major > 1 || (major == 1 && minor >= 2)
		}

		aaStacking = canStack()
	}

	/* Detect CGroup support */
	cgBlkioController = shared.PathExists("/sys/fs/cgroup/blkio/")
	if !cgBlkioController {
		shared.LogWarnf("Couldn't find the CGroup blkio controller, I/O limits will be ignored.")
	}

	cgCpuController = shared.PathExists("/sys/fs/cgroup/cpu/")
	if !cgCpuController {
		shared.LogWarnf("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.")
	}

	cgCpuacctController = shared.PathExists("/sys/fs/cgroup/cpuacct/")
	if !cgCpuacctController {
		shared.LogWarnf("Couldn't find the CGroup CPUacct controller, CPU accounting will not be available.")
	}

	cgCpusetController = shared.PathExists("/sys/fs/cgroup/cpuset/")
	if !cgCpusetController {
		shared.LogWarnf("Couldn't find the CGroup CPUset controller, CPU pinning will be ignored.")
	}

	cgDevicesController = shared.PathExists("/sys/fs/cgroup/devices/")
	if !cgDevicesController {
		shared.LogWarnf("Couldn't find the CGroup devices controller, device access control won't work.")
	}

	cgMemoryController = shared.PathExists("/sys/fs/cgroup/memory/")
	if !cgMemoryController {
		shared.LogWarnf("Couldn't find the CGroup memory controller, memory limits will be ignored.")
	}

	cgNetPrioController = shared.PathExists("/sys/fs/cgroup/net_prio/")
	if !cgNetPrioController {
		shared.LogWarnf("Couldn't find the CGroup network class controller, network limits will be ignored.")
	}

	cgPidsController = shared.PathExists("/sys/fs/cgroup/pids/")
	if !cgPidsController {
		shared.LogWarnf("Couldn't find the CGroup pids controller, process limits will be ignored.")
	}

	cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
	if !cgSwapAccounting {
		shared.LogWarnf("CGroup memory swap accounting is disabled, swap limits will be ignored.")
	}

	/* Get the list of supported architectures */
	var architectures = []int{}

	architectureName, err := shared.ArchitectureGetLocal()
	if err != nil {
		return err
	}

	architecture, err := shared.ArchitectureId(architectureName)
	if err != nil {
		return err
	}
	architectures = append(architectures, architecture)

	personalities, err := shared.ArchitecturePersonalities(architecture)
	if err != nil {
		return err
	}
	for _, personality := range personalities {
		architectures = append(architectures, personality)
	}
	d.architectures = architectures

	/* Set container path */
	d.lxcpath = shared.VarPath("containers")

	/* Make sure all our directories are available */
	if err := os.MkdirAll(shared.CachePath(), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("containers"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devices"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devlxd"), 0755); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("images"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.LogPath(), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("security"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("shmounts"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("snapshots"), 0700); err != nil {
		return err
	}

	/* Detect the filesystem */
	d.BackingFs, err = filesystemDetect(d.lxcpath)
	if err != nil {
		shared.LogError("Error detecting backing fs", log.Ctx{"err": err})
	}

	/* Read the uid/gid allocation */
	d.IdmapSet, err = shared.DefaultIdmapSet()
	if err != nil {
		shared.LogWarn("Error reading idmap", log.Ctx{"err": err.Error()})
		shared.LogWarnf("Only privileged containers will be able to run")
	} else {
		shared.LogInfof("Default uid/gid map:")
		for _, lxcmap := range d.IdmapSet.ToLxcString() {
			shared.LogInfof(strings.TrimRight(" - "+lxcmap, "\n"))
		}
	}

	/* Initialize the database */
	err = initializeDbObject(d, shared.VarPath("lxd.db"))
	if err != nil {
		return err
	}

	/* Load all config values from the database */
	err = daemonConfigInit(d.db)
	if err != nil {
		return err
	}

	if !d.MockMode {
		/* Setup the storage driver */
		err = d.SetupStorageDriver()
		if err != nil {
			return fmt.Errorf("Failed to setup storage: %s", err)
		}

		/* Apply all patches */
		err = patchesApplyAll(d)
		if err != nil {
			return err
		}

		/* Setup the networks */
		err = networkStartup(d)
		if err != nil {
			return err
		}

		/* Restore simplestreams cache */
		err = imageLoadStreamCache(d)
		if err != nil {
			return err
		}
	}

	/* Log expiry */
	go func() {
		t := time.NewTicker(24 * time.Hour)
		for {
			shared.LogInfof("Expiring log files")

			err := d.ExpireLogs()
			if err != nil {
				shared.LogError("Failed to expire logs", log.Ctx{"err": err})
			}

			shared.LogInfof("Done expiring log files")
			<-t.C
		}
	}()

	/* set the initial proxy function based on config values in the DB */
	d.proxy = shared.ProxyFromConfig(
		daemonConfig["core.proxy_https"].Get(),
		daemonConfig["core.proxy_http"].Get(),
		daemonConfig["core.proxy_ignore_hosts"].Get(),
	)

	/* Setup /dev/lxd */
	shared.LogInfof("Starting /dev/lxd handler")
	d.devlxd, err = createAndBindDevLxd()
	if err != nil {
		return err
	}

	if !d.MockMode {
		/* Start the scheduler */
		go deviceEventListener(d)

		/* Setup the TLS authentication */
		certf, keyf, err := readMyCert()
		if err != nil {
			return err
		}

		cert, err := tls.LoadX509KeyPair(certf, keyf)
		if err != nil {
			return err
		}

		tlsConfig := &tls.Config{
			InsecureSkipVerify: true,
			ClientAuth:         tls.RequestClientCert,
			Certificates:       []tls.Certificate{cert},
			MinVersion:         tls.VersionTLS12,
			MaxVersion:         tls.VersionTLS12,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA},
			PreferServerCipherSuites: true,
		}

		if shared.PathExists(shared.VarPath("server.ca")) {
			ca, err := shared.ReadCert(shared.VarPath("server.ca"))
			if err != nil {
				return err
			}

			caPool := x509.NewCertPool()
			caPool.AddCert(ca)
			tlsConfig.RootCAs = caPool
			tlsConfig.ClientCAs = caPool

			shared.LogInfof("LXD is in CA mode, only CA-signed certificates will be allowed")
		}

		tlsConfig.BuildNameToCertificate()

		d.tlsConfig = tlsConfig

		readSavedClientCAList(d)
	}

	/* Setup the web server */
	d.mux = mux.NewRouter()
	d.mux.StrictSlash(false)

	d.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		SyncResponse(true, []string{"/1.0"}).Render(w)
	})

	for _, c := range api10 {
		d.createCmd("1.0", c)
	}

	for _, c := range apiInternal {
		d.createCmd("internal", c)
	}

	d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		shared.LogInfo("Sending top level 404", log.Ctx{"url": r.URL})
		w.Header().Set("Content-Type", "application/json")
		NotFound.Render(w)
	})

	listeners := d.GetListeners()
	if len(listeners) > 0 {
		shared.LogInfof("LXD is socket activated")

		for _, listener := range listeners {
			if shared.PathExists(listener.Addr().String()) {
				d.UnixSocket = &Socket{Socket: listener, CloseOnExit: false}
			} else {
				tlsListener := tls.NewListener(listener, d.tlsConfig)
				d.TCPSocket = &Socket{Socket: tlsListener, CloseOnExit: false}
			}
		}
	} else {
		shared.LogInfof("LXD isn't socket activated")

		localSocketPath := shared.VarPath("unix.socket")

		// If the socket exists, let's try to connect to it and see if
		// there's an LXD daemon already running.
		if shared.PathExists(localSocketPath) {
			_, err := lxd.NewClient(&lxd.DefaultConfig, "local")
			if err != nil {
				shared.LogDebugf("Detected stale unix socket, deleting")
				// Connecting failed, so let's delete the socket and
				// listen on it ourselves.
				err = os.Remove(localSocketPath)
				if err != nil {
					return err
				}
			} else {
				return fmt.Errorf("LXD is already running.")
			}
		}

		unixAddr, err := net.ResolveUnixAddr("unix", localSocketPath)
		if err != nil {
			return fmt.Errorf("cannot resolve unix socket address: %v", err)
		}

		unixl, err := net.ListenUnix("unix", unixAddr)
		if err != nil {
			return fmt.Errorf("cannot listen on unix socket: %v", err)
		}

		if err := os.Chmod(localSocketPath, 0660); err != nil {
			return err
		}

		var gid int
		if d.group != "" {
			gid, err = shared.GroupId(d.group)
			if err != nil {
				return err
			}
		} else {
			gid = os.Getgid()
		}

		if err := os.Chown(localSocketPath, os.Getuid(), gid); err != nil {
			return err
		}

		d.UnixSocket = &Socket{Socket: unixl, CloseOnExit: true}
	}

	listenAddr := daemonConfig["core.https_address"].Get()
	if listenAddr != "" {
		_, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			listenAddr = fmt.Sprintf("%s:%s", listenAddr, shared.DefaultPort)
		}

		tcpl, err := tls.Listen("tcp", listenAddr, d.tlsConfig)
		if err != nil {
			shared.LogError("cannot listen on https socket, skipping...", log.Ctx{"err": err})
		} else {
			if d.TCPSocket != nil {
				shared.LogInfof("Replacing inherited TCP socket with configured one")
				d.TCPSocket.Socket.Close()
			}
			d.TCPSocket = &Socket{Socket: tcpl, CloseOnExit: true}
		}
	}

	d.tomb.Go(func() error {
		shared.LogInfof("REST API daemon:")
		if d.UnixSocket != nil {
			shared.LogInfo(" - binding Unix socket", log.Ctx{"socket": d.UnixSocket.Socket.Addr()})
			d.tomb.Go(func() error { return http.Serve(d.UnixSocket.Socket, &lxdHttpServer{d.mux, d}) })
		}

		if d.TCPSocket != nil {
			shared.LogInfo(" - binding TCP socket", log.Ctx{"socket": d.TCPSocket.Socket.Addr()})
			d.tomb.Go(func() error { return http.Serve(d.TCPSocket.Socket, &lxdHttpServer{d.mux, d}) })
		}

		d.tomb.Go(func() error {
			server := devLxdServer(d)
			return server.Serve(d.devlxd)
		})
		return nil
	})

	if !d.MockMode && !d.SetupMode {
		err := d.Ready()
		if err != nil {
			return err
		}
	}

	return nil
}
Example #14
File: devices.go Project: vahe/lxd
func deviceTaskBalance(d *Daemon) {
	min := func(x, y int) int {
		if x < y {
			return x
		}
		return y
	}

	// Don't bother running when CGroup support isn't there
	if !cgCpusetController {
		return
	}

	// Get effective cpus list - those are all guaranteed to be online
	effectiveCpus, err := cGroupGet("cpuset", "/", "cpuset.effective_cpus")
	if err != nil {
		// Older kernel - use cpuset.cpus
		effectiveCpus, err = cGroupGet("cpuset", "/", "cpuset.cpus")
		if err != nil {
			shared.LogErrorf("Error reading host's cpuset.cpus")
			return
		}
	}
	err = cGroupSet("cpuset", "/lxc", "cpuset.cpus", effectiveCpus)
	if err != nil && shared.PathExists("/sys/fs/cgroup/cpuset/lxc") {
		shared.LogWarn("Error setting lxd's cpuset.cpus", log.Ctx{"err": err})
	}
	cpus, err := parseCpuset(effectiveCpus)
	if err != nil {
		shared.LogError("Error parsing host's cpu set", log.Ctx{"cpuset": effectiveCpus, "err": err})
		return
	}

	// Iterate through the containers
	containers, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		shared.LogError("problem loading containers list", log.Ctx{"err": err})
		return
	}
	fixedContainers := map[int][]container{}
	balancedContainers := map[container]int{}
	for _, name := range containers {
		c, err := containerLoadByName(d, name)
		if err != nil {
			continue
		}

		conf := c.ExpandedConfig()
		cpulimit, ok := conf["limits.cpu"]
		if !ok || cpulimit == "" {
			cpulimit = effectiveCpus
		}

		if !c.IsRunning() {
			continue
		}

		count, err := strconv.Atoi(cpulimit)
		if err == nil {
			// Load-balance
			count = min(count, len(cpus))
			balancedContainers[c] = count
		} else {
			// Pinned
			containerCpus, err := parseCpuset(cpulimit)
			if err != nil {
				return
			}
			for _, nr := range containerCpus {
				if !shared.IntInSlice(nr, cpus) {
					continue
				}

				_, ok := fixedContainers[nr]
				if ok {
					fixedContainers[nr] = append(fixedContainers[nr], c)
				} else {
					fixedContainers[nr] = []container{c}
				}
			}
		}
	}

	// Balance things
	pinning := map[container][]string{}
	usage := map[int]deviceTaskCPU{}

	for _, id := range cpus {
		cpu := deviceTaskCPU{}
		cpu.id = id
		cpu.strId = fmt.Sprintf("%d", id)
		count := 0
		cpu.count = &count

		usage[id] = cpu
	}

	for cpu, ctns := range fixedContainers {
		c, ok := usage[cpu]
		if !ok {
			shared.LogErrorf("Internal error: container using unavailable cpu")
			continue
		}
		id := c.strId
		for _, ctn := range ctns {
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*c.count += 1
		}
	}

	sortedUsage := make(deviceTaskCPUs, 0)
	for _, value := range usage {
		sortedUsage = append(sortedUsage, value)
	}

	for ctn, count := range balancedContainers {
		sort.Sort(sortedUsage)
		for _, cpu := range sortedUsage {
			if count == 0 {
				break
			}
			count -= 1

			id := cpu.strId
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*cpu.count += 1
		}
	}

	// Set the new pinning
	for ctn, set := range pinning {
		// Confirm the container didn't just stop
		if !ctn.IsRunning() {
			continue
		}

		sort.Strings(set)
		err := ctn.CGroupSet("cpuset.cpus", strings.Join(set, ","))
		if err != nil {
			shared.LogError("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")})
		}
	}
}
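
Note: the balancing step above always hands the currently least-loaded CPUs to the next container, re-sorting the usage counts between containers. A stripped-down sketch of just that step (types and names are illustrative; sort.Slice replaces the deviceTaskCPUs sort.Sort):

package main

import (
	"fmt"
	"sort"
)

type cpuUsage struct {
	id    int
	count int
}

// balance assigns `want` CPUs to each container, always taking the
// least-loaded CPUs first, like the balancedContainers loop above.
func balance(cpus []cpuUsage, want map[string]int) map[string][]int {
	pinning := map[string][]int{}
	for ctn, count := range want {
		// Re-sort by load before each container, as deviceTaskBalance does.
		sort.Slice(cpus, func(i, j int) bool { return cpus[i].count < cpus[j].count })
		for i := range cpus {
			if count == 0 {
				break
			}
			count--
			pinning[ctn] = append(pinning[ctn], cpus[i].id)
			cpus[i].count++
		}
	}
	return pinning
}

func main() {
	cpus := []cpuUsage{{id: 0}, {id: 1}, {id: 2}, {id: 3}}
	fmt.Println(balance(cpus, map[string]int{"c1": 2, "c2": 2}))
}
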
Example #15
// ImageDownload checks whether the image with the given fingerprint is
// already present, and otherwise downloads it from a remote server.
func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) {
	var err error
	var ss *shared.SimpleStreams
	var ctxMap log.Ctx

	if protocol == "" {
		protocol = "lxd"
	}

	fp := alias

	// Expand aliases
	if protocol == "simplestreams" {
		imageStreamCacheLock.Lock()
		entry := imageStreamCache[server]
		if entry == nil || entry.expiry.Before(time.Now()) {
			refresh := func() (*imageStreamCacheEntry, error) {
				// Setup simplestreams client
				ss, err = shared.SimpleStreamsClient(server, d.proxy)
				if err != nil {
					return nil, err
				}

				// Get all aliases
				aliases, err := ss.ListAliases()
				if err != nil {
					return nil, err
				}

				// Get all fingerprints
				images, err := ss.ListImages()
				if err != nil {
					return nil, err
				}

				fingerprints := []string{}
				for _, image := range images {
					fingerprints = append(fingerprints, image.Fingerprint)
				}

				// Generate cache entry
				entry = &imageStreamCacheEntry{ss: ss, Aliases: aliases, Fingerprints: fingerprints, expiry: time.Now().Add(time.Hour)}
				imageStreamCache[server] = entry
				imageSaveStreamCache()

				return entry, nil
			}

			newEntry, err := refresh()
			if err == nil {
				// Cache refreshed
				entry = newEntry
			} else if entry != nil {
				// Failed to fetch entry but existing cache
				shared.LogWarn("Unable to refresh cache, using stale entry", log.Ctx{"server": server})
				entry.expiry = time.Now().Add(time.Hour)
			} else {
				// Failed to fetch entry and nothing in cache
				imageStreamCacheLock.Unlock()
				return "", err
			}
		} else {
			shared.LogDebug("Using SimpleStreams cache entry", log.Ctx{"server": server, "expiry": entry.expiry})
			ss = entry.ss
		}
		imageStreamCacheLock.Unlock()

		// Expand aliases
		for _, alias := range entry.Aliases {
			if alias.Name != fp {
				continue
			}

			fp = alias.Target
			break
		}

		// Expand fingerprint
		for _, fingerprint := range entry.Fingerprints {
			if !strings.HasPrefix(fingerprint, fp) {
				continue
			}

			if fp == alias {
				alias = fingerprint
			}
			fp = fingerprint
			break
		}
	} else if protocol == "lxd" {
		target, err := remoteGetImageFingerprint(d, server, certificate, fp)
		if err == nil && target != "" {
			fp = target
		}
	}

	if _, _, err := dbImageGet(d.db, fp, false, false); err == nil {
		shared.LogDebug("Image already exists in the db", log.Ctx{"image": fp})
		// already have it
		return fp, nil
	}

	// Now check if we're already downloading the image
	d.imagesDownloadingLock.RLock()
	if waitChannel, ok := d.imagesDownloading[fp]; ok {
		// The image is already being downloaded
		d.imagesDownloadingLock.RUnlock()

		shared.LogDebug(
			"Already downloading the image, waiting for it to succeed",
			log.Ctx{"image": fp})

		// Wait until the download finishes (channel closes)
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}

		if _, _, err := dbImageGet(d.db, fp, false, true); err != nil {
			shared.LogError(
				"Previous download didn't succeed",
				log.Ctx{"image": fp})

			return "", fmt.Errorf("Previous download didn't succeed")
		}

		shared.LogDebug(
			"Previous download succeeded",
			log.Ctx{"image": fp})

		return fp, nil
	}

	d.imagesDownloadingLock.RUnlock()

	if op == nil {
		ctxMap = log.Ctx{"alias": alias, "server": server}
	} else {
		ctxMap = log.Ctx{"trigger": op.url, "image": fp, "operation": op.id, "alias": alias, "server": server}
	}

	shared.LogInfo("Downloading image", ctxMap)

	// Add the download to the queue
	d.imagesDownloadingLock.Lock()
	d.imagesDownloading[fp] = make(chan bool)
	d.imagesDownloadingLock.Unlock()

	// Unlock once this func ends.
	defer func() {
		d.imagesDownloadingLock.Lock()
		if waitChannel, ok := d.imagesDownloading[fp]; ok {
			close(waitChannel)
			delete(d.imagesDownloading, fp)
		}
		d.imagesDownloadingLock.Unlock()
	}()

	exporturl := server

	var info shared.ImageInfo
	info.Fingerprint = fp

	destDir := shared.VarPath("images")
	destName := filepath.Join(destDir, fp)
	if shared.PathExists(destName) {
		d.Storage.ImageDelete(fp)
	}

	progress := func(progressInt int64, speedInt int64) {
		if op == nil {
			return
		}

		meta := op.metadata
		if meta == nil {
			meta = make(map[string]interface{})
		}

		progress := fmt.Sprintf("%d%% (%s/s)", progressInt, shared.GetByteSizeString(speedInt))

		if meta["download_progress"] != progress {
			meta["download_progress"] = progress
			op.UpdateMetadata(meta)
		}
	}

	if protocol == "lxd" {
		/* grab the metadata from /1.0/images/%s */
		var url string
		if secret != "" {
			url = fmt.Sprintf(
				"%s/%s/images/%s?secret=%s",
				server, shared.APIVersion, fp, secret)
		} else {
			url = fmt.Sprintf("%s/%s/images/%s", server, shared.APIVersion, fp)
		}

		resp, err := d.httpGetSync(url, certificate)
		if err != nil {
			shared.LogError(
				"Failed to download image metadata",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		if err := json.Unmarshal(resp.Metadata, &info); err != nil {
			return "", err
		}

		/* now grab the actual file from /1.0/images/%s/export */
		if secret != "" {
			exporturl = fmt.Sprintf(
				"%s/%s/images/%s/export?secret=%s",
				server, shared.APIVersion, fp, secret)

		} else {
			exporturl = fmt.Sprintf(
				"%s/%s/images/%s/export",
				server, shared.APIVersion, fp)
		}
	} else if protocol == "simplestreams" {
		err := ss.Download(fp, "meta", destName, nil)
		if err != nil {
			return "", err
		}

		err = ss.Download(fp, "root", destName+".rootfs", progress)
		if err != nil {
			return "", err
		}

		info, err := ss.GetImageInfo(fp)
		if err != nil {
			return "", err
		}

		info.Public = false
		info.AutoUpdate = autoUpdate

		_, err = imageBuildFromInfo(d, *info)
		if err != nil {
			return "", err
		}

		if alias != fp {
			id, _, err := dbImageGet(d.db, fp, false, true)
			if err != nil {
				return "", err
			}

			err = dbImageSourceInsert(d.db, id, server, protocol, "", alias)
			if err != nil {
				return "", err
			}
		}

		shared.LogInfo("Image downloaded", ctxMap)

		if forContainer {
			return fp, dbImageLastAccessInit(d.db, fp)
		}

		return fp, nil
	}

	raw, err := d.httpGetFile(exporturl, certificate)
	if err != nil {
		shared.LogError(
			"Failed to download image",
			log.Ctx{"image": fp, "err": err})
		return "", err
	}
	info.Size = raw.ContentLength

	ctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get("Content-Type"))
	if err != nil {
		ctype = "application/octet-stream"
	}

	body := &shared.ProgressReader{
		ReadCloser: raw.Body,
		Tracker: &shared.ProgressTracker{
			Length:  raw.ContentLength,
			Handler: progress,
		},
	}

	if ctype == "multipart/form-data" {
		// Parse the POST data
		mr := multipart.NewReader(body, ctypeParams["boundary"])

		// Get the metadata tarball
		part, err := mr.NextPart()
		if err != nil {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		if part.FormName() != "metadata" {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})

			return "", fmt.Errorf("Invalid multipart image")
		}

		destName = filepath.Join(destDir, info.Fingerprint)
		f, err := os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		_, err = io.Copy(f, part)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		// Get the rootfs tarball
		part, err = mr.NextPart()
		if err != nil {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		if part.FormName() != "rootfs" {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp})
			return "", fmt.Errorf("Invalid multipart image")
		}

		destName = filepath.Join(destDir, info.Fingerprint+".rootfs")
		f, err = os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		_, err = io.Copy(f, part)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}
	} else {
		destName = filepath.Join(destDir, info.Fingerprint)

		f, err := os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		_, err = io.Copy(f, body)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}
	}

	if protocol == "direct" {
		imageMeta, err := getImageMetadata(destName)
		if err != nil {
			return "", err
		}

		info.Architecture = imageMeta.Architecture
		info.CreationDate = time.Unix(imageMeta.CreationDate, 0)
		info.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0)
		info.Properties = imageMeta.Properties
	}

	// By default, make all downloaded images private
	info.Public = false

	if alias != fp && secret == "" {
		info.AutoUpdate = autoUpdate
	}

	_, err = imageBuildFromInfo(d, info)
	if err != nil {
		shared.LogError(
			"Failed to create image",
			log.Ctx{"image": fp, "err": err})

		return "", err
	}

	if alias != fp {
		id, _, err := dbImageGet(d.db, fp, false, true)
		if err != nil {
			return "", err
		}

		err = dbImageSourceInsert(d.db, id, server, protocol, "", alias)
		if err != nil {
			return "", err
		}
	}

	shared.LogInfo("Image downloaded", ctxMap)

	if forContainer {
		return fp, dbImageLastAccessInit(d.db, fp)
	}

	return fp, nil
}
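
Note: the multipart/form-data branch above expects exactly two parts, "metadata" then "rootfs". A self-contained sketch of consuming such a stream with the standard mime/multipart reader (the part names mirror the code above; the boundary and sample payload are made up):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"strings"
)

// readImageParts consumes a two-part multipart stream in the fixed
// "metadata" then "rootfs" order the code above relies on.
func readImageParts(r io.Reader, boundary string) error {
	mr := multipart.NewReader(r, boundary)

	for _, want := range []string{"metadata", "rootfs"} {
		part, err := mr.NextPart()
		if err != nil {
			return fmt.Errorf("invalid multipart image: %v", err)
		}

		if part.FormName() != want {
			return fmt.Errorf("invalid multipart image: expected %q, got %q", want, part.FormName())
		}

		// The real code copies each part into a file under images/;
		// here we just drain it.
		body, err := ioutil.ReadAll(part)
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d bytes\n", want, len(body))
	}
	return nil
}

func main() {
	const b = "xxBOUNDARYxx"
	stream := strings.NewReader(
		"--" + b + "\r\n" +
			"Content-Disposition: form-data; name=\"metadata\"\r\n\r\n" +
			"meta-tarball-bytes\r\n" +
			"--" + b + "\r\n" +
			"Content-Disposition: form-data; name=\"rootfs\"\r\n\r\n" +
			"rootfs-tarball-bytes\r\n" +
			"--" + b + "--\r\n")
	if err := readImageParts(stream, b); err != nil {
		fmt.Println(err)
	}
}
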
Example #16
func dbUpdateFromV11(currentVersion int, version int, d *Daemon) error {
	if d.MockMode {
		// No need to move snapshots in mock runs;
		// dbUpdateFromV12 will then set the db version to 13
		return nil
	}

	cNames, err := dbContainersList(d.db, cTypeSnapshot)
	if err != nil {
		return err
	}

	errors := 0

	for _, cName := range cNames {
		snappieces := strings.SplitN(cName, shared.SnapshotDelimiter, 2)
		oldPath := shared.VarPath("containers", snappieces[0], "snapshots", snappieces[1])
		newPath := shared.VarPath("snapshots", snappieces[0], snappieces[1])
		if shared.PathExists(oldPath) && !shared.PathExists(newPath) {
			shared.LogInfo(
				"Moving snapshot",
				log.Ctx{
					"snapshot": cName,
					"oldPath":  oldPath,
					"newPath":  newPath})

			// Rsync
			// containers/<container>/snapshots/<snap0>
			//   to
			// snapshots/<container>/<snap0>
			output, err := storageRsyncCopy(oldPath, newPath)
			if err != nil {
				shared.LogError(
					"Failed rsync snapshot",
					log.Ctx{
						"snapshot": cName,
						"output":   string(output),
						"err":      err})
				errors++
				continue
			}

			// Remove containers/<container>/snapshots/<snap0>
			if err := os.RemoveAll(oldPath); err != nil {
				shared.LogError(
					"Failed to remove the old snapshot path",
					log.Ctx{
						"snapshot": cName,
						"oldPath":  oldPath,
						"err":      err})

				// Ignore this error.
				// errors++
				// continue
			}

			// Remove /var/lib/lxd/containers/<container>/snapshots
			// if its empty.
			cPathParent := filepath.Dir(oldPath)
			if ok, _ := shared.PathIsEmpty(cPathParent); ok {
				os.Remove(cPathParent)
			}

		} // if shared.PathExists(oldPath) && !shared.PathExists(newPath) {
	} // for _, cName := range cNames {

	// Refuse to start lxd if an rsync failed.
	if errors > 0 {
		return fmt.Errorf("Got errors while moving snapshots, see the log output.")
	}

	return nil
}
Example #17
func containerExecPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]
	c, err := containerLoadByName(d, name)
	if err != nil {
		return SmartError(err)
	}

	if !c.IsRunning() {
		return BadRequest(fmt.Errorf("Container is not running."))
	}

	if c.IsFrozen() {
		return BadRequest(fmt.Errorf("Container is frozen."))
	}

	post := commandPostContent{}
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return BadRequest(err)
	}

	if err := json.Unmarshal(buf, &post); err != nil {
		return BadRequest(err)
	}

	env := map[string]string{}

	for k, v := range c.ExpandedConfig() {
		if strings.HasPrefix(k, "environment.") {
			env[strings.TrimPrefix(k, "environment.")] = v
		}
	}

	if post.Environment != nil {
		for k, v := range post.Environment {
			env[k] = v
		}
	}

	_, ok := env["PATH"]
	if !ok {
		env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
		if shared.PathExists(fmt.Sprintf("%s/snap/bin", c.RootfsPath())) {
			env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"])
		}
	}

	if post.WaitForWS {
		ws := &execWs{}
		ws.fds = map[int]string{}
		idmapset := c.IdmapSet()
		if idmapset != nil {
			ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0)
		}
		ws.conns = map[int]*websocket.Conn{}
		ws.conns[-1] = nil
		ws.conns[0] = nil
		if !post.Interactive {
			ws.conns[1] = nil
			ws.conns[2] = nil
		}
		ws.allConnected = make(chan bool, 1)
		ws.controlConnected = make(chan bool, 1)
		ws.interactive = post.Interactive
		for i := -1; i < len(ws.conns)-1; i++ {
			ws.fds[i], err = shared.RandomCryptoString()
			if err != nil {
				return InternalError(err)
			}
		}

		ws.command = post.Command
		ws.container = c
		ws.env = env

		ws.width = post.Width
		ws.height = post.Height

		resources := map[string][]string{}
		resources["containers"] = []string{ws.container.Name()}

		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
		if err != nil {
			return InternalError(err)
		}

		return OperationResponse(op)
	}

	run := func(op *operation) error {
		var cmdErr error
		var cmdResult int
		metadata := shared.Jmap{}

		if post.RecordOutput {
			// Prepare stdout and stderr recording
			stdout, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
			if err != nil {
				return err
			}
			defer stdout.Close()

			stderr, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
			if err != nil {
				return err
			}
			defer stderr.Close()

			// Run the command
			cmdResult, cmdErr = c.Exec(post.Command, env, nil, stdout, stderr)

			// Update metadata with the right URLs
			metadata["return"] = cmdResult
			metadata["output"] = shared.Jmap{
				"1": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, c.Name(), filepath.Base(stdout.Name())),
				"2": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, c.Name(), filepath.Base(stderr.Name())),
			}
		} else {
			cmdResult, cmdErr = c.Exec(post.Command, env, nil, nil, nil)
			metadata["return"] = cmdResult
		}

		err = op.UpdateMetadata(metadata)
		if err != nil {
			shared.LogError("error updating metadata for cmd", log.Ctx{"err": err, "cmd": post.Command})
		}

		return cmdErr
	}

	resources := map[string][]string{}
	resources["containers"] = []string{name}

	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
	if err != nil {
		return InternalError(err)
	}

	return OperationResponse(op)
}
Example #18
func createFromMigration(d *Daemon, req *containerPostReq) Response {
	if req.Source.Mode != "pull" && req.Source.Mode != "push" {
		return NotImplemented
	}

	architecture, err := shared.ArchitectureId(req.Architecture)
	if err != nil {
		architecture = 0
	}

	args := containerArgs{
		Architecture: architecture,
		BaseImage:    req.Source.BaseImage,
		Config:       req.Config,
		Ctype:        cTypeRegular,
		Devices:      req.Devices,
		Ephemeral:    req.Ephemeral,
		Name:         req.Name,
		Profiles:     req.Profiles,
	}

	var c container
	_, _, err = dbImageGet(d.db, req.Source.BaseImage, false, true)

	/* Only create a container from an image if we're going to
	 * rsync over the top of it. In the case of a better file
	 * transfer mechanism, let's just use that.
	 *
	 * TODO: we could invent some negotiation here, where if the
	 * source and sink both have the same image, we can clone from
	 * it, but we have to know before sending the snapshot that
	 * we're sending the whole thing or just a delta from the
	 * image, so one extra negotiation round trip is needed. An
	 * alternative is to move actual container object to a later
	 * point and just negotiate it over the migration control
	 * socket. Anyway, it'll happen later :)
	 */
	if err == nil && d.Storage.MigrationType() == MigrationFSType_RSYNC {
		c, err = containerCreateFromImage(d, args, req.Source.BaseImage)
		if err != nil {
			return InternalError(err)
		}
	} else {
		c, err = containerCreateAsEmpty(d, args)
		if err != nil {
			return InternalError(err)
		}
	}

	var cert *x509.Certificate
	if req.Source.Certificate != "" {
		certBlock, _ := pem.Decode([]byte(req.Source.Certificate))
		if certBlock == nil {
			return InternalError(fmt.Errorf("Invalid certificate"))
		}

		cert, err = x509.ParseCertificate(certBlock.Bytes)
		if err != nil {
			return InternalError(err)
		}
	}

	config, err := shared.GetTLSConfig("", "", "", cert)
	if err != nil {
		c.Delete()
		return InternalError(err)
	}

	push := false
	if req.Source.Mode == "push" {
		push = true
	}

	migrationArgs := MigrationSinkArgs{
		Url: req.Source.Operation,
		Dialer: websocket.Dialer{
			TLSClientConfig: config,
			NetDial:         shared.RFC3493Dialer},
		Container: c,
		Secrets:   req.Source.Websockets,
		Push:      push,
		Live:      req.Source.Live,
	}

	sink, err := NewMigrationSink(&migrationArgs)
	if err != nil {
		c.Delete()
		return InternalError(err)
	}

	run := func(op *operation) error {
		// And finally run the migration.
		err = sink.Do(op)
		if err != nil {
			shared.LogError("Error during migration sink", log.Ctx{"err": err})
			c.Delete()
			return fmt.Errorf("Error transferring container data: %s", err)
		}

		err = c.TemplateApply("copy")
		if err != nil {
			return err
		}

		return nil
	}

	resources := map[string][]string{}
	resources["containers"] = []string{req.Name}

	var op *operation
	if push {
		op, err = operationCreate(operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect)
		if err != nil {
			return InternalError(err)
		}
	} else {
		op, err = operationCreate(operationClassTask, resources, nil, run, nil, nil)
		if err != nil {
			return InternalError(err)
		}
	}

	return OperationResponse(op)
}
Example #19
File: devices.go Project: vahe/lxd
func deviceNetlinkListener() (chan []string, chan []string, chan usbDevice, error) {
	NETLINK_KOBJECT_UEVENT := 15
	UEVENT_BUFFER_SIZE := 2048

	fd, err := syscall.Socket(
		syscall.AF_NETLINK, syscall.SOCK_RAW,
		NETLINK_KOBJECT_UEVENT,
	)

	if err != nil {
		return nil, nil, nil, err
	}

	nl := syscall.SockaddrNetlink{
		Family: syscall.AF_NETLINK,
		Pid:    uint32(os.Getpid()),
		Groups: 1,
	}

	err = syscall.Bind(fd, &nl)
	if err != nil {
		return nil, nil, nil, err
	}

	chCPU := make(chan []string, 1)
	chNetwork := make(chan []string)
	chUSB := make(chan usbDevice)

	go func(chCPU chan []string, chNetwork chan []string, chUSB chan usbDevice) {
		b := make([]byte, UEVENT_BUFFER_SIZE*2)
		for {
			_, err := syscall.Read(fd, b)
			if err != nil {
				continue
			}

			props := map[string]string{}
			last := 0
			for i, e := range b {
				if i == len(b) || e == 0 {
					msg := string(b[last+1 : i])
					last = i
					if len(msg) == 0 || msg == "\x00" {
						continue
					}

					fields := strings.SplitN(msg, "=", 2)
					if len(fields) != 2 {
						continue
					}

					props[fields[0]] = fields[1]
				}
			}

			if props["SUBSYSTEM"] == "cpu" {
				if props["DRIVER"] != "processor" {
					continue
				}

				if props["ACTION"] != "offline" && props["ACTION"] != "online" {
					continue
				}

				// As CPU re-balancing affects all containers, no need to queue them
				select {
				case chCPU <- []string{path.Base(props["DEVPATH"]), props["ACTION"]}:
				default:
					// Channel is full, drop the event
				}
			}

			if props["SUBSYSTEM"] == "net" {
				if props["ACTION"] != "add" && props["ACTION"] != "removed" {
					continue
				}

				if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", props["INTERFACE"])) {
					continue
				}

				// Network balancing is interface specific, so queue everything
				chNetwork <- []string{props["INTERFACE"], props["ACTION"]}
			}

			if props["SUBSYSTEM"] == "usb" {
				if props["ACTION"] != "add" && props["ACTION"] != "remove" {
					continue
				}

				parts := strings.Split(props["PRODUCT"], "/")
				if len(parts) < 2 {
					continue
				}

				major, ok := props["MAJOR"]
				if !ok {
					continue
				}

				minor, ok := props["MINOR"]
				if !ok {
					continue
				}

				devname, ok := props["DEVNAME"]

				busnum, ok := props["BUSNUM"]
				if !ok {
					continue
				}

				devnum, ok := props["DEVNUM"]
				if !ok {
					continue
				}

				zeroPad := func(s string, l int) string {
					return strings.Repeat("0", l-len(s)) + s
				}

				usb, err := createUSBDevice(
					props["ACTION"],
					/* udev doesn't zero pad these, while
					 * everything else does, so let's zero pad them
					 * for consistency
					 */
					zeroPad(parts[0], 4),
					zeroPad(parts[1], 4),
					major,
					minor,
					busnum,
					devnum,
					devname,
				)
				if err != nil {
					shared.LogError("error reading usb device", log.Ctx{"err": err, "path": props["PHYSDEVPATH"]})
					continue
				}

				chUSB <- usb
			}

		}
	}(chCPU, chNetwork, chUSB)

	return chCPU, chNetwork, chUSB, nil
}
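
Note: each netlink uevent arrives as a NUL-separated block of KEY=VALUE strings that the listener folds into a map before dispatching on SUBSYSTEM and ACTION. A small sketch of that parsing step in isolation (stdlib only; the sample buffer is made up):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// parseUevent splits a raw uevent buffer into KEY=VALUE properties,
// mirroring the inner parsing loop of deviceNetlinkListener above.
func parseUevent(b []byte) map[string]string {
	props := map[string]string{}
	for _, msg := range bytes.Split(b, []byte{0}) {
		fields := strings.SplitN(string(msg), "=", 2)
		if len(fields) != 2 {
			continue // header line like "add@/devices/..." or empty tail
		}
		props[fields[0]] = fields[1]
	}
	return props
}

func main() {
	// Hypothetical USB add event payload.
	raw := []byte("add@/devices/usb1\x00ACTION=add\x00SUBSYSTEM=usb\x00MAJOR=189\x00MINOR=4\x00PRODUCT=1d6b/2/414\x00")
	props := parseUevent(raw)
	fmt.Println(props["SUBSYSTEM"], props["ACTION"], props["MAJOR"], props["MINOR"])
}
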
Example #20
func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
		args := []string{"receive", "-F", "-u", zfsFsName}
		cmd := exec.Command("zfs", args...)

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		writePipe := io.WriteCloser(stdin)
		if writeWrapper != nil {
			writePipe = writeWrapper(stdin)
		}

		<-shared.WebsocketRecvStream(writePipe, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebug("problem reading zfs recv stderr %s", log.Ctx{"err": err})
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with zfs recv", log.Ctx{"output": string(output)})
		}
		return err
	}

	/* In some versions of zfs we can write `zfs recv -F` to mounted
	 * filesystems, and in some versions we can't. So, let's always unmount
	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
	 * of a snapshot also needs the actual fs that it has snapshotted
	 * unmounted, so we do this before receiving anything.
	 */
	zfsName := fmt.Sprintf("containers/%s", container.Name())
	err := s.zfsUnmount(zfsName)
	if err != nil {
		return err
	}

	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName())
		name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap.GetName())
		if err := zfsRecv(name, wrapper); err != nil {
			return err
		}

		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
		if err != nil {
			return err
		}

		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap.GetName())))
		if err != nil {
			return err
		}
	}

	defer func() {
		/* clean up our migration-send snapshots that we got from recv. */
		zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
		if err != nil {
			shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err})
			return
		}

		for _, snap := range zfsSnapshots {
			// If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got
			if len(snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
		}
	}()

	/* finally, do the real container */
	wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
	if err := zfsRecv(zfsName, wrapper); err != nil {
		return err
	}

	if live {
		/* and again for the post-running snapshot if this was a live migration */
		wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
		if err := zfsRecv(zfsName, wrapper); err != nil {
			return err
		}
	}

	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
	 * but sometimes it doesn't. Let's try to mount, but not complain about
	 * failure.
	 */
	s.zfsMount(zfsName)
	return nil
}
Example #21
File: images.go Project: vahe/lxd
func autoUpdateImages(d *Daemon) {
	shared.LogInfof("Updating images")

	images, err := dbImagesGet(d.db, false)
	if err != nil {
		shared.LogError("Unable to retrieve the list of images", log.Ctx{"err": err})
		return
	}

	for _, fp := range images {
		id, info, err := dbImageGet(d.db, fp, false, true)
		if err != nil {
			shared.LogError("Error loading image", log.Ctx{"err": err, "fp": fp})
			continue
		}

		if !info.AutoUpdate {
			continue
		}

		_, source, err := dbImageSourceGet(d.db, id)
		if err != nil {
			continue
		}

		shared.LogDebug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias})

		hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true)
		if hash == fp {
			shared.LogDebug("Already up to date", log.Ctx{"fp": fp})
			continue
		} else if err != nil {
			shared.LogError("Failed to update the image", log.Ctx{"err": err, "fp": fp})
			continue
		}

		newId, _, err := dbImageGet(d.db, hash, false, true)
		if err != nil {
			shared.LogError("Error loading image", log.Ctx{"err": err, "fp": hash})
			continue
		}

		err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedDate)
		if err != nil {
			shared.LogError("Error setting last use date", log.Ctx{"err": err, "fp": hash})
			continue
		}

		err = dbImageAliasesMove(d.db, id, newId)
		if err != nil {
			shared.LogError("Error moving aliases", log.Ctx{"err": err, "fp": hash})
			continue
		}

		err = doDeleteImage(d, fp)
		if err != nil {
			shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp})
		}
	}

	shared.LogInfof("Done updating images")
}