Example #1
File: client.go Project: djibi2/lxd
// Init creates a container from either a fingerprint or an alias; you must
// provide at least one.
func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*Response, error) {
	var tmpremote *Client
	var err error

	serverStatus, err := c.ServerStatus()
	if err != nil {
		return nil, err
	}
	architectures := serverStatus.Environment.Architectures

	source := shared.Jmap{"type": "image"}

	if image == "" {
		return nil, fmt.Errorf(i18n.G("You must provide an image hash or alias name."))
	}

	if imgremote != c.Name {
		source["type"] = "image"
		source["mode"] = "pull"
		tmpremote, err = NewClient(&c.Config, imgremote)
		if err != nil {
			return nil, err
		}

		fingerprint := tmpremote.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := tmpremote.GetImageInfo(fingerprint)
		if err != nil {
			return nil, err
		}

		if len(architectures) != 0 && !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(i18n.G("The image architecture is incompatible with the target server"))
		}

		// FIXME: InterfaceToBool is there for backward compatibility
		if !shared.InterfaceToBool(imageinfo.Public) {
			var secret string

			resp, err := tmpremote.post("images/"+fingerprint+"/secret", nil, Async)
			if err != nil {
				return nil, err
			}

			op, err := resp.MetadataAsOperation()
			if err == nil && op.Metadata != nil {
				secret, err = op.Metadata.GetString("secret")
				if err != nil {
					return nil, err
				}
			} else {
				// FIXME: This is a backward compatibility codepath
				md := secretMd{}
				if err := json.Unmarshal(resp.Metadata, &md); err != nil {
					return nil, err
				}

				secret = md.Secret
			}

			source["secret"] = secret
		}

		source["server"] = tmpremote.BaseURL
		source["fingerprint"] = fingerprint
	} else {
		fingerprint := c.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := c.GetImageInfo(fingerprint)
		if err != nil {
			return nil, fmt.Errorf(i18n.G("can't get info for image '%s': %s"), image, err)
		}

		if len(architectures) != 0 && !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(i18n.G("The image architecture is incompatible with the target server"))
		}
		source["fingerprint"] = fingerprint
	}

	body := shared.Jmap{"source": source}

	if name != "" {
		body["name"] = name
	}

	if profiles != nil {
		body["profiles"] = *profiles
	}

	if config != nil {
		body["config"] = config
	}

	if ephem {
		body["ephemeral"] = ephem
	}

	var resp *Response

	if imgremote != c.Name {
		var addresses []string
		addresses, err = tmpremote.Addresses()
		if err != nil {
			return nil, err
		}

		for _, addr := range addresses {
			body["source"].(shared.Jmap)["server"] = "https://" + addr

			resp, err = c.post("containers", body, Async)
			if err != nil {
				continue
			}

			break
		}
	} else {
		resp, err = c.post("containers", body, Async)
	}

	if err != nil {
		if LXDErrors[http.StatusNotFound] == err {
			return nil, fmt.Errorf("image doesn't exist")
		}
		return nil, err
	}

	return resp, nil
}
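
A minimal usage sketch of this Init variant (not taken from any of the projects above; the remote name, alias, and container settings are illustrative, and the WaitForSuccess call assumes the client's usual helper for waiting on async operations):

// Hypothetical caller inside the same package: create an ephemeral container
// from a remote image alias, then wait for the async create to finish.
func createFromAlias(conf *Config) error {
	c, err := NewClient(conf, "local")
	if err != nil {
		return err
	}

	profiles := []string{"default"}
	resp, err := c.Init("c1", "images", "ubuntu/xenial", &profiles,
		map[string]string{"limits.cpu": "2"}, true)
	if err != nil {
		return err
	}

	// Init only queues the operation; block until it completes.
	return c.WaitForSuccess(resp.Operation)
}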
Example #2
File: client.go Project: Malizor/lxd
// Init creates a container from either a fingerprint or an alias; you must
// provide at least one.
func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*Response, error) {
	var operation string
	var tmpremote *Client
	var err error

	serverStatus, err := c.ServerStatus()
	if err != nil {
		return nil, err
	}
	architectures := serverStatus.Environment.Architectures

	source := shared.Jmap{"type": "image"}

	if image == "" {
		return nil, fmt.Errorf(gettext.Gettext("You must provide an image hash or alias name."))
	}

	if imgremote != c.name {
		source["type"] = "image"
		source["mode"] = "pull"
		tmpremote, err = NewClient(&c.config, imgremote)
		if err != nil {
			return nil, err
		}

		fingerprint := tmpremote.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := tmpremote.GetImageInfo(fingerprint)
		if err != nil {
			return nil, err
		}

		if len(architectures) != 0 && !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(gettext.Gettext("The image architecture is incompatible with the target server"))
		}

		if !shared.InterfaceToBool(imageinfo.Public) {
			resp, err := tmpremote.post("images/"+fingerprint+"/secret", nil, Async)
			if err != nil {
				return nil, err
			}

			toScan := strings.Replace(resp.Operation, "/", " ", -1)
			version := ""
			count, err := fmt.Sscanf(toScan, " %s operations %s", &version, &operation)
			if err != nil || count != 2 {
				return nil, err
			}

			md := secretMd{}
			if err := json.Unmarshal(resp.Metadata, &md); err != nil {
				return nil, err
			}

			source["secret"] = md.Secret
		}

		source["server"] = tmpremote.BaseURL
		source["fingerprint"] = fingerprint
	} else {
		fingerprint := c.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := c.GetImageInfo(fingerprint)
		if err != nil {
			return nil, fmt.Errorf(gettext.Gettext("can't get info for image '%s': %s"), image, err)
		}

		if len(architectures) != 0 && !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(gettext.Gettext("The image architecture is incompatible with the target server"))
		}
		source["fingerprint"] = fingerprint
	}

	body := shared.Jmap{"source": source}

	if name != "" {
		body["name"] = name
	}

	if profiles != nil {
		body["profiles"] = *profiles
	}

	if config != nil {
		body["config"] = config
	}

	if ephem {
		body["ephemeral"] = ephem
	}

	var resp *Response

	if imgremote != c.name {
		var addresses []string
		addresses, err = tmpremote.Addresses()
		if err != nil {
			return nil, err
		}

		for _, addr := range addresses {
			body["source"].(shared.Jmap)["server"] = "https://" + addr

			resp, err = c.post("containers", body, Async)
			if err != nil {
				continue
			}

			break
		}
	} else {
		resp, err = c.post("containers", body, Async)
	}

	if operation != "" {
		_, _ = tmpremote.delete("operations/"+operation, nil, Sync)
	}

	if err != nil {
		if LXDErrors[http.StatusNotFound] == err {
			return nil, fmt.Errorf("image doesn't exist")
		}
		return nil, err
	}

	return resp, nil
}
Example #3
File: client.go Project: timwukp/lxd
// Init creates a container from either a fingerprint or an alias; you must
// provide at least one.
func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, ephem bool) (*Response, error) {
	var operation string
	var tmpremote *Client
	var err error

	serverStatus, err := c.ServerStatus()
	if err != nil {
		return nil, err
	}
	architectures := serverStatus.Environment.Architectures

	source := shared.Jmap{"type": "image"}

	if imgremote != "" {
		source["type"] = "image"
		source["mode"] = "pull"
		tmpremote, err = NewClient(&c.config, imgremote)
		if err != nil {
			return nil, err
		}

		fingerprint := tmpremote.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := tmpremote.GetImageInfo(fingerprint)
		if err != nil {
			return nil, err
		}

		if !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(gettext.Gettext("The image architecture is incompatible with the target server"))
		}

		if imageinfo.Public == 0 {
			resp, err := tmpremote.post("images/"+fingerprint+"/secret", nil, Async)
			if err != nil {
				return nil, err
			}

			toScan := strings.Replace(resp.Operation, "/", " ", -1)
			version := ""
			count, err := fmt.Sscanf(toScan, " %s operations %s", &version, &operation)
			if err != nil || count != 2 {
				return nil, err
			}

			md := secretMd{}
			if err := json.Unmarshal(resp.Metadata, &md); err != nil {
				return nil, err
			}

			source["secret"] = md.Secret
		}

		source["server"] = tmpremote.BaseURL
		source["fingerprint"] = fingerprint
	} else {
		fingerprint := c.GetAlias(image)
		if fingerprint == "" {
			fingerprint = image
		}

		imageinfo, err := c.GetImageInfo(fingerprint)
		if err != nil {
			return nil, err
		}

		if !shared.IntInSlice(imageinfo.Architecture, architectures) {
			return nil, fmt.Errorf(gettext.Gettext("The image architecture is incompatible with the target server"))
		}
		source["fingerprint"] = fingerprint
	}

	body := shared.Jmap{"source": source}

	if name != "" {
		body["name"] = name
	}

	if profiles != nil {
		body["profiles"] = *profiles
	}

	if ephem {
		body["ephemeral"] = ephem
	}

	resp, err := c.post("containers", body, Async)

	if operation != "" {
		_, _ = tmpremote.delete("operations/"+operation, nil, Sync)
	}

	if err != nil {
		if LXDErrors[http.StatusNotFound] == err {
			return nil, fmt.Errorf("image doesn't exist")
		}
		return nil, err
	}

	return resp, nil
}
Example #4
File: devices.go Project: imoapps/lxd
func deviceTaskBalance(d *Daemon) {
	min := func(x, y int) int {
		if x < y {
			return x
		}
		return y
	}

	// Don't bother running when CGroup support isn't there
	if !cgCpusetController {
		return
	}

	// Count CPUs
	cpus := []int{}
	dents, err := ioutil.ReadDir("/sys/bus/cpu/devices/")
	if err != nil {
		shared.Log.Error("balance: Unable to list CPUs", log.Ctx{"err": err})
		return
	}

	for _, f := range dents {
		id := -1
		count, err := fmt.Sscanf(f.Name(), "cpu%d", &id)
		if count != 1 || id == -1 {
			shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name()})
			continue
		}

		onlinePath := fmt.Sprintf("/sys/bus/cpu/devices/%s/online", f.Name())
		if !shared.PathExists(onlinePath) {
			// CPUs without an online file are non-hotplug so are always online
			cpus = append(cpus, id)
			continue
		}

		online, err := ioutil.ReadFile(onlinePath)
		if err != nil {
			shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name(), "err": err})
			continue
		}

		if online[0] == byte('0') {
			continue
		}

		cpus = append(cpus, id)
	}

	// Iterate through the containers
	containers, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		shared.Log.Error("balance: Unable to list containers", log.Ctx{"err": err})
		return
	}
	fixedContainers := map[int][]container{}
	balancedContainers := map[container]int{}
	for _, name := range containers {
		c, err := containerLoadByName(d, name)
		if err != nil {
			continue
		}

		conf := c.ExpandedConfig()
		cpu, ok := conf["limits.cpu"]
		if !ok || cpu == "" {
			currentCPUs, err := deviceGetCurrentCPUs()
			if err != nil {
				shared.Debugf("Couldn't get current CPU list: %s", err)
				cpu = fmt.Sprintf("%d", len(cpus))
			} else {
				cpu = currentCPUs
			}
		}

		if !c.IsRunning() {
			continue
		}

		count, err := strconv.Atoi(cpu)
		if err == nil {
			// Load-balance
			count = min(count, len(cpus))
			balancedContainers[c] = count
		} else {
			// Pinned
			chunks := strings.Split(cpu, ",")
			for _, chunk := range chunks {
				if strings.Contains(chunk, "-") {
					// Range
					fields := strings.SplitN(chunk, "-", 2)
					if len(fields) != 2 {
						shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu})
						continue
					}

					low, err := strconv.Atoi(fields[0])
					if err != nil {
						shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu})
						continue
					}

					high, err := strconv.Atoi(fields[1])
					if err != nil {
						shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu})
						continue
					}

					for i := low; i <= high; i++ {
						if !shared.IntInSlice(i, cpus) {
							continue
						}

						_, ok := fixedContainers[i]
						if ok {
							fixedContainers[i] = append(fixedContainers[i], c)
						} else {
							fixedContainers[i] = []container{c}
						}
					}
				} else {
					// Simple entry
					nr, err := strconv.Atoi(chunk)
					if err != nil {
						shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu})
						continue
					}

					if !shared.IntInSlice(nr, cpus) {
						continue
					}

					_, ok := fixedContainers[nr]
					if ok {
						fixedContainers[nr] = append(fixedContainers[nr], c)
					} else {
						fixedContainers[nr] = []container{c}
					}
				}
			}
		}
	}

	// Balance things
	pinning := map[container][]string{}
	usage := make(deviceTaskCPUs, 0)

	for _, id := range cpus {
		cpu := deviceTaskCPU{}
		cpu.id = id
		cpu.strId = fmt.Sprintf("%d", id)
		count := 0
		cpu.count = &count

		usage = append(usage, cpu)
	}

	for cpu, ctns := range fixedContainers {
		id := usage[cpu].strId
		for _, ctn := range ctns {
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*usage[cpu].count += 1
		}
	}

	for ctn, count := range balancedContainers {
		sort.Sort(usage)
		for _, cpu := range usage {
			if count == 0 {
				break
			}
			count -= 1

			id := cpu.strId
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*cpu.count += 1
		}
	}

	// Set the new pinning
	for ctn, set := range pinning {
		// Confirm the container didn't just stop
		if !ctn.IsRunning() {
			continue
		}

		sort.Strings(set)
		err := ctn.CGroupSet("cpuset.cpus", strings.Join(set, ","))
		if err != nil {
			shared.Log.Error("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")})
		}
	}
}
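
Both deviceTaskBalance examples sort a deviceTaskCPUs value to hand out the least-loaded CPUs first, but the type itself is not shown. A minimal sketch of what it plausibly looks like, inferred from the usage above (the sort.Interface ordering by *count is an assumption):

// deviceTaskCPU mirrors the fields used above: a numeric id, its string form,
// and a shared counter of how many containers are pinned to this CPU.
type deviceTaskCPU struct {
	id    int
	strId string
	count *int
}

// deviceTaskCPUs orders CPUs by current usage so that sort.Sort(usage) in the
// balancing loop yields the least-loaded CPU first.
type deviceTaskCPUs []deviceTaskCPU

func (c deviceTaskCPUs) Len() int           { return len(c) }
func (c deviceTaskCPUs) Less(i, j int) bool { return *c[i].count < *c[j].count }
func (c deviceTaskCPUs) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }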
Example #5
File: devices.go Project: vahe/lxd
func deviceTaskBalance(d *Daemon) {
	min := func(x, y int) int {
		if x < y {
			return x
		}
		return y
	}

	// Don't bother running when CGroup support isn't there
	if !cgCpusetController {
		return
	}

	// Get effective cpus list - those are all guaranteed to be online
	effectiveCpus, err := cGroupGet("cpuset", "/", "cpuset.effective_cpus")
	if err != nil {
		// Older kernel - use cpuset.cpus
		effectiveCpus, err = cGroupGet("cpuset", "/", "cpuset.cpus")
		if err != nil {
			shared.LogErrorf("Error reading host's cpuset.cpus")
			return
		}
	}
	err = cGroupSet("cpuset", "/lxc", "cpuset.cpus", effectiveCpus)
	if err != nil && shared.PathExists("/sys/fs/cgroup/cpuset/lxc") {
		shared.LogWarn("Error setting lxd's cpuset.cpus", log.Ctx{"err": err})
	}
	cpus, err := parseCpuset(effectiveCpus)
	if err != nil {
		shared.LogError("Error parsing host's cpu set", log.Ctx{"cpuset": effectiveCpus, "err": err})
		return
	}

	// Iterate through the containers
	containers, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		shared.LogError("problem loading containers list", log.Ctx{"err": err})
		return
	}
	fixedContainers := map[int][]container{}
	balancedContainers := map[container]int{}
	for _, name := range containers {
		c, err := containerLoadByName(d, name)
		if err != nil {
			continue
		}

		conf := c.ExpandedConfig()
		cpulimit, ok := conf["limits.cpu"]
		if !ok || cpulimit == "" {
			cpulimit = effectiveCpus
		}

		if !c.IsRunning() {
			continue
		}

		count, err := strconv.Atoi(cpulimit)
		if err == nil {
			// Load-balance
			count = min(count, len(cpus))
			balancedContainers[c] = count
		} else {
			// Pinned
			containerCpus, err := parseCpuset(cpulimit)
			if err != nil {
				return
			}
			for _, nr := range containerCpus {
				if !shared.IntInSlice(nr, cpus) {
					continue
				}

				_, ok := fixedContainers[nr]
				if ok {
					fixedContainers[nr] = append(fixedContainers[nr], c)
				} else {
					fixedContainers[nr] = []container{c}
				}
			}
		}
	}

	// Balance things
	pinning := map[container][]string{}
	usage := map[int]deviceTaskCPU{}

	for _, id := range cpus {
		cpu := deviceTaskCPU{}
		cpu.id = id
		cpu.strId = fmt.Sprintf("%d", id)
		count := 0
		cpu.count = &count

		usage[id] = cpu
	}

	for cpu, ctns := range fixedContainers {
		c, ok := usage[cpu]
		if !ok {
			shared.LogErrorf("Internal error: container using unavailable cpu")
			continue
		}
		id := c.strId
		for _, ctn := range ctns {
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*c.count += 1
		}
	}

	sortedUsage := make(deviceTaskCPUs, 0)
	for _, value := range usage {
		sortedUsage = append(sortedUsage, value)
	}

	for ctn, count := range balancedContainers {
		sort.Sort(sortedUsage)
		for _, cpu := range sortedUsage {
			if count == 0 {
				break
			}
			count -= 1

			id := cpu.strId
			_, ok := pinning[ctn]
			if ok {
				pinning[ctn] = append(pinning[ctn], id)
			} else {
				pinning[ctn] = []string{id}
			}
			*cpu.count += 1
		}
	}

	// Set the new pinning
	for ctn, set := range pinning {
		// Confirm the container didn't just stop
		if !ctn.IsRunning() {
			continue
		}

		sort.Strings(set)
		err := ctn.CGroupSet("cpuset.cpus", strings.Join(set, ","))
		if err != nil {
			shared.LogError("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")})
		}
	}
}
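
Example #5 replaces Example #4's inline "0-3,7" parsing with a parseCpuset helper whose body is not shown. A rough stand-in sketch of such a parser, using the standard strings/strconv/fmt packages (an assumption about its behaviour, not the project's implementation):

// parseCpusetSketch is a hypothetical stand-in for parseCpuset: it expands a
// cpuset string such as "0-3,7" into the list of CPU ids it covers.
func parseCpusetSketch(cpuset string) ([]int, error) {
	cpus := []int{}
	for _, chunk := range strings.Split(cpuset, ",") {
		if strings.Contains(chunk, "-") {
			// Range, e.g. "0-3"
			fields := strings.SplitN(chunk, "-", 2)
			low, err := strconv.Atoi(fields[0])
			if err != nil {
				return nil, fmt.Errorf("invalid cpuset value: %s", cpuset)
			}
			high, err := strconv.Atoi(fields[1])
			if err != nil {
				return nil, fmt.Errorf("invalid cpuset value: %s", cpuset)
			}
			for i := low; i <= high; i++ {
				cpus = append(cpus, i)
			}
			continue
		}

		// Single entry, e.g. "7"
		nr, err := strconv.Atoi(chunk)
		if err != nil {
			return nil, fmt.Errorf("invalid cpuset value: %s", cpuset)
		}
		cpus = append(cpus, nr)
	}
	return cpus, nil
}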