func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withSoftImageAffinity bool) (*cluster.Container, error) { c.scheduler.Lock() defer c.scheduler.Unlock() // Ensure the name is available if cID := c.getIDFromName(name); cID != "" { return nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name) } // Associate a Swarm ID to the container we are creating. config.SetSwarmID(c.generateUniqueID()) configTemp := config if withSoftImageAffinity { configTemp.AddAffinity("image==~" + config.Image) } n, err := c.scheduler.SelectNodeForContainer(c.listNodes(), configTemp) if err != nil { return nil, err } if nn, ok := c.engines[n.ID]; ok { container, err := nn.Create(config, name, true) return container, err } return nil, nil }
// CreateContainer aka schedule a brand new container into the cluster. func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string) (*cluster.Container, error) { c.scheduler.Lock() defer c.scheduler.Unlock() // Ensure the name is avaliable if cID := c.getIDFromName(name); cID != "" { return nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name) } // Associate a Swarm ID to the container we are creating. config.SetSwarmID(c.generateUniqueID()) n, err := c.scheduler.SelectNodeForContainer(c.listNodes(), config) if err != nil { return nil, err } if nn, ok := c.engines[n.ID]; ok { container, err := nn.Create(config, name, true) if err != nil { return nil, err } st := &state.RequestedState{ ID: container.Id, Name: name, Config: config, } return container, c.store.Add(container.Id, st) } return nil, nil }
// Filter is exported func (f *ConstraintFilter) Filter(config *cluster.ContainerConfig, nodes []*node.Node) ([]*node.Node, error) { constraints, err := parseExprs(config.Constraints()) if err != nil { return nil, err } for _, constraint := range constraints { log.Debugf("matching constraint: %s %s %s", constraint.key, OPERATORS[constraint.operator], constraint.value) candidates := []*node.Node{} for _, node := range nodes { switch constraint.key { case "node": // "node" label is a special case pinning a container to a specific node. if constraint.Match(node.ID, node.Name) { candidates = append(candidates, node) } default: if constraint.Match(node.Labels[constraint.key]) { candidates = append(candidates, node) } } } if len(candidates) == 0 { if constraint.isSoft { return nodes, nil } return nil, fmt.Errorf("unable to find a node that satisfies %s%s%s", constraint.key, OPERATORS[constraint.operator], constraint.value) } nodes = candidates } return nodes, nil }
// CreateContainer aka schedule a brand new container into the cluster. func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *types.AuthConfig) (*cluster.Container, error) { poolUUID := os.Getenv("CX_POOL") log.Debug("CX: Adding label pool=" + poolUUID) config.Labels["pool"] = poolUUID container, err := c.createContainer(config, name, false, authConfig) if err != nil { var retries int64 // fails with image not found, then try to reschedule with image affinity // ENGINEAPIFIXME: The first error can be removed once dockerclient is removed bImageNotFoundError, _ := regexp.MatchString(`image \S* not found`, err.Error()) if (bImageNotFoundError || client.IsErrImageNotFound(err)) && !config.HaveNodeConstraint() { // Check if the image exists in the cluster // If exists, retry with an image affinity if c.Image(config.Image) != nil { container, err = c.createContainer(config, name, true, authConfig) retries++ } } for ; retries < c.createRetry && err != nil; retries++ { log.WithFields(log.Fields{"Name": "Swarm"}).Warnf("Failed to create container: %s, retrying", err) container, err = c.createContainer(config, name, false, authConfig) } } return container, err }
func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withSoftImageAffinity bool) (*cluster.Container, error) { c.scheduler.Lock() defer c.scheduler.Unlock() // Ensure the name is available if cID := c.getIDFromName(name); cID != "" { return nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, cID, name) } // Associate a Swarm ID to the container we are creating. config.SetSwarmID(c.generateUniqueID()) configTemp := config if withSoftImageAffinity { configTemp.AddAffinity("image==~" + config.Image) } n, err := c.scheduler.SelectNodeForContainer(c.listNodes(), configTemp) if err != nil { switch err { case strategy.ErrNoResourcesAvailable: var masterEngine *cluster.Engine for _, engine := range c.engines { for k, v := range engine.Labels { if k == "swarmmaster" && v == "true" { masterEngine = engine } } } containerConfig := &cluster.ContainerConfig{ dockerclient.ContainerConfig{ Image: "ankushagarwal11/machine", Cmd: []string{"-D", "create", "--driver=amazonec2", "--amazonec2-instance-type", "t2.micro", "--amazonec2-secret-key", "INSERT_SECRET_KEY", "--amazonec2-access-key", "INSERT_ACCESS_KEY", "--amazonec2-vpc-id", "INSERT_VPC_ID", "randommachine1"}, HostConfig: dockerclient.HostConfig{ Binds: []string{"/root/.docker:/root/.docker"}, }, }, } container, _ := masterEngine.Create(containerConfig, "random123", false) log.Info("Created container") masterEngine.Client.StartContainer(container.Id, &dockerclient.HostConfig{}) log.Info("Started container and now waiting") <-masterEngine.Client.Wait(container.Id) log.Info("Done waiting") return nil, err default: return nil, err } } if nn, ok := c.engines[n.ID]; ok { container, err := nn.Create(config, name, true) return container, err } return nil, nil }
// Filter is exported func (f *AffinityFilter) Filter(config *cluster.ContainerConfig, nodes []*node.Node, soft bool) ([]*node.Node, error) { affinities, err := parseExprs(config.Affinities()) if err != nil { return nil, err } for _, affinity := range affinities { if !soft && affinity.isSoft { continue } log.Debugf("matching affinity: %s%s%s (soft=%t)", affinity.key, OPERATORS[affinity.operator], affinity.value, affinity.isSoft) candidates := []*node.Node{} for _, node := range nodes { switch affinity.key { case "container": containers := []string{} for _, container := range node.Containers { if len(container.Names) > 0 { containers = append(containers, container.ID, strings.TrimPrefix(container.Names[0], "/")) } } if affinity.Match(containers...) { candidates = append(candidates, node) } case "image": images := []string{} for _, image := range node.Images { images = append(images, image.ID) images = append(images, image.RepoTags...) for _, tag := range image.RepoTags { repo, _ := cluster.ParseRepositoryTag(tag) images = append(images, repo) } } if affinity.Match(images...) { candidates = append(candidates, node) } default: labels := []string{} for _, container := range node.Containers { labels = append(labels, container.Labels[affinity.key]) } if affinity.Match(labels...) { candidates = append(candidates, node) } } } if len(candidates) == 0 { return nil, fmt.Errorf("unable to find a node that satisfies the affinity %s%s%s", affinity.key, OPERATORS[affinity.operator], affinity.value) } nodes = candidates } return nodes, nil }
// GetFilters returns a list of the affinities found in the container config. func (f *AffinityFilter) GetFilters(config *cluster.ContainerConfig) ([]string, error) { allAffinities := []string{} affinities, err := parseExprs(config.Affinities()) if err != nil { return nil, err } for _, affinity := range affinities { allAffinities = append(allAffinities, fmt.Sprintf("%s%s%s (soft=%t)", affinity.key, OPERATORS[affinity.operator], affinity.value, affinity.isSoft)) } return allAffinities, nil }
// GetFilters returns a list of the constraints found in the container config. func (f *ConstraintFilter) GetFilters(config *cluster.ContainerConfig) ([]string, error) { allConstraints := []string{} constraints, err := parseExprs(config.Constraints()) if err != nil { return nil, err } for _, constraint := range constraints { allConstraints = append(allConstraints, fmt.Sprintf("%s%s%s", constraint.key, OPERATORS[constraint.operator], constraint.value)) } return allConstraints, nil }
// CreateContainer aka schedule a brand new container into the cluster. func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string) (*cluster.Container, error) { container, err := c.createContainer(config, name, false) // fails with image not found, then try to reschedule with soft-image-affinity if err != nil && strings.HasSuffix(err.Error(), "not found") && !config.HaveNodeConstraint() { // Check if the image exists in the cluster // If exists, retry with a soft-image-affinity if image := c.Image(config.Image); image != nil { container, err = c.createContainer(config, name, true) } } return container, err }
// CreateContainer aka schedule a brand new container into the cluster. func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *dockerclient.AuthConfig) (*cluster.Container, error) { container, err := c.createContainer(config, name, false, authConfig) // fails with image not found, then try to reschedule with soft-image-affinity if err != nil { bImageNotFoundError, _ := regexp.MatchString(`image \S* not found`, err.Error()) if bImageNotFoundError && !config.HaveNodeConstraint() { // Check if the image exists in the cluster // If exists, retry with a soft-image-affinity if image := c.Image(config.Image); image != nil { container, err = c.createContainer(config, name, true, authConfig) } } } return container, err }
func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withSoftImageAffinity bool, authConfig *dockerclient.AuthConfig) (*cluster.Container, error) { c.scheduler.Lock() // Ensure the name is available if !c.checkNameUniqueness(name) { c.scheduler.Unlock() return nil, fmt.Errorf("Conflict: The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", name, name) } // Associate a Swarm ID to the container we are creating. swarmID := c.generateUniqueID() config.SetSwarmID(swarmID) configTemp := config if withSoftImageAffinity { configTemp.AddAffinity("image==~" + config.Image) } nodes, err := c.scheduler.SelectNodesForContainer(c.listNodes(), configTemp) if err != nil { c.scheduler.Unlock() return nil, err } n := nodes[0] engine, ok := c.engines[n.ID] if !ok { c.scheduler.Unlock() return nil, fmt.Errorf("error creating container") } c.pendingContainers[swarmID] = &pendingContainer{ Name: name, Config: config, Engine: engine, } c.scheduler.Unlock() container, err := engine.Create(config, name, true, authConfig) c.scheduler.Lock() delete(c.pendingContainers, swarmID) c.scheduler.Unlock() return container, err }
// TestCreateContainer verifies that a container carrying an "upm.ip" label
// can be created and then removed through the cluster facade backed by a
// mocked Docker client.
func TestCreateContainer(t *testing.T) {
	// create mock client
	config := new(cluster.ContainerConfig)
	config.Labels = make(map[string]string)
	config.Labels["upm.ip"] = "192.168.11.124/24:enp0s25"
	// Canned inspect result the mock returns for the created container.
	dockinfo := new(dockerclient.ContainerInfo)
	dockinfo.Id = "123456789"
	dockinfo.Config = &dockerclient.ContainerConfig{
		Labels: map[string]string{
			"upm.ip": "192.168.11.124/24:enp0s25",
		},
	}
	client := mockclient.NewMockClient()
	client.On("Info").Return(mockInfo, nil)
	client.On("Version").Return(mockVersion, nil)
	client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
	client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
	client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
	client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
	client.On("ListNetworks", mock.Anything).Return([]*dockerclient.NetworkResource{}, nil)
	// NOTE(review): duplicate ListVolumes registration — redundant, likely a
	// copy/paste leftover.
	client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
	client.On("CreateContainer", mock.Anything, mock.Anything).Return("123456789", nil)
	client.On("RemoveContainer", mock.AnythingOfType("string"), true, true).Return(nil)
	client.On("InspectContainer", "123456789").Return(dockinfo, nil)

	// create a cluster from mock client
	c := simpleCluster(client)
	ca, err := c.CreateContainer(config, "lee.test")
	assert.Nil(t, err)
	if err != nil {
		return
	}
	assert.Equal(t, ca.Id, "123456789")
	err = c.RemoveContainer(ca, true, true)
	assert.Nil(t, err)
}
// CreateContainer aka schedule a brand new container into the cluster. func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *dockerclient.AuthConfig) (*cluster.Container, error) { container, err := c.createContainer(config, name, false, authConfig) if err != nil { var retries int64 // fails with image not found, then try to reschedule with image affinity bImageNotFoundError, _ := regexp.MatchString(`image \S* not found`, err.Error()) if bImageNotFoundError && !config.HaveNodeConstraint() { // Check if the image exists in the cluster // If exists, retry with a image affinity if c.Image(config.Image) != nil { container, err = c.createContainer(config, name, true, authConfig) retries++ } } for ; retries < c.createRetry && err != nil; retries++ { log.WithFields(log.Fields{"Name": "Swarm"}).Warnf("Failed to create container: %s, retrying", err) container, err = c.createContainer(config, name, false, authConfig) } } return container, err }
func (c *Cluster) createContainer(config *cluster.ContainerConfig, name string, withImageAffinity bool, authConfig *dockerclient.AuthConfig) (*cluster.Container, error) { c.scheduler.Lock() // Ensure the name is available if !c.checkNameUniqueness(name) { c.scheduler.Unlock() return nil, fmt.Errorf("Conflict: The name %s is already assigned. You have to delete (or rename) that container to be able to assign %s to a container again.", name, name) } swarmID := config.SwarmID() if swarmID == "" { // Associate a Swarm ID to the container we are creating. swarmID = c.generateUniqueID() config.SetSwarmID(swarmID) } if network := c.Networks().Get(config.HostConfig.NetworkMode); network != nil && network.Scope == "local" { if !config.HaveNodeConstraint() { config.AddConstraint("node==~" + network.Engine.Name) } config.HostConfig.NetworkMode = network.Name } if withImageAffinity { config.AddAffinity("image==" + config.Image) } nodes, err := c.scheduler.SelectNodesForContainer(c.listNodes(), config) if withImageAffinity { config.RemoveAffinity("image==" + config.Image) } if err != nil { c.scheduler.Unlock() return nil, err } n := nodes[0] engine, ok := c.engines[n.ID] if !ok { c.scheduler.Unlock() return nil, fmt.Errorf("error creating container") } c.pendingContainers[swarmID] = &pendingContainer{ Name: name, Config: config, Engine: engine, } c.scheduler.Unlock() container, err := engine.Create(config, name, true, authConfig) c.scheduler.Lock() delete(c.pendingContainers, swarmID) c.scheduler.Unlock() return container, err }
// Support cross-host linking func (c *Cluster) processLinks(containerNode *node.Node, config *cluster.ContainerConfig) error { originalLinks := config.HostConfig.Links if originalLinks == nil || len(originalLinks) == 0 { return nil } containers := c.Containers() //Cache for the container info in linking cache := map[string](*dockerclient.ContainerInfo){} addr := containerNode.Addr var newLinks []string var newEnv []string var crossHostLinks []string for _, link := range originalLinks { //Parse the link info linkInfo := strings.Split(link, ":") name, alias := linkInfo[0], linkInfo[1] linkContainerName := "/" + name for _, target := range containers { if target.Info.Name == linkContainerName { if addr == target.Engine.Addr { log.Debug("No additional work for the container link on the same host") } else { //Update the link var err error targetInfo := cache[target.Id] if targetInfo == nil { targetInfo, err = target.Engine.InspectContainer(target.Id) if err != nil { log.Warningf("Failed to find the linked container %s: %v", target.Id, err) return err } cache[target.Id] = targetInfo } //Simulate link for container on other hosts ports := make(nat.PortSet) for p := range targetInfo.NetworkSettings.Ports { ports[nat.Port(p)] = struct{}{} } linkName := fmt.Sprintf("/%s/%s", name, alias) newLink, err := links.NewLink("", targetInfo.NetworkSettings.IPAddress, linkName, targetInfo.Config.Env, ports) //Add as cross-host links crossHostLinks = append(crossHostLinks, link) //Ignore this link from the host config link = "" env := newLink.ToEnv() newEnv = append(newEnv, env...) newHost := alias + ":" + targetInfo.NetworkSettings.IPAddress config.HostConfig.ExtraHosts = append(config.HostConfig.ExtraHosts, newHost) } break } } if link != "" { newLinks = append(newLinks, link) } } //Update the Links config.HostConfig.Links = newLinks //Update the Env config.Env = append(config.Env, newEnv...) 
//Add the Env CROSS_HOST_LINKS if crossHostLinks != nil { envCrossHostLinks := "CROSS_HOST_LINKS=" + strings.Join(crossHostLinks, ";") config.Env = append(config.Env, envCrossHostLinks) } return nil }