Example #1
func NewAd() *Ad {
	return &Ad{
		Mutex:        new(sync.Mutex),
		Title:        namesgenerator.GetRandomName(0),
		Path:         namesgenerator.GetRandomName(0),
		Impression:   0,
		ClickedUsers: []*User{},
	}
}
Example #2
func Run(c *cli.Context) {
	timeout := time.After(duration)
	a := make(chan announcement)
	defer timeTrack(time.Now(), "main", a)
	d := make(chan int)
	phils := []philosopher{}
	forks := []fork{}
	for i := PHILOS; i > 0; i-- {
		names = append(names, namesgenerator.GetRandomName(0))
	}
	log.Println(names)

	watcher(a)

	// Initialize
	for _, name := range names {
		phils = append(phils, philosopher{name: name, announce: a, dying: d})
		forks = append(forks, fork{toRight: make(chan int, 2), toLeft: make(chan int, 2)})
	}

	for i := range phils {
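		// Philosopher i takes forks[i] as the left fork and forks[i-1] as the right fork,
		// wrapping around to the last fork for i == 0 so the table forms a ring.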
		dude := &phils[i]
		dude.leftIn, dude.leftOut = forks[i].toLeft, forks[i].toRight
		if i == 0 {
			dude.rightIn, dude.rightOut = forks[len(forks)-1].toRight, forks[len(forks)-1].toLeft
		} else {
			dude.rightIn, dude.rightOut = forks[i-1].toRight, forks[i-1].toLeft
		}
	}

	// Launch the dudes
	for i := range phils {
		go phils[i].Live()
	}
	// Put forks on the table
	for i, f := range forks {
		go func(i int, f fork) {
			if i%2 == 0 {
				f.toLeft <- 1
			} else {
				f.toRight <- 1
			}
		}(i, f)

	}
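	// Wait until every philosopher has reported dying, or announce a summary when the timeout fires first.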
Wait:
	for i := 0; i < PHILOS; {
		select {
		case <-d:
			i++
		case <-timeout:
			a <- announcement{from: "Main", message: fmt.Sprintf("%v dudes died during the simulation, over the total of %v.", i, PHILOS)}
			break Wait
		}
	}
	summarize()
	return
}
Example #3
func keyGenerator(key *ConnKey) error {
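	// Try up to 10 random names, skipping any name already present in the session map.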
	for i := 0; i < 10; i++ {
		key.Name = namesgenerator.GetRandomName(i)
		if _, exist := daemon.session[*key]; exist {
			continue
		}
		return nil
	}
	return errors.New("key generator failed")
}
Example #4
func (s *DockerSuite) startContainerWithConfig(c *check.C, image string, config d.ContainerConfig) string {
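	// Default to a random container name when the config does not provide one.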
	if config.Name == "" {
		config.Name = namesgenerator.GetRandomName(10)
	}

	container := s.project.StartWithConfig(c, image, config)

	// FIXME(vdemeester) this is ugly (it's because of the / in front of the name in docker..)
	return strings.SplitAfter(container.Name, "/")[1]
}
Example #5
func (s *DockerSuite) startContainerWithConfig(c *check.C, config docker.CreateContainerOptions) string {
	if config.Name == "" {
		config.Name = namesgenerator.GetRandomName(10)
	}
	if config.Config.Labels == nil {
		config.Config.Labels = map[string]string{}
	}
	config.Config.Labels[TestLabel] = "true"

	container, err := s.client.CreateContainer(config)
	c.Assert(err, checker.IsNil, check.Commentf("Error creating a container using config %v", config))

	err = s.client.StartContainer(container.ID, &docker.HostConfig{})
	c.Assert(err, checker.IsNil, check.Commentf("Error starting container %v", container))

	return container.Name
}
Example #6
func imageToName(image string) string {
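	// Derive a container name from the image reference: keep the last path segment, append the tag
	// and a random numeric suffix, and fall back to a generated name if the result is not a valid name.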
	name := image
	fields := strings.Split(image, "/")
	if len(fields) > 1 {
		name = fields[len(fields)-1]
	}
	fields = strings.Split(name, ":")
	if len(fields) < 2 {
		name = name + "-" + utils.RandStr(10, "number")
	} else {
		name = fields[0] + "-" + fields[1] + "-" + utils.RandStr(10, "number")
	}

	validContainerNameChars := `[a-zA-Z0-9][a-zA-Z0-9_.-]`
	validContainerNamePattern := regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
	if !validContainerNamePattern.MatchString(name) {
		name = namesgenerator.GetRandomName(0)
	}
	return name
}
Example #7
func (daemon *Daemon) generateNewName(id string) (string, error) {
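	// Try up to 6 random names; if every attempt collides in the container graph, fall back to the truncated container ID.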
	var name string
	for i := 0; i < 6; i++ {
		name = namesgenerator.GetRandomName(i)
		if name[0] != '/' {
			name = "/" + name
		}

		if _, err := daemon.containerGraph.Set(name, id); err != nil {
			if !graphdb.IsNonUniqueNameError(err) {
				return "", err
			}
			continue
		}
		return name, nil
	}

	name = "/" + stringid.TruncateID(id)
	if _, err := daemon.containerGraph.Set(name, id); err != nil {
		return "", err
	}
	return name, nil
}
Example #8
func (daemon *Daemon) generateNewName(id string) (string, error) {
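	// Same retry strategy as the previous example, but names are reserved through the name index instead of the container graph.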
	var name string
	for i := 0; i < 6; i++ {
		name = namesgenerator.GetRandomName(i)
		if name[0] != '/' {
			name = "/" + name
		}

		if err := daemon.nameIndex.Reserve(name, id); err != nil {
			if err == registrar.ErrNameReserved {
				continue
			}
			return "", err
		}
		return name, nil
	}

	name = "/" + stringid.TruncateID(id)
	if err := daemon.nameIndex.Reserve(name, id); err != nil {
		return "", err
	}
	return name, nil
}
Example #9
// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
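	// Fall back to a randomly generated service name when the spec does not provide one.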
	name := s.Name
	if name == "" {
		name = namesgenerator.GetRandomName(0)
	}

	serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks))
	for _, n := range s.Networks {
		serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks))
	for _, n := range s.TaskTemplate.Networks {
		taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	spec := swarmapi.ServiceSpec{
		Annotations: swarmapi.Annotations{
			Name:   name,
			Labels: s.Labels,
		},
		Task: swarmapi.TaskSpec{
			Resources:   resourcesToGRPC(s.TaskTemplate.Resources),
			LogDriver:   driverToGRPC(s.TaskTemplate.LogDriver),
			Networks:    taskNetworks,
			ForceUpdate: s.TaskTemplate.ForceUpdate,
		},
		Networks: serviceNetworks,
	}

	containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
	if err != nil {
		return swarmapi.ServiceSpec{}, err
	}
	spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}

	restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
	if err != nil {
		return swarmapi.ServiceSpec{}, err
	}
	spec.Task.Restart = restartPolicy

	if s.TaskTemplate.Placement != nil {
		spec.Task.Placement = &swarmapi.Placement{
			Constraints: s.TaskTemplate.Placement.Constraints,
		}
	}

	if s.UpdateConfig != nil {
		var failureAction swarmapi.UpdateConfig_FailureAction
		switch s.UpdateConfig.FailureAction {
		case types.UpdateFailureActionPause, "":
			failureAction = swarmapi.UpdateConfig_PAUSE
		case types.UpdateFailureActionContinue:
			failureAction = swarmapi.UpdateConfig_CONTINUE
		default:
			return swarmapi.ServiceSpec{}, fmt.Errorf("unrecognized update failure action %s", s.UpdateConfig.FailureAction)
		}
		spec.Update = &swarmapi.UpdateConfig{
			Parallelism:     s.UpdateConfig.Parallelism,
			Delay:           *ptypes.DurationProto(s.UpdateConfig.Delay),
			FailureAction:   failureAction,
			MaxFailureRatio: s.UpdateConfig.MaxFailureRatio,
		}
		if s.UpdateConfig.Monitor != 0 {
			spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor)
		}
	}

	if s.EndpointSpec != nil {
		if s.EndpointSpec.Mode != "" &&
			s.EndpointSpec.Mode != types.ResolutionModeVIP &&
			s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
			return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
		}

		spec.Endpoint = &swarmapi.EndpointSpec{}

		spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])

		for _, portConfig := range s.EndpointSpec.Ports {
			spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
				Name:          portConfig.Name,
				Protocol:      swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
				PublishMode:   swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]),
				TargetPort:    portConfig.TargetPort,
				PublishedPort: portConfig.PublishedPort,
			})
		}
	}

	// Mode
	if s.Mode.Global != nil && s.Mode.Replicated != nil {
		return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode")
	}

	if s.Mode.Global != nil {
		spec.Mode = &swarmapi.ServiceSpec_Global{
			Global: &swarmapi.GlobalService{},
		}
	} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
		spec.Mode = &swarmapi.ServiceSpec_Replicated{
			Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
		}
	} else {
		spec.Mode = &swarmapi.ServiceSpec_Replicated{
			Replicated: &swarmapi.ReplicatedService{Replicas: 1},
		}
	}

	return spec, nil
}
Example #10
// CreateServer creates a server using API based on typical server fields
func CreateServer(api *ScalewayAPI, c *ConfigCreateServer) (string, error) {
	if c.CommercialType == "" {
		c.CommercialType = os.Getenv("SCW_COMMERCIAL_TYPE")
		if c.CommercialType == "" {
			c.CommercialType = "C1"
		}
	}

	if c.Name == "" {
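		// No name supplied: use a random name, replacing underscores with hyphens.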
		c.Name = strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1)
	}

	var server ScalewayServerDefinition

	server.CommercialType = c.CommercialType
	server.Volumes = make(map[string]string)
	server.DynamicIPRequired = &c.DynamicIPRequired
	if c.IP != "" {
		if anonuuid.IsUUID(c.IP) == nil {
			server.PublicIP = c.IP
		} else {
			ips, err := api.GetIPS()
			if err != nil {
				return "", err
			}
			for _, ip := range ips.IPS {
				if ip.Address == c.IP {
					server.PublicIP = ip.ID
					break
				}
			}
			if server.PublicIP == "" {
				return "", fmt.Errorf("IP address %v not found", c.IP)
			}
		}
	}
	server.Tags = []string{}
	if c.Env != "" {
		server.Tags = strings.Split(c.Env, " ")
	}
	if c.AdditionalVolumes != "" {
		volumes := strings.Split(c.AdditionalVolumes, " ")
		for i := range volumes {
			volumeID, err := CreateVolumeFromHumanSize(api, volumes[i])
			if err != nil {
				return "", err
			}

			volumeIDx := fmt.Sprintf("%d", i+1)
			server.Volumes[volumeIDx] = *volumeID
		}
	}
	// FIXME build images only on ARM ?
	imageIdentifier := &ScalewayImageIdentifier{
		Arch:   "arm",
		Region: "fr-1",
	}
	server.Name = c.Name
	inheritingVolume := false
	_, err := humanize.ParseBytes(c.ImageName)
	if err == nil {
		// Create a new root volume
		volumeID, err := CreateVolumeFromHumanSize(api, c.ImageName)
		if err != nil {
			return "", err
		}
		server.Volumes["0"] = *volumeID
	} else {
		// Use an existing image
		// FIXME: handle snapshots
		inheritingVolume = true
		imageIdentifier, err = api.GetImageID(c.ImageName)
		if err != nil {
			return "", err
		}
		if imageIdentifier.Identifier != "" {
			server.Image = &imageIdentifier.Identifier
		} else {
			snapshotID, err := api.GetSnapshotID(c.ImageName)
			if err != nil {
				return "", err
			}
			snapshot, err := api.GetSnapshot(snapshotID)
			if err != nil {
				return "", err
			}
			if snapshot.BaseVolume.Identifier == "" {
				return "", fmt.Errorf("snapshot %v does not have base volume", snapshot.Name)
			}
			server.Volumes["0"] = snapshot.BaseVolume.Identifier
		}
	}
	if c.Bootscript != "" {
		bootscript, err := api.GetBootscriptID(c.Bootscript, imageIdentifier.Arch)
		if err != nil {
			return "", err
		}
		server.Bootscript = &bootscript
	}
	serverID, err := api.PostServer(server)
	if err != nil {
		return "", err
	}

	// For inherited volumes, we prefix the name with server hostname
	if inheritingVolume {
		createdServer, err := api.GetServer(serverID)
		if err != nil {
			return "", err
		}
		currentVolume := createdServer.Volumes["0"]

		var volumePayload ScalewayVolumePutDefinition
		newName := fmt.Sprintf("%s-%s", createdServer.Hostname, currentVolume.Name)
		volumePayload.Name = &newName
		volumePayload.CreationDate = &currentVolume.CreationDate
		volumePayload.Organization = &currentVolume.Organization
		volumePayload.Server.Identifier = &currentVolume.Server.Identifier
		volumePayload.Server.Name = &currentVolume.Server.Name
		volumePayload.Identifier = &currentVolume.Identifier
		volumePayload.Size = &currentVolume.Size
		volumePayload.ModificationDate = &currentVolume.ModificationDate
		volumePayload.ExportURI = &currentVolume.ExportURI
		volumePayload.VolumeType = &currentVolume.VolumeType

		err = api.PutVolume(currentVolume.Identifier, volumePayload)
		if err != nil {
			return "", err
		}
	}

	return serverID, nil
}
Example #11
	. "github.com/popcorp/doenv/lib"
	"time"
)

var CreateCommand = cli.Command{
	Name:      "create",
	Aliases:   []string{"c", "init", "setup"},
	Usage:     "Create a droplet (Initial setup)",
	ArgsUsage: "<droplet name> [image] [region] [size]",
	Action: func(c *cli.Context) {
		var DropletName string
		var Image string
		var Region string
		var Size string
		if len(c.Args()) < 1 {
			DropletName = namesgenerator.GetRandomName(1)
		} else {
			DropletName = c.Args().Get(0)
		}
		if len(c.Args()) < 2 {
			Image = "debian-8-x64"
		} else {
			Image = c.Args().Get(1)
		}
		if len(c.Args()) < 3 {
			Size = "512mb"
		} else {
			Size = c.Args().Get(2)
		}
		if len(c.Args()) < 4 {
			Region = "nyc2"
Example #12
// CreateServer creates a server using API based on typical server fields
func CreateServer(api *ScalewayAPI, imageName string, name string, bootscript string, env string, additionalVolumes string) (string, error) {
	if name == "" {
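		// Generate a random, hyphenated name when none is given.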
		name = strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1)
	}

	var server ScalewayServerDefinition
	server.Volumes = make(map[string]string)

	server.Tags = []string{}
	if env != "" {
		server.Tags = strings.Split(env, " ")
	}
	if additionalVolumes != "" {
		volumes := strings.Split(additionalVolumes, " ")
		for i := range volumes {
			volumeID, err := CreateVolumeFromHumanSize(api, volumes[i])
			if err != nil {
				return "", err
			}

			volumeIDx := fmt.Sprintf("%d", i+1)
			server.Volumes[volumeIDx] = *volumeID
		}
	}
	server.Name = name
	if bootscript != "" {
		bootscript := api.GetBootscriptID(bootscript)
		server.Bootscript = &bootscript
	}

	inheritingVolume := false
	_, err := humanize.ParseBytes(imageName)
	if err == nil {
		// Create a new root volume
		volumeID, err := CreateVolumeFromHumanSize(api, imageName)
		if err != nil {
			return "", err
		}
		server.Volumes["0"] = *volumeID
	} else {
		// Use an existing image
		// FIXME: handle snapshots
		inheritingVolume = true
		image := api.GetImageID(imageName, false)
		if image != "" {
			server.Image = &image
		} else {
			snapshotID := api.GetSnapshotID(imageName)
			snapshot, err := api.GetSnapshot(snapshotID)
			if err != nil {
				return "", err
			}
			if snapshot.BaseVolume.Identifier == "" {
				return "", fmt.Errorf("snapshot %v does not have base volume", snapshot.Name)
			}
			server.Volumes["0"] = snapshot.BaseVolume.Identifier
		}
	}

	serverID, err := api.PostServer(server)
	if err != nil {
		return "", err
	}

	// For inherited volumes, we prefix the name with server hostname
	if inheritingVolume {
		createdServer, err := api.GetServer(serverID)
		if err != nil {
			return "", err
		}
		currentVolume := createdServer.Volumes["0"]

		var volumePayload ScalewayVolumePutDefinition
		newName := fmt.Sprintf("%s-%s", createdServer.Hostname, currentVolume.Name)
		volumePayload.Name = &newName
		volumePayload.CreationDate = &currentVolume.CreationDate
		volumePayload.Organization = &currentVolume.Organization
		volumePayload.Server.Identifier = &currentVolume.Server.Identifier
		volumePayload.Server.Name = &currentVolume.Server.Name
		volumePayload.Identifier = &currentVolume.Identifier
		volumePayload.Size = &currentVolume.Size
		volumePayload.ModificationDate = &currentVolume.ModificationDate
		volumePayload.ExportURI = &currentVolume.ExportURI
		volumePayload.VolumeType = &currentVolume.VolumeType

		err = api.PutVolume(currentVolume.Identifier, volumePayload)
		if err != nil {
			return "", err
		}
	}

	return serverID, nil
}
Example #13
func (ad *Advertiser) NewSlot() *Slot {
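	// Name the new slot "<advertiser id>-<random name>" and register it with this advertiser.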
	s := NewSlot(fmt.Sprintf("%d-%s", ad.Id, namesgenerator.GetRandomName(0)))
	s.Advertiser = ad
	ad.Slots = append(ad.Slots, s)
	return s
}
Example #14
// CreateServer creates a server using API based on typical server fields
func CreateServer(api *ScalewayAPI, c *ConfigCreateServer) (string, error) {
	commercialType := os.Getenv("SCW_COMMERCIAL_TYPE")
	if commercialType == "" {
		commercialType = c.CommercialType
	}
	if len(commercialType) < 2 {
		return "", errors.New("Invalid commercial type")
	}

	if c.Name == "" {
		c.Name = strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1)
	}

	var server ScalewayServerDefinition

	server.CommercialType = commercialType
	server.Volumes = make(map[string]string)
	server.DynamicIPRequired = &c.DynamicIPRequired
	server.EnableIPV6 = c.EnableIPV6
	if commercialType == "" {
		return "", errors.New("You need to specify a commercial-type")
	}
	if c.IP != "" {
		if anonuuid.IsUUID(c.IP) == nil {
			server.PublicIP = c.IP
		} else {
			ips, err := api.GetIPS()
			if err != nil {
				return "", err
			}
			for _, ip := range ips.IPS {
				if ip.Address == c.IP {
					server.PublicIP = ip.ID
					break
				}
			}
			if server.PublicIP == "" {
				return "", fmt.Errorf("IP address %v not found", c.IP)
			}
		}
	}
	server.Tags = []string{}
	if c.Env != "" {
		server.Tags = strings.Split(c.Env, " ")
	}
	switch c.CommercialType {
	case "VC1M":
		if c.AdditionalVolumes == "" {
			c.AdditionalVolumes = "50G"
			log.Debugf("This server needs at least 50G")
		}
	case "VC1L":
		if c.AdditionalVolumes == "" {
			c.AdditionalVolumes = "150G"
			log.Debugf("This server needs at least 150G")
		}
	}
	if c.AdditionalVolumes != "" {
		volumes := strings.Split(c.AdditionalVolumes, " ")
		for i := range volumes {
			volumeID, err := CreateVolumeFromHumanSize(api, volumes[i])
			if err != nil {
				return "", err
			}

			volumeIDx := fmt.Sprintf("%d", i+1)
			server.Volumes[volumeIDx] = *volumeID
		}
	}
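	// Determine the target architecture: honor SCW_TARGET_ARCH if set, otherwise derive it from the commercial type prefix.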
	arch := os.Getenv("SCW_TARGET_ARCH")
	if arch == "" {
		server.CommercialType = strings.ToUpper(server.CommercialType)
		switch server.CommercialType[:2] {
		case "C1":
			arch = "arm"
		case "C2", "VC":
			arch = "x86_64"
		default:
			return "", fmt.Errorf("%s wrong commercial type", server.CommercialType)
		}
	}
	region := os.Getenv("SCW_TARGET_REGION")
	if region == "" {
		region = "fr-1"
	}
	imageIdentifier := &ScalewayImageIdentifier{
		Arch:   arch,
		Region: region,
	}
	server.Name = c.Name
	inheritingVolume := false
	_, err := humanize.ParseBytes(c.ImageName)
	if err == nil {
		// Create a new root volume
		volumeID, errCreateVol := CreateVolumeFromHumanSize(api, c.ImageName)
		if errCreateVol != nil {
			return "", errCreateVol
		}
		server.Volumes["0"] = *volumeID
	} else {
		// Use an existing image
		inheritingVolume = true
		if anonuuid.IsUUID(c.ImageName) == nil {
			server.Image = &c.ImageName
		} else {
			imageIdentifier, err = api.GetImageID(c.ImageName, arch)
			if err != nil {
				return "", err
			}
			if imageIdentifier.Identifier != "" {
				server.Image = &imageIdentifier.Identifier
			} else {
				snapshotID, errGetSnapID := api.GetSnapshotID(c.ImageName)
				if errGetSnapID != nil {
					return "", errGetSnapID
				}
				snapshot, errGetSnap := api.GetSnapshot(snapshotID)
				if errGetSnap != nil {
					return "", errGetSnap
				}
				if snapshot.BaseVolume.Identifier == "" {
					return "", fmt.Errorf("snapshot %v does not have base volume", snapshot.Name)
				}
				server.Volumes["0"] = snapshot.BaseVolume.Identifier
			}
		}
	}

	if c.Bootscript != "" {
		bootscript := ""

		if anonuuid.IsUUID(c.Bootscript) == nil {
			bootscript = c.Bootscript
		} else {
			var errGetBootScript error

			bootscript, errGetBootScript = api.GetBootscriptID(c.Bootscript, imageIdentifier.Arch)
			if errGetBootScript != nil {
				return "", errGetBootScript
			}
		}
		server.Bootscript = &bootscript
	}
	serverID, err := api.PostServer(server)
	if err != nil {
		return "", err
	}

	// For inherited volumes, we prefix the name with server hostname
	if inheritingVolume {
		createdServer, err := api.GetServer(serverID)
		if err != nil {
			return "", err
		}
		currentVolume := createdServer.Volumes["0"]
		size := uint64(currentVolume.Size.(float64))

		var volumePayload ScalewayVolumePutDefinition
		newName := fmt.Sprintf("%s-%s", createdServer.Hostname, currentVolume.Name)
		volumePayload.Name = &newName
		volumePayload.CreationDate = &currentVolume.CreationDate
		volumePayload.Organization = &currentVolume.Organization
		volumePayload.Server.Identifier = &currentVolume.Server.Identifier
		volumePayload.Server.Name = &currentVolume.Server.Name
		volumePayload.Identifier = &currentVolume.Identifier
		volumePayload.Size = &size
		volumePayload.ModificationDate = &currentVolume.ModificationDate
		volumePayload.ExportURI = &currentVolume.ExportURI
		volumePayload.VolumeType = &currentVolume.VolumeType

		err = api.PutVolume(currentVolume.Identifier, volumePayload)
		if err != nil {
			return "", err
		}
	}

	return serverID, nil
}
Example #15
// validateCreateConfig() checks the parameters for ContainerCreate().
// It may "fix up" the config param passed into ContainerCreate() if needed.
func validateCreateConfig(config *types.ContainerCreateConfig) error {
	defer trace.End(trace.Begin("Container.validateCreateConfig"))

	// process cpucount here
	var cpuCount int64 = DefaultCPUs

	// support windows client
	if config.HostConfig.CPUCount > 0 {
		cpuCount = config.HostConfig.CPUCount
	} else {
		// we hijack --cpuset-cpus in the non-windows case
		if config.HostConfig.CpusetCpus != "" {
			cpus := strings.Split(config.HostConfig.CpusetCpus, ",")
			if c, err := strconv.Atoi(cpus[0]); err == nil {
				cpuCount = int64(c)
			} else {
				return fmt.Errorf("Error parsing CPU count: %s", err)
			}
		}
	}
	config.HostConfig.CPUCount = cpuCount

	// fix-up cpu/memory settings here
	if cpuCount < MinCPUs {
		config.HostConfig.CPUCount = MinCPUs
	}
	log.Infof("Container CPU count: %d", config.HostConfig.CPUCount)

	// convert from bytes to MiB for vsphere
	memoryMB := config.HostConfig.Memory / units.MiB
	if memoryMB == 0 {
		memoryMB = MemoryDefaultMB
	} else if memoryMB < MemoryMinMB {
		memoryMB = MemoryMinMB
	}

	// check that memory is aligned
	if remainder := memoryMB % MemoryAlignMB; remainder != 0 {
		log.Warnf("Default container VM memory must be %d aligned for hotadd, rounding up.", MemoryAlignMB)
		memoryMB += MemoryAlignMB - remainder
	}

	config.HostConfig.Memory = memoryMB
	log.Infof("Container memory: %d MB", config.HostConfig.Memory)

	if config.NetworkingConfig == nil {
		config.NetworkingConfig = &dnetwork.NetworkingConfig{}
	}

	if config.HostConfig == nil || config.Config == nil {
		return BadRequestError("invalid config")
	}

	// validate port bindings
	if config.HostConfig != nil {
		var ips []string
		if addrs, err := externalIPv4Addrs(); err != nil {
			log.Warnf("could not get address for external interface: %s", err)
		} else {
			ips = make([]string, len(addrs))
			for i := range addrs {
				ips[i] = addrs[i].IP.String()
			}
		}

		for _, pbs := range config.HostConfig.PortBindings {
			for _, pb := range pbs {
				if pb.HostIP != "" && pb.HostIP != "0.0.0.0" {
					// check if specified host ip equals any of the addresses on the "client" interface
					found := false
					for _, i := range ips {
						if i == pb.HostIP {
							found = true
							break
						}
					}
					if !found {
						return InternalServerError("host IP for port bindings is only supported for 0.0.0.0 and the external interface IP address")
					}
				}

				start, end, _ := nat.ParsePortRangeToInt(pb.HostPort)
				if start != end {
					return InternalServerError("host port ranges are not supported for port bindings")
				}
			}
		}
	}

	// TODO(jzt): users other than root are not currently supported
	// We should check for USER in config.Config.Env once we support Dockerfiles.
	if config.Config.User != "" && config.Config.User != "root" {
		return InternalServerError("Failed to create container - users other than root are not currently supported")
	}

	// https://github.com/vmware/vic/issues/1378
	if len(config.Config.Entrypoint) == 0 && len(config.Config.Cmd) == 0 {
		return derr.NewRequestNotFoundError(fmt.Errorf("No command specified"))
	}

	// Was a name provided - if not create a friendly name
	if config.Name == "" {
		// TODO: Assume we could have a name collision here: need to
		// provide validation / retry CDG June 9th 2016
		config.Name = namesgenerator.GetRandomName(0)
	}

	return nil
}
Example #16
// getRandomName generates a random, human-readable name to ease identification
// of different test resources.
func getRandomName() string {
	// Remove characters that aren't allowed in hostnames for machines allocated
	// by Terraform.
	return strings.Replace(namesgenerator.GetRandomName(0), "_", "", -1)
}
Example #17
func main() {
	fmt.Println(namesgenerator.GetRandomName(0))
}
Example #18
// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
	name := s.Name
	if name == "" {
		name = namesgenerator.GetRandomName(0)
	}

	networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks))
	for _, n := range s.Networks {
		networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
	}

	spec := swarmapi.ServiceSpec{
		Annotations: swarmapi.Annotations{
			Name:   name,
			Labels: s.Labels,
		},
		Task: swarmapi.TaskSpec{
			Resources: resourcesToGRPC(s.TaskTemplate.Resources),
			LogDriver: driverToGRPC(s.TaskTemplate.LogDriver),
		},
		Networks: networks,
	}

	containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
	if err != nil {
		return swarmapi.ServiceSpec{}, err
	}
	spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}

	restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
	if err != nil {
		return swarmapi.ServiceSpec{}, err
	}
	spec.Task.Restart = restartPolicy

	if s.TaskTemplate.Placement != nil {
		spec.Task.Placement = &swarmapi.Placement{
			Constraints: s.TaskTemplate.Placement.Constraints,
		}
	}

	if s.UpdateConfig != nil {
		spec.Update = &swarmapi.UpdateConfig{
			Parallelism: s.UpdateConfig.Parallelism,
			Delay:       *ptypes.DurationProto(s.UpdateConfig.Delay),
		}
	}

	if s.EndpointSpec != nil {
		if s.EndpointSpec.Mode != "" &&
			s.EndpointSpec.Mode != types.ResolutionModeVIP &&
			s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
			return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
		}

		spec.Endpoint = &swarmapi.EndpointSpec{}

		spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])

		for _, portConfig := range s.EndpointSpec.Ports {
			spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
				Name:          portConfig.Name,
				Protocol:      swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
				TargetPort:    portConfig.TargetPort,
				PublishedPort: portConfig.PublishedPort,
			})
		}
	}

	// Mode
	if s.Mode.Global != nil {
		spec.Mode = &swarmapi.ServiceSpec_Global{
			Global: &swarmapi.GlobalService{},
		}
	} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
		spec.Mode = &swarmapi.ServiceSpec_Replicated{
			Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
		}
	} else {
		spec.Mode = &swarmapi.ServiceSpec_Replicated{
			Replicated: &swarmapi.ReplicatedService{Replicas: 1},
		}
	}

	return spec, nil
}