Example #1
func AskForPorts(portCount int) ResourceAsker {
	ret := func(resources []*mesos.Resource) ([]*mesos.Resource, *mesos.Resource, bool) {
		newResources := make([]*mesos.Resource, len(resources))
		copy(newResources, resources)
		for idx, resource := range resources {
			if resource.GetName() == "ports" {
				ports := RangesToArray(resource.GetRanges().GetRange())

				// Check whether this resource holds at least portCount ports.
				if len(ports) >= portCount {
					// Pick a random starting point for the block of ports we take.
					// rand.Intn panics when its argument is zero, so handle the
					// exact-fit case explicitly.
					var sliceLoc int
					if len(ports) == portCount {
						sliceLoc = 0
					} else {
						sliceLoc = rand.Intn(len(ports) - portCount)
					}
					takingPorts := make([]int64, portCount)
					copy(takingPorts, ports[sliceLoc:(sliceLoc+portCount)])
					leavingPorts := make([]int64, len(ports)-portCount)
					copy(leavingPorts, ports[:sliceLoc])
					copy(leavingPorts[sliceLoc:], ports[(sliceLoc+portCount):])
					newResources[idx] = util.NewRangesResource("ports", ArrayToRanges(leavingPorts))
					ask := util.NewRangesResource("ports", ArrayToRanges(takingPorts))
					return newResources, ask, true
				}
			}
		}
		return resources, nil, false
	}
	return ret
}
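AskForPorts leans on two helpers, RangesToArray and ArrayToRanges, that are not included in these examples. A minimal sketch of what they might look like, assuming the port list stays sorted and the helpers do nothing more than flatten mesos.Value_Range values into individual ports and fold them back into contiguous ranges (hypothetical code, not the original implementation):

func RangesToArray(ranges []*mesos.Value_Range) []int64 {
	// Flatten every [Begin..End] range into individual ports.
	ports := []int64{}
	for _, r := range ranges {
		for port := r.GetBegin(); port <= r.GetEnd(); port++ {
			ports = append(ports, int64(port))
		}
	}
	return ports
}

func ArrayToRanges(ports []int64) []*mesos.Value_Range {
	// Fold an already-sorted port list back into contiguous ranges.
	if len(ports) == 0 {
		return []*mesos.Value_Range{}
	}
	ranges := []*mesos.Value_Range{util.NewValueRange(uint64(ports[0]), uint64(ports[0]))}
	for _, port := range ports[1:] {
		last := ranges[len(ranges)-1]
		if uint64(port) == last.GetEnd()+1 {
			last.End = proto.Uint64(uint64(port))
		} else {
			ranges = append(ranges, util.NewValueRange(uint64(port), uint64(port)))
		}
	}
	return ranges
}

Under these assumptions the slice taken in AskForPorts folds back into at most one range, which is what TestGoodPortAsk (Example #4) expects.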
Example #2
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #3
func (offerHelper *OfferHelper) apply(against *ResourceGroup, cpus float64, mem float64, disk float64, ports int,
	principal string, role string, persistenceID string, containerPath string) []*mesos.Resource {

	ask := []*mesos.Resource{}

	if cpus > 0 {
		against.Cpus = against.Cpus - cpus
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("cpus", cpus, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("cpus", cpus))
		}
	}

	if mem > 0 {
		against.Mem = against.Mem - mem
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("mem", mem, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("mem", mem))
		}
	}

	if disk > 0 {
		against.Disk = against.Disk - disk
		if principal != "" && role != "" && containerPath != "" && persistenceID != "" {
			ask = append(ask, util.NewVolumeResourceWithReservation(disk, containerPath, persistenceID, mesos.Volume_RW.Enum(), principal, role))
		} else if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("disk", disk, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("disk", disk))
		}
	}

	if ports > 0 {
		sliceLoc := 0
		if len(against.Ports)-ports > 0 {
			sliceLoc = rand.Intn(len(against.Ports) - ports)
		}
		takingPorts := make([]int64, ports)
		copy(takingPorts, against.Ports[sliceLoc:(sliceLoc+ports)])
		leavingPorts := make([]int64, len(against.Ports)-ports)
		copy(leavingPorts, against.Ports[:sliceLoc])
		copy(leavingPorts[sliceLoc:], against.Ports[(sliceLoc+ports):])

		against.Ports = leavingPorts
		if principal != "" && role != "" {
			ask = append(ask, util.AddResourceReservation(util.NewRangesResource("ports", ArrayToRanges(takingPorts)), principal, role))
		} else {
			ask = append(ask, util.NewRangesResource("ports", ArrayToRanges(takingPorts)))
		}
	}

	return ask
}
Example #4
func TestGoodPortAsk(t *testing.T) {
	rand.Seed(10)
	assert := assert.New(t)
	offer := generateResourceOffer()
	askFun := AskForPorts(100)
	remaining, resourceAsk, success := askFun(offer)
	assert.Equal(true, success)
	assert.Equal(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31105, 31204)}), resourceAsk)
	remainingPorts := util.FilterResources(remaining, func(res *mesos.Resource) bool {
		return res.GetName() == "ports"
	})
	assert.Equal([]*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31104), util.NewValueRange(31205, 32000)})}, remainingPorts)

}
Example #5
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
Example #6
func (s *MemcacheScheduler) newTaskPrototype(offer *mesos.Offer) *mesos.TaskInfo {
	taskID := s.tasksCreated
	s.tasksCreated++
	portRange := getPortRange(offer)
	portRange.End = portRange.Begin
	return &mesos.TaskInfo{
		TaskId: &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("Memcache-%d", taskID)),
		},
		SlaveId: offer.SlaveId,
		Resources: []*mesos.Resource{
			mesosutil.NewScalarResource("cpus", TASK_CPUS),
			mesosutil.NewScalarResource("mem", TASK_MEM),
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{portRange}),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   &util.MEMCACHE_CONTAINER,
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				PortMappings: []*mesos.ContainerInfo_DockerInfo_PortMapping{
					&mesos.ContainerInfo_DockerInfo_PortMapping{
						ContainerPort: &MEMCACHE_PORT,
						Protocol:      &MEMCACHE_PROTOCOL,
						HostPort:      &MEMCACHE_HOST_PORT,
					},
				},
			},
		},
	}
}
Example #7
func TestResource(t *testing.T) {
	mem := Resource(util.NewScalarResource("mem", 512))
	if mem != "mem:512.00" {
		t.Errorf(`Resource(util.NewScalarResource("mem", 512)) != "mem:512.00"; actual %s`, mem)
	}

	ports := Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}))
	if ports != "ports:[31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})) != "ports:[31000..32000]"; actual %s`, ports)
	}

	ports = Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)}))
	if ports != "ports:[4000..7000][31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)})) != "ports:[4000..7000][31000..32000]"; actual %s`, ports)
	}
}
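The Resource helper under test is not shown here. One way it could be written, consistent with the strings the test expects (scalars as name:value with two decimals, ranges as name:[begin..end] segments); this is an assumed sketch rather than the original implementation:

func Resource(resource *mesos.Resource) string {
	switch resource.GetType() {
	case mesos.Value_SCALAR:
		// e.g. "mem:512.00"
		return fmt.Sprintf("%s:%.2f", resource.GetName(), resource.GetScalar().GetValue())
	case mesos.Value_RANGES:
		// e.g. "ports:[4000..7000][31000..32000]"
		str := resource.GetName() + ":"
		for _, r := range resource.GetRanges().GetRange() {
			str += fmt.Sprintf("[%d..%d]", r.GetBegin(), r.GetEnd())
		}
		return str
	default:
		return resource.String()
	}
}

Resources (Example #20) and Offer (Example #22) presumably just join these per-resource strings with single spaces.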
Example #8
func TestBadPortAsk(t *testing.T) {
	assert := assert.New(t)
	offer := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	_, _, success := AskForPorts(100)(offer)

	assert.Equal(false, success)
}
Example #9
func TestTotalPortAsk(t *testing.T) {
	assert := assert.New(t)
	askfun := AskForPorts(1)
	offer := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	newOffer, _, success := askfun(offer)
	assert.Equal(0, len(newOffer[0].GetRanges().GetRange()))
	assert.Equal(true, success)
}
Example #10
func generateResourceOffer() []*mesos.Resource {
	val := []*mesos.Resource{
		util.NewScalarResource("cpus", 3),
		util.NewScalarResource("disk", 73590),
		util.NewScalarResource("mem", 1985),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}),
	}
	return val
}
Example #11
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}
		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

		tasks := []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
Example #12
// Offering some cpus and memory and the 8000-9000 port range
func NewTestOffer(id string) *mesos.Offer {
	hostname := "some_hostname"
	cpus := util.NewScalarResource("cpus", 3.75)
	mem := util.NewScalarResource("mem", 940)
	var port8000 uint64 = 8000
	var port9000 uint64 = 9000
	ports8000to9000 := mesos.Value_Range{Begin: &port8000, End: &port9000}
	ports := util.NewRangesResource("ports", []*mesos.Value_Range{&ports8000to9000})
	return &mesos.Offer{
		Id:        util.NewOfferID(id),
		Hostname:  &hostname,
		SlaveId:   util.NewSlaveID(hostname),
		Resources: []*mesos.Resource{cpus, mem, ports},
	}
}
Example #13
func NewOffer(id string) *mesos.Offer {
	return &mesos.Offer{
		Id:          util.NewOfferID(id),
		FrameworkId: util.NewFrameworkID("test-etcd-framework"),
		SlaveId:     util.NewSlaveID("slave-" + id),
		Hostname:    proto.String("localhost"),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 1),
			util.NewScalarResource("mem", 256),
			util.NewScalarResource("disk", 4096),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(0), uint64(65535)),
			}),
		},
	}
}
Example #14
func (this *ElodinaTransportScheduler) launchNewTask(offers []*OfferAndResources) (*mesos.Offer, *mesos.TaskInfo) {
	for _, offer := range offers {
		configBlob, err := json.Marshal(this.config.ConsumerConfig)
		if err != nil {
			break
		}
		log.Logger.Debug("%v", offer)
		if this.hasEnoughResources(offer) {
			port := this.takePort(&offer.RemainingPorts)
			taskPort := &mesos.Value_Range{Begin: port, End: port}
			taskId := &mesos.TaskID{
				Value: proto.String(fmt.Sprintf("elodina-mirror-%s-%d", *offer.Offer.Hostname, *port)),
			}

			cpuTaken := this.config.CpuPerTask * float64(this.config.ThreadsPerTask)
			memoryTaken := this.config.MemPerTask * float64(this.config.ThreadsPerTask)
			task := &mesos.TaskInfo{
				Name:     proto.String(taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.Offer.SlaveId,
				Executor: this.createExecutor(len(this.taskIdToTaskState), *port),
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", cpuTaken),
					util.NewScalarResource("mem", memoryTaken),
					util.NewRangesResource("ports", []*mesos.Value_Range{taskPort}),
				},
				Data: configBlob,
			}
			log.Logger.Debug("Prepared task: %s with offer %s for launch. Ports: %s", task.GetName(), offer.Offer.Id.GetValue(), taskPort)

			transport := NewElodinaTransport(fmt.Sprintf("http://%s:%d/assign", *offer.Offer.Hostname, *port), task, this.config.StaleDuration)
			this.taskIdToTaskState[taskId.GetValue()] = transport

			offer.RemainingPorts = offer.RemainingPorts[1:]
			offer.RemainingCpu -= cpuTaken
			offer.RemainingMemory -= memoryTaken

			return offer.Offer, task
		} else {
			log.Logger.Info("Not enough CPU and memory")
		}
	}

	return nil, nil
}
Example #15
func (t *task) build(slaveID string) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "bridge", "":
		for containerPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				containerInfo := strings.SplitN(containerPort, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				hostPort, err := strconv.ParseUint(binding.HostPort, 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}
Example #16
func TestWildcardHostPortMatching(t *testing.T) {
	t.Parallel()
	task := fakePodTask("foo", nil, nil)
	pod := &task.Pod

	offer := &mesos.Offer{}
	mapping, err := WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for an empty offer and a pod without ports: %v", pod)
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			newPortsResource("*", 1, 1),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for a pod without ports: %v", pod)
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 123,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err == nil {
		t.Fatalf("expected error instead of mappings: %#v", mapping)
	} else if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 123,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 1,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if len(err.Ports) != 0 {
		t.Fatal("Expected port allocation error for wildcard port")
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			newPortsResource("*", 1, 2),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid := 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 2 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 1 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}

	//-- port mapping in case of multiple discontinuous port ranges in mesos offer
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 0,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(1, 1), mesosutil.NewValueRange(3, 5)}),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid = 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 1 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 3 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}
}
Example #17
// mesos.Scheduler interface method.
// Invoked when resources have been offered to this framework.
func (this *TransformScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	fmt.Println("Received offers")

	if int(this.runningInstances) > this.config.Instances {
		toKill := int(this.runningInstances) - this.config.Instances
		for i := 0; i < toKill; i++ {
			driver.KillTask(this.tasks[i])
		}

		this.tasks = this.tasks[toKill:]
	}

	offersAndTasks := make(map[*mesos.Offer][]*mesos.TaskInfo)
	for _, offer := range offers {
		cpus := getScalarResources(offer, "cpus")
		mems := getScalarResources(offer, "mem")
		ports := getRangeResources(offer, "ports")

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo
		for int(this.getRunningInstances()) < this.config.Instances && this.config.CpuPerTask <= remainingCpus && this.config.MemPerTask <= remainingMems && len(ports) > 0 {
			port := this.takePort(&ports)
			taskPort := &mesos.Value_Range{Begin: port, End: port}
			taskId := &mesos.TaskID{
				Value: proto.String(fmt.Sprintf("golang-%s-%d", *offer.Hostname, *port)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String(taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: this.createExecutor(this.getRunningInstances(), *port),
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", float64(this.config.CpuPerTask)),
					util.NewScalarResource("mem", float64(this.config.MemPerTask)),
					util.NewRangesResource("ports", []*mesos.Value_Range{taskPort}),
				},
			}
			fmt.Printf("Prepared task: %s with offer %s for launch. Ports: %s\n", task.GetName(), offer.Id.GetValue(), taskPort)

			tasks = append(tasks, task)
			remainingCpus -= this.config.CpuPerTask
			remainingMems -= this.config.MemPerTask
			ports = ports[1:]

			this.tasks = append(this.tasks, taskId)
			this.incRunningInstances()
		}
		fmt.Printf("Launching %d tasks for offer %s\n", len(tasks), offer.Id.GetValue())
		offersAndTasks[offer] = tasks
	}

	unlaunchedTasks := this.config.Instances - int(this.getRunningInstances())
	if unlaunchedTasks > 0 {
		fmt.Printf("There are still %d tasks to be launched and no more resources are available.\n", unlaunchedTasks)
	}

	for _, offer := range offers {
		tasks := offersAndTasks[offer]
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
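takePort is used here and in Example #14 but is not shown. A minimal sketch, assuming it only returns a pointer to the first port of the first remaining range and leaves shrinking the slice to the caller (which does ports = ports[1:] above); the pointer parameter mirrors the call site but is not mutated in this sketch:

func (this *TransformScheduler) takePort(ports *[]*mesos.Value_Range) *uint64 {
	// Return the first port of the first remaining range.
	port := (*ports)[0].GetBegin()
	return &port
}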
Example #18
// Build method builds the task
func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	if t.config.Hostname != "" {
		t.Container.Hostname = proto.String(t.config.Hostname)
		if t.config.Domainname != "" {
			t.Container.Hostname = proto.String(t.config.Hostname + "." + t.config.Domainname)
		}
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "default", "bridge", "":
		var ports []uint64

		for _, offer := range offers {
			ports = append(ports, getPorts(offer)...)
		}

		for containerProtoPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				containerInfo := strings.SplitN(containerProtoPort, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}

				var hostPort uint64

				if binding.HostPort != "" {
					hostPort, err = strconv.ParseUint(binding.HostPort, 10, 32)
					if err != nil {
						log.Warn(err)
						continue
					}
				} else if len(ports) > 0 {
					hostPort = ports[0]
					ports = ports[1:]
				}

				if hostPort == 0 {
					log.Warn("cannot find port to bind on the host")
					continue
				}

				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	for _, value := range t.config.Env {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("env"), Value: proto.String(value)})
	}

	if !t.config.AttachStdin && !t.config.AttachStdout && !t.config.AttachStderr {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=true", cluster.SwarmLabelNamespace+".mesos.detach"))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}
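getPorts, which supplies the pool of host ports used for the bridge-mode bindings above, is not part of this example. A plausible sketch, assuming it simply flattens every offered "ports" range into individual port numbers:

func getPorts(offer *mesosproto.Offer) (ports []uint64) {
	// Expand each offered port range into individual ports.
	for _, resource := range offer.GetResources() {
		if resource.GetName() == "ports" {
			for _, r := range resource.GetRanges().GetRange() {
				for port := r.GetBegin(); port <= r.GetEnd(); port++ {
					ports = append(ports, port)
				}
			}
		}
	}
	return ports
}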
Example #19
func TestWildcardHostPortMatching(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: "default",
		},
	}

	offer := &mesos.Offer{}
	mapping, err := WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for an empty offer and a pod without ports: %v", pod)
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			resources.NewPorts("*", 1, 1),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for a pod without ports: %v", pod)
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 123,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err == nil {
		t.Fatalf("expected error instead of mappings: %#v", mapping)
	} else if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 123,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 1,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if len(err.Ports) != 0 {
		t.Fatal("Expected port allocation error for wildcard port")
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			resources.NewPorts("*", 1, 2),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid := 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 2 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 1 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}

	//-- port mapping in case of multiple discontinuous port ranges in mesos offer
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 0,
			}},
		}},
	}
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(1, 1), mesosutil.NewValueRange(3, 5)}),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid = 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 1 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 3 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}
}
Example #20
func TestResources(t *testing.T) {
	resources := Resources([]*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})})
	if !strings.Contains(resources, "cpus") {
		t.Errorf(`%s does not contain "cpus"`, resources)
	}
	if !strings.Contains(resources, "mem") {
		t.Errorf(`%s does not contain "mem"`, resources)
	}
	if !strings.Contains(resources, "ports") {
		t.Errorf(`%s does not contain "ports"`, resources)
	}
}
Example #21
func TestOffers(t *testing.T) {
	offer1 := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")
	offer1.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}

	offer2 := util.NewOffer(util.NewOfferID("26d5b34c-ef81-638d-5ad5-32c743c9c033"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0037"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S0"), "master")
	offer2.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 2), util.NewScalarResource("mem", 1024), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000)})}
	offer2.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}

	offers := Offers([]*mesos.Offer{offer1, offer2})
	if len(strings.Split(offers, "\n")) != 2 {
		t.Errorf("Offers([]*mesos.Offer{offer1, offer2}) should contain two offers split by new line, actual: %s", offers)
	}
}
Example #22
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")

	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
Example #23
func TestGrowToDesiredAfterReconciliation(t *gotesting.T) {
	testScheduler := NewEtcdScheduler(3, 0, 0, true, []*mesos.CommandInfo_URI{}, false, 4096, 1, 256)

	reconciliation := map[string]string{
		"etcd-1": "slave-1",
		"etcd-2": "slave-2",
	}
	testScheduler.reconciliationInfoFunc = func([]string, string, string) (map[string]string, error) {
		return reconciliation, nil
	}
	testScheduler.updateReconciliationInfoFunc = func(info map[string]string, _ []string, _ string, _ string) error {
		reconciliation = info
		return nil
	}

	testScheduler.masterInfo = util.NewMasterInfo("master-1", 0, 0)
	mockdriver := &MockSchedulerDriver{
		runningStatuses: make(chan *mesos.TaskStatus, 10),
		scheduler:       testScheduler,
	}
	testScheduler.state = Mutable
	testScheduler.healthCheck = func(map[string]*config.Node) error {
		return nil
	}

	// Push more than enough offers to shoot self in foot if unchecked.
	for _, offer := range []*mesos.Offer{
		NewOffer("1"),
		NewOffer("2"),
		NewOffer("3"),
	} {
		testScheduler.offerCache.Push(offer)
	}
	memberList := config.ClusterMemberList{
		Members: []httptypes.Member{
			{
				ID:         "1",
				Name:       "etcd-1",
				PeerURLs:   nil,
				ClientURLs: nil,
			},
			{
				ID:         "2",
				Name:       "etcd-2",
				PeerURLs:   nil,
				ClientURLs: nil,
			},
		},
	}

	_, port1, err := emtesting.NewTestEtcdServer(t, memberList)
	if err != nil {
		t.Fatalf("Failed to create test etcd server: %s", err)
	}

	_, port2, err := emtesting.NewTestEtcdServer(t, memberList)
	if err != nil {
		t.Fatalf("Failed to create test etcd server: %s", err)
	}

	// Valid reconciled tasks should be added to the running list.
	mockdriver.On(
		"ReconcileTasks",
		0,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()

	for _, taskStatus := range []*mesos.TaskStatus{
		util.NewTaskStatus(
			util.NewTaskID("etcd-1 localhost 0 "+strconv.Itoa(int(port1))+" 0"),
			mesos.TaskState_TASK_RUNNING,
		),
		util.NewTaskStatus(
			util.NewTaskID("etcd-2 localhost 0 "+strconv.Itoa(int(port2))+" 0"),
			mesos.TaskState_TASK_RUNNING,
		),
	} {
		mockdriver.runningStatuses <- taskStatus
	}

	// Scheduler should grow cluster to desired number of nodes.
	offer := NewOffer("1")
	mockdriver.On(
		"LaunchTasks",
		[]*mesos.OfferID{
			offer.Id,
		},
		[]*mesos.TaskInfo{
			{
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", 1),
					util.NewScalarResource("mem", 256),
					util.NewScalarResource("disk", 4096),
					util.NewRangesResource("ports", []*mesos.Value_Range{
						util.NewValueRange(uint64(0), uint64(2)),
					}),
				},
			},
		},
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()

	// Simulate failover, registration and time passing.
	mockdriver.ReconcileTasks([]*mesos.TaskStatus{})
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)

	assert.Equal(t, 3, len(testScheduler.running),
		"Scheduler should reconcile tasks properly.")

	mockdriver.AssertExpectations(t)
}
Example #24
func (s *visghsScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	if (s.nodesLaunched - s.nodesErrored) >= s.nodeTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		ids := make([]*mesos.OfferID, len(offers))
		for i, offer := range offers {
			ids[i] = offer.Id
		}
		driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(120)})
		return
	}
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		portResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		ports := []*mesos.Value_Range{}
		portsCount := uint64(0)
		for _, res := range portResources {
			for _, rs := range res.GetRanges().GetRange() {
				ports = append(ports, rs)
				portsCount += 1 + rs.GetEnd() - rs.GetBegin()
			}
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems, " ports=", ports)

		remainingCpus := cpus
		remainingMems := mems
		remainingPorts := ports
		remainingPortsCount := portsCount

		// account for executor resources if there's not an executor already running on the slave
		if len(offer.ExecutorIds) == 0 {
			remainingCpus -= CPUS_PER_EXECUTOR
			remainingMems -= MEM_PER_EXECUTOR
		}

		var tasks []*mesos.TaskInfo
		for (s.nodesLaunched-s.nodesErrored) < s.nodeTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems &&
			PORTS_PER_TASK <= remainingPortsCount {
			log.Infoln("Ports <", remainingPortsCount, remainingPorts)

			s.nodesLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(s.nodesLaunched)),
			}

			taskPorts := []*mesos.Value_Range{}
			leftOverPorts := []*mesos.Value_Range{}
			for t := 0; t < PORTS_PER_TASK; t++ {
				if len(remainingPorts) < 1 {
					// No ports left in this offer; stop taking ports for this task.
					break
				}
				ps := remainingPorts[0]
				//take the first port from the first
				pb := ps.GetBegin()
				pe := ps.GetEnd()
				//Create one range per port we need, it's easier this way
				tp := mesos.Value_Range{}
				p := pb
				tp.Begin = &p
				tp.End = &p
				taskPorts = append(taskPorts, &tp)

				pb++
				if pb <= pe {
					rpb := pb
					rpe := pe
					rtp := mesos.Value_Range{Begin: &rpb, End: &rpe}
					leftOverPorts = append(leftOverPorts, &rtp)
				}
				for _, ps := range remainingPorts[1:] {
					leftOverPorts = append(leftOverPorts, ps)
				}
			}

			radiaPort := uint32(taskPorts[0].GetBegin())
			task := &mesos.TaskInfo{
				Name:     proto.String("visghs-node-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: s.nexec,
				Discovery: &mesos.DiscoveryInfo{
					Name: proto.String("visghs"),
					//Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
					Visibility: mesos.DiscoveryInfo_FRAMEWORK.Enum(),
					Ports: &mesos.Ports{
						Ports: []*mesos.Port{
							{Protocol: proto.String("UDP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("udpprobe"),
								Number:     proto.Uint32(radiaPort)},
							{Protocol: proto.String("TCP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("radiarpc"),
								Number:     proto.Uint32(radiaPort)},
						},
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
					util.NewRangesResource("ports", taskPorts),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
			remainingPorts = leftOverPorts
			remainingPortsCount -= PORTS_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #25
// TODO(tyler) split this long function up!
func (s *EtcdScheduler) launchOne(driver scheduler.SchedulerDriver) {
	// Always ensure we've pruned any dead / unmanaged nodes before
	// launching new ones, or we may overconfigure the ensemble such
	// that it can not make progress if the next launch fails.
	err := s.Prune()
	if err != nil {
		log.Errorf("Failed to remove stale cluster members: %s", err)
		return
	}

	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		return
	}

	// validOffer filters out offers that are no longer
	// desirable, even though they may have been when
	// they were enqueued.
	validOffer := func(offer *mesos.Offer) bool {
		runningCopy := s.RunningCopy()
		for _, etcdConfig := range runningCopy {
			if etcdConfig.SlaveID == offer.SlaveId.GetValue() {
				if s.singleInstancePerSlave {
					log.Info("Skipping offer: already running on this slave.")
					return false
				}
			}
		}
		return true
	}

	// Issue BlockingPop until we get back an offer we can use.
	var offer *mesos.Offer
	for {
		offer = s.offerCache.BlockingPop()
		if validOffer(offer) {
			break
		} else {
			s.decline(driver, offer)
		}
	}

	// Do this again because BlockingPop may have taken a long time.
	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		s.decline(driver, offer)
		return
	}

	// TODO(tyler) this is a broken hack
	resources := parseOffer(offer)
	lowest := *resources.ports[0].Begin
	rpcPort := lowest
	clientPort := lowest + 1
	httpPort := lowest + 2

	s.mut.Lock()
	var clusterType string
	if len(s.running) == 0 {
		clusterType = "new"
	} else {
		clusterType = "existing"
	}

	s.highestInstanceID++
	name := "etcd-" + strconv.FormatInt(s.highestInstanceID, 10)

	node := &config.Node{
		Name:       name,
		Host:       *offer.Hostname,
		RPCPort:    rpcPort,
		ClientPort: clientPort,
		ReseedPort: httpPort,
		Type:       clusterType,
		SlaveID:    offer.GetSlaveId().GetValue(),
	}
	running := []*config.Node{node}
	for _, r := range s.running {
		running = append(running, r)
	}
	serializedNodes, err := json.Marshal(running)
	log.Infof("Serialized running: %+v", string(serializedNodes))
	if err != nil {
		log.Errorf("Could not serialize running list: %v", err)
		// This Unlock is not deferred because the test implementation of LaunchTasks
		// calls this scheduler's StatusUpdate method, causing the test to deadlock.
		s.decline(driver, offer)
		s.mut.Unlock()
		return
	}

	configSummary := node.String()
	taskID := &mesos.TaskID{Value: &configSummary}
	executor := s.newExecutorInfo(node, s.executorUris)
	task := &mesos.TaskInfo{
		Data:     serializedNodes,
		Name:     proto.String("etcd-server"),
		TaskId:   taskID,
		SlaveId:  offer.SlaveId,
		Executor: executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", s.cpusPerTask),
			util.NewScalarResource("mem", s.memPerTask),
			util.NewScalarResource("disk", s.diskPerTask),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(rpcPort), uint64(httpPort)),
			}),
		},
		Discovery: &mesos.DiscoveryInfo{
			Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
			Name:       proto.String("etcd-server"),
			Ports: &mesos.Ports{
				Ports: []*mesos.Port{
					&mesos.Port{
						Number:   proto.Uint32(uint32(rpcPort)),
						Protocol: proto.String("tcp"),
					},
					// HACK: "client" is not a real SRV protocol.  This is so
					// that we can have etcd proxies use srv discovery on the
					// above tcp name.  Mesos-dns does not yet care about
					// names for DiscoveryInfo.  When it does, we should
					// create a name for clients to use.  We want to keep
					// the rpcPort accessible at _etcd-server._tcp.<fwname>.mesos
					&mesos.Port{
						Number:   proto.Uint32(uint32(clientPort)),
						Protocol: proto.String("client"),
					},
				},
			},
		},
	}

	log.Infof(
		"Prepared task: %s with offer %s for launch",
		task.GetName(),
		offer.Id.GetValue(),
	)
	log.Info("Launching etcd node.")

	tasks := []*mesos.TaskInfo{task}

	s.pending[node.Name] = struct{}{}

	// This Unlock is not deferred because the test implementation of LaunchTasks
	// calls this scheduler's StatusUpdate method, causing the test to deadlock.
	s.mut.Unlock()

	atomic.AddUint32(&s.Stats.LaunchedServers, 1)
	driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		tasks,
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	)
}