Code Example #1
File: scheduler.go | Project: elodina/syslog-service
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
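The s.getPort helper called above is not part of this snippet. Below is a minimal, hypothetical sketch of what such a helper could look like; the name pickPort, its signature, and the first-fit selection strategy are assumptions for illustration, not the elodina/syslog-service implementation. It scans the offer's "ports" ranges and returns the first port that is not already taken.

// pickPort is a hypothetical stand-in for s.getPort: it returns an explicitly
// configured port if one is set, otherwise the first port in the offer's
// "ports" ranges that differs from `taken` (pass -1 to accept any port).
// It returns -1 when the offer holds no usable port.
func pickPort(configured int64, offer *mesos.Offer, taken int) int64 {
	if configured > 0 {
		return configured
	}
	for _, res := range offer.GetResources() {
		if res.GetName() != "ports" {
			continue
		}
		for _, rng := range res.GetRanges().GetRange() {
			for p := rng.GetBegin(); p <= rng.GetEnd(); p++ {
				if int(p) != taken {
					return int64(p)
				}
			}
		}
	}
	return -1
}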
Code Example #2
File: utils_test.go | Project: sanmiguel/riak-mesos
func TestArrayToRanges(t *testing.T) {
	assert := assert.New(t)
	assert.Equal(ArrayToRanges([]int64{}), []*mesos.Value_Range{})
	assert.Equal(ArrayToRanges([]int64{1, 2, 3, 4}), []*mesos.Value_Range{util.NewValueRange(1, 4)})
	assert.Equal(ArrayToRanges([]int64{1, 2, 3, 4, 6, 7, 8}), []*mesos.Value_Range{util.NewValueRange(1, 4), util.NewValueRange(6, 8)})
	assert.Equal(ArrayToRanges([]int64{2, 3, 4, 6, 7, 8}), []*mesos.Value_Range{util.NewValueRange(2, 4), util.NewValueRange(6, 8)})
	assert.Equal(ArrayToRanges([]int64{1, 3, 5}), []*mesos.Value_Range{util.NewValueRange(1, 1), util.NewValueRange(3, 3), util.NewValueRange(5, 5)})
}
Code Example #3
File: utils_test.go | Project: sanmiguel/riak-mesos
func TestGoodPortAsk(t *testing.T) {
	rand.Seed(10)
	assert := assert.New(t)
	offer := generateResourceOffer()
	askFun := AskForPorts(100)
	remaining, resourceAsk, success := askFun(offer)
	assert.Equal(true, success)
	assert.Equal(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31105, 31204)}), resourceAsk)
	remainingPorts := util.FilterResources(remaining, func(res *mesos.Resource) bool {
		return res.GetName() == "ports"
	})
	assert.Equal([]*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31104), util.NewValueRange(31205, 32000)})}, remainingPorts)
}
Code Example #4
File: utils_test.go | Project: sanmiguel/riak-mesos
func TestBadPortAsk(t *testing.T) {
	assert := assert.New(t)
	offer := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	_, _, success := AskForPorts(100)(offer)

	assert.Equal(false, success)
}
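AskForPorts itself is not shown on this page. The seeded test above suggests the riak-mesos implementation reserves a random contiguous sub-range; the sketch below is a simplified, hypothetical first-fit version that only illustrates the return contract (remaining resources, the carved-out "ports" resource, and a success flag).

// askForPortsSketch carves `count` contiguous ports out of the first
// large-enough "ports" range and returns the leftover resources, the
// carved-out resource, and whether the request could be satisfied.
func askForPortsSketch(count uint64) func([]*mesos.Resource) ([]*mesos.Resource, *mesos.Resource, bool) {
	return func(offer []*mesos.Resource) ([]*mesos.Resource, *mesos.Resource, bool) {
		var ask *mesos.Resource
		remaining := []*mesos.Resource{}
		for _, res := range offer {
			if ask != nil || res.GetName() != "ports" {
				remaining = append(remaining, res)
				continue
			}
			var kept []*mesos.Value_Range
			for _, rng := range res.GetRanges().GetRange() {
				if ask == nil && rng.GetEnd()-rng.GetBegin()+1 >= count {
					begin := rng.GetBegin()
					ask = util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(begin, begin+count-1)})
					if begin+count <= rng.GetEnd() {
						kept = append(kept, util.NewValueRange(begin+count, rng.GetEnd()))
					}
					continue
				}
				kept = append(kept, rng)
			}
			remaining = append(remaining, util.NewRangesResource("ports", kept))
		}
		return remaining, ask, ask != nil
	}
}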
Code Example #5
File: pretty_test.go | Project: elodina/stack-deploy
func TestResource(t *testing.T) {
	mem := Resource(util.NewScalarResource("mem", 512))
	if mem != "mem:512.00" {
		t.Errorf(`Resource(util.NewScalarResource("mem", 512)) != "mem:512.00"; actual %s`, mem)
	}

	ports := Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}))
	if ports != "ports:[31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})) != "ports:[31000..32000]"; actual %s`, ports)
	}

	ports = Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)}))
	if ports != "ports:[4000..7000][31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)})) != "ports:[4000..7000][31000..32000]"; actual %s`, ports)
	}
}
Code Example #6
File: utils_test.go | Project: sanmiguel/riak-mesos
func TestTotalPortAsk(t *testing.T) {
	assert := assert.New(t)
	askfun := AskForPorts(1)
	offer := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	newOffer, _, success := askfun(offer)
	assert.Equal(0, len(newOffer[0].GetRanges().GetRange()))
	assert.Equal(true, success)
}
Code Example #7
File: utils_test.go | Project: sanmiguel/riak-mesos
func generateResourceOffer() []*mesos.Resource {
	val := []*mesos.Resource{
		util.NewScalarResource("cpus", 3),
		util.NewScalarResource("disk", 73590),
		util.NewScalarResource("mem", 1985),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}),
	}
	return val
}
Code Example #8
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}
		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

		tasks := []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
Code Example #9
File: task.go | Project: klarna/eremetic
func buildPorts(task eremetic.Task, offer *mesosproto.Offer) ([]*mesosproto.ContainerInfo_DockerInfo_PortMapping, []*mesosproto.Value_Range) {
	var resources []*mesosproto.Value_Range
	var mappings []*mesosproto.ContainerInfo_DockerInfo_PortMapping

	if len(task.Ports) == 0 {
		return mappings, resources
	}

	leftToAssign := len(task.Ports)

	for _, rsrc := range offer.Resources {
		if *rsrc.Name != "ports" {
			continue
		}

		for _, rng := range rsrc.Ranges.Range {
			if leftToAssign == 0 {
				break
			}

			start, end := *rng.Begin, *rng.Begin

			for hport := int(*rng.Begin); hport <= int(*rng.End); hport++ {
				if leftToAssign == 0 {
					break
				}

				leftToAssign--

				tport := &task.Ports[leftToAssign]
				tport.HostPort = uint32(hport)

				if tport.ContainerPort == 0 {
					tport.ContainerPort = tport.HostPort
				}

				end = uint64(hport + 1)

				mappings = append(mappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					ContainerPort: proto.Uint32(tport.ContainerPort),
					HostPort:      proto.Uint32(tport.HostPort),
					Protocol:      proto.String(tport.Protocol),
				})
			}

			if start != end {
				resources = append(resources, mesosutil.NewValueRange(start, end))
			}
		}
	}

	return mappings, resources
}
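A hedged sketch of how buildPorts' two return values might be wired into a task. The helper name and the assembly below are hypothetical illustrations, not klarna/eremetic's createTaskInfo (which is only exercised, not shown, in the tests further down this page); Task fields such as Image, TaskCPUs and TaskMem are taken from those tests.

// buildDockerTaskResources shows one plausible way to consume buildPorts:
// the mappings go onto the Docker container info and the reserved ranges
// become a "ports" resource on the task.
func buildDockerTaskResources(task eremetic.Task, offer *mesosproto.Offer) (*mesosproto.ContainerInfo, []*mesosproto.Resource) {
	mappings, ranges := buildPorts(task, offer)
	container := &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image:        proto.String(task.Image),
			PortMappings: mappings,
		},
	}
	resources := []*mesosproto.Resource{
		mesosutil.NewScalarResource("cpus", task.TaskCPUs),
		mesosutil.NewScalarResource("mem", task.TaskMem),
	}
	if len(ranges) > 0 {
		resources = append(resources, mesosutil.NewRangesResource("ports", ranges))
	}
	return container, resources
}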
Code Example #10
func NewOffer(id string) *mesos.Offer {
	return &mesos.Offer{
		Id:          util.NewOfferID(id),
		FrameworkId: util.NewFrameworkID("test-etcd-framework"),
		SlaveId:     util.NewSlaveID("slave-" + id),
		Hostname:    proto.String("localhost"),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 1),
			util.NewScalarResource("mem", 256),
			util.NewScalarResource("disk", 4096),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(0), uint64(65535)),
			}),
		},
	}
}
Code Example #11
File: utils.go | Project: sanmiguel/riak-mesos
// ArrayToRanges collapses a list of ports into contiguous Mesos value ranges.
// The input is sorted in place first, so callers need not pre-sort it.
func ArrayToRanges(ports []int64) []*mesos.Value_Range {
	sort.Sort(intarray(ports))
	if len(ports) == 0 {
		return []*mesos.Value_Range{}
	}
	fakeret := [][]int64{[]int64{ports[0], ports[0]}}
	for _, val := range ports {
		if val > fakeret[len(fakeret)-1][1]+1 {
			fakeret = append(fakeret, []int64{val, val})
		} else {
			fakeret[len(fakeret)-1][1] = val
		}
	}
	ret := make([]*mesos.Value_Range, len(fakeret))
	for idx := range fakeret {
		ret[idx] = util.NewValueRange(uint64(fakeret[idx][0]), uint64(fakeret[idx][1]))
	}
	return ret
}
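A quick usage example, consistent with the tests near the top of this page (the port values are illustrative only):

func ExampleArrayToRanges() {
	// [31000 31001 31002 31100] collapses to [31000..31002] and [31100..31100].
	ranges := ArrayToRanges([]int64{31000, 31001, 31002, 31100})
	// Wrap the ranges as a Mesos "ports" resource, as the schedulers above do.
	_ = util.NewRangesResource("ports", ranges)
}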
Code Example #12
func TestWildcardHostPortMatching(t *testing.T) {
	t.Parallel()
	task := fakePodTask("foo", nil, nil)
	pod := &task.Pod

	offer := &mesos.Offer{}
	mapping, err := WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for an empty offer and a pod without ports: %v", pod)
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			newPortsResource("*", 1, 1),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for a pod without ports: %v", pod)
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 123,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err == nil {
		t.Fatalf("expected error instead of mappings: %#v", mapping)
	} else if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 123,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 1,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	mapping, err = WildcardMapper(task, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if len(err.Ports) != 0 {
		t.Fatal("Expected port allocation error for wildcard port")
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			newPortsResource("*", 1, 2),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid := 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 2 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 1 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}

	//-- port mapping in case of multiple discontinuous port ranges in mesos offer
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 0,
			}},
		}},
	}
	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(1, 1), mesosutil.NewValueRange(3, 5)}),
		},
	}
	mapping, err = WildcardMapper(task, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid = 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 1 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 3 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}
}
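The newPortsResource helper used in this test is not reproduced here. A hypothetical version follows; the only assumption beyond the mesosutil API is that the helper attaches the given role to a standard "ports" ranges resource.

// newPortsResourceSketch builds a "ports" ranges resource for a single
// [begin, end] range and tags it with the given Mesos role.
func newPortsResourceSketch(role string, begin, end uint64) *mesos.Resource {
	res := mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(begin, end)})
	res.Role = proto.String(role)
	return res
}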
Code Example #13
File: pretty_test.go | Project: elodina/stack-deploy
func TestResources(t *testing.T) {
	resources := Resources([]*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})})
	if !strings.Contains(resources, "cpus") {
		t.Errorf(`%s does not contain "cpus"`, resources)
	}
	if !strings.Contains(resources, "mem") {
		t.Errorf(`%s does not contain "mem"`, resources)
	}
	if !strings.Contains(resources, "ports") {
		t.Errorf(`%s does not contain "ports"`, resources)
	}
}
Code Example #14
File: task.go | Project: prachidamle/swarm
// Build populates the task's command, container, port mappings, and resources
// from its Docker config, using host ports taken from the given offers.
func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	if t.config.Hostname != "" {
		t.Container.Hostname = proto.String(t.config.Hostname)
		if t.config.Domainname != "" {
			t.Container.Hostname = proto.String(t.config.Hostname + "." + t.config.Domainname)
		}
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "default", "bridge", "":
		var ports []uint64

		for _, offer := range offers {
			ports = append(ports, getPorts(offer)...)
		}

		for containerProtoPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				containerInfo := strings.SplitN(containerProtoPort, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}

				var hostPort uint64

				if binding.HostPort != "" {
					hostPort, err = strconv.ParseUint(binding.HostPort, 10, 32)
					if err != nil {
						log.Warn(err)
						continue
					}
				} else if len(ports) > 0 {
					hostPort = ports[0]
					ports = ports[1:]
				}

				if hostPort == 0 {
					log.Warn("cannot find port to bind on the host")
					continue
				}

				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	for _, value := range t.config.Env {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("env"), Value: proto.String(value)})
	}

	if !t.config.AttachStdin && !t.config.AttachStdout && !t.config.AttachStderr {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=true", cluster.SwarmLabelNamespace+".mesos.detach"))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}
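getPorts, used when the network mode is bridge above, is not included in this snippet. A hypothetical reconstruction is shown below, consistent with how its result is consumed (a flat slice of host ports popped one at a time); the name and exact behaviour are assumptions about swarm's helper.

// getPortsSketch flattens every "ports" range in the offer into a slice of
// individual host ports.
func getPortsSketch(offer *mesosproto.Offer) (ports []uint64) {
	for _, res := range offer.GetResources() {
		if res.GetName() != "ports" {
			continue
		}
		for _, rng := range res.GetRanges().GetRange() {
			for port := rng.GetBegin(); port <= rng.GetEnd(); port++ {
				ports = append(ports, port)
			}
		}
	}
	return ports
}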
Code Example #15
File: pretty_test.go | Project: elodina/stack-deploy
func TestOffers(t *testing.T) {
	offer1 := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")
	offer1.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}

	offer2 := util.NewOffer(util.NewOfferID("26d5b34c-ef81-638d-5ad5-32c743c9c033"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0037"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S0"), "master")
	offer2.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 2), util.NewScalarResource("mem", 1024), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000)})}
	offer2.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}

	offers := Offers([]*mesos.Offer{offer1, offer2})
	if len(strings.Split(offers, "\n")) != 2 {
		t.Errorf("Offers([]*mesos.Offer{offer1, offer2}) should contain two offers split by new line, actual: %s", offers)
	}
}
Code Example #16
File: task_test.go | Project: klarna/eremetic
func TestTask(t *testing.T) {

	status := []eremetic.Status{
		eremetic.Status{
			Status: eremetic.TaskRunning,
			Time:   time.Now().Unix(),
		},
	}

	Convey("createTaskInfo", t, func() {
		eremeticTask := eremetic.Task{
			TaskCPUs: 0.2,
			TaskMem:  0.5,
			Command:  "echo hello",
			Image:    "busybox",
			Status:   status,
			ID:       "eremetic-task.1234",
			Name:     "Eremetic task 17",
		}

		portres := "ports"
		offer := mesosproto.Offer{
			FrameworkId: &mesosproto.FrameworkID{
				Value: proto.String("framework-id"),
			},
			SlaveId: &mesosproto.SlaveID{
				Value: proto.String("slave-id"),
			},
			Hostname: proto.String("hostname"),
			Resources: []*mesosproto.Resource{&mesosproto.Resource{
				Name: &portres,
				Type: mesosproto.Value_RANGES.Enum(),
				Ranges: &mesosproto.Value_Ranges{
					Range: []*mesosproto.Value_Range{
						mesosutil.NewValueRange(31000, 31010),
					},
				},
			}},
		}

		Convey("No volume or environment specified", func() {
			net, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.GetName(), ShouldEqual, eremeticTask.Name)
			So(taskInfo.GetResources()[0].GetScalar().GetValue(), ShouldEqual, eremeticTask.TaskCPUs)
			So(taskInfo.GetResources()[1].GetScalar().GetValue(), ShouldEqual, eremeticTask.TaskMem)
			So(taskInfo.Container.GetType().String(), ShouldEqual, "DOCKER")
			So(taskInfo.Container.Docker.GetImage(), ShouldEqual, "busybox")
			So(net.SlaveID, ShouldEqual, "slave-id")
			So(taskInfo.Container.Docker.GetForcePullImage(), ShouldBeFalse)
		})

		Convey("Given no Command", func() {
			eremeticTask.Command = ""

			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.Command.GetValue(), ShouldBeEmpty)
			So(taskInfo.Command.GetShell(), ShouldBeFalse)
		})

		Convey("Given a volume and environment", func() {
			volumes := []eremetic.Volume{{
				ContainerPath: "/var/www",
				HostPath:      "/var/www",
			}}

			environment := make(map[string]string)
			environment["foo"] = "bar"

			eremeticTask.Environment = environment
			eremeticTask.Volumes = volumes

			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Container.Volumes[0].GetContainerPath(), ShouldEqual, volumes[0].ContainerPath)
			So(taskInfo.Container.Volumes[0].GetHostPath(), ShouldEqual, volumes[0].HostPath)
			So(taskInfo.Command.Environment.Variables[0].GetName(), ShouldEqual, "foo")
			So(taskInfo.Command.Environment.Variables[0].GetValue(), ShouldEqual, "bar")
			So(taskInfo.Command.Environment.Variables[1].GetName(), ShouldEqual, "MESOS_TASK_ID")
			So(taskInfo.Command.Environment.Variables[1].GetValue(), ShouldEqual, eremeticTask.ID)
		})

		Convey("Given a port", func() {
			var ports []eremetic.Port

			ports = append(ports,
				eremetic.Port{
					ContainerPort: 80,
					Protocol:      "tcp",
				},
			)

			eremeticTask.Ports = ports

			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(len(taskInfo.Container.Docker.PortMappings), ShouldEqual, 1)
			So(taskInfo.Container.Docker.GetPortMappings()[0].GetContainerPort(), ShouldEqual, ports[0].ContainerPort)
			So(taskInfo.GetResources()[2].GetName(), ShouldEqual, "ports")

			expectedRange := mesosutil.NewValueRange(31000, 31001)
			So(taskInfo.GetResources()[2].GetRanges().GetRange()[0].GetBegin(), ShouldEqual, expectedRange.GetBegin())
			So(taskInfo.GetResources()[2].GetRanges().GetRange()[0].GetEnd(), ShouldEqual, expectedRange.GetEnd())

			vars := taskInfo.GetCommand().GetEnvironment().GetVariables()

			var foundPortVar, foundPort0Var bool
			for _, v := range vars {
				switch v.GetName() {
				case "PORT":
					So(v.GetValue(), ShouldEqual, "31000")
					foundPortVar = true
				case "PORT0":
					So(v.GetValue(), ShouldEqual, "31000")
					foundPort0Var = true
				}
			}
			So(foundPortVar, ShouldBeTrue)
			So(foundPort0Var, ShouldBeTrue)
		})

		Convey("Given unspecified port", func() {
			var ports []eremetic.Port

			ports = append(ports,
				eremetic.Port{
					ContainerPort: 0,
					Protocol:      "tcp",
				},
			)

			eremeticTask.Ports = ports

			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(len(taskInfo.Container.Docker.PortMappings), ShouldEqual, 1)
			So(taskInfo.Container.Docker.GetPortMappings()[0].GetContainerPort(), ShouldEqual, 31000)
			So(taskInfo.GetResources()[2].GetName(), ShouldEqual, "ports")

			expectedRange := mesosutil.NewValueRange(31000, 31001)
			So(taskInfo.GetResources()[2].GetRanges().GetRange()[0].GetBegin(), ShouldEqual, expectedRange.GetBegin())
			So(taskInfo.GetResources()[2].GetRanges().GetRange()[0].GetEnd(), ShouldEqual, expectedRange.GetEnd())

			vars := taskInfo.GetCommand().GetEnvironment().GetVariables()

			var foundPortVar, foundPort0Var bool
			for _, v := range vars {
				switch v.GetName() {
				case "PORT":
					So(v.GetValue(), ShouldEqual, "31000")
					foundPortVar = true
				case "PORT0":
					So(v.GetValue(), ShouldEqual, "31000")
					foundPort0Var = true
				}
			}
			So(foundPortVar, ShouldBeTrue)
			So(foundPort0Var, ShouldBeTrue)
		})

		Convey("Given archive to fetch", func() {
			URI := []eremetic.URI{{
				URI:     "http://foobar.local/cats.zip",
				Extract: true,
			}}
			eremeticTask.FetchURIs = URI
			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Command.Uris, ShouldHaveLength, 1)
			So(taskInfo.Command.Uris[0].GetValue(), ShouldEqual, eremeticTask.FetchURIs[0].URI)
			So(taskInfo.Command.Uris[0].GetExecutable(), ShouldBeFalse)
			So(taskInfo.Command.Uris[0].GetExtract(), ShouldBeTrue)
			So(taskInfo.Command.Uris[0].GetCache(), ShouldBeFalse)
		})

		Convey("Given archive to fetch and cache", func() {
			URI := []eremetic.URI{{
				URI:     "http://foobar.local/cats.zip",
				Extract: true,
				Cache:   true,
			}}
			eremeticTask.FetchURIs = URI
			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Command.Uris, ShouldHaveLength, 1)
			So(taskInfo.Command.Uris[0].GetValue(), ShouldEqual, eremeticTask.FetchURIs[0].URI)
			So(taskInfo.Command.Uris[0].GetExecutable(), ShouldBeFalse)
			So(taskInfo.Command.Uris[0].GetExtract(), ShouldBeTrue)
			So(taskInfo.Command.Uris[0].GetCache(), ShouldBeTrue)
		})

		Convey("Given image to fetch", func() {
			URI := []eremetic.URI{{
				URI: "http://foobar.local/cats.jpeg",
			}}
			eremeticTask.FetchURIs = URI
			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Command.Uris, ShouldHaveLength, 1)
			So(taskInfo.Command.Uris[0].GetValue(), ShouldEqual, eremeticTask.FetchURIs[0].URI)
			So(taskInfo.Command.Uris[0].GetExecutable(), ShouldBeFalse)
			So(taskInfo.Command.Uris[0].GetExtract(), ShouldBeFalse)
			So(taskInfo.Command.Uris[0].GetCache(), ShouldBeFalse)
		})

		Convey("Given script to fetch", func() {
			URI := []eremetic.URI{{
				URI:        "http://foobar.local/cats.sh",
				Executable: true,
			}}
			eremeticTask.FetchURIs = URI
			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Command.Uris, ShouldHaveLength, 1)
			So(taskInfo.Command.Uris[0].GetValue(), ShouldEqual, eremeticTask.FetchURIs[0].URI)
			So(taskInfo.Command.Uris[0].GetExecutable(), ShouldBeTrue)
			So(taskInfo.Command.Uris[0].GetExtract(), ShouldBeFalse)
			So(taskInfo.Command.Uris[0].GetCache(), ShouldBeFalse)
		})

		Convey("Force pull of docker image", func() {
			eremeticTask.ForcePullImage = true
			_, taskInfo := createTaskInfo(eremeticTask, &offer)

			So(taskInfo.TaskId.GetValue(), ShouldEqual, eremeticTask.ID)
			So(taskInfo.Container.Docker.GetForcePullImage(), ShouldBeTrue)
		})
	})
}
Code Example #17
File: pretty_test.go | Project: elodina/stack-deploy
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")

	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
Code Example #18
File: task.go | Project: nagyistoce/ms-docker-swarm
func (t *task) build(slaveID string) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "bridge", "":
		for containerPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				containerInfo := strings.SplitN(containerPort, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				hostPort, err := strconv.ParseUint(binding.HostPort, 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}
Code Example #19
File: scheduler.go | Project: puppetizeme/etcd-mesos
// TODO(tyler) split this long function up!
func (s *EtcdScheduler) launchOne(driver scheduler.SchedulerDriver) {
	// Always ensure we've pruned any dead / unmanaged nodes before
	// launching new ones, or we may overconfigure the ensemble such
	// that it can not make progress if the next launch fails.
	err := s.Prune()
	if err != nil {
		log.Errorf("Failed to remove stale cluster members: %s", err)
		return
	}

	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		return
	}

	// validOffer filters out offers that are no longer
	// desirable, even though they may have been when
	// they were enqueued.
	validOffer := func(offer *mesos.Offer) bool {
		runningCopy := s.RunningCopy()
		for _, etcdConfig := range runningCopy {
			if etcdConfig.SlaveID == offer.SlaveId.GetValue() {
				if s.singleInstancePerSlave {
					log.Info("Skipping offer: already running on this slave.")
					return false
				}
			}
		}
		return true
	}

	// Issue BlockingPop until we get back an offer we can use.
	var offer *mesos.Offer
	for {
		offer = s.offerCache.BlockingPop()
		if validOffer(offer) {
			break
		} else {
			s.decline(driver, offer)
		}
	}

	// Do this again because BlockingPop may have taken a long time.
	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		s.decline(driver, offer)
		return
	}

	// TODO(tyler) this is a broken hack
	resources := parseOffer(offer)
	lowest := *resources.ports[0].Begin
	rpcPort := lowest
	clientPort := lowest + 1
	httpPort := lowest + 2

	s.mut.Lock()
	var clusterType string
	if len(s.running) == 0 {
		clusterType = "new"
	} else {
		clusterType = "existing"
	}

	s.highestInstanceID++
	name := "etcd-" + strconv.FormatInt(s.highestInstanceID, 10)

	node := &config.Node{
		Name:       name,
		Host:       *offer.Hostname,
		RPCPort:    rpcPort,
		ClientPort: clientPort,
		ReseedPort: httpPort,
		Type:       clusterType,
		SlaveID:    offer.GetSlaveId().GetValue(),
	}
	running := []*config.Node{node}
	for _, r := range s.running {
		running = append(running, r)
	}
	serializedNodes, err := json.Marshal(running)
	log.Infof("Serialized running: %+v", string(serializedNodes))
	if err != nil {
		log.Errorf("Could not serialize running list: %v", err)
		// This Unlock is not deferred because the test implementation of LaunchTasks
		// calls this scheduler's StatusUpdate method, causing the test to deadlock.
		s.decline(driver, offer)
		s.mut.Unlock()
		return
	}

	configSummary := node.String()
	taskID := &mesos.TaskID{Value: &configSummary}
	executor := s.newExecutorInfo(node, s.executorUris)
	task := &mesos.TaskInfo{
		Data:     serializedNodes,
		Name:     proto.String("etcd-server"),
		TaskId:   taskID,
		SlaveId:  offer.SlaveId,
		Executor: executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", s.cpusPerTask),
			util.NewScalarResource("mem", s.memPerTask),
			util.NewScalarResource("disk", s.diskPerTask),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(rpcPort), uint64(httpPort)),
			}),
		},
		Discovery: &mesos.DiscoveryInfo{
			Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
			Name:       proto.String("etcd-server"),
			Ports: &mesos.Ports{
				Ports: []*mesos.Port{
					&mesos.Port{
						Number:   proto.Uint32(uint32(rpcPort)),
						Protocol: proto.String("tcp"),
					},
					// HACK: "client" is not a real SRV protocol.  This is so
					// that we can have etcd proxies use srv discovery on the
					// above tcp name.  Mesos-dns does not yet care about
					// names for DiscoveryInfo.  When it does, we should
					// create a name for clients to use.  We want to keep
					// the rpcPort accessible at _etcd-server._tcp.<fwname>.mesos
					&mesos.Port{
						Number:   proto.Uint32(uint32(clientPort)),
						Protocol: proto.String("client"),
					},
				},
			},
		},
	}

	log.Infof(
		"Prepared task: %s with offer %s for launch",
		task.GetName(),
		offer.Id.GetValue(),
	)
	log.Info("Launching etcd node.")

	tasks := []*mesos.TaskInfo{task}

	s.pending[node.Name] = struct{}{}

	// This Unlock is not deferred because the test implementation of LaunchTasks
	// calls this scheduler's StatusUpdate method, causing the test to deadlock.
	s.mut.Unlock()

	atomic.AddUint32(&s.Stats.LaunchedServers, 1)
	driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		tasks,
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	)
}
Code Example #20
File: mapper_test.go | Project: Clarifai/kubernetes
func TestWildcardHostPortMatching(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: "default",
		},
	}

	offer := &mesos.Offer{}
	mapping, err := WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for an empty offer and a pod without ports: %v", pod)
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			resources.NewPorts("*", 1, 1),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	}
	if len(mapping) > 0 {
		t.Fatalf("Found mappings for a pod without ports: %v", pod)
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 123,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err == nil {
		t.Fatalf("expected error instead of mappings: %#v", mapping)
	} else if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 123,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
		t.Fatal("Expected port allocation error for host port 123")
	}

	//--
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 1,
			}},
		}},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err, _ := err.(*PortAllocationError); err == nil {
		t.Fatal("Expected port allocation error")
	} else if len(err.Ports) != 0 {
		t.Fatal("Expected port allocation error for wildcard port")
	}

	//--
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			resources.NewPorts("*", 1, 2),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid := 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 2 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 1 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}

	//-- port mapping in case of multiple discontinuous port ranges in mesos offer
	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 0,
			}, {
				HostPort: 0,
			}},
		}},
	}
	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(1, 1), mesosutil.NewValueRange(3, 5)}),
		},
	}
	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
	if err != nil {
		t.Fatal(err)
	} else if len(mapping) != 2 {
		t.Fatal("Expected both ports allocated")
	}
	valid = 0
	for _, entry := range mapping {
		if entry.ContainerIdx == 0 && entry.PortIdx == 0 && entry.OfferPort == 1 {
			valid++
		}
		if entry.ContainerIdx == 0 && entry.PortIdx == 1 && entry.OfferPort == 3 {
			valid++
		}
	}
	if valid < 2 {
		t.Fatalf("Expected 2 valid port mappings, not %d", valid)
	}
}
Code Example #21
func TestGrowToDesiredAfterReconciliation(t *gotesting.T) {
	testScheduler := NewEtcdScheduler(3, 0, 0, true, []*mesos.CommandInfo_URI{}, false, 4096, 1, 256)

	reconciliation := map[string]string{
		"etcd-1": "slave-1",
		"etcd-2": "slave-2",
	}
	testScheduler.reconciliationInfoFunc = func([]string, string, string) (map[string]string, error) {
		return reconciliation, nil
	}
	testScheduler.updateReconciliationInfoFunc = func(info map[string]string, _ []string, _ string, _ string) error {
		reconciliation = info
		return nil
	}

	testScheduler.masterInfo = util.NewMasterInfo("master-1", 0, 0)
	mockdriver := &MockSchedulerDriver{
		runningStatuses: make(chan *mesos.TaskStatus, 10),
		scheduler:       testScheduler,
	}
	testScheduler.state = Mutable
	testScheduler.healthCheck = func(map[string]*config.Node) error {
		return nil
	}

	// Push more than enough offers to shoot self in foot if unchecked.
	for _, offer := range []*mesos.Offer{
		NewOffer("1"),
		NewOffer("2"),
		NewOffer("3"),
	} {
		testScheduler.offerCache.Push(offer)
	}
	memberList := config.ClusterMemberList{
		Members: []httptypes.Member{
			{
				ID:         "1",
				Name:       "etcd-1",
				PeerURLs:   nil,
				ClientURLs: nil,
			},
			{
				ID:         "2",
				Name:       "etcd-2",
				PeerURLs:   nil,
				ClientURLs: nil,
			},
		},
	}

	_, port1, err := emtesting.NewTestEtcdServer(t, memberList)
	if err != nil {
		t.Fatalf("Failed to create test etcd server: %s", err)
	}

	_, port2, err := emtesting.NewTestEtcdServer(t, memberList)
	if err != nil {
		t.Fatalf("Failed to create test etcd server: %s", err)
	}

	// Valid reconciled tasks should be added to the running list.
	mockdriver.On(
		"ReconcileTasks",
		0,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()

	for _, taskStatus := range []*mesos.TaskStatus{
		util.NewTaskStatus(
			util.NewTaskID("etcd-1 localhost 0 "+strconv.Itoa(int(port1))+" 0"),
			mesos.TaskState_TASK_RUNNING,
		),
		util.NewTaskStatus(
			util.NewTaskID("etcd-2 localhost 0 "+strconv.Itoa(int(port2))+" 0"),
			mesos.TaskState_TASK_RUNNING,
		),
	} {
		mockdriver.runningStatuses <- taskStatus
	}

	// Scheduler should grow cluster to desired number of nodes.
	offer := NewOffer("1")
	mockdriver.On(
		"LaunchTasks",
		[]*mesos.OfferID{
			offer.Id,
		},
		[]*mesos.TaskInfo{
			{
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", 1),
					util.NewScalarResource("mem", 256),
					util.NewScalarResource("disk", 4096),
					util.NewRangesResource("ports", []*mesos.Value_Range{
						util.NewValueRange(uint64(0), uint64(2)),
					}),
				},
			},
		},
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()

	// Simulate failover, registration and time passing.
	mockdriver.ReconcileTasks([]*mesos.TaskStatus{})
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)
	testScheduler.launchOne(mockdriver)

	assert.Equal(t, 3, len(testScheduler.running),
		"Scheduler should reconcile tasks properly.")

	mockdriver.AssertExpectations(t)
}