Example 1
func prepareExecutorInfo(gt net.Addr) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri := serveSelf()
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	nodeCommand := fmt.Sprintf("./executor -logtostderr=true -v=%d -node -tracerAddr %s", v, gt.String())
	log.V(2).Info("nodeCommand: ", nodeCommand)

	// Build the executor info for the node executor.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("visghs-node"),
		Source:     proto.String("visghs"),
		Command: &mesos.CommandInfo{
			Value: proto.String(nodeCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
Example 2
func TestNoPortsInPodOrOffer(t *testing.T) {
	t.Parallel()
	task, err := fakePodTask("foo")
	if err != nil || task == nil {
		t.Fatal(err)
	}

	offer := &mesos.Offer{
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", 0.001),
			mutil.NewScalarResource("mem", 0.001),
		},
	}
	if ok := task.AcceptOffer(offer); ok {
		t.Fatalf("accepted offer %v:", offer)
	}

	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", t_min_cpu),
			mutil.NewScalarResource("mem", t_min_mem),
		},
	}
	if ok := task.AcceptOffer(offer); !ok {
		t.Fatalf("did not accepted offer %v:", offer)
	}
}
Example 3
func TestBuild(t *testing.T) {
	task, err := NewTask(cluster.BuildContainerConfig(containerConfig, hostConfig, networkingConfig), name, 5*time.Second)
	assert.NoError(t, err)

	task.Build("slave-id", nil)

	assert.Equal(t, task.Container.GetType(), mesosproto.ContainerInfo_DOCKER)
	assert.Equal(t, task.Container.Docker.GetImage(), "test-image")
	assert.Equal(t, task.Container.Docker.GetNetwork(), mesosproto.ContainerInfo_DockerInfo_BRIDGE)

	assert.Equal(t, len(task.Resources), 2)
	assert.Equal(t, task.Resources[0], mesosutil.NewScalarResource("cpus", 42.0))
	assert.Equal(t, task.Resources[1], mesosutil.NewScalarResource("mem", 2))

	assert.Equal(t, task.Command.GetValue(), "ls")
	assert.Equal(t, task.Command.GetArguments(), []string{"foo", "bar"})

	parameters := []string{task.Container.Docker.GetParameters()[0].GetValue(), task.Container.Docker.GetParameters()[1].GetValue()}
	sort.Strings(parameters)

	assert.Equal(t, len(parameters), 2)
	assert.Equal(t, parameters[0], "com.docker.swarm.mesos.name="+name)
	assert.Equal(t, parameters[1], "com.docker.swarm.mesos.task="+*task.TaskId.Value)

	assert.Equal(t, task.SlaveId.GetValue(), "slave-id")
}
Example 4
func TestEmptyOffer(t *testing.T) {
	t.Parallel()
	task := fakePodTask("foo", nil, nil)

	task.Pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Name: "a",
		}},
	}

	defaultProc := NewDefaultProcurement(
		&mesos.ExecutorInfo{
			Resources: []*mesos.Resource{
				mutil.NewScalarResource("cpus", 1.0),
				mutil.NewScalarResource("mem", 64.0),
			},
		},
		mockRegistry{},
	)

	if err := defaultProc.Procure(
		task,
		&api.Node{},
		NewProcureState(&mesos.Offer{}),
	); err == nil {
		t.Fatalf("accepted empty offer")
	}
}
Example 5
func TestBuild(t *testing.T) {
	task, err := newTask(nil, cluster.BuildContainerConfig(dockerclient.ContainerConfig{
		Image:     "test-image",
		CpuShares: 42,
		Memory:    2097152,
		Cmd:       []string{"ls", "foo", "bar"},
	}), name)
	assert.NoError(t, err)

	task.build("slave-id", nil)

	assert.Equal(t, task.Container.GetType(), mesosproto.ContainerInfo_DOCKER)
	assert.Equal(t, task.Container.Docker.GetImage(), "test-image")
	assert.Equal(t, task.Container.Docker.GetNetwork(), mesosproto.ContainerInfo_DockerInfo_BRIDGE)

	assert.Equal(t, len(task.Resources), 2)
	assert.Equal(t, task.Resources[0], mesosutil.NewScalarResource("cpus", 42.0))
	assert.Equal(t, task.Resources[1], mesosutil.NewScalarResource("mem", 2))

	assert.Equal(t, task.Command.GetValue(), "ls")
	assert.Equal(t, task.Command.GetArguments(), []string{"foo", "bar"})

	parameters := []string{task.Container.Docker.GetParameters()[0].GetValue(), task.Container.Docker.GetParameters()[1].GetValue()}
	sort.Strings(parameters)

	assert.Equal(t, len(parameters), 2)
	assert.Equal(t, parameters[0], "com.docker.swarm.mesos.name="+name)
	assert.Equal(t, parameters[1], "com.docker.swarm.mesos.task="+*task.TaskId.Value)

	assert.Equal(t, task.SlaveId.GetValue(), "slave-id")
}
Example 6
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d -slow_tasks=%v", executorCmd, v, *slowTasks)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the executor info.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
Example 7
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example 8
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")

	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
Example 9
func createTask(job *Job, offer *mesos.Offer) mesos.TaskInfo {
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("moroccron-task-%d-%s", time.Now().Unix(), job.Id)),
	}

	command_info := job.CreateCommandInfo()
	task := mesos.TaskInfo{
		Name:    proto.String(taskId.GetValue()),
		TaskId:  taskId,
		SlaveId: offer.SlaveId,
		Container: &mesos.ContainerInfo{
			Type:     mesos.ContainerInfo_DOCKER.Enum(),
			Volumes:  nil,
			Hostname: nil,
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   &DOCKER_IMAGE_DEFAULT,
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
		},
		Command:  &command_info,
		Executor: nil,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", job.CpuResources),
			util.NewScalarResource("mem", job.MemResources),
		},
		//Data: job_json,
	}
	return task
}
Example 10
func (s *MemcacheScheduler) newTaskPrototype(offer *mesos.Offer) *mesos.TaskInfo {
	taskID := s.tasksCreated
	s.tasksCreated++
	// use only the first port of the offered range for this task
	portRange := getPortRange(offer)
	portRange.End = portRange.Begin
	return &mesos.TaskInfo{
		TaskId: &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("Memcache-%d", taskID)),
		},
		SlaveId: offer.SlaveId,
		Resources: []*mesos.Resource{
			mesosutil.NewScalarResource("cpus", TASK_CPUS),
			mesosutil.NewScalarResource("mem", TASK_MEM),
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{portRange}),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   &util.MEMCACHE_CONTAINER,
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				PortMappings: []*mesos.ContainerInfo_DockerInfo_PortMapping{
					&mesos.ContainerInfo_DockerInfo_PortMapping{
						ContainerPort: &MEMCACHE_PORT,
						Protocol:      &MEMCACHE_PROTOCOL,
						HostPort:      &MEMCACHE_HOST_PORT,
					},
				},
			},
		},
	}
}
Example 11
func TestNoPortsInPodOrOffer(t *testing.T) {
	t.Parallel()
	task, err := fakePodTask("foo")
	if err != nil || task == nil {
		t.Fatal(err)
	}

	task.Pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Name: "a",
		}},
	}

	defaultPredicate := NewDefaultPredicate(mresource.DefaultDefaultContainerCPULimit, mresource.DefaultDefaultContainerMemLimit)

	offer := &mesos.Offer{
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", 0.001),
			mutil.NewScalarResource("mem", 0.001),
		},
	}
	if ok := defaultPredicate(task, offer, nil); ok {
		t.Fatalf("accepted offer %v:", offer)
	}

	offer = &mesos.Offer{
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", t_min_cpu),
			mutil.NewScalarResource("mem", t_min_mem),
		},
	}
	if ok := defaultPredicate(task, offer, nil); !ok {
		t.Fatalf("did not accepted offer %v:", offer)
	}
}
Example 12
func TestNodeSelector(t *testing.T) {
	t.Parallel()

	sel1 := map[string]string{"rack": "a"}
	sel2 := map[string]string{"rack": "a", "gen": "2014"}

	tests := []struct {
		selector map[string]string
		attrs    []*mesos.Attribute
		ok       bool
	}{
		{sel1, []*mesos.Attribute{newTextAttribute("rack", "a")}, true},
		{sel1, []*mesos.Attribute{newTextAttribute("rack", "b")}, false},
		{sel1, []*mesos.Attribute{newTextAttribute("rack", "a"), newTextAttribute("gen", "2014")}, true},
		{sel1, []*mesos.Attribute{newTextAttribute("rack", "a"), newScalarAttribute("num", 42.0)}, true},
		{sel1, []*mesos.Attribute{newScalarAttribute("rack", 42.0)}, false},
		{sel2, []*mesos.Attribute{newTextAttribute("rack", "a"), newTextAttribute("gen", "2014")}, true},
		{sel2, []*mesos.Attribute{newTextAttribute("rack", "a"), newTextAttribute("gen", "2015")}, false},
	}

	for _, ts := range tests {
		task, _ := fakePodTask("foo")
		task.Pod.Spec.NodeSelector = ts.selector
		offer := &mesos.Offer{
			Resources: []*mesos.Resource{
				mutil.NewScalarResource("cpus", t_min_cpu),
				mutil.NewScalarResource("mem", t_min_mem),
			},
			Attributes: ts.attrs,
		}
		if got, want := DefaultPredicate(task, offer), ts.ok; got != want {
			t.Fatalf("expected acceptance of offer %v for selector %v to be %v, got %v:", want, got, ts.attrs, ts.selector)
		}
	}
}
Example 13
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
Example 14
func (ct *ConsumerTask) NewTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("consumer-%s", ct.ID)
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(ct.Config)
	if err != nil {
		panic(err)
	}

	taskInfo := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: ct.createExecutor(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ct.Cpu),
			util.NewScalarResource("mem", ct.Mem),
		},
		Data: data,
	}

	return taskInfo
}
Example 15
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example 16
func TestSumScalarResources(t *testing.T) {
	res := []*mesos.Resource{
		util.NewScalarResource("foo", 1.0),
		util.NewScalarResource("foo", 2.0),
	}

	assert.Equal(t, 3.0, SumScalarResources(res))
}
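The SumScalarResources helper under test is not shown in this listing. A minimal sketch consistent with the assertion above, using the same mesos-go getters that appear throughout these examples (not necessarily the tested project's actual implementation):
// SumScalarResources adds up the scalar values of all given resources.
// Sketch only: assumes []*mesos.Resource from the mesos-go bindings.
func SumScalarResources(resources []*mesos.Resource) float64 {
	sum := 0.0
	for _, res := range resources {
		sum += res.GetScalar().GetValue()
	}
	return sum
}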
Example 17
func (s *rancherScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	task := tasks.GetNextTask()
	if task == nil {
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	if task.RegistrationUrl == "" {
		tasks.AddTask(task)
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	taskBytes, err := task.Marshal()
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Error("Error Marshalling task")
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	for _, offer := range offers {
		inadequate := false
		// decline the offer at most once, stopping the scan at the first inadequate resource
		for _, res := range offer.GetResources() {
			if res.GetName() == "cpus" && *res.GetScalar().Value < taskCPUs {
				driver.DeclineOffer(offer.Id, defaultFilter)
				inadequate = true
				break
			}
			if res.GetName() == "mem" && *res.GetScalar().Value < taskMem {
				driver.DeclineOffer(offer.Id, defaultFilter)
				inadequate = true
				break
			}
		}
		if inadequate {
			continue
		}
		mesosTask := &mesos.TaskInfo{
			TaskId: &mesos.TaskID{
				Value: proto.String(task.HostUuid),
			},
			SlaveId: offer.SlaveId,
			Resources: []*mesos.Resource{
				mesosutil.NewScalarResource("cpus", taskCPUs),
				mesosutil.NewScalarResource("mem", taskMem),
			},
			Data:     taskBytes,
			Name:     &task.Name,
			Executor: s.rancherExecutor,
		}
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{mesosTask}, defaultFilter)
	}
}
Example 18
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)
	for _, offer := range offers {
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			sched.tasksLaunched < sched.totalTasks {

			fmt.Printf("Tasks launched: %v Total tasks: %v\n", sched.tasksLaunched, sched.totalTasks)

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			dockerInfo := &mesos.ContainerInfo_DockerInfo{
				Image:        &sched.DockerImage,
				PortMappings: sched.DockerPorts,
			}

			containerType := mesos.ContainerInfo_DOCKER

			containerInfo := &mesos.ContainerInfo{
				Type:   &containerType,
				Docker: dockerInfo,
			}

			commandInfo := &mesos.CommandInfo{
				Value: &sched.DockerCommand,
			}

			task := &mesos.TaskInfo{
				Name:    proto.String("go-task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Container: containerInfo,
				Command:   commandInfo,
			}
			fmt.Printf("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= sched.cpuPerTask
			remainingMems -= sched.memPerTask
		}
		//		fmt.Println("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}

}
Example 19
func generateResourceOffer() []*mesos.Resource {
	val := []*mesos.Resource{
		util.NewScalarResource("cpus", 3),
		util.NewScalarResource("disk", 73590),
		util.NewScalarResource("mem", 1985),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}),
	}
	return val
}
Example 20
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		// bail out if the offer carries no usable port range
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}
		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

		tasks := []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
Example 21
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {

	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo
		for sched.tasksLaunched < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example 22
func (offerHelper *OfferHelper) apply(against *ResourceGroup, cpus float64, mem float64, disk float64, ports int,
	principal string, role string, persistenceID string, containerPath string) []*mesos.Resource {

	ask := []*mesos.Resource{}

	if cpus > 0 {
		against.Cpus = against.Cpus - cpus
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("cpus", cpus, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("cpus", cpus))
		}
	}

	if mem > 0 {
		against.Mem = against.Mem - mem
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("mem", mem, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("mem", mem))
		}
	}

	if disk > 0 {
		against.Disk = against.Disk - disk
		if principal != "" && role != "" && containerPath != "" && persistenceID != "" {
			ask = append(ask, util.NewVolumeResourceWithReservation(disk, containerPath, persistenceID, mesos.Volume_RW.Enum(), principal, role))
		} else if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("disk", disk, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("disk", disk))
		}
	}

	if ports > 0 {
		// take `ports` contiguous ports from a random offset in the available list
		sliceLoc := 0
		if len(against.Ports)-ports > 0 {
			sliceLoc = rand.Intn(len(against.Ports) - ports)
		}
		takingPorts := make([]int64, ports)
		copy(takingPorts, against.Ports[sliceLoc:(sliceLoc+ports)])
		leavingPorts := make([]int64, len(against.Ports)-ports)
		copy(leavingPorts, against.Ports[:sliceLoc])
		copy(leavingPorts[sliceLoc:], against.Ports[(sliceLoc+ports):])

		against.Ports = leavingPorts
		if principal != "" && role != "" {
			ask = append(ask, util.AddResourceReservation(util.NewRangesResource("ports", ArrayToRanges(takingPorts)), principal, role))
		} else {
			ask = append(ask, util.NewRangesResource("ports", ArrayToRanges(takingPorts)))
		}
	}

	return ask
}
Example 23
func TestAcceptOfferPorts(t *testing.T) {
	t.Parallel()
	task, _ := fakePodTask("foo")
	pod := &task.Pod

	offer := &mesos.Offer{
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", t_min_cpu),
			mutil.NewScalarResource("mem", t_min_mem),
			rangeResource("ports", []uint64{1, 1}),
		},
	}
	if ok := DefaultPredicate(task, offer); !ok {
		t.Fatalf("did not accepted offer %v:", offer)
	}

	pod.Spec = api.PodSpec{
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 123,
			}},
		}},
	}

	mresource.LimitPodCPU(&task.Pod, mresource.DefaultDefaultContainerCPULimit)
	mresource.LimitPodMem(&task.Pod, mresource.DefaultDefaultContainerMemLimit)

	if ok := DefaultPredicate(task, offer); ok {
		t.Fatalf("accepted offer %v:", offer)
	}

	pod.Spec.Containers[0].Ports[0].HostPort = 1
	if ok := DefaultPredicate(task, offer); !ok {
		t.Fatalf("did not accepted offer %v:", offer)
	}

	pod.Spec.Containers[0].Ports[0].HostPort = 0
	if ok := DefaultPredicate(task, offer); !ok {
		t.Fatalf("did not accepted offer %v:", offer)
	}

	offer.Resources = []*mesos.Resource{
		mutil.NewScalarResource("cpus", t_min_cpu),
		mutil.NewScalarResource("mem", t_min_mem),
	}
	if ok := DefaultPredicate(task, offer); ok {
		t.Fatalf("accepted offer %v:", offer)
	}

	pod.Spec.Containers[0].Ports[0].HostPort = 1
	if ok := DefaultPredicate(task, offer); ok {
		t.Fatalf("accepted offer %v:", offer)
	}
}
Example 24
func offer(id string, cpu float64, mem float64) *mesos.Offer {
	return &mesos.Offer{
		Id: &mesos.OfferID{
			Value: proto.String(id),
		},
		Resources: []*mesos.Resource{
			mesosutil.NewScalarResource("cpus", cpu),
			mesosutil.NewScalarResource("mem", mem),
		},
	}
}
Example 25
func TestResources(t *testing.T) {
	resources := Resources([]*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})})
	if !strings.Contains(resources, "cpus") {
		t.Errorf(`%s does not contain "cpus"`, resources)
	}
	if !strings.Contains(resources, "mem") {
		t.Errorf(`%s does not contain "mem"`, resources)
	}
	if !strings.Contains(resources, "ports") {
		t.Errorf(`%s does not contain "ports"`, resources)
	}
}
Example 26
func TestExecutorDriverRunTaskEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()

	// send runtask event to driver
	pbMsg := &mesos.RunTaskMessage{
		FrameworkId: util.NewFrameworkID(frameworkID),
		Framework: util.NewFrameworkInfo(
			"test", "test-framework-001", util.NewFrameworkID(frameworkID),
		),
		Pid: proto.String(server.PID.String()),
		Task: util.NewTaskInfo(
			"test-task",
			util.NewTaskID("test-task-001"),
			util.NewSlaveID(slaveID),
			[]*mesos.Resource{
				util.NewScalarResource("mem", 112),
				util.NewScalarResource("cpus", 2),
			},
		),
	}

	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)

	select {
	case <-ch:
	case <-time.After(time.Second * 2):
		log.Errorf("Tired of waiting...")
	}

}
Example 27
func (s *rendlerScheduler) newTaskPrototype(offer *mesos.Offer) *mesos.TaskInfo {
	taskID := s.tasksCreated
	s.tasksCreated++
	return &mesos.TaskInfo{
		TaskId: &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("RENDLER-%d", taskID)),
		},
		SlaveId: offer.SlaveId,
		Resources: []*mesos.Resource{
			mesosutil.NewScalarResource("cpus", taskCPUs),
			mesosutil.NewScalarResource("mem", taskMem),
		},
	}
}
Example 28
func AskForScalar(resourceName string, askSize float64) ResourceAsker {
	return func(resources []*mesos.Resource) ([]*mesos.Resource, *mesos.Resource, bool) {
		newResources := make([]*mesos.Resource, len(resources))
		copy(newResources, resources)
		for idx, resource := range resources {
			if resource.GetName() == resourceName && askSize <= resource.GetScalar().GetValue() {
				newResources[idx] = util.NewScalarResource(resourceName, resource.GetScalar().GetValue()-askSize)
				ask := util.NewScalarResource(resourceName, askSize)
				return newResources, ask, true
			}
		}
		return newResources, nil, false
	}
}
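A hypothetical caller of the ResourceAsker returned above, carving half a CPU out of an offer; the function name splitHalfCPU and its body are illustrative only:
// splitHalfCPU shows one way to apply an AskForScalar closure (sketch only,
// assuming the usual mesos-go imports used throughout these examples).
func splitHalfCPU(offer *mesos.Offer) ([]*mesos.Resource, *mesos.Resource) {
	asker := AskForScalar("cpus", 0.5)
	remaining, ask, ok := asker(offer.GetResources())
	if !ok {
		return offer.GetResources(), nil // offer had less than 0.5 cpus free
	}
	// ask is a 0.5-cpu resource to attach to a TaskInfo;
	// remaining is the resource list with those 0.5 cpus deducted.
	return remaining, ask
}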
Example 29
func (sched *ScraperScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	for _, offer := range offers {
		if sched.tasksLaunched >= sched.totalTasks || len(sched.urls) == 0 {
			log.Infof("Declining offer %s", offer.Id.GetValue())
			driver.DeclineOffer(offer.Id, &mesos.Filters{})
			continue
		}
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			sched.tasksLaunched < sched.totalTasks {

			log.Infof("Processing url %v of %v\n", sched.tasksLaunched, sched.totalTasks)
			log.Infof("Total Tasks: %d", sched.totalTasks)
			log.Infof("Tasks Launched: %d", sched.tasksLaunched)
			uri := sched.urls[sched.tasksLaunched]
			log.Infof("URI: %s", uri)

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Data: []byte(uri),
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= sched.cpuPerTask
			remainingMems -= sched.memPerTask
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example 30
func TestFilterResources(t *testing.T) {
	rf := ResourceFilter{}
	o := util.NewOffer(util.NewOfferID("offerid"), util.NewFrameworkID("frameworkid"), util.NewSlaveID("slaveId"), "hostname")
	o.Resources = []*mesos.Resource{
		util.NewScalarResource("name", 1.0),
		util.NewScalarResource("ub0r-resource", 2.0),
		util.NewScalarResource("ub0r-resource", 3.0),
	}

	res := rf.FilterResources(o, "ub0r-resource")

	assert.Equal(t, 2, len(res))
	assert.Equal(t, "ub0r-resource", res[0].GetName())
}