Example #1
func (t *T) AcceptOffer(offer *mesos.Offer) bool {
	if offer == nil {
		return false
	}

	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}

	// check ports
	if _, err := t.mapper.Generate(t, offer); err != nil {
		log.V(3).Info(err)
		return false
	}

	// find offered cpu and mem
	var (
		offeredCpus mresource.CPUShares
		offeredMem  mresource.MegaBytes
	)
	for _, resource := range offer.Resources {
		if resource.GetName() == "cpus" {
			offeredCpus = mresource.CPUShares(*resource.GetScalar().Value)
		}

		if resource.GetName() == "mem" {
			offeredMem = mresource.MegaBytes(*resource.GetScalar().Value)
		}
	}

	// calculate cpu and mem sum over all containers of the pod
	// TODO (@sttts): also support pod.spec.resources.limit.request
	// TODO (@sttts): take into account the executor resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
	if (cpu > offeredCpus) || (mem > offeredMem) {
		log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		return false
	}

	return true
}
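The cpu/mem scan over offer.Resources in AcceptOffer is a pattern worth isolating. Below is a minimal, self-contained sketch of the same loop using the mesos-go types seen throughout these examples; the offeredScalars helper and the hand-built offer are illustrative only, and the import paths assume the classic mesos-go (v0) layout.

package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
	util "github.com/mesos/mesos-go/mesosutil"
)

// offeredScalars scans an offer's resources for the "cpus" and "mem"
// scalar values, mirroring the loop in AcceptOffer above.
func offeredScalars(offer *mesos.Offer) (cpus, mem float64) {
	for _, resource := range offer.GetResources() {
		switch resource.GetName() {
		case "cpus":
			cpus = resource.GetScalar().GetValue()
		case "mem":
			mem = resource.GetScalar().GetValue()
		}
	}
	return cpus, mem
}

func main() {
	// A hand-built offer; in a real scheduler offers arrive via the
	// ResourceOffers callback.
	offer := &mesos.Offer{
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 2.0),
			util.NewScalarResource("mem", 1024.0),
		},
	}
	cpus, mem := offeredScalars(offer)
	fmt.Printf("offered: cpus=%.2f mem=%.2f MB\n", cpus, mem)
}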
Example #2
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	setCommandArgument(t.executor, "--hostname-override", offer.GetHostname(), true)

	return nil
}
Example #3
func (s *Scheduler) acceptOffer(driver scheduler.SchedulerDriver, offer *mesos.Offer) string {
	if s.cluster.Exists(offer.GetHostname()) {
		return fmt.Sprintf("Server on host %s is already running.", offer.GetHostname())
	}

	declineReason := s.match(offer)
	if declineReason == "" {
		s.launchTask(driver, offer)
	}
	return declineReason
}
Example #4
// FillFromDetails fills in the Spec of the T; it should be called during k8s scheduling, before binding.
func (t *T) FillFromDetails(details *mesos.Offer) error {
	if details == nil {
		//programming error
		panic("offer details are nil")
	}

	// compute used resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", details.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem)

	t.Spec = Spec{
		SlaveID:       details.GetSlaveId().GetValue(),
		AssignedSlave: details.GetHostname(),
		CPU:           cpu,
		Memory:        mem,
	}

	// fill in port mapping
	if mapping, err := t.mapper.Generate(t, details); err != nil {
		t.Reset()
		return err
	} else {
		ports := []uint64{}
		for _, entry := range mapping {
			ports = append(ports, entry.OfferPort)
		}
		t.Spec.PortMap = mapping
		t.Spec.Ports = ports
	}

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := details.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname

	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
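The --hostname-override handling above (rewrite the flag if it is already present, append it otherwise) recurs in several of these examples. Here is a standalone sketch of just that argv manipulation using only the standard library; the setHostnameOverride helper name is made up for illustration.

package main

import (
	"fmt"
	"strings"
)

// setHostnameOverride rewrites an existing --hostname-override flag in argv,
// or appends one if none is present, mirroring the loop in FillFromDetails.
func setHostnameOverride(argv []string, hostname string) []string {
	const flag = "--hostname-override="
	for i, arg := range argv {
		if strings.HasPrefix(arg, flag) {
			argv[i] = flag + hostname
			return argv
		}
	}
	return append(argv, flag+hostname)
}

func main() {
	fmt.Println(setHostnameOverride([]string{"--v=2", "--hostname-override=old"}, "slave0"))
	// [--v=2 --hostname-override=slave0]
	fmt.Println(setHostnameOverride([]string{"--v=2"}, "slave0"))
	// [--v=2 --hostname-override=slave0]
}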
Example #5
func OfferAttributes(offer *mesos.Offer) map[string]string {
	offerAttributes := map[string]string{
		"hostname": offer.GetHostname(),
	}

	for _, attribute := range offer.GetAttributes() {
		text := attribute.GetText().GetValue()
		if text != "" {
			offerAttributes[attribute.GetName()] = text
		}
	}

	return offerAttributes
}
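Note that OfferAttributes only picks up TEXT attributes. The sketch below builds a fake offer with one TEXT attribute and extracts the same map; the proto import path and the hand-constructed mesos.Attribute are assumptions based on the classic mesos-go (v0) bindings, not taken from the original project.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
)

func main() {
	// A hand-built offer with a hostname and one TEXT attribute.
	offer := &mesos.Offer{
		Hostname: proto.String("slave0.example.com"),
		Attributes: []*mesos.Attribute{
			{
				Name: proto.String("rack"),
				Type: mesos.Value_TEXT.Enum(),
				Text: &mesos.Value_Text{Value: proto.String("rack-1")},
			},
		},
	}

	// Same extraction as OfferAttributes above.
	attrs := map[string]string{"hostname": offer.GetHostname()}
	for _, attribute := range offer.GetAttributes() {
		if text := attribute.GetText().GetValue(); text != "" {
			attrs[attribute.GetName()] = text
		}
	}
	fmt.Println(attrs) // map[hostname:slave0.example.com rack:rack-1]
}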
Example #6
// FillFromDetails fills in the Spec of the T; it should be called during k8s scheduling,
// before binding.
// TODO(jdef): remove hardcoded values and make use of actual pod resource settings
func (t *T) FillFromDetails(details *mesos.Offer) error {
	if details == nil {
		//programming error
		panic("offer details are nil")
	}

	log.V(3).Infof("Recording offer(s) %v against pod %v", details.Id, t.Pod.Name)

	t.Spec = Spec{
		SlaveID: details.GetSlaveId().GetValue(),
		CPU:     DefaultContainerCpus,
		Memory:  DefaultContainerMem,
	}

	if mapping, err := t.mapper.Generate(t, details); err != nil {
		t.Reset()
		return err
	} else {
		ports := []uint64{}
		for _, entry := range mapping {
			ports = append(ports, entry.OfferPort)
		}
		t.Spec.PortMap = mapping
		t.Spec.Ports = ports
	}

	// the hostname of the executor needs to match that of the offer, otherwise
	// the qinglet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := details.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname

	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
Example #7
func NodeSelectorPredicate(t *T, offer *mesos.Offer, n *api.Node) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		if n.Labels == nil {
			return false
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(n.Labels)) {
			return false
		}
	}
	return true
}
Example #8
func Offer(offer *mesos.Offer) string {
	var buffer bytes.Buffer

	buffer.WriteString(offer.GetHostname())
	buffer.WriteString(ID(offer.GetId().GetValue()))
	resources := Resources(offer.GetResources())
	if resources != "" {
		buffer.WriteString(" ")
		buffer.WriteString(resources)
	}
	attributes := Attributes(offer.GetAttributes())
	if attributes != "" {
		buffer.WriteString(" ")
		buffer.WriteString(attributes)
	}

	return buffer.String()
}
Example #9
func NodeSelectorPredicate(t *T, offer *mesos.Offer) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}
	return true
}
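The selector check in the two NodeSelectorPredicate variants reduces to the Kubernetes labels package. A minimal sketch of just that matching step follows; the k8s.io/apimachinery import path reflects current Kubernetes releases, whereas these older examples vendored the labels package from the main kubernetes repository.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The pod's nodeSelector and the labels derived from an offer's TEXT attributes.
	nodeSelector := map[string]string{"rack": "rack-1"}
	slaveLabels := map[string]string{"rack": "rack-1", "zone": "us-east-1a"}

	selector := labels.SelectorFromSet(labels.Set(nodeSelector))
	fmt.Println(selector.Matches(labels.Set(slaveLabels))) // true

	// A selector asking for a label the slave does not carry does not match.
	other := labels.SelectorFromSet(labels.Set{"rack": "rack-2"})
	fmt.Println(other.Matches(labels.Set(slaveLabels))) // false
}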
Example #10
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("statsd-kafka-%s", offer.GetHostname())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetHostname()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetHostname(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #11
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := offer.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname

	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
Example #12
func (ctx *RunOnceApplicationContext) newTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s.%s", ctx.Application.ID, offer.GetHostname())
	taskID := util.NewTaskID(fmt.Sprintf("%s|%s|%s", ctx.Application.ID, offer.GetHostname(), framework.UUID()))

	var URIs []*mesos.CommandInfo_URI
	if len(ctx.Application.ArtifactURLs) > 0 || len(ctx.Application.AdditionalArtifacts) > 0 {
		URIs = make([]*mesos.CommandInfo_URI, 0)
		for _, uri := range ctx.Application.ArtifactURLs {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
		for _, uri := range ctx.Application.AdditionalArtifacts {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
	}

	return &mesos.TaskInfo{
		Name:    proto.String(taskName),
		TaskId:  taskID,
		SlaveId: offer.GetSlaveId(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ctx.Application.Cpu),
			util.NewScalarResource("mem", ctx.Application.Mem),
		},
		Command: &mesos.CommandInfo{
			Shell: proto.Bool(true),
			Value: proto.String(ctx.Application.LaunchCommand),
			Uris:  URIs,
		},
	}
}
Example #13
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()
	return nil
}
Example #14
func (s *Scheduler) createExecutor(offer *mesos.Offer, tcpPort uint64, udpPort uint64) *mesos.ExecutorInfo {
	name := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	id := fmt.Sprintf("%s-%s", name, uuid())

	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}

	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}

	command := fmt.Sprintf("./%s --log.level %s --tcp %d --udp %d --host %s", Config.Executor, Config.LogLevel, tcpPort, udpPort, offer.GetHostname())

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(name),
		Command: &mesos.CommandInfo{
			Value: proto.String(command),
			Uris:  uris,
		},
	}
}
Example #15
File: utils.go  Project: ruo91/syscol
func offerString(offer *mesos.Offer) string {
	return fmt.Sprintf("\n%s%s %s %s", offer.GetHostname(), idString(offer.GetId().GetValue()), resourcesString(offer.GetResources()), attributesString(offer.GetAttributes()))
}
Example #16
func (frn *FrameworkRiakNode) PrepareForLaunchAndGetNewTaskInfo(sc *SchedulerCore, offer *mesos.Offer, executorAsk []*mesos.Resource, taskAsk []*mesos.Resource) *mesos.TaskInfo {
	// THIS IS A MUTATING CALL
	if frn.CurrentState != process_state.Shutdown && frn.CurrentState != process_state.Failed && frn.CurrentState != process_state.Unknown {
		log.Panicf("Trying to generate Task Info while node is up. ZK FRN State: %v", frn.CurrentState)
	}
	frn.Generation = frn.Generation + 1
	frn.TaskStatus = nil
	frn.CurrentState = process_state.Starting
	frn.LastOfferUsed = offer

	executorUris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      &(sc.schedulerHTTPServer.hostURI),
			Executable: proto.Bool(true),
		},
	}
	//executorUris = append(executorUris,
	//	&mesos.CommandInfo_URI{Value: &(frn.frc.sc.schedulerHTTPServer.hostURI), Executable: proto.Bool(true)})

	exec := &mesos.ExecutorInfo{
		// No idea if this is the "right" way to do it, but I think so?
		ExecutorId: util.NewExecutorID(frn.ExecutorID()),
		Name:       proto.String("Executor (Go)"),
		Source:     proto.String("Riak Mesos Framework (Go)"),
		Command: &mesos.CommandInfo{
			Value:     proto.String(sc.schedulerHTTPServer.executorName),
			Uris:      executorUris,
			Shell:     proto.Bool(false),
			Arguments: []string{sc.schedulerHTTPServer.executorName, "-logtostderr=true", "-taskinfo", frn.CurrentID()},
		},
		Resources: executorAsk,
	}
	taskId := &mesos.TaskID{
		Value: proto.String(frn.CurrentID()),
	}

	nodename := frn.CurrentID() + "@" + offer.GetHostname()

	if !strings.Contains(offer.GetHostname(), ".") {
		nodename = nodename + "."
	}

	taskData := common.TaskData{
		FullyQualifiedNodeName:    nodename,
		RexFullyQualifiedNodeName: "rex-" + nodename,
		Zookeepers:                sc.zookeepers,
		NodeID:                    frn.UUID.String(),
		FrameworkName:             sc.frameworkName,
		URI:                       sc.schedulerHTTPServer.GetURI(),
		ClusterName:               frn.ClusterName,
	}
	frn.TaskData = taskData

	binTaskData, err := taskData.Serialize()

	if err != nil {
		log.Panic(err)
	}

	taskInfo := &mesos.TaskInfo{
		Name:      proto.String(frn.CurrentID()),
		TaskId:    taskId,
		SlaveId:   offer.SlaveId,
		Executor:  exec,
		Resources: taskAsk,
		Data:      binTaskData,
	}
	frn.LastTaskInfo = taskInfo

	return taskInfo
}