func (ct *ConsumerTask) NewTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("consumer-%s", ct.ID)
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(ct.Config)
	if err != nil {
		panic(err)
	}

	taskInfo := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: ct.createExecutor(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ct.Cpu),
			util.NewScalarResource("mem", ct.Mem),
		},
		Data: data,
	}

	return taskInfo
}
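The glue between incoming offers and this builder lives in the scheduler's offer callback. Below is a minimal sketch of such a call site, assuming a hypothetical scheduler that keeps pending ConsumerTasks in a slice; the type and field names are illustrative, not from the original project.

func (s *Scheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		if len(s.pending) == 0 {
			// Nothing left to place: decline and ask not to see this offer again for a while.
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(10)})
			continue
		}
		ct := s.pending[0]
		s.pending = s.pending[1:]
		taskInfo := ct.NewTaskInfo(offer)
		driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}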
Example #2
func (s *Scheduler) createExecutor(offer *mesos.Offer, tcpPort uint64, udpPort uint64) *mesos.ExecutorInfo {
	name := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	id := fmt.Sprintf("%s-%s", name, uuid())

	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}

	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}

	command := fmt.Sprintf("./%s --log.level %s --tcp %d --udp %d --host %s", Config.Executor, Config.LogLevel, tcpPort, udpPort, offer.GetHostname())

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(name),
		Command: &mesos.CommandInfo{
			Value: proto.String(command),
			Uris:  uris,
		},
	}
}
Example #3
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
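buildPorts, buildEnvironment, buildCommandInfo, and buildVolumes are elided from this excerpt. For orientation, here is a sketch of what a volume builder along these lines could look like, assuming host-to-container path pairs as input; the real eremetic helpers take the task itself and may differ.

func buildVolumesSketch(paths map[string]string) []*mesosproto.Volume {
	// One read-write Docker volume per host/container path pair.
	volumes := make([]*mesosproto.Volume, 0, len(paths))
	for hostPath, containerPath := range paths {
		volumes = append(volumes, &mesosproto.Volume{
			Mode:          mesosproto.Volume_RW.Enum(),
			ContainerPath: proto.String(containerPath),
			HostPath:      proto.String(hostPath),
		})
	}
	return volumes
}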
Example #4
func (oc *OfferCache) Push(newOffer *mesos.Offer) bool {
	oc.mut.Lock()
	defer oc.mut.Unlock()
	if len(oc.offerSet) < oc.maxOffers {
		// Reject offers from existing slaves.
		for _, offer := range oc.offerSet {
			if offer.SlaveId.GetValue() == newOffer.SlaveId.GetValue() &&
				oc.singleInstancePerSlave {
				log.Info("Offer already exists for slave ", newOffer.SlaveId.GetValue())
				return false
			}
		}
		oc.offerSet[newOffer.GetId().GetValue()] = newOffer

		// Try to add offer to the queue, clearing out invalid
		// offers in order to make room if necessary.
		for i := 0; i < 2; i++ {
			select {
			case oc.offerQueue <- newOffer:
				return true
			default:
				oc.gc()
			}
		}
	}
	log.Info("We already have enough offers cached.")
	return false
}
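Push relies on several OfferCache fields that are not shown. Reconstructed below as an assumption from the method body alone; the real definition (including the gc method that evicts stale offers) may differ.

type OfferCache struct {
	mut                    sync.Mutex
	offerSet               map[string]*mesos.Offer // keyed by offer ID value
	offerQueue             chan *mesos.Offer       // consumers pop offers from here
	maxOffers              int
	singleInstancePerSlave bool
}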
Example #5
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) // shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #6
func (t *T) AcceptOffer(offer *mesos.Offer) bool {
	if offer == nil {
		return false
	}

	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}

	// check ports
	if _, err := t.mapper.Generate(t, offer); err != nil {
		log.V(3).Info(err)
		return false
	}

	// find offered cpu and mem
	var (
		offeredCpus mresource.CPUShares
		offeredMem  mresource.MegaBytes
	)
	for _, resource := range offer.Resources {
		if resource.GetName() == "cpus" {
			offeredCpus = mresource.CPUShares(*resource.GetScalar().Value)
		}

		if resource.GetName() == "mem" {
			offeredMem = mresource.MegaBytes(*resource.GetScalar().Value)
		}
	}

	// calculate cpu and mem sum over all containers of the pod
	// TODO (@sttts): also support pod.spec.resources.limit.request
	// TODO (@sttts): take into account the executor resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
	if (cpu > offeredCpus) || (mem > offeredMem) {
		log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		return false
	}

	return true
}
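The inline scan over offer.Resources for "cpus" and "mem" recurs throughout these examples. Here is a small helper, not part of the original code, that factors it out; the protobuf getters make it nil-safe, so a missing or non-scalar resource simply yields 0.

func scalarFromOffer(offer *mesos.Offer, name string) float64 {
	for _, resource := range offer.GetResources() {
		if resource.GetName() == name {
			// GetScalar returns nil for ranges/set resources; GetValue on nil returns 0.
			return resource.GetScalar().GetValue()
		}
	}
	return 0
}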
Example #7
func (s *Scheduler) launchTask(task Task, offer *mesos.Offer) {
	taskInfo := task.NewTaskInfo(offer)
	task.Data().State = TaskStateStaging
	task.Data().Attributes = utils.OfferAttributes(offer)
	task.Data().ExecutorID = taskInfo.GetExecutor().GetExecutorId().GetValue()
	task.Data().SlaveID = taskInfo.GetSlaveId().GetValue()
	task.Data().TaskID = taskInfo.GetTaskId().GetValue()

	s.driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
}
Example #8
func (ctx *RunOnceApplicationContext) LaunchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) error {
	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	ctx.InstancesLeftToRun--
	taskInfo := ctx.newTaskInfo(offer)
	ctx.tasks = append(ctx.tasks, newRunOnceTask(offer, taskInfo.GetTaskId().GetValue()))

	_, err := driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
	return err
}
Example #9
func (s *Scheduler) acceptOffer(driver scheduler.SchedulerDriver, offer *mesos.Offer) string {
	if s.cluster.Exists(offer.GetSlaveId().GetValue()) {
		return fmt.Sprintf("Server on slave %s is already running.", offer.GetSlaveId().GetValue())
	} else {
		declineReason := s.match(offer)
		if declineReason == "" {
			s.launchTask(driver, offer)
		}
		return declineReason
	}
}
Example #10
// WildcardMapper maps k8s wildcard ports (hostPort == 0) to any available offer port
func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
	mapping, err := FixedMapper(t, offer)
	if err != nil {
		return nil, err
	}

	taken := make(map[uint64]struct{})
	for _, entry := range mapping {
		taken[entry.OfferPort] = struct{}{}
	}

	wildports := []HostPortMapping{}
	for i, container := range t.Pod.Spec.Containers {
		for pi, port := range container.Ports {
			if port.HostPort == 0 {
				wildports = append(wildports, HostPortMapping{
					ContainerIdx: i,
					PortIdx:      pi,
				})
			}
		}
	}

	remaining := len(wildports)
	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
		log.V(3).Infof("Searching for wildcard port in range {%d:%d}", bp, ep)
		for i := range wildports {
			if wildports[i].OfferPort != 0 {
				continue
			}
			for port := bp; port <= ep && remaining > 0; port++ {
				if _, inuse := taken[port]; inuse {
					continue
				}
				wildports[i].OfferPort = port
				wildports[i].Role = starredRole(role)
				mapping = append(mapping, wildports[i])
				remaining--
				taken[port] = struct{}{}
				break
			}
		}
	})

	if remaining > 0 {
		err := &PortAllocationError{
			PodId: t.Pod.Name,
		}
		// it doesn't make sense to include a port list here because they were all zero (wildcards)
		return nil, err
	}

	return mapping, nil
}
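Both WildcardMapper above and FixedMapper (example #15 below) iterate the offer's port ranges through foreachPortsRange, which is not included in this listing. A plausible shape for it, inferred from the call sites and offered only as an assumption:

func foreachPortsRange(resources []*mesos.Resource, roles []string, f func(bp, ep uint64, role string)) {
	wanted := make(map[string]struct{}, len(roles))
	for _, role := range roles {
		wanted[role] = struct{}{}
	}
	for _, resource := range resources {
		if resource.GetName() != "ports" {
			continue
		}
		if _, ok := wanted[resource.GetRole()]; !ok {
			continue
		}
		// Invoke f once per [begin, end] range offered under an acceptable role.
		for _, r := range resource.GetRanges().GetRange() {
			f(r.GetBegin(), r.GetEnd(), resource.GetRole())
		}
	}
}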
Example #11
func (c *EqualsConstraint) Match(offer *mesos.Offer) bool {
	for _, a := range offer.GetAttributes() {
		if c.Attribute == a.GetName() {
			if a.GetType() == mesos.Value_TEXT {
				return c.Value == a.GetText().GetValue()
			} else if a.GetType() == mesos.Value_SCALAR {
				return c.Value == fmt.Sprintf("%.f", a.GetScalar().GetValue())
			} else {
				return false
			}
		}
	}
	return false
}
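A usage sketch, assuming EqualsConstraint is the plain two-string-field struct its Match body implies; the attribute name and value here are made up.

func exampleEqualsConstraintUsage() {
	offer := &mesos.Offer{
		Attributes: []*mesos.Attribute{
			{
				Name: proto.String("rack"),
				Type: mesos.Value_TEXT.Enum(),
				Text: &mesos.Value_Text{Value: proto.String("us-east-1a")},
			},
		},
	}
	c := &EqualsConstraint{Attribute: "rack", Value: "us-east-1a"}
	fmt.Println(c.Match(offer)) // true
}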
Example #12
// FillFromDetails fills the Spec in the T; it should be called during k8s scheduling, before binding.
func (t *T) FillFromDetails(details *mesos.Offer) error {
	if details == nil {
		// programming error
		panic("offer details are nil")
	}

	// compute used resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", details.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem)

	t.Spec = Spec{
		SlaveID: details.GetSlaveId().GetValue(),
		CPU:     cpu,
		Memory:  mem,
	}

	// fill in port mapping
	if mapping, err := t.mapper.Generate(t, details); err != nil {
		t.Reset()
		return err
	} else {
		ports := []uint64{}
		for _, entry := range mapping {
			ports = append(ports, entry.OfferPort)
		}
		t.Spec.PortMap = mapping
		t.Spec.Ports = ports
	}

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := details.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname

	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
Example #13
func OfferAttributes(offer *mesos.Offer) map[string]string {
	offerAttributes := map[string]string{
		"hostname": offer.GetHostname(),
	}

	for _, attribute := range offer.GetAttributes() {
		text := attribute.GetText().GetValue()
		if text != "" {
			offerAttributes[attribute.GetName()] = text
		}
	}

	return offerAttributes
}
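For example, given an offer carrying one TEXT attribute (the hostname and attribute values below are illustrative):

func exampleOfferAttributesUsage() {
	offer := &mesos.Offer{
		Hostname: proto.String("slave0.example.com"),
		Attributes: []*mesos.Attribute{
			{
				Name: proto.String("zone"),
				Type: mesos.Value_TEXT.Enum(),
				Text: &mesos.Value_Text{Value: proto.String("eu-west-1b")},
			},
		},
	}
	attrs := OfferAttributes(offer)
	fmt.Println(attrs["hostname"], attrs["zone"]) // slave0.example.com eu-west-1b
}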
Example #14
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) // shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #15
// FixedMapper maps k8s host ports to offered ports ignoring hostPorts == 0 (remaining pod-private)
func FixedMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
	requiredPorts := make(map[uint64]HostPortMapping)
	mapping := []HostPortMapping{}
	for i, container := range t.Pod.Spec.Containers {
		// strip all port==0 from this array; k8s already knows what to do with zero-
		// ports (it does not create 'port bindings' on the minion-host); we need to
		// remove the wildcards from this array since they don't consume host resources
		for pi, port := range container.Ports {
			if port.HostPort == 0 {
				continue // ignore
			}
			m := HostPortMapping{
				ContainerIdx: i,
				PortIdx:      pi,
				OfferPort:    uint64(port.HostPort),
			}
			if entry, inuse := requiredPorts[uint64(port.HostPort)]; inuse {
				return nil, &DuplicateHostPortError{entry, m}
			}
			requiredPorts[uint64(port.HostPort)] = m
		}
	}

	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
		for port := range requiredPorts {
			log.V(3).Infof("evaluating port range {%d:%d} %d", bp, ep, port)
			if (bp <= port) && (port <= ep) {
				m := requiredPorts[port]
				m.Role = starredRole(role)
				mapping = append(mapping, m)
				delete(requiredPorts, port)
			}
		}
	})

	unsatisfiedPorts := len(requiredPorts)
	if unsatisfiedPorts > 0 {
		err := &PortAllocationError{
			PodId: t.Pod.Name,
		}
		for p := range requiredPorts {
			err.Ports = append(err.Ports, p)
		}
		return nil, err
	}

	return mapping, nil
}
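starredRole is likewise elided; from its uses in both mappers it plausibly substitutes Mesos's default role for an empty one, along these lines (an assumption, not the project's verified code):

func starredRole(role string) string {
	if role == "" {
		return "*" // the default Mesos role
	}
	return role
}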
Example #16
func NodeSelectorPredicate(t *T, offer *mesos.Offer, n *api.Node) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		if n.Labels == nil {
			return false
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(n.Labels)) {
			return false
		}
	}
	return true
}
Example #17
func NodeSelectorPredicate(t *T, offer *mesos.Offer) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}
	return true
}
Example #18
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	setCommandArgument(t.executor, "--hostname-override", offer.GetHostname(), true)

	return nil
}
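setCommandArgument is not shown in this listing, but example #21 below inlines the same hostname-override logic. A plausible implementation inferred from that inline version; hypothetical, not the project's actual helper.

func setCommandArgument(ei *mesos.ExecutorInfo, flag, value string, create bool) {
	argv := ei.Command.Arguments
	prefix := flag + "="
	for i, arg := range argv {
		if strings.HasPrefix(arg, prefix) {
			// Overwrite an existing occurrence of the flag in place.
			argv[i] = prefix + value
			return
		}
	}
	if create {
		ei.Command.Arguments = append(argv, prefix+value)
	}
}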
Example #19
func Offer(offer *mesos.Offer) string {
	var buffer bytes.Buffer

	buffer.WriteString(offer.GetHostname())
	buffer.WriteString(ID(offer.GetId().GetValue()))
	resources := Resources(offer.GetResources())
	if resources != "" {
		buffer.WriteString(" ")
		buffer.WriteString(resources)
	}
	attributes := Attributes(offer.GetAttributes())
	if attributes != "" {
		buffer.WriteString(" ")
		buffer.WriteString(attributes)
	}

	return buffer.String()
}
Example #20
func (ctx *RunOnceApplicationContext) newTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s.%s", ctx.Application.ID, offer.GetHostname())
	taskID := util.NewTaskID(fmt.Sprintf("%s|%s|%s", ctx.Application.ID, offer.GetHostname(), framework.UUID()))

	var URIs []*mesos.CommandInfo_URI
	if len(ctx.Application.ArtifactURLs) > 0 || len(ctx.Application.AdditionalArtifacts) > 0 {
		URIs = make([]*mesos.CommandInfo_URI, 0)
		for _, uri := range ctx.Application.ArtifactURLs {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
		for _, uri := range ctx.Application.AdditionalArtifacts {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
	}

	return &mesos.TaskInfo{
		Name:    proto.String(taskName),
		TaskId:  taskID,
		SlaveId: offer.GetSlaveId(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ctx.Application.Cpu),
			util.NewScalarResource("mem", ctx.Application.Mem),
		},
		Command: &mesos.CommandInfo{
			Shell: proto.Bool(true),
			Value: proto.String(ctx.Application.LaunchCommand),
			Uris:  URIs,
		},
	}
}
Example #21
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()

	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := offer.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname

	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
Example #22
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()
	return nil
}
Example #23
// TODO(tyler) split this long function up!
func (s *EtcdScheduler) launchOne(driver scheduler.SchedulerDriver) {
	// Always ensure we've pruned any dead / unmanaged nodes before
	// launching new ones, or we may overconfigure the ensemble such
	// that it can not make progress if the next launch fails.
	err := s.Prune()
	if err != nil {
		log.Errorf("Failed to remove stale cluster members: %s", err)
		return
	}

	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		return
	}

	// validOffer filters out offers that are no longer
	// desirable, even though they may have been when
	// they were enqueued.
	validOffer := func(offer *mesos.Offer) bool {
		runningCopy := s.RunningCopy()
		for _, etcdConfig := range runningCopy {
			if etcdConfig.SlaveID == offer.SlaveId.GetValue() {
				if s.singleInstancePerSlave {
					log.Info("Skipping offer: already running on this slave.")
					return false
				}
			}
		}
		return true
	}

	// Issue BlockingPop until we get back an offer we can use.
	var offer *mesos.Offer
	for {
		offer = s.offerCache.BlockingPop()
		if validOffer(offer) {
			break
		} else {
			s.decline(driver, offer)
		}
	}

	// Do this again because BlockingPop may have taken a long time.
	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		s.decline(driver, offer)
		return
	}

	// TODO(tyler) this is a broken hack
	resources := parseOffer(offer)
	lowest := *resources.ports[0].Begin
	rpcPort := lowest
	clientPort := lowest + 1
	httpPort := lowest + 2

	s.mut.Lock()
	var clusterType string
	if len(s.running) == 0 {
		clusterType = "new"
	} else {
		clusterType = "existing"
	}

	s.highestInstanceID++
	name := "etcd-" + strconv.FormatInt(s.highestInstanceID, 10)

	node := &config.Node{
		Name:       name,
		Host:       *offer.Hostname,
		RPCPort:    rpcPort,
		ClientPort: clientPort,
		ReseedPort: httpPort,
		Type:       clusterType,
		SlaveID:    offer.GetSlaveId().GetValue(),
	}
	running := []*config.Node{node}
	for _, r := range s.running {
		running = append(running, r)
	}
	serializedNodes, err := json.Marshal(running)
	log.Infof("Serialized running: %+v", string(serializedNodes))
	if err != nil {
		log.Errorf("Could not serialize running list: %v", err)
		// This Unlock is not deferred because the test implementation of LaunchTasks
		// calls this scheduler's StatusUpdate method, causing the test to deadlock.
		s.decline(driver, offer)
		s.mut.Unlock()
		return
	}

	configSummary := node.String()
	taskID := &mesos.TaskID{Value: &configSummary}
	executor := s.newExecutorInfo(node, s.executorUris)
	task := &mesos.TaskInfo{
		Data:     serializedNodes,
		Name:     proto.String("etcd-server"),
		TaskId:   taskID,
		SlaveId:  offer.SlaveId,
		Executor: executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", s.cpusPerTask),
			util.NewScalarResource("mem", s.memPerTask),
			util.NewScalarResource("disk", s.diskPerTask),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(rpcPort), uint64(httpPort)),
			}),
		},
		Discovery: &mesos.DiscoveryInfo{
			Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
			Name:       proto.String("etcd-server"),
			Ports: &mesos.Ports{
				Ports: []*mesos.Port{
					&mesos.Port{
						Number:   proto.Uint32(uint32(rpcPort)),
						Protocol: proto.String("tcp"),
					},
					// HACK: "client" is not a real SRV protocol.  This is so
					// that we can have etcd proxies use srv discovery on the
					// above tcp name.  Mesos-dns does not yet care about
					// names for DiscoveryInfo.  When it does, we should
					// create a name for clients to use.  We want to keep
					// the rpcPort accessible at _etcd-server._tcp.<fwname>.mesos
					&mesos.Port{
						Number:   proto.Uint32(uint32(clientPort)),
						Protocol: proto.String("client"),
					},
				},
			},
		},
	}

	log.Infof(
		"Prepared task: %s with offer %s for launch",
		task.GetName(),
		offer.Id.GetValue(),
	)
	log.Info("Launching etcd node.")

	tasks := []*mesos.TaskInfo{task}

	s.pending[node.Name] = struct{}{}

	// This Unlock is not deferred because the test implementation of LaunchTasks
	// calls this scheduler's StatusUpdate method, causing the test to deadlock.
	s.mut.Unlock()

	atomic.AddUint32(&s.Stats.LaunchedServers, 1)
	driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		tasks,
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	)
}
Example #24
func (frn *FrameworkRiakNode) PrepareForLaunchAndGetNewTaskInfo(sc *SchedulerCore, offer *mesos.Offer, executorAsk []*mesos.Resource, taskAsk []*mesos.Resource) *mesos.TaskInfo {
	// THIS IS A MUTATING CALL
	if frn.CurrentState != process_state.Shutdown && frn.CurrentState != process_state.Failed && frn.CurrentState != process_state.Unknown {
		log.Panicf("Trying to generate Task Info while node is up. ZK FRN State: %v", frn.CurrentState)
	}
	frn.Generation = frn.Generation + 1
	frn.TaskStatus = nil
	frn.CurrentState = process_state.Starting
	frn.LastOfferUsed = offer

	executorUris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      &(sc.schedulerHTTPServer.hostURI),
			Executable: proto.Bool(true),
		},
	}
	//executorUris = append(executorUris,
	//	&mesos.CommandInfo_URI{Value: &(frn.frc.sc.schedulerHTTPServer.hostURI), Executable: proto.Bool(true)})

	exec := &mesos.ExecutorInfo{
		// No idea if this is the "right" way to do it, but I think so?
		ExecutorId: util.NewExecutorID(frn.ExecutorID()),
		Name:       proto.String("Executor (Go)"),
		Source:     proto.String("Riak Mesos Framework (Go)"),
		Command: &mesos.CommandInfo{
			Value:     proto.String(sc.schedulerHTTPServer.executorName),
			Uris:      executorUris,
			Shell:     proto.Bool(false),
			Arguments: []string{sc.schedulerHTTPServer.executorName, "-logtostderr=true", "-taskinfo", frn.CurrentID()},
		},
		Resources: executorAsk,
	}
	taskId := &mesos.TaskID{
		Value: proto.String(frn.CurrentID()),
	}

	nodename := frn.CurrentID() + "@" + offer.GetHostname()

	if !strings.Contains(offer.GetHostname(), ".") {
		nodename = nodename + "."
	}

	taskData := common.TaskData{
		FullyQualifiedNodeName:    nodename,
		RexFullyQualifiedNodeName: "rex-" + nodename,
		Zookeepers:                sc.zookeepers,
		NodeID:                    frn.UUID.String(),
		FrameworkName:             sc.frameworkName,
		URI:                       sc.schedulerHTTPServer.GetURI(),
		ClusterName:               frn.ClusterName,
	}
	frn.TaskData = taskData

	binTaskData, err := taskData.Serialize()

	if err != nil {
		log.Panic(err)
	}

	taskInfo := &mesos.TaskInfo{
		Name:      proto.String(frn.CurrentID()),
		TaskId:    taskId,
		SlaveId:   offer.SlaveId,
		Executor:  exec,
		Resources: taskAsk,
		Data:      binTaskData,
	}
	frn.LastTaskInfo = taskInfo

	return taskInfo
}
Example #25
func offerString(offer *mesos.Offer) string {
	return fmt.Sprintf("\n%s%s %s %s", offer.GetHostname(), idString(offer.GetId().GetValue()), resourcesString(offer.GetResources()), attributesString(offer.GetAttributes()))
}