Example #1
func prepareExecutorInfo(id string) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: execUri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d", execCmd, v)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the ExecutorInfo.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
	}
}
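For context, a minimal sketch of how an ExecutorInfo returned by a helper like this is typically attached to a task and launched. The launchDemoTask function, task name, and resource amounts are illustrative assumptions, not part of the original example.

func launchDemoTask(driver sched.SchedulerDriver, offer *mesos.Offer, executor *mesos.ExecutorInfo) {
	// Attach the prepared ExecutorInfo to a single task and launch it
	// against one offer. Sketch only; amounts are assumed.
	task := &mesos.TaskInfo{
		Name:     proto.String("demo-task-1"),
		TaskId:   util.NewTaskID("demo-task-1"),
		SlaveId:  offer.SlaveId,
		Executor: executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 0.1), // assumed amounts
			util.NewScalarResource("mem", 32),
		},
	}
	driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		[]*mesos.TaskInfo{task},
		&mesos.Filters{RefuseSeconds: proto.Float64(5)},
	)
}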
Example #2
func prepareExecutorInfo(gt net.Addr) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri := serveSelf()
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	nodeCommand := fmt.Sprintf("./executor -logtostderr=true -v=%d -node -tracerAddr %s", v, gt.String())
	log.V(2).Info("nodeCommand: ", nodeCommand)

	// Build the ExecutorInfo.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("visghs-node"),
		Source:     proto.String("visghs"),
		Command: &mesos.CommandInfo{
			Value: proto.String(nodeCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
Example #3
func (s *Scheduler) createExecutor(offer *mesos.Offer, tcpPort uint64, udpPort uint64) *mesos.ExecutorInfo {
	name := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	id := fmt.Sprintf("%s-%s", name, uuid())

	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}

	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}

	command := fmt.Sprintf("./%s --log.level %s --tcp %d --udp %d --host %s", Config.Executor, Config.LogLevel, tcpPort, udpPort, offer.GetHostname())

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(name),
		Command: &mesos.CommandInfo{
			Value: proto.String(command),
			Uris:  uris,
		},
	}
}
Example #4
func (s *Scheduler) createExecutor(hostname string) *mesos.ExecutorInfo {
	id := fmt.Sprintf("statsd-kafka-%s", hostname)

	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}

	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(id),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --log.level %s --host %s", Config.Executor, Config.LogLevel, hostname)),
			Uris:  uris,
		},
	}
}
Example #5
func (this *ElodinaTransportScheduler) createExecutor(instanceId int, port uint64) *mesos.ExecutorInfo {
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(fmt.Sprintf("elodina-mirror-%d", instanceId)),
		Name:       proto.String("Elodina Mirror Executor"),
		Source:     proto.String("Elodina"),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --port %d --ssl.cert %s --ssl.key %s --ssl.cacert %s --api.key %s --api.user %s --target.url %s --insecure %v",
				this.config.ExecutorBinaryName, port, this.config.SSLCertFilePath, this.config.SSLKeyFilePath, this.config.SSLCACertFilePath, this.config.ApiKey, this.config.ApiUser, this.config.TargetURL, this.config.Insecure)),
			Uris: []*mesos.CommandInfo_URI{&mesos.CommandInfo_URI{
				Value:      proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ServiceHost, this.config.ServicePort, this.config.ExecutorBinaryName)),
				Executable: proto.Bool(true),
			},
				&mesos.CommandInfo_URI{
					Value:      proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ServiceHost, this.config.ServicePort, this.config.SSLCertFilePath)),
					Executable: proto.Bool(false),
					Extract:    proto.Bool(false),
				},
				&mesos.CommandInfo_URI{
					Value:      proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ServiceHost, this.config.ServicePort, this.config.SSLKeyFilePath)),
					Executable: proto.Bool(false),
					Extract:    proto.Bool(false),
				},
				&mesos.CommandInfo_URI{
					Value:      proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ServiceHost, this.config.ServicePort, this.config.SSLCACertFilePath)),
					Executable: proto.Bool(false),
					Extract:    proto.Bool(false),
				}},
		},
	}
}
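The four URI entries above differ only in resource name and flags, so a small helper could cut the repetition. A sketch under the same mesos-go bindings; the helper itself is not part of the original code.

// Hypothetical helper: builds one artifact URI served by the scheduler's
// HTTP resource endpoint.
func resourceURI(host string, port int, name string, executable bool) *mesos.CommandInfo_URI {
	return &mesos.CommandInfo_URI{
		Value:      proto.String(fmt.Sprintf("http://%s:%d/resource/%s", host, port, name)),
		Executable: proto.Bool(executable),
		Extract:    proto.Bool(false),
	}
}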
Example #6
func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver, err := newTestSchedulerDriver(NewMockScheduler(), suite.framework, suite.master, nil)
	suite.NoError(err)
	driver.messenger = messenger
	suite.True(driver.Stopped())

	driver.Start()
	driver.setConnected(true) // simulated
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.SendFrameworkMessage(
		util.NewExecutorID("test-exec-001"),
		util.NewSlaveID("test-slave-001"),
		"Hello!",
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #7
func (ct *ConsumerTask) createExecutor() *mesos.ExecutorInfo {
	executor, err := ct.Config.GetString("executor")
	if err != nil || executor == "" {
		fmt.Println("Executor name required")
		return nil
	}
	id := fmt.Sprintf("consumer-%s", ct.ID)
	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, executor)),
			Executable: proto.Bool(true),
		},
	}

	paramNames := []string{"brokers", "topics", "partitions", "cassandra", "keyspace", "schema"}

	params := make([]string, 0)
	for _, name := range paramNames {
		params = append(params, ct.makeParam(name))
	}

	paramString := strings.Join(params, " ")

	Logger.Debugf("Launching executor with params %s", paramString)

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String("kafka-consumer"),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --log.level %s --type %s %s", executor, Config.LogLevel, TaskTypeConsumer, paramString)),
			Uris:  uris,
		},
	}
}
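The makeParam helper is defined elsewhere and not shown in this example. A plausible sketch, purely an assumption, that turns a config key into a "--key value" CLI fragment:

// Hypothetical sketch of makeParam; the real implementation is not shown here.
func (ct *ConsumerTask) makeParam(name string) string {
	value, err := ct.Config.GetString(name)
	if err != nil || value == "" {
		return ""
	}
	return fmt.Sprintf("--%s %s", name, value)
}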
Example #8
func (mm *MirrorMakerTask) createExecutor() *mesos.ExecutorInfo {
	executor, err := mm.Config.GetString("executor")
	if err != nil || executor == "" {
		fmt.Println("Executor name required")
		return nil
	}

	id := fmt.Sprintf("mirrormaker-%s", mm.ID)
	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, executor)),
			Executable: proto.Bool(true),
		},
		toURI(mm.Config["producer.config"]),
	}

	for _, consumerConfig := range strings.Split(mm.Config["consumer.config"], ",") {
		uris = append(uris, toURI(consumerConfig))
	}

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(id),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --log.level %s --type %s", executor, Config.LogLevel, TaskTypeMirrorMaker)),
			Uris:  uris,
		},
	}
}
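toURI is likewise defined elsewhere; judging from the surrounding code it presumably wraps a resource name into a CommandInfo_URI served by the API. A sketch, assumed for illustration:

// Hypothetical sketch of toURI; the real implementation is not shown here.
func toURI(resource string) *mesos.CommandInfo_URI {
	return &mesos.CommandInfo_URI{
		Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, resource)),
	}
}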
Example #9
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d -slow_tasks=%v", executorCmd, v, *slowTasks)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the ExecutorInfo.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
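CPUS_PER_EXECUTOR and MEM_PER_EXECUTOR are defined elsewhere in the original program. Illustrative values, assumed only to make the example self-contained:

const (
	CPUS_PER_EXECUTOR = 0.1 // assumed value
	MEM_PER_EXECUTOR  = 64  // assumed value, in MB
)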
Example #10
// executorRefs returns a slice of references to running executors known to this framework
func (k *framework) executorRefs() []executorRef {
	slaves := k.slaveHostNames.SlaveIDs()
	refs := make([]executorRef, 0, len(slaves))

	for _, slaveID := range slaves {
		hostname := k.slaveHostNames.HostName(slaveID)
		if hostname == "" {
			log.Warningf("hostname lookup for slaveID %q failed", slaveID)
			continue
		}

		node := k.lookupNode(hostname)
		if node == nil {
			log.Warningf("node lookup for slaveID %q failed", slaveID)
			continue
		}

		eid, ok := node.Annotations[meta.ExecutorIdKey]
		if !ok {
			log.Warningf("unable to find %q annotation for node %v", meta.ExecutorIdKey, node)
			continue
		}

		refs = append(refs, executorRef{
			executorID: mutil.NewExecutorID(eid),
			slaveID:    mutil.NewSlaveID(slaveID),
		})
	}

	return refs
}
Example #11
func prepareExecutorInfo() *mesos.ExecutorInfo {

	containerType := mesos.ContainerInfo_DOCKER
	containerNetwork := mesos.ContainerInfo_DockerInfo_HOST
	vcapDataVolumeMode := mesos.Volume_RW
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("diego-executor"),
		Name:       proto.String("Diego Executor"),
		Source:     proto.String("diego-executor"),
		Container: &mesos.ContainerInfo{
			Type: &containerType,
			Volumes: []*mesos.Volume{
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/var/vcap/data"),
					HostPath:      proto.String("data"),
				},
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/var/vcap/sys/log"),
					HostPath:      proto.String("log"),
				},
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/sys/fs/cgroup"),
					HostPath:      proto.String("/sys/fs/cgroup"),
				},
			},
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:          executorImage,
				Network:        &containerNetwork,
				Privileged:     proto.Bool(true),
				ForcePullImage: proto.Bool(true),
			},
		},
		Command: &mesos.CommandInfo{
			Environment: &mesos.Environment{
				Variables: []*mesos.Environment_Variable{
					&mesos.Environment_Variable{
						Name:  proto.String("CONSUL_SERVER"),
						Value: consulServer,
					},
					&mesos.Environment_Variable{
						Name:  proto.String("ETCD_URL"),
						Value: etcdUrl,
					},
				},
			},
			Shell:     proto.Bool(false),
			User:      proto.String("root"),
			Value:     proto.String("/executor"),
			Arguments: []string{"-logtostderr=true"},
		},
	}
}
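executorImage, consulServer, and etcdUrl are package-level variables defined elsewhere in the original program. Plausible declarations, assumed for illustration only; the actual values will differ:

var (
	executorImage = proto.String("diego/executor:latest") // hypothetical image tag
	consulServer  = proto.String("10.0.0.1:8500")         // hypothetical address
	etcdUrl       = proto.String("http://10.0.0.1:4001")  // hypothetical URL
)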
Example #12
func startScheduler() (reflex *scheduler.ReflexScheduler, start func()) {
	exec := &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("BH executor (Go)"),
		Source:     proto.String("bh_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(""),
			Uris:  []*mesos.CommandInfo_URI{},
		},
	}

	fwinfo := &mesos.FrameworkInfo{
		User: proto.String(""),
		Name: proto.String("reflex"),
	}

	// skipping creds for now...
	// cred := (*mesos.Credential)(nil)
	// if *mesosAuthPrincipal != "" {
	// 	fwinfo.Principal = proto.String(*mesosAuthPrincipal)
	// 	secret, err := ioutil.ReadFile(*mesosAuthSecretFile)
	// 	if err != nil {
	// 		logrus.WithField("error", err).Fatal("failed reading secret file")
	// 	}
	// 	cred = &mesos.Credential{
	// 		Principal: proto.String(*mesosAuthPrincipal),
	// 		Secret:    secret,
	// 	}
	// }

	reflex = scheduler.NewScheduler(exec)

	config := sched.DriverConfig{
		Scheduler: reflex,
		Framework: fwinfo,
		Master:    "127.0.0.1:5050", // TODO: grab this from somewhere
		// Credential: cred,
	}

	start = func() {
		driver, err := sched.NewMesosSchedulerDriver(config)
		if err != nil {
			logrus.WithField("error", err).Fatal("unable to create a SchedulerDriver")
		}
		if stat, err := driver.Run(); err != nil {
			logrus.WithFields(logrus.Fields{
				"status": stat.String(),
				"error":  err,
			}).Info("framework stopped")
		}
	}

	return reflex, start
}
Example #13
func prepareExecutorInfo() *mesos.ExecutorInfo {
	// Build a Docker-containerized ExecutorInfo.
	containerType := mesos.ContainerInfo_DOCKER
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("mesos-runonce-executor"),
		Source:     proto.String("mesos-runonce-executor"),
		Container: &mesos.ContainerInfo{
			Type: &containerType,
		},
	}
}
Example #14
func (suite *SchedulerTestSuite) TestSchdulerDriverAcceptOffersWithError() {
	sched := mock_scheduler.New()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := mockedMessenger()
	driver := newTestDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.OnDispatch(func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	})

	go func() {
		driver.Run()
	}()
	<-driver.Started()
	driver.SetConnected(true) // simulated
	suite.True(driver.Running())

	// setup an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.CacheOffer(offer, pid)

	// launch task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simpe-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResourceWithReservation("mem", 400, "principal", "role")},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	operations := []*mesos.Offer_Operation{util.NewLaunchOperation(tasks)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{offer.Id},
		operations,
		&mesos.Filters{},
	)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.Error(err)
}
Example #15
func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
	driver := newTestDriver(suite.T(), driverConfigMessenger(mock_scheduler.New(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.SetConnected(true) // simulated
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.SendFrameworkMessage(
		util.NewExecutorID("test-exec-001"),
		util.NewSlaveID("test-slave-001"),
		"Hello!",
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #16
func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverFrameworkMessageEvent() {
	ok := suite.configureServerWithRegisteredFramework()
	suite.True(ok, "failed to establish running test server and driver")

	// Send an event to this SchedulerDriver (via http) to test handlers.
	pbMsg := &mesos.ExecutorToFrameworkMessage{
		SlaveId:     util.NewSlaveID("test-slave-001"),
		FrameworkId: suite.registeredFrameworkId,
		ExecutorId:  util.NewExecutorID("test-executor-001"),
		Data:        []byte("test-data-999"),
	}

	c := suite.newMockClient()
	c.SendMessage(suite.driver.UPID(), pbMsg)
	suite.sched.waitForCallback(0)
}
Example #17
func main() {
	var master = flag.String("master", "127.0.0.1:5050", "Master address <ip:port>")

	log.Infoln("Lancement PGAgentScheduler") //, time.Now())

	flag.Parse()

	config := scheduler.DriverConfig{
		Master: *master,
		Scheduler: forbin.NewDatabaseScheduler(
			&mesos.ExecutorInfo{
				ExecutorId: util.NewExecutorID("default"),
				Name:       proto.String("FBN"),
				Source:     proto.String("fb_test"),
				Command: &mesos.CommandInfo{
					Value: proto.String(CMD),
					Uris: []*mesos.CommandInfo_URI{
						&mesos.CommandInfo_URI{
							Value:   proto.String("http://localhost:8080/postgresql-bin.tar.gz"),
							Extract: proto.Bool(true),
						},
						&mesos.CommandInfo_URI{
							Value:   proto.String("http://localhost:8080/tools.tar.gz"),
							Extract: proto.Bool(true),
						},
					}},
			},
		),
		Framework: &mesos.FrameworkInfo{
			Name: proto.String(NAME),
			User: proto.String(""),
		},
	}

	driver, err := scheduler.NewMesosSchedulerDriver(config)

	if err != nil {
		log.Fatalln("Unable to create a SchedulerDriver ", err.Error())
	}

	if stat, err := driver.Run(); err != nil {
		log.Infoln("Framework stopped with status %s and error: %s\n", stat.String(), err.Error())
	}

}
Example #18
func prepareExecutorInfo(uri string, cmd string) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{
		{
			Value:      &uri,
			Executable: proto.Bool(true),
		},
	}

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(cmd),
			Uris:  executorUris,
		},
	}
}
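A hypothetical call site for this helper; the artifact URL and command string are assumptions for illustration:

// Illustrative only; the URL and command are assumed values.
executor := prepareExecutorInfo(
	"http://127.0.0.1:12345/executor",
	"./executor -logtostderr=true",
)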
Example #19
func (this *TransformScheduler) createExecutor(instanceId int32, port uint64) *mesos.ExecutorInfo {
	path := strings.Split(this.config.ExecutorArchiveName, "/")
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(fmt.Sprintf("transform-%d", instanceId)),
		Name:       proto.String("LogLine Transform Executor"),
		Source:     proto.String("cisco"),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --producer.config %s --topic %s --port %d --sync %t",
				this.config.ExecutorBinaryName, this.config.ProducerConfig, this.config.Topic, port, this.config.Sync)),
			Uris: []*mesos.CommandInfo_URI{&mesos.CommandInfo_URI{
				Value:   proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ArtifactServerHost, this.config.ArtifactServerPort, path[len(path)-1])),
				Extract: proto.Bool(true),
			}, &mesos.CommandInfo_URI{
				Value: proto.String(fmt.Sprintf("http://%s:%d/resource/%s", this.config.ArtifactServerHost, this.config.ArtifactServerPort, this.config.ProducerConfig)),
			}},
		},
	}
}
Example #20
func (s *Scheduler) createExecutor(slave string) *mesos.ExecutorInfo {
	id := fmt.Sprintf("syscol-%s", slave)
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(id),
		Command: &mesos.CommandInfo{
			Value: proto.String(fmt.Sprintf("./%s --log.level %s", Config.Executor, Config.LogLevel)),
			Uris: []*mesos.CommandInfo_URI{
				&mesos.CommandInfo_URI{
					Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
					Executable: proto.Bool(true),
				},
				&mesos.CommandInfo_URI{
					Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
				},
			},
		},
	}
}
Example #21
func TestExecutorDriverFrameworkToExecutorMessageEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()

	// send runtask event to driver
	pbMsg := &mesos.FrameworkToExecutorMessage{
		SlaveId:     util.NewSlaveID(slaveID),
		ExecutorId:  util.NewExecutorID(executorID),
		FrameworkId: util.NewFrameworkID(frameworkID),
		Data:        []byte("Hello-Test"),
	}

	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)

	select {
	case <-ch:
	case <-time.After(time.Second * 1):
		log.Errorf("Tired of waiting...")
	}
}
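slaveID, executorID, and frameworkID in these tests are package-level fixtures defined elsewhere; plausible values, assumed for illustration:

const (
	slaveID     = "test-slave-001"     // assumed fixture value
	executorID  = "test-executor-001"  // assumed fixture value
	frameworkID = "test-framework-001" // assumed fixture value
)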
Example #22
func TestExecutorDriverExecutorRegisteredEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	defer driver.Stop()

	//simulate sending ExecutorRegisteredMessage from server to exec pid.
	pbMsg := &mesos.ExecutorRegisteredMessage{
		ExecutorInfo:  util.NewExecutorInfo(util.NewExecutorID(executorID), util.NewCommandInfo("ls -l")),
		FrameworkId:   util.NewFrameworkID(frameworkID),
		FrameworkInfo: util.NewFrameworkInfo("test", "test-framework", util.NewFrameworkID(frameworkID)),
		SlaveId:       util.NewSlaveID(slaveID),
		SlaveInfo:     &mesos.SlaveInfo{Hostname: proto.String("localhost")},
	}
	c := testutil.NewMockMesosClient(t, server.PID)
	connected := driver.connectionListener()
	c.SendMessage(driver.self, pbMsg)
	select {
	case <-connected:
	case <-time.After(time.Second * 1):
		log.Errorf("Tired of waiting...")
	}
}
Example #23
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	executorCommand := fmt.Sprintf("./%s", executorCmd)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the ExecutorInfo.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
	}
}
Example #24
func (s *EtcdScheduler) newExecutorInfo(
	node *config.Node,
	executorURIs []*mesos.CommandInfo_URI,
) *mesos.ExecutorInfo {

	_, bin := filepath.Split(s.ExecutorPath)
	execmd := fmt.Sprintf("./%s -log_dir=./", bin)

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(node.Name),
		Name:       proto.String("etcd"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(execmd),
			Uris:  executorURIs,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 0.1),
			util.NewScalarResource("mem", 32),
		},
	}
}
Example #25
func prepareExecutorInfo(args []string) *mesos.ExecutorInfo {
	var uri *string
	var executorCmd string = ""

	if len(executorUris) == 0 {
		uri, executorCmd = serveExecutorArtifact(*executorPath)
		executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})
	}
	log.Infof("uri = %s, executorcmd = %s\n", uri, executorCmd)
	log.Infof("executorUris = %s\n", executorUris)

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d", executorCmd, v)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the ExecutorInfo.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("SDC Executor (Go)"),
		Source:     proto.String("sdc"),
		Command: &mesos.CommandInfo{
			Value:     proto.String(executorCommand),
			Uris:      executorUris,
			Arguments: args,
		},
	}
}
Example #26
// TestExecutorStaticPods tests that ExecutorInfo.Data is parsed
// as a zip archive with pod definitions.
func TestExecutorStaticPods(t *testing.T) {
	// create some zip with static pod definition
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	createStaticPodFile := func(fileName, id, name string) {
		w, err := zw.Create(fileName)
		assert.NoError(t, err)
		spod := `{
	"apiVersion": "v1beta3",
	"kind": "Pod",
	"metadata": {
		"name": "%v",
		"labels": { "name": "foo", "cluster": "bar" }
	},
	"spec": {
		"containers": [{
			"name": "%v",
			"image": "library/nginx",
			"ports": [{ "containerPort": 80, "name": "http" }],
			"livenessProbe": {
				"enabled": true,
				"type": "http",
				"initialDelaySeconds": 30,
				"httpGet": { "path": "/", "port": 80 }
			}
		}]
	}
	}`
		_, err = w.Write([]byte(fmt.Sprintf(spod, id, name)))
		assert.NoError(t, err)
	}
	createStaticPodFile("spod.json", "spod-id-01", "spod-01")
	createStaticPodFile("spod2.json", "spod-id-02", "spod-02")
	createStaticPodFile("dir/spod.json", "spod-id-03", "spod-03") // same file name as first one to check for overwriting

	expectedStaticPodsNum := 2 // subdirectories are ignored by FileSource, hence only 2

	err := zw.Close()
	assert.NoError(t, err)

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, nil)
	defer testApiServer.server.Close()

	// temporary directory which is normally located in the executor sandbox
	staticPodsConfigPath, err := ioutil.TempDir("/tmp", "executor-k8sm-archive")
	assert.NoError(t, err)
	defer os.RemoveAll(staticPodsConfigPath)

	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1), // allow kube-executor source to proceed past init
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Version(),
		}),
		Kubelet: &kubelet.Kubelet{},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		StaticPodsConfigPath: staticPodsConfigPath,
	}
	executor := New(config)
	hostname := "h1"
	go executor.InitializeStaticPodsSource(func() {
		kconfig.NewSourceFile(staticPodsConfigPath, hostname, 1*time.Second, updates)
	})

	// create ExecutorInfo with static pod zip in data field
	executorInfo := mesosutil.NewExecutorInfo(
		mesosutil.NewExecutorID("ex1"),
		mesosutil.NewCommandInfo("k8sm-executor"),
	)
	executorInfo.Data = buf.Bytes()

	// start the executor with the static pod data
	executor.Init(mockDriver)
	executor.Registered(mockDriver, executorInfo, nil, nil)

	// wait for static pod to start
	seenPods := map[string]struct{}{}
	timeout := time.After(time.Second)
	defer mockDriver.AssertExpectations(t)
	for {
		// filter by PodUpdate type
		select {
		case <-timeout:
			t.Fatalf("Executor should send pod updates for %v pods, only saw %v", expectedStaticPodsNum, len(seenPods))
		case update, ok := <-updates:
			if !ok {
				return
			}
			podUpdate, ok := update.(kubelet.PodUpdate)
			if !ok {
				continue
			}
			for _, pod := range podUpdate.Pods {
				seenPods[pod.Name] = struct{}{}
			}
			if len(seenPods) == expectedStaticPodsNum {
				return
			}
		}
	}
}
Example #27
func (sched *SdcScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	if sched.tasksLaunched >= sched.totalTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		log.Infoln("sched.totalTasks ", sched.totalTasks)
		log.Infoln("sched.tasksFinished ", sched.tasksFinished)
		log.Infoln("sched.tasksLaunched ", sched.tasksLaunched)

		// pop Command.Arguments off cmdQueue;
		// eventually replace this so arguments can be fetched from an external queue
		if sched.totalTasks == 0 && sched.tasksFinished == 0 && sched.tasksLaunched == 0 && cmdQueue.Len() != 0 {
			execinfo := cmdQueue.Remove(cmdQueue.Front()).(*mesos.ExecutorInfo)
			log.Infoln("execinfo ", execinfo.Command.Arguments)

			sched.totalTasks = len(execinfo.Command.Arguments)
			sched.executor.Command.Arguments = execinfo.Command.Arguments
		}

		if sched.totalTasks == 0 && sched.tasksFinished == 0 && sched.tasksLaunched == 0 {
			ids := make([]*mesos.OfferID, len(offers))
			for i, offer := range offers {
				ids[i] = offer.Id
			}
			driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
			return
		}
	}

	log.Info("prepare pass args: ", sched.executor.Command.Arguments)
	cmds := sched.executor.Command.Arguments
	for _, v := range cmds {
		fmt.Println("v = ", v)
	}

	// [/bin/cat /var/tmp/1.txt /var/tmp/2.txt /var/tmp/3.txt | /bin/grep abe > /var/tmp/grep-result.txt]
	//
	// rebuild args
	// 1. /bin/cat /var/tmp/1.txt >> /var/tmp/intermediate.txt
	// 2. /bin/cat /var/tmp/2.txt >> /var/tmp/intermediate.txt
	// 3. /bin/cat /var/tmp/3.txt >> /var/tmp/intermediate.txt
	// 4. /bin/cat /var/tmp/intermediate.txt | /bin/grep abe > /var/tmp/grep-result.txt
	// 5. /bin/rm /var/tmp/intermediate.txt

	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo

		// For the case `$ cat 1.txt 2.txt 3.txt | wc -l`,
		// do the downstream tasks need to be brought up first?
		//
		// The proof of concept keeps things simple with an intermediate-file
		// approach, slow as that is.

		for sched.tasksLaunched < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			// rewrite the ExecutorId for this task
			sched.executor.ExecutorId = util.NewExecutorID(taskId.GetValue())

			log.Infof("sched.tasksLaunched = %d\n", sched.tasksLaunched)
			log.Infof("sched.totalTasks = %d\n", sched.totalTasks)
			log.Infof("sched.executor.Command.Value = %s\n", sched.executor.Command.GetValue())
			log.Infof("sched.executor.GetExecutorId() = %s\n", sched.executor.GetExecutorId())

			// Rewriting sched.executor.Command.Arguments does not give each task
			// its own value: it is a pointer, and when LaunchTasks batches several
			// tasks together, every task ends up with whatever was written last.
			// So, to keep Arguments from being shared across tasks, build a
			// separate ExecutorInfo object per task and launch with that.
			exec := &mesos.ExecutorInfo{
				ExecutorId: sched.executor.GetExecutorId(),
				Name:       proto.String(sched.executor.GetName()),
				Source:     proto.String(sched.executor.GetSource()),
				Command: &mesos.CommandInfo{
					Value: proto.String(sched.executor.Command.GetValue()),
					Uris:  sched.executor.Command.GetUris(),
				},
			}

			cmd := cmds[sched.tasksLaunched-1]
			log.Infof("cmd = %s\n", cmd)
			// Passing the command line via Arguments makes Mesos treat this as a
			// different executor, which causes all sorts of trouble.
			// An example of what NOT to do:
			// exec.Command.Arguments = strings.Split(cmd, " ")

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: exec,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
				// the command line to execute is passed via the Data parameter
				Data: []byte(cmd),
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}

		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
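The pointer-sharing pitfall called out in the comments above can be distilled into a short sketch; illustrative only, using the same bindings:

// Anti-pattern (illustrative): two tasks share one ExecutorInfo pointer, so a
// later mutation of its CommandInfo is observed by every queued task.
shared := &mesos.ExecutorInfo{
	ExecutorId: util.NewExecutorID("shared"),
	Command:    &mesos.CommandInfo{Value: proto.String("./executor")},
}
taskA := &mesos.TaskInfo{Executor: shared}
taskB := &mesos.TaskInfo{Executor: shared}
// Changing shared.Command.Arguments now affects both taskA and taskB, which is
// why the loop above constructs a fresh ExecutorInfo for each task.
_, _ = taskA, taskB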
Example #28
// reconstruct a task from metadata stashed in a pod entry. there are limited pod states that
// support reconstruction. if we expect to be able to reconstruct state but encounter errors
// in the process then those errors are returned. if the pod is in a seemingly valid state but
// otherwise does not support task reconstruction return false. if we're able to reconstruct
// state then return a reconstructed task and true.
//
// at this time task reconstruction is only supported for pods that have been annotated with
// binding metadata, which implies that they've previously been associated with a task and
// that mesos knows about it.
//
// assumes that the pod data comes from the k8s registry and reflects the desired state.
//
func RecoverFrom(pod api.Pod) (*T, bool, error) {
	// we only expect annotations if pod has been bound, which implies that it has already
	// been scheduled and launched
	if pod.Spec.NodeName == "" && len(pod.Annotations) == 0 {
		log.V(1).Infof("skipping recovery for unbound pod %v/%v", pod.Namespace, pod.Name)
		return nil, false, nil
	}

	// only process pods that are not in a terminal state
	switch pod.Status.Phase {
	case api.PodPending, api.PodRunning, api.PodUnknown: // continue
	default:
		log.V(1).Infof("skipping recovery for terminal pod %v/%v", pod.Namespace, pod.Name)
		return nil, false, nil
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	key, err := MakePodKey(ctx, pod.Name)
	if err != nil {
		return nil, false, err
	}

	//TODO(jdef) recover ports (and other resource requirements?) from the pod spec as well

	now := time.Now()
	t := &T{
		Pod:        pod,
		CreateTime: now,
		podKey:     key,
		State:      StatePending, // possibly running? mesos will tell us during reconciliation
		Flags:      make(map[FlagType]struct{}),
		mapper:     NewHostPortMapper(&pod),
		launchTime: now,
		bindTime:   now,
		Spec:       &Spec{},
	}
	var offerId string
	for _, k := range []string{
		annotation.BindingHostKey,
		annotation.TaskIdKey,
		annotation.SlaveIdKey,
		annotation.OfferIdKey,
		annotation.ExecutorIdKey,
	} {
		v, found := pod.Annotations[k]
		if !found {
			return nil, false, fmt.Errorf("incomplete metadata: missing value for pod annotation: %v", k)
		}
		switch k {
		case annotation.BindingHostKey:
			t.Spec.AssignedSlave = v
		case annotation.SlaveIdKey:
			t.Spec.SlaveID = v
		case annotation.OfferIdKey:
			offerId = v
		case annotation.TaskIdKey:
			t.ID = v
		case annotation.ExecutorIdKey:
			// this is nowhere near sufficient to re-launch a task, but we really just
			// want this for tracking
			t.Spec.Executor = &mesos.ExecutorInfo{ExecutorId: mutil.NewExecutorID(v)}
		}
	}
	t.Offer = offers.Expired(offerId, t.Spec.AssignedSlave, 0)
	t.Flags[Launched] = struct{}{}
	t.Flags[Bound] = struct{}{}
	return t, true, nil
}
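A hypothetical call site for RecoverFrom; the registerTask step is an assumption standing in for whatever the caller does with a recovered task:

task, ok, err := RecoverFrom(pod)
if err != nil {
	log.Errorf("failed to recover task from pod %v/%v: %v", pod.Namespace, pod.Name, err)
} else if ok {
	registerTask(task) // hypothetical follow-up step
}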
Example #29
// Test that creates the scheduler plugin with the config returned by the scheduler,
// and plays through the whole life cycle of the plugin while creating, deleting,
// and failing pods.
func TestPlugin_LifeCycle(t *testing.T) {
	t.Skip("This test is flaky, see #11901")
	assert := &EventAssertions{*assert.New(t)}

	// create a fake pod watch. We use that below to submit new pods to the scheduler
	podListWatch := NewMockPodsListWatch(api.PodList{})

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, podListWatch)
	defer testApiServer.server.Close()

	// create executor with some data for static pods if set
	executor := util.NewExecutorInfo(
		util.NewExecutorID("executor-id"),
		util.NewCommandInfo("executor-cmd"),
	)
	executor.Data = []byte{0, 1, 2}

	// create scheduler
	nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	as := NewAllocationStrategy(
		podtask.DefaultPredicate,
		podtask.NewDefaultProcurement(mresource.DefaultDefaultContainerCPULimit, mresource.DefaultDefaultContainerMemLimit))
	testScheduler := New(Config{
		Executor: executor,
		Client:   client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Default.Version()}),
		Scheduler: NewFCFSPodScheduler(as, func(node string) *api.Node {
			obj, _, _ := nodeStore.GetByKey(node)
			if obj == nil {
				return nil
			}
			return obj.(*api.Node)
		}),
		Schedcfg: *schedcfg.CreateDefaultConfig(),
	})

	assert.NotNil(testScheduler.client, "client is nil")
	assert.NotNil(testScheduler.executor, "executor is nil")
	assert.NotNil(testScheduler.offers, "offer registry is nil")

	// create scheduler process
	schedulerProcess := ha.New(testScheduler)

	// get plugin config from it
	c := testScheduler.NewPluginConfig(schedulerProcess.Terminal(), http.DefaultServeMux, &podListWatch.ListWatch)
	assert.NotNil(c)

	// make events observable
	eventObserver := NewEventObserver()
	c.Recorder = eventObserver

	// create plugin
	p := NewPlugin(c).(*schedulingPlugin)
	assert.NotNil(p)

	// run plugin
	p.Run(schedulerProcess.Terminal())
	defer schedulerProcess.End()

	// init scheduler
	err := testScheduler.Init(schedulerProcess.Master(), p, http.DefaultServeMux)
	assert.NoError(err)

	// create mock mesos scheduler driver
	mockDriver := &joinableDriver{}
	mockDriver.On("Start").Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	started := mockDriver.Upon()

	mAny := mock.AnythingOfType
	mockDriver.On("ReconcileTasks", mAny("[]*mesosproto.TaskStatus")).Return(mesos.Status_DRIVER_RUNNING, nil)
	mockDriver.On("SendFrameworkMessage", mAny("*mesosproto.ExecutorID"), mAny("*mesosproto.SlaveID"), mAny("string")).
		Return(mesos.Status_DRIVER_RUNNING, nil)

	type LaunchedTask struct {
		offerId  mesos.OfferID
		taskInfo *mesos.TaskInfo
	}
	launchedTasks := make(chan LaunchedTask, 1)
	launchTasksCalledFunc := func(args mock.Arguments) {
		offerIDs := args.Get(0).([]*mesos.OfferID)
		taskInfos := args.Get(1).([]*mesos.TaskInfo)
		assert.Equal(1, len(offerIDs))
		assert.Equal(1, len(taskInfos))
		launchedTasks <- LaunchedTask{
			offerId:  *offerIDs[0],
			taskInfo: taskInfos[0],
		}
	}
	mockDriver.On("LaunchTasks", mAny("[]*mesosproto.OfferID"), mAny("[]*mesosproto.TaskInfo"), mAny("*mesosproto.Filters")).
		Return(mesos.Status_DRIVER_RUNNING, nil).Run(launchTasksCalledFunc)
	mockDriver.On("DeclineOffer", mAny("*mesosproto.OfferID"), mAny("*mesosproto.Filters")).
		Return(mesos.Status_DRIVER_RUNNING, nil)

	// elect master with mock driver
	driverFactory := ha.DriverFactory(func() (bindings.SchedulerDriver, error) {
		return mockDriver, nil
	})
	schedulerProcess.Elect(driverFactory)
	elected := schedulerProcess.Elected()

	// driver will be started
	<-started

	// tell scheduler to be registered
	testScheduler.Registered(
		mockDriver,
		util.NewFrameworkID("kubernetes-id"),
		util.NewMasterInfo("master-id", (192<<24)+(168<<16)+(0<<8)+1, 5050),
	)

	// wait for being elected
	<-elected

	//TODO(jdef) refactor things above here into a test suite setup of some sort

	// fake new, unscheduled pod
	pod, i := NewTestPod()
	podListWatch.Add(pod, true) // notify watchers

	// wait for failedScheduling event because there is no offer
	assert.EventWithReason(eventObserver, "failedScheduling", "failedScheduling event not received")

	// add some matching offer
	offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
	testScheduler.ResourceOffers(nil, offers)

	// and wait for scheduled pod
	assert.EventWithReason(eventObserver, "scheduled")
	select {
	case launchedTask := <-launchedTasks:
		// report back that the task has been staged, and then started by mesos
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_STAGING))
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_RUNNING))

		// check that ExecutorInfo.data has the static pod data
		assert.Len(launchedTask.taskInfo.Executor.Data, 3)

		// report back that the task has been lost
		mockDriver.AssertNumberOfCalls(t, "SendFrameworkMessage", 0)
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_LOST))

		// and wait that framework message is sent to executor
		mockDriver.AssertNumberOfCalls(t, "SendFrameworkMessage", 1)

	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for launchTasks call")
	}

	// Launch a pod and wait until the scheduler driver is called
	schedulePodWithOffers := func(pod *api.Pod, offers []*mesos.Offer) (*api.Pod, *LaunchedTask, *mesos.Offer) {
		// wait for failedScheduling event because there is no offer
		assert.EventWithReason(eventObserver, "failedScheduling", "failedScheduling event not received")

		// supply a matching offer
		testScheduler.ResourceOffers(mockDriver, offers)

		// and wait to get scheduled
		assert.EventWithReason(eventObserver, "scheduled")

		// wait for driver.launchTasks call
		select {
		case launchedTask := <-launchedTasks:
			for _, offer := range offers {
				if offer.Id.GetValue() == launchedTask.offerId.GetValue() {
					return pod, &launchedTask, offer
				}
			}
			t.Fatalf("unknown offer used to start a pod")
			return nil, nil, nil
		case <-time.After(5 * time.Second):
			t.Fatal("timed out waiting for launchTasks")
			return nil, nil, nil
		}
	}
	// Launch a pod and wait until the scheduler driver is called
	launchPodWithOffers := func(pod *api.Pod, offers []*mesos.Offer) (*api.Pod, *LaunchedTask, *mesos.Offer) {
		podListWatch.Add(pod, true)
		return schedulePodWithOffers(pod, offers)
	}

	// Launch a pod, wait until the scheduler driver is called and report back that it is running
	startPodWithOffers := func(pod *api.Pod, offers []*mesos.Offer) (*api.Pod, *LaunchedTask, *mesos.Offer) {
		// notify about pod, offer resources and wait for scheduling
		pod, launchedTask, offer := launchPodWithOffers(pod, offers)
		if pod != nil {
			// report back status
			testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_STAGING))
			testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_RUNNING))
			return pod, launchedTask, offer
		}

		return nil, nil, nil
	}

	startTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return startPodWithOffers(pod, offers)
	}

	// start another pod
	pod, launchedTask, _ := startTestPod()

	// mock driver.KillTask; it should be invoked when a pod is deleted
	mockDriver.On("KillTask", mAny("*mesosproto.TaskID")).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
		killedTaskId := *(args.Get(0).(*mesos.TaskID))
		assert.Equal(*launchedTask.taskInfo.TaskId, killedTaskId, "expected same TaskID as during launch")
	})
	killTaskCalled := mockDriver.Upon()

	// stop it again via the apiserver mock
	podListWatch.Delete(pod, true) // notify watchers

	// and wait for the driver killTask call with the correct TaskId
	select {
	case <-killTaskCalled:
		// report back that the task is finished
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_FINISHED))

	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for KillTask")
	}

	// start a pod with on a given NodeName and check that it is scheduled to the right host
	pod, i = NewTestPod()
	pod.Spec.NodeName = "hostname1"
	offers = []*mesos.Offer{}
	for j := 0; j < 3; j++ {
		offer := NewTestOffer(fmt.Sprintf("offer%d_%d", i, j))
		hostname := fmt.Sprintf("hostname%d", j)
		offer.Hostname = &hostname
		offers = append(offers, offer)
	}

	_, _, usedOffer := startPodWithOffers(pod, offers)

	assert.Equal(offers[1].Id.GetValue(), usedOffer.Id.GetValue())
	assert.Equal(pod.Spec.NodeName, *usedOffer.Hostname)

	testScheduler.OfferRescinded(mockDriver, offers[0].Id)
	testScheduler.OfferRescinded(mockDriver, offers[2].Id)

	// start pods:
	// - which are failing while binding,
	// - leading to reconciliation
	// - with different states on the apiserver

	failPodFromExecutor := func(task *mesos.TaskInfo) {
		beforePodLookups := testApiServer.Stats(pod.Name)
		status := newTaskStatusForTask(task, mesos.TaskState_TASK_FAILED)
		message := messages.CreateBindingFailure
		status.Message = &message
		testScheduler.StatusUpdate(mockDriver, status)

		// wait until pod is looked up at the apiserver
		assertext.EventuallyTrue(t, time.Second, func() bool {
			return testApiServer.Stats(pod.Name) == beforePodLookups+1
		}, "expect that reconcileTask will access apiserver for pod %v", pod.Name)
	}

	launchTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return launchPodWithOffers(pod, offers)
	}

	// 1. with pod deleted from the apiserver
	//    expected: pod is removed from internal task registry
	pod, launchedTask, _ = launchTestPod()
	podListWatch.Delete(pod, false) // not notifying the watchers
	failPodFromExecutor(launchedTask.taskInfo)

	podKey, _ := podtask.MakePodKey(api.NewDefaultContext(), pod.Name)
	assertext.EventuallyTrue(t, time.Second, func() bool {
		t, _ := p.api.tasks().ForPod(podKey)
		return t == nil
	})

	// 2. with pod still on the apiserver, not bound
	//    expected: pod is rescheduled
	pod, launchedTask, _ = launchTestPod()
	failPodFromExecutor(launchedTask.taskInfo)

	retryOffers := []*mesos.Offer{NewTestOffer("retry-offer")}
	schedulePodWithOffers(pod, retryOffers)

	// 3. with pod still on the apiserver, bound, notified via ListWatch
	// expected: nothing, pod updates not supported, compare ReconcileTask function
	pod, launchedTask, usedOffer = startTestPod()
	pod.Annotations = map[string]string{
		meta.BindingHostKey: *usedOffer.Hostname,
	}
	pod.Spec.NodeName = *usedOffer.Hostname
	podListWatch.Modify(pod, true) // notifying the watchers
	time.Sleep(time.Second / 2)
	failPodFromExecutor(launchedTask.taskInfo)
}
Example #30
func newLifecycleTest(t *testing.T) lifecycleTest {
	assert := &EventAssertions{*assert.New(t)}

	// create a fake pod watch. We use that below to submit new pods to the scheduler
	podsListWatch := NewMockPodsListWatch(api.PodList{})

	// create fake apiserver
	apiServer := NewTestServer(t, api.NamespaceDefault, podsListWatch)

	// create ExecutorInfo with some data for static pods if set
	ei := mesosutil.NewExecutorInfo(
		mesosutil.NewExecutorID("executor-id"),
		mesosutil.NewCommandInfo("executor-cmd"),
	)
	ei.Data = []byte{0, 1, 2}

	// create framework
	client := client.NewOrDie(&client.Config{
		Host:         apiServer.server.URL,
		GroupVersion: testapi.Default.GroupVersion(),
	})
	c := *schedcfg.CreateDefaultConfig()
	fw := framework.New(framework.Config{
		Executor:        ei,
		Client:          client,
		SchedulerConfig: c,
		LookupNode:      apiServer.LookupNode,
	})

	// TODO(sttts): re-enable the following tests
	// assert.NotNil(framework.client, "client is nil")
	// assert.NotNil(framework.executor, "executor is nil")
	// assert.NotNil(framework.offers, "offer registry is nil")

	// create pod scheduler
	strategy := podschedulers.NewAllocationStrategy(
		podtask.NewDefaultPredicate(
			mresource.DefaultDefaultContainerCPULimit,
			mresource.DefaultDefaultContainerMemLimit,
		),
		podtask.NewDefaultProcurement(
			mresource.DefaultDefaultContainerCPULimit,
			mresource.DefaultDefaultContainerMemLimit,
		),
	)
	fcfs := podschedulers.NewFCFSPodScheduler(strategy, apiServer.LookupNode)

	// create scheduler process
	schedulerProc := ha.New(fw)

	// create scheduler
	eventObs := NewEventObserver()
	scheduler := components.New(&c, fw, fcfs, client, eventObs, schedulerProc.Terminal(), http.DefaultServeMux, &podsListWatch.ListWatch)
	assert.NotNil(scheduler)

	// create mock mesos scheduler driver
	driver := &framework.JoinableDriver{}

	return lifecycleTest{
		apiServer:     apiServer,
		driver:        driver,
		eventObs:      eventObs,
		podsListWatch: podsListWatch,
		framework:     fw,
		schedulerProc: schedulerProc,
		sched:         scheduler,
		t:             t,
	}
}