Example #1
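// runTaskUsingOffer packs as many of the given tasks onto the offer as its CPU and
// memory allow (one CPU_UNIT/MEM_UNIT slice per task), moves each launched task from
// the ready queue to the running set, launches the batch with a single LaunchTasks
// call, and returns the number of tasks launched.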
func (self *ResMan) runTaskUsingOffer(driver *mesos.SchedulerDriver, offer mesos.Offer,
	ts []*Task) (launchCount int) {
	cpus, mem := extraCpuMem(offer)
	var tasks []mesos.TaskInfo
	for i := 0; i < len(ts) && cpus > CPU_UNIT && mem > MEM_UNIT; i++ {
		t := ts[i]
		log.Debugf("Launching task: %s\n", t.Tid)
		job := t.job
		executor := &mesos.ExecutorInfo{
			Command: &mesos.CommandInfo{
				//Value: proto.String(job.Executor + ` "` + job.ExecutorFlags + `"`),
				Value: proto.String(fmt.Sprintf(`%s "%s"`, job.Executor,
					base64.StdEncoding.EncodeToString([]byte(job.ExecutorFlags)))),
			},
			Name:   proto.String("shell executor (Go)"),
			Source: proto.String("go_test"),
		}

		executorId := self.genExtorId(t.Tid)
		executor.ExecutorId = &mesos.ExecutorID{Value: proto.String(executorId)}
		log.Debug(*executor.Command.Value)

		urls := splitTrim(job.Uris)
		taskUris := make([]*mesos.CommandInfo_URI, len(urls))
		for i := range urls {
			taskUris[i] = &mesos.CommandInfo_URI{Value: &urls[i]}
		}
		executor.Command.Uris = taskUris

		task := mesos.TaskInfo{
			Name: proto.String(job.Name),
			TaskId: &mesos.TaskID{
				Value: proto.String(t.Tid),
			},
			SlaveId:  offer.SlaveId,
			Executor: executor,
			Resources: []*mesos.Resource{
				mesos.ScalarResource("cpus", CPU_UNIT),
				mesos.ScalarResource("mem", MEM_UNIT),
			},
		}

		tasks = append(tasks, task)
		t.state = taskRuning

		t.LastUpdate = time.Now()
		t.SlaveId = offer.GetSlaveId().GetValue()
		self.running.Add(t.Tid, t)
		log.Debugf("remove %+v from ready queue", t.Tid)
		self.ready.Del(t.Tid)
		cpus -= CPU_UNIT
		mem -= MEM_UNIT
	}

	if len(tasks) == 0 {
		return 0
	}

	log.Debugf("%+v", tasks)

	filters := mesos.Filters{RefuseSeconds: &refuseSeconds}
	driver.LaunchTasks(offer.Id, tasks, filters)

	return len(tasks)
}
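
The helper extraCpuMem used above is not part of this listing. A minimal sketch of what it is assumed to do, summing the offer's scalar "cpus" and "mem" resources in the same way as maxTasksForOffer in Example #2:

// extraCpuMem (hypothetical sketch): total scalar "cpus" and "mem" in an offer.
func extraCpuMem(offer mesos.Offer) (cpus float64, mem float64) {
	for _, resource := range offer.Resources {
		switch resource.GetName() {
		case "cpus":
			cpus += resource.GetScalar().GetValue()
		case "mem":
			mem += resource.GetScalar().GetValue()
		}
	}
	return cpus, mem
}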
Example #2
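// The complete RENDLER scheduler: it keeps separate crawl and render queues, fills
// each resource offer with pairs of crawl/render tasks, feeds links reported by the
// crawler back into both queues, and writes the collected crawl/render graph to a
// DOT file when the framework shuts down.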
func main() {
	crawlQueue := list.New()  // list of string
	renderQueue := list.New() // list of string

	processedURLs := list.New() // list of string
	crawlResults := list.New()  // list of CrawlEdge
	renderResults := make(map[string]string)

	seedUrl := flag.String("seed", "http://mesosphere.io", "The first URL to crawl")
	master := flag.String("master", "127.0.1.1:5050", "Location of leading Mesos master")
	localMode := flag.Bool("local", true, "If true, saves rendered web pages on local disk")
	// TODO(nnielsen): Add flag for artifacts.

	flag.Parse()

	crawlQueue.PushBack(*seedUrl)

	tasksCreated := 0
	tasksRunning := 0
	shuttingDown := false

	crawlCommand := "python crawl_executor.py"
	renderCommand := "python render_executor.py"

	if *localMode {
		renderCommand += " --local"
	}

	// TODO(nnielsen): In local mode, verify artifact locations.
	rendlerArtifacts := executorURIs()

	crawlExecutor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("crawl-executor")},
		Command: &mesos.CommandInfo{
			Value: proto.String(crawlCommand),
			Uris:  rendlerArtifacts,
		},
		Name: proto.String("Crawler"),
	}

	renderExecutor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("render-executor")},
		Command: &mesos.CommandInfo{
			Value: proto.String(renderCommand),
			Uris:  rendlerArtifacts,
		},
		Name: proto.String("Renderer"),
	}

	makeTaskPrototype := func(offer mesos.Offer) *mesos.TaskInfo {
		taskId := tasksCreated
		tasksCreated++
		return &mesos.TaskInfo{
			TaskId: &mesos.TaskID{
				Value: proto.String(fmt.Sprintf("RENDLER-%d", taskId)),
			},
			SlaveId: offer.SlaveId,
			Resources: []*mesos.Resource{
				mesos.ScalarResource("cpus", TASK_CPUS),
				mesos.ScalarResource("mem", TASK_MEM),
			},
		}
	}

	makeCrawlTask := func(url string, offer mesos.Offer) *mesos.TaskInfo {
		task := makeTaskPrototype(offer)
		task.Name = proto.String("CRAWL_" + *task.TaskId.Value)
		task.Executor = crawlExecutor
		task.Data = []byte(url)
		return task
	}

	makeRenderTask := func(url string, offer mesos.Offer) *mesos.TaskInfo {
		task := makeTaskPrototype(offer)
		task.Name = proto.String("RENDER_" + *task.TaskId.Value)
		task.Executor = renderExecutor
		task.Data = []byte(url)
		return task
	}

	maxTasksForOffer := func(offer mesos.Offer) int {
		// TODO(nnielsen): Parse offer resources.
		count := 0

		var cpus float64 = 0
		var mem float64 = 0

		for _, resource := range offer.Resources {
			if resource.GetName() == "cpus" {
				cpus = *resource.GetScalar().Value
			}

			if resource.GetName() == "mem" {
				mem = *resource.GetScalar().Value
			}
		}

		for cpus >= TASK_CPUS && mem >= TASK_MEM {
			count++
			cpus -= TASK_CPUS
			mem -= TASK_MEM
		}

		return count
	}

	printQueueStatistics := func() {
		// TODO(nnielsen): Print queue lengths.
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("RENDLER"),
			User: proto.String(""),
		},

		Scheduler: &mesos.Scheduler{

			Registered: func(
				driver *mesos.SchedulerDriver,
				frameworkId mesos.FrameworkID,
				masterInfo mesos.MasterInfo) {
				log.Printf("Registered")
			},

			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				printQueueStatistics()

				for _, offer := range offers {
					if shuttingDown {
						fmt.Println("Shutting down: declining offer on [", offer.Hostname, "]")
						driver.DeclineOffer(offer.Id)
						continue
					}

					tasks := []mesos.TaskInfo{}

					for i := 0; i < maxTasksForOffer(offer)/2; i++ {
						if crawlQueue.Front() != nil {
							url := crawlQueue.Front().Value.(string)
							crawlQueue.Remove(crawlQueue.Front())
							task := makeCrawlTask(url, offer)
							tasks = append(tasks, *task)
						}
						if renderQueue.Front() != nil {
							url := renderQueue.Front().Value.(string)
							renderQueue.Remove(renderQueue.Front())
							task := makeRenderTask(url, offer)
							tasks = append(tasks, *task)
						}
					}

					if len(tasks) == 0 {
						driver.DeclineOffer(offer.Id)
					} else {
						driver.LaunchTasks(offer.Id, tasks)
					}
				}
			},

			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				log.Printf("Received task status [%s] for task [%s]", rendler.NameFor(status.State), *status.TaskId.Value)

				if *status.State == mesos.TaskState_TASK_RUNNING {
					tasksRunning++
				} else if rendler.IsTerminal(status.State) {
					tasksRunning--
				}
			},

			FrameworkMessage: func(
				driver *mesos.SchedulerDriver,
				executorId mesos.ExecutorID,
				slaveId mesos.SlaveID,
				message string) {

				switch *executorId.Value {
				case *crawlExecutor.ExecutorId.Value:
					log.Print("Received framework message from crawler")
					var result rendler.CrawlResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing CrawlResult: [%s]", err)
					} else {
						for _, link := range result.Links {
							edge := rendler.Edge{From: result.URL, To: link}
							log.Printf("Appending [%s] to crawl results", edge)
							crawlResults.PushBack(edge)

							alreadyProcessed := false
							for e := processedURLs.Front(); e != nil && !alreadyProcessed; e = e.Next() {
								processedURL := e.Value.(string)
								if link == processedURL {
									alreadyProcessed = true
								}
							}

							if !alreadyProcessed {
								log.Printf("Enqueueing [%s]", link)
								crawlQueue.PushBack(link)
								renderQueue.PushBack(link)
								processedURLs.PushBack(link)
							}
						}
					}

				case *renderExecutor.ExecutorId.Value:
					log.Printf("Received framework message from renderer")
					var result rendler.RenderResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing RenderResult: [%s]", err)
					} else {
						log.Printf(
							"Appending [%s] to render results",
							rendler.Edge{From: result.URL, To: result.ImageURL})
						renderResults[result.URL] = result.ImageURL
					}

				default:
					log.Printf("Received a framework message from some unknown source: %s", *executorId.Value)
				}
			},
		},
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	go func(c chan os.Signal) {
		s := <-c
		fmt.Println("Got signal:", s)

		if s == os.Interrupt {
			fmt.Println("RENDLER is shutting down")
			shuttingDown = true
			waitStarted := time.Now()
			for tasksRunning > 0 && SHUTDOWN_TIMEOUT > int(time.Since(waitStarted).Seconds()) {
				time.Sleep(time.Second)
			}

			if tasksRunning > 0 {
				fmt.Println("Shutdown by timeout,", tasksRunning, "task(s) have not completed")
			}

			driver.Stop(false)
		}
	}(c)

	driver.Init()
	defer driver.Destroy()

	driver.Start()
	driver.Join()
	driver.Stop(false)
	rendler.WriteDOTFile(crawlResults, renderResults)
	os.Exit(0)
}
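
The executorURIs helper is defined elsewhere in the project. A hypothetical minimal version, assuming the crawl and render executor scripts are served from placeholder URLs (not the project's real artifact locations):

// executorURIs (hypothetical sketch): wrap artifact URLs in CommandInfo_URI values.
func executorURIs() []*mesos.CommandInfo_URI {
	artifacts := []string{
		"http://localhost:8000/crawl_executor.py", // placeholder host and port
		"http://localhost:8000/render_executor.py",
	}
	uris := make([]*mesos.CommandInfo_URI, 0, len(artifacts))
	for i := range artifacts {
		uris = append(uris, &mesos.CommandInfo_URI{Value: &artifacts[i]})
	}
	return uris
}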
Example #3
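// A minimal test framework: it launches one task (1 CPU, 512 MB of memory) per
// incoming offer on a single pre-registered executor and exits once five tasks
// have reported TASK_FINISHED.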
func main() {
	taskLimit := 5
	taskId := 0
	exit := make(chan bool)
	localExecutor, _ := executorPath()

	master := flag.String("master", "localhost:5050", "Location of leading Mesos master")
	executorUri := flag.String("executor-uri", localExecutor, "URI of executor executable")
	flag.Parse()

	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("default")},
		Command: &mesos.CommandInfo{
			Value: proto.String("./example_executor"),
			Uris: []*mesos.CommandInfo_URI{
				&mesos.CommandInfo_URI{Value: executorUri},
			},
		},
		Name:   proto.String("Test Executor (Go)"),
		Source: proto.String("go_test"),
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("GoFramework"),
			User: proto.String(""),
		},

		Scheduler: &mesos.Scheduler{
			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				for _, offer := range offers {
					taskId++
					fmt.Printf("Launching task: %d\n", taskId)

					tasks := []mesos.TaskInfo{
						mesos.TaskInfo{
							Name: proto.String("go-task"),
							TaskId: &mesos.TaskID{
								Value: proto.String("go-task-" + strconv.Itoa(taskId)),
							},
							SlaveId:  offer.SlaveId,
							Executor: executor,
							Resources: []*mesos.Resource{
								mesos.ScalarResource("cpus", 1),
								mesos.ScalarResource("mem", 512),
							},
						},
					}

					driver.LaunchTasks(offer.Id, tasks)
				}
			},

			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				fmt.Println("Received task status: " + *status.Message)

				if *status.State == mesos.TaskState_TASK_FINISHED {
					taskLimit--
					if taskLimit <= 0 {
						exit <- true
					}
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()

	driver.Start()
	<-exit
	driver.Stop(false)
}
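
The executorPath helper is also not shown. A hypothetical sketch, assuming the example executor binary sits next to the scheduler binary (requires the "os" and "path/filepath" packages):

// executorPath (hypothetical sketch): absolute path of the executor binary,
// assumed to live in the same directory as the scheduler.
func executorPath() (string, error) {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "example_executor"), nil
}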
Example #4
File: main.go Project: nqn/angstrom
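// The Angstrom metrics scheduler: it serves the executor binary over HTTP, keeps a
// queue of slaves sampled from the cluster, launches one lightweight monitoring task
// (0.5 CPU, 32 MB of memory) per slave, and folds framework messages from the
// executors back into the cluster's statistics.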
func main() {
	taskId := 0
	localExecutor, _ := executorPath()

	goPath := os.Getenv("GOPATH") + "/"

	master := flag.String("master", "localhost:5050", "Location of leading Mesos master")
	executorPath := flag.String("executor-uri", localExecutor, "Path to executor executable")
	address := flag.String("address", "localhost", "Hostname to serve artifacts from")
	angstromPath := flag.String("angstrom-path", goPath+"src/github.com/nqn/angstrom", "Path to angstrom checkout")

	flag.Parse()

	serveExecutorArtifact := func(path string) string {
		serveFile := func(pattern string, filename string) {
			http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
				http.ServeFile(w, r, filename)
			})
		}

		// Create base path (http://foobar:5000/<base>)
		pathSplit := strings.Split(path, "/")
		var base string
		if len(pathSplit) > 0 {
			base = pathSplit[len(pathSplit)-1]
		} else {
			base = path
		}
		serveFile("/"+base, path)

		hostURI := fmt.Sprintf("http://%s:%d/%s", *address, defaultPort, base)

		glog.V(2).Infof("Serving '%s'\n", hostURI)

		return hostURI
	}

	executorURI := serveExecutorArtifact(*executorPath)
	executable := true

	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("default")},
		Command: &mesos.CommandInfo{
			Value: proto.String("./angstrom-executor"),
			Uris: []*mesos.CommandInfo_URI{
				&mesos.CommandInfo_URI{Value: &executorURI, Executable: &executable},
			},
		},
		Name:   proto.String("Angstrom Executor"),
		Source: proto.String("angstrom"),
	}

	cluster := acluster.NewCluster(*master)

	cluster.Update()

	// Keep updating cluster state
	go func() {
		for {
			cluster.Update()
			time.Sleep(updateInterval)
		}
	}()

	slaves := list.New()
	for _, slave := range cluster.Sample.Slaves {
		slaveHostname := slave.Hostname + ":" + strconv.Itoa(slave.Port)
		slaves.PushBack(slaveHostname)
	}

	// TODO(nnielsen): Move this and callbacks to dedicated scheduler package / struct.
	taskToSlave := make(map[string]string)

	scheduleTask := func(offer mesos.Offer) *mesos.TaskInfo {
		slave := slaves.Front()
		if slave == nil {
			return nil
		}

		glog.V(2).Infof("Scheduling slave '%s'", slave.Value.(string))

		slaves.Remove(slave)

		task := "angstrom-task-" + strconv.Itoa(taskId)
		taskToSlave[task] = slave.Value.(string)

		return &mesos.TaskInfo{
			Name: proto.String("angstrom-task"),
			TaskId: &mesos.TaskID{
				Value: proto.String(task),
			},
			SlaveId:  offer.SlaveId,
			Executor: executor,
			Data:     []byte("{\"slave\": \"" + slave.Value.(string) + "\"}"),
			Resources: []*mesos.Resource{
				mesos.ScalarResource("cpus", 0.5),
				mesos.ScalarResource("mem", 32),
			},
		}
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("Angstrom metrics"),
			User: proto.String(""),
		},

		Scheduler: &mesos.Scheduler{
			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				for _, offer := range offers {
					taskId++

					tasks := make([]mesos.TaskInfo, 0)

					task := scheduleTask(offer)
					if task != nil {
						glog.V(2).Infof("Launching on offer %v", offer.Id)

						tasks = append(tasks, *task)
						driver.LaunchTasks(offer.Id, tasks)
					} else {
						glog.V(2).Infof("Declining offer %v", offer.Id)
						driver.DeclineOffer(offer.Id)
					}

				}
			},

			FrameworkMessage: func(driver *mesos.SchedulerDriver, _executorId mesos.ExecutorID, slaveId mesos.SlaveID, data string) {
				var target []payload.StatisticsInfo
				err := json.Unmarshal([]byte(data), &target)
				if err != nil {
					return
				}

				cluster.AddSlaveSamples(slaveId, target)
			},

			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				// TODO(nnielsen): Readd slave task to queue in case of any terminal state.
				if *status.State == mesos.TaskState_TASK_RUNNING {
				} else if *status.State == mesos.TaskState_TASK_FINISHED {
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()
	driver.Start()

	endpoints.Initialize(defaultPort, *angstromPath, cluster)

	glog.V(2).Infof("Waiting for threads to join")

	driver.Join()
}
Example #5
// See the Mesos Framework Development Guide:
// http://mesos.apache.org/documentation/latest/app-framework-development-guide
//
// Scheduler, scheduler driver, executor, and executor driver definitions:
// https://github.com/apache/mesos/blob/master/src/python/src/mesos.py
// https://github.com/apache/mesos/blob/master/include/mesos/scheduler.hpp
//
// Mesos protocol buffer definitions for Python:
// https://github.com/mesosphere/deimos/blob/master/deimos/mesos_pb2.py
// https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto
//
// NOTE: Feel free to strip out "_ = variable" stubs. They are in place to
// silence the Go compiler.
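// (The TODO sections in this skeleton correspond to the completed RENDLER scheduler
// shown in Example #2 above.)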
func main() {
	crawlQueue := list.New()  // list of string
	renderQueue := list.New() // list of string
	_ = renderQueue

	processedURLs := list.New() // list of string
	_ = processedURLs

	crawlResults := list.New() // list of CrawlEdge
	renderResults := make(map[string]string)

	seedUrl := flag.String("seed", "http://mesosphere.io", "The first URL to crawl")
	master := flag.String("master", "127.0.1.1:5050", "Location of leading Mesos master")
	localMode := flag.Bool("local", true, "If true, saves rendered web pages on local disk")
	// TODO(nnielsen): Add flag for artifacts.

	flag.Parse()

	crawlQueue.PushBack(*seedUrl)

	tasksCreated := 0
	tasksRunning := 0

	// TODO(nnielsen): based on `tasksRunning`, do
	// graceful shutdown of framework (allow ongoing render tasks to
	// finish).
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	go func(c chan os.Signal) {
		s := <-c
		fmt.Println("Got signal:", s)

		if s == os.Interrupt {
			rendler.WriteDOTFile(crawlResults, renderResults)
		}
		os.Exit(1)
	}(c)

	crawlCommand := "python crawl_executor.py"
	renderCommand := "python render_executor.py"

	if *localMode {
		renderCommand += " --local"
	}

	// TODO(nnielsen): In local mode, verify artifact locations.
	rendlerArtifacts := executorURIs()

	crawlExecutor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("crawl-executor")},
		Command: &mesos.CommandInfo{
			Value: proto.String(crawlCommand),
			Uris:  rendlerArtifacts,
		},
		Name: proto.String("Crawler"),
	}

	renderExecutor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("render-executor")},
		Command: &mesos.CommandInfo{
			Value: proto.String(renderCommand),
			Uris:  rendlerArtifacts,
		},
		Name: proto.String("Renderer"),
	}

	makeTaskPrototype := func(offer mesos.Offer) *mesos.TaskInfo {
		taskId := tasksCreated
		tasksCreated++
		return &mesos.TaskInfo{
			TaskId: &mesos.TaskID{
				Value: proto.String(fmt.Sprintf("RENDLER-%d", taskId)),
			},
			SlaveId: offer.SlaveId,
			Resources: []*mesos.Resource{
				mesos.ScalarResource("cpus", TASK_CPUS),
				mesos.ScalarResource("mem", TASK_MEM),
			},
		}
	}

	makeCrawlTask := func(url string, offer mesos.Offer) *mesos.TaskInfo {
		task := makeTaskPrototype(offer)
		task.Name = proto.String("CRAWL_" + *task.TaskId.Value)
		//
		// TODO
		//
		return task
	}
	_ = makeCrawlTask

	makeRenderTask := func(url string, offer mesos.Offer) *mesos.TaskInfo {
		task := makeTaskPrototype(offer)
		task.Name = proto.String("RENDER_" + *task.TaskId.Value)
		//
		// TODO
		//
		return task
	}
	_ = makeRenderTask

	maxTasksForOffer := func(offer mesos.Offer) int {
		// TODO(nnielsen): Parse offer resources.
		count := 0

		var cpus float64 = 0
		_ = cpus

		var mem float64 = 0
		_ = mem

		for _, resource := range offer.Resources {
			if resource.GetName() == "cpus" {
				cpus = *resource.GetScalar().Value
			}

			if resource.GetName() == "mem" {
				mem = *resource.GetScalar().Value
			}
		}

		//
		// TODO
		//

		return count
	}
	_ = maxTasksForOffer

	printQueueStatistics := func() {
		// TODO(nnielsen): Print queue lengths.
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("RENDLER"),
			User: proto.String(""),
		},

		Scheduler: &mesos.Scheduler{

			Registered: func(
				driver *mesos.SchedulerDriver,
				frameworkId mesos.FrameworkID,
				masterInfo mesos.MasterInfo) {
				log.Printf("Registered")
			},

			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				printQueueStatistics()

				//
				// TODO
				//
			},

			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				log.Printf("Received task status [%s] for task [%s]", rendler.NameFor(status.State), *status.TaskId.Value)

				if *status.State == mesos.TaskState_TASK_RUNNING {
					tasksRunning++
				} else if rendler.IsTerminal(status.State) {
					tasksRunning--
				}
			},

			FrameworkMessage: func(
				driver *mesos.SchedulerDriver,
				executorId mesos.ExecutorID,
				slaveId mesos.SlaveID,
				message string) {

				switch *executorId.Value {
				case *crawlExecutor.ExecutorId.Value:
					log.Print("Received framework message from crawler")
					var result rendler.CrawlResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing CrawlResult: [%s]", err)
					} else {
						//
						// TODO
						//
					}

				case *renderExecutor.ExecutorId.Value:
					log.Printf("Received framework message from renderer")
					var result rendler.RenderResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing RenderResult: [%s]", err)
					} else {
						//
						// TODO
						//
					}

				default:
					log.Printf("Received a framework message from some unknown source: %s", *executorId.Value)
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()

	driver.Start()
	driver.Join()
	driver.Stop(false)
}