Example #1
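A minimal main that creates a ZooKeeper-backed metadata manager, starts a CEPMd on a fixed port, prints the port, and runs the daemon in the foreground.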
func main() {
	mgr := metamgr.NewMetadataManager(frameworkID, []string{zookeeperAddr})
	c := cepm.NewCPMd(port, mgr)
	fmt.Println("CEPMd running on port: ", c.GetPort())
	c.Foreground()
}
func TestNothing(t *testing.T) {
	if os.Getenv("TRAVIS") == "true" {
		t.Skip("Unable to run test on Travis")
	}
	assert := assert.New(t)

	mgr := metamgr.NewMetadataManager("rmf5", []string{"127.0.0.1"})
	c := cepm.NewCPMd(7902, mgr)

	go c.Background()
	// Test port 7901: the 998th prime number.
	re, err := NewRiakExplorer(7901, "rex@ubuntu.", c)
	assert.Nil(err)
	re.TearDown()
	_, err = re.NewRiakExplorerClient().Ping()
	assert.NotNil(err)
	procs, err := ps.Processes()
	if err != nil {
		t.Fatal("Could not get OS processes")
	}
	pid := os.Getpid()
	for _, proc := range procs {
		if proc.PPid() == pid {
			assert.Fail("There are leftover child processes")
		}
	}
}
Example #3
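The SchedulerCore constructor: it builds the metadata manager, restores scheduler state from ZooKeeper, starts CEPMd in the background, and serves the executor artifact over HTTP.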
func NewSchedulerCore(
	schedulerHostname string,
	frameworkName string,
	frameworkRole string,
	zookeepers []string,
	schedulerIPAddr string,
	user string,
	authProvider string,
	mesosAuthPrincipal string,
	mesosAuthSecretFile string) *SchedulerCore {

	mgr := metamgr.NewMetadataManager(frameworkName, zookeepers)
	ss := GetSchedulerState(mgr)

	c := cepm.NewCPMd(0, mgr)
	c.Background()

	scheduler := &SchedulerCore{
		lock:            &sync.Mutex{},
		schedulerIPAddr: schedulerIPAddr,
		mgr:             mgr,
		frnDict:         make(map[string]*FrameworkRiakNode),
		user:            user,
		zookeepers:      zookeepers,
		cepm:            c,
		frameworkName:   frameworkName,
		frameworkRole:   frameworkRole,
		schedulerState:  ss,
	}
	scheduler.schedulerHTTPServer = ServeExecutorArtifact(scheduler, schedulerHostname)
	return scheduler
}
Example #4
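Reads the raw SchedulerState node out of ZooKeeper and pretty-prints it without deserializing.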
func getState() {
	mm := metadata_manager.NewMetadataManager(frameworkName, []string{zookeeperAddr})
	zkNode, err := mm.GetRootNode().GetChild("SchedulerState")
	if err != zk.ErrNoNode {
		if err != nil {
			log.Panic(err)
		}
		// Deserializing would pull in all of the bindata used for the scheduler, so dump the raw bytes instead:
		//ss, err := scheduler.DeserializeSchedulerState(zkNode.GetData())
		ss := zkNode.GetData()
		fmt.Printf("%# v", pretty.Formatter(ss))
	}
}
Example #5
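Resolves the API URL, preferring an RM_API environment override and falling back to the uri node stored in ZooKeeper.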
func getURL() string {
	overrideURL := os.Getenv("RM_API")

	if overrideURL != "" {
		return overrideURL
	}

	mgr := metadata_manager.NewMetadataManager(frameworkName, []string{zookeeperAddr})
	zkNode, err := mgr.GetRootNode().GetChild("uri")
	if err != nil {
		log.Panic(err)
	}
	return string(zkNode.GetData())
}
Example #6
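The executor-side RiakNode constructor: it deserializes the task data embedded in the Mesos TaskInfo and attaches a metadata manager built from that data.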
func NewRiakNode(taskInfo *mesos.TaskInfo, executor *ExecutorCore) *RiakNode {
	taskData, err := common.DeserializeTaskData(taskInfo.Data)
	if err != nil {
		log.Panic("Got error", err)
	}

	log.Infof("Deserialized task data: %+v", taskData)
	mgr := metamgr.NewMetadataManager(taskData.FrameworkName, taskData.Zookeepers)

	return &RiakNode{
		executor:        executor,
		taskInfo:        taskInfo,
		running:         false,
		metadataManager: mgr,
		taskData:        taskData,
	}
}
Example #7
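A later revision of the SchedulerCore constructor that adds per-node resources (CPUs, memory, disk), Mesos authentication settings, and a compatibility flag derived from useReservations.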
func NewSchedulerCore(
	schedulerHostname string,
	frameworkName string,
	frameworkRole string,
	zookeepers []string,
	schedulerIPAddr string,
	user string,
	nodeCpus string,
	nodeMem string,
	nodeDisk string,
	authProvider string,
	mesosAuthPrincipal string,
	mesosAuthSecretFile string,
	useReservations bool) *SchedulerCore {

	mgr := metamgr.NewMetadataManager(frameworkName, zookeepers)
	ss := GetSchedulerState(mgr)

	c := cepm.NewCPMd(0, mgr)
	c.Background()

	scheduler := &SchedulerCore{
		lock:                &sync.Mutex{},
		schedulerIPAddr:     schedulerIPAddr,
		mgr:                 mgr,
		user:                user,
		zookeepers:          zookeepers,
		cepm:                c,
		frameworkName:       frameworkName,
		frameworkRole:       frameworkRole,
		nodeCpus:            nodeCpus,
		nodeMem:             nodeMem,
		nodeDisk:            nodeDisk,
		schedulerState:      ss,
		authProvider:        authProvider,
		mesosAuthPrincipal:  mesosAuthPrincipal,
		mesosAuthSecretFile: mesosAuthSecretFile,
		compatibilityMode:   !useReservations,
	}
	scheduler.schedulerHTTPServer = ServeExecutorArtifact(scheduler, schedulerHostname)
	return scheduler
}
Example #8
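An integration test that unpacks the trusty root and riak_explorer artifacts into a temporary directory, starts and tears down a Riak Explorer, and then verifies that no child processes were left behind.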
// TODO: Fix the test to decompress trusty into "root";
// the test needs to manage the root itself.
func TestREX(t *testing.T) {
	if os.Getenv("TRAVIS") == "true" {
		t.Skip("Unable to run test on Travis")
	}
	assert := assert.New(t)

	mgr := metamgr.NewMetadataManager("rmf5", []string{"127.0.0.1"})
	c := cepm.NewCPMd(7902, mgr)

	go c.Background()
	dirname, err := ioutil.TempDir("", "root")
	assert.Nil(err)
	defer os.RemoveAll(dirname)
	t.Log("Decompressing into:", dirname)

	f, err := os.Open("../artifacts/data/trusty.tar.gz")
	assert.Nil(err)
	assert.Nil(common.ExtractGZ(dirname, f))
	f, err = os.Open("../artifacts/data/riak_explorer-bin.tar.gz")
	assert.Nil(err)
	assert.Nil(common.ExtractGZ(dirname, f))

	// Test port 7901: the 998th prime number.
	re, err := NewRiakExplorer(7901, "rex@ubuntu.", c, dirname, true)
	assert.Nil(err)
	re.TearDown()
	_, err = re.NewRiakExplorerClient().Ping()
	assert.NotNil(err)
	procs, err := ps.Processes()
	if err != nil {
		t.Fatal("Could not get OS processes")
	}
	pid := os.Getpid()
	for _, proc := range procs {
		if proc.PPid() == pid {
			assert.Fail("There are leftover child processes")
		}
	}
}
Example #9
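A later revision of the RiakNode constructor that also prepares a TASK_FAILED status up front, presumably to be sent when the task is killed.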
func NewRiakNode(taskInfo *mesos.TaskInfo, executor *ExecutorCore) *RiakNode {
	taskData, err := common.DeserializeTaskData(taskInfo.Data)
	if err != nil {
		log.Panic("Got error", err)
	}

	log.Infof("Deserialized task data: %+v", taskData)
	mgr := metamgr.NewMetadataManager(taskData.FrameworkName, taskData.Zookeepers)

	killStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FAILED.Enum(),
	}

	return &RiakNode{
		executor:        executor,
		taskInfo:        taskInfo,
		running:         false,
		metadataManager: mgr,
		taskData:        taskData,
		killStatus:      killStatus,
	}
}