Example #1
func main() {
	mgr := metamgr.NewMetadataManager(frameworkID, []string{zookeeperAddr})
	c := cepm.NewCPMd(port, mgr)
	fmt.Println("CEPMd running on port: ", c.GetPort())
	c.Foreground()

}
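A minimal sketch of the ephemeral-port pattern the later examples rely on: pass 0 to cepm.NewCPMd so it picks a free port, then read it back with GetPort. The import paths below are assumptions, not confirmed by these examples.

package main

import (
	"fmt"

	"github.com/basho-labs/riak-mesos/cepmd/cepm"               // assumed import path
	metamgr "github.com/basho-labs/riak-mesos/metadata_manager" // assumed import path
)

func main() {
	// Port 0 asks CEPMd to choose any free port, as Examples #3 and #4 do.
	mgr := metamgr.NewMetadataManager("my-framework", []string{"127.0.0.1"})
	c := cepm.NewCPMd(0, mgr)
	fmt.Println("CEPMd running on port:", c.GetPort())
	c.Foreground() // block and serve, as in Example #1
}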
Example #2
func TestNothing(t *testing.T) {
	if os.Getenv("TRAVIS") == "true" {
		t.Skip("Unable to run test on Travis")
	}
	assert := assert.New(t)

	mgr := metamgr.NewMetadataManager("rmf5", []string{"127.0.0.1"})
	c := cepm.NewCPMd(7902, mgr)

	go c.Background()
	// Port 7901 is used only for testing; it is the 998th prime.
	re, err := NewRiakExplorer(7901, "rex@ubuntu.", c)
	assert.Nil(err)
	re.TearDown()
	_, err = re.NewRiakExplorerClient().Ping()
	assert.NotNil(err)
	procs, err := ps.Processes()
	if err != nil {
		t.Fatal("Could not get OS processes")
	}
	pid := os.Getpid()
	for _, proc := range procs {
		if proc.PPid() == pid {
			assert.Fail("There are children proccesses leftover")
		}
	}
}
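The leftover-child scan at the end of this test reappears verbatim in Example #5; a helper one might extract (a sketch, assuming the ps identifier is mitchellh/go-ps, whose Process interface exposes the Pid, PPid, and Executable accessors used above):

// assertNoChildren fails the test if any live process still lists the
// current test process as its parent.
func assertNoChildren(t *testing.T) {
	procs, err := ps.Processes()
	if err != nil {
		t.Fatal("Could not get OS processes")
	}
	pid := os.Getpid()
	for _, proc := range procs {
		if proc.PPid() == pid {
			t.Errorf("leftover child process: pid %d (%s)", proc.Pid(), proc.Executable())
		}
	}
}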
Example #3
func NewSchedulerCore(
	schedulerHostname string,
	frameworkName string,
	frameworkRole string,
	zookeepers []string,
	schedulerIPAddr string,
	user string,
	authProvider string,
	mesosAuthPrincipal string,
	mesosAuthSecretFile string) *SchedulerCore {

	mgr := metamgr.NewMetadataManager(frameworkName, zookeepers)
	ss := GetSchedulerState(mgr)

	c := cepm.NewCPMd(0, mgr)
	c.Background()

	scheduler := &SchedulerCore{
		lock:            &sync.Mutex{},
		schedulerIPAddr: schedulerIPAddr,
		mgr:             mgr,
		frnDict:         make(map[string]*FrameworkRiakNode),
		user:            user,
		zookeepers:      zookeepers,
		cepm:            c,
		frameworkName:   frameworkName,
		frameworkRole:   frameworkRole,
		schedulerState:  ss,
	}
	scheduler.schedulerHTTPServer = ServeExecutorArtifact(scheduler, schedulerHostname)
	return scheduler
}
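A hypothetical invocation; every argument value below is an illustrative placeholder, not taken from the examples:

// Placeholders throughout; empty auth fields mean no Mesos authentication.
core := NewSchedulerCore(
	"scheduler.example.com",         // schedulerHostname
	"riak-framework",                // frameworkName
	"*",                             // frameworkRole
	[]string{"zk.example.com:2181"}, // zookeepers
	"10.0.0.1",                      // schedulerIPAddr
	"root",                          // user
	"",                              // authProvider
	"",                              // mesosAuthPrincipal
	"",                              // mesosAuthSecretFile
)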
Example #4
func NewSchedulerCore(
	schedulerHostname string,
	frameworkName string,
	frameworkRole string,
	zookeepers []string,
	schedulerIPAddr string,
	user string,
	nodeCpus string,
	nodeMem string,
	nodeDisk string,
	authProvider string,
	mesosAuthPrincipal string,
	mesosAuthSecretFile string,
	useReservations bool) *SchedulerCore {

	mgr := metamgr.NewMetadataManager(frameworkName, zookeepers)
	ss := GetSchedulerState(mgr)

	c := cepm.NewCPMd(0, mgr)
	c.Background()

	scheduler := &SchedulerCore{
		lock:                &sync.Mutex{},
		schedulerIPAddr:     schedulerIPAddr,
		mgr:                 mgr,
		user:                user,
		zookeepers:          zookeepers,
		cepm:                c,
		frameworkName:       frameworkName,
		frameworkRole:       frameworkRole,
		nodeCpus:            nodeCpus,
		nodeMem:             nodeMem,
		nodeDisk:            nodeDisk,
		schedulerState:      ss,
		authProvider:        authProvider,
		mesosAuthPrincipal:  mesosAuthPrincipal,
		mesosAuthSecretFile: mesosAuthSecretFile,
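		// Reservations disabled means the scheduler runs in compatibility mode.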
		compatibilityMode:   !useReservations,
	}
	scheduler.schedulerHTTPServer = ServeExecutorArtifact(scheduler, schedulerHostname)
	return scheduler
}
Example #5
// TODO: Fix test and decompress trusty into "root"
// It needs to manage the root itself
func TestREX(t *testing.T) {
	if os.Getenv("TRAVIS") == "true" {
		t.Skip("Unable to run test on Travis")
	}
	assert := assert.New(t)

	mgr := metamgr.NewMetadataManager("rmf5", []string{"127.0.0.1"})
	c := cepm.NewCPMd(7902, mgr)

	go c.Background()
	// Decompress the chroot into a temporary directory.
	dirname, err := ioutil.TempDir("", "root")
	assert.Nil(err)
	defer os.RemoveAll(dirname)
	t.Log("Decompressing into:", dirname)

	f, err := os.Open("../artifacts/data/trusty.tar.gz")
	assert.Nil(err)
	assert.Nil(common.ExtractGZ(dirname, f))
	f, err = os.Open("../artifacts/data/riak_explorer-bin.tar.gz")
	assert.Nil(err)
	assert.Nil(common.ExtractGZ(dirname, f))

	re, err := NewRiakExplorer(7901, "rex@ubuntu.", c, dirname, true) // port 7901: the 998th prime, used only for testing
	assert.Nil(err)
	re.TearDown()
	_, err = re.NewRiakExplorerClient().Ping()
	assert.NotNil(err)
	procs, err := ps.Processes()
	if err != nil {
		t.Fatal("Could not get OS processes")
	}
	pid := os.Getpid()
	for _, proc := range procs {
		if proc.PPid() == pid {
			assert.Fail("There are children proccesses leftover")
		}
	}
}
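Each artifact above is opened and extracted without ever closing the file handle; a helper one might factor out, built on the same common.ExtractGZ call the test already uses:

// extractAll unpacks each gzipped tarball into dir via common.ExtractGZ,
// closing every opened file along the way.
func extractAll(dir string, paths ...string) error {
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		err = common.ExtractGZ(dir, f)
		f.Close()
		if err != nil {
			return err
		}
	}
	return nil
}

With it, the two open/extract pairs collapse to a single assert.Nil(extractAll(dirname, "../artifacts/data/trusty.tar.gz", "../artifacts/data/riak_explorer-bin.tar.gz")).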
Example #6
func (riakNode *RiakNode) Run() {
	var err error
	var fetchURI string
	var resp *http.Response

	os.Mkdir("root", 0777)

	riakNode.decompress()

	fetchURI = fmt.Sprintf("%s/static2/riak-2.1.1-bin.tar.gz", riakNode.taskData.URI)
	log.Info("Preparing to fetch riak from: ", fetchURI)
	resp, err = http.Get(fetchURI)
	if err != nil {
		log.Panic("Unable to fetch riak root: ", err)
	}
	err = common.ExtractGZ("root", resp.Body)
	if err != nil {
		log.Panic("Unable to extract riak root: ", err)
	}

	config := riakNode.configureRiak(riakNode.taskData)

	c := cepm.NewCPMd(0, riakNode.metadataManager)
	c.Background()
	riakNode.configureAdvanced(c.GetPort())

	args := []string{"console", "-noinput"}

	kernelDirs, err := filepath.Glob("root/riak/lib/kernel*")
	if err != nil || len(kernelDirs) == 0 {
		log.Fatal("Could not find kernel directory")
	}

	log.Infof("Found kernel dirs: %v", kernelDirs)

	err = cepm.InstallInto(fmt.Sprint(kernelDirs[0], "/ebin"))
	if err != nil {
		log.Panic(err)
	}
	if err := common.KillEPMD("root/riak"); err != nil {
		log.Fatal("Could not kill EPMd: ", err)
	}
	args = append(args, "-no_epmd")
	os.MkdirAll(fmt.Sprint(kernelDirs[0], "/priv"), 0777)
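	// The trailing "." makes the payload a parseable Erlang term; presumably
	// the patched kernel reads the CEPMd port back via file:consult/1 (assumption).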
	ioutil.WriteFile(fmt.Sprint(kernelDirs[0], "/priv/cepmd_port"), []byte(fmt.Sprintf("%d.", c.GetPort())), 0777)

	HealthCheckFun := func() error {
		log.Info("Checking if Riak is started")
		data, err := ioutil.ReadFile("root/riak/log/console.log")
		if err != nil {
			return err
		}
		if bytes.Contains(data, []byte("Wait complete for service riak_kv")) {
			log.Info("Riak started, waiting 10 seconds to avoid race conditions (HACK)")
			time.Sleep(10 * time.Second)
			return nil
		}
		return errors.New("Riak KV not yet started")
	}

	wd, err := os.Getwd()
	if err != nil {
		log.Panic("Could not get wd: ", err)
	}
	chroot := filepath.Join(wd, "root")
	riakNode.pm, err = process_manager.NewProcessManager(func() {}, "/riak/bin/riak", args, HealthCheckFun, &chroot, riakNode.taskData.UseSuperChroot)

	if err != nil {
		log.Error("Could not start Riak: ", err)

		runStatus := &mesos.TaskStatus{
			TaskId: riakNode.taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_FAILED.Enum(),
		}
		_, err = riakNode.executor.Driver.SendStatusUpdate(runStatus)
		if err != nil {
			log.Panic("Got error", err)
		}
		// Shutdown:
		time.Sleep(15 * time.Minute)
		log.Info("Shutting down due to GC, after failing to bring up Riak node")
		riakNode.executor.Driver.Stop()
	} else {
		rexPort := riakNode.taskData.RexPort
		riakNode.startRex(rexPort, c)
		rootNode := riakNode.metadataManager.GetRootNode()

		rootNode.CreateChildIfNotExists("coordinator")
		coordinator, err := rootNode.GetChild("coordinator")
		if err != nil {
			log.Panic(err)
		}
		coordinator.CreateChildIfNotExists("coordinatedNodes")
		coordinatedNodes, err := coordinator.GetChild("coordinatedNodes")
		if err != nil {
			log.Panic(err)
		}

		child, err := coordinatedNodes.MakeChild(riakNode.taskInfo.GetTaskId().GetValue(), true)
		if err != nil {
			log.Panic(err)
		}
		coordinatedData := common.CoordinatedData{
			NodeName:      riakNode.taskData.FullyQualifiedNodeName,
			DisterlPort:   int(config.DisterlPort),
			PBPort:        int(config.PBPort),
			HTTPPort:      int(config.HTTPPort),
			Hostname:      riakNode.executor.slaveInfo.GetHostname(),
			ClusterName:   riakNode.taskData.ClusterName,
			FrameworkName: riakNode.taskData.FrameworkName,
		}
		cdBytes, err := coordinatedData.Serialize()
		if err != nil {
			log.Panic("Could not serialize coordinated data	", err)
		}
		child.SetData(cdBytes)
		// lock.Unlock()
		tsd := common.TaskStatusData{
			RexPort: rexPort,
		}
		tsdBytes, err := tsd.Serialize()

		if err != nil {
			log.Panic("Could not serialize Riak Explorer data", err)
		}
		runStatus := &mesos.TaskStatus{
			TaskId: riakNode.taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_RUNNING.Enum(),
			Data:   tsdBytes,
		}
		_, err = riakNode.executor.Driver.SendStatusUpdate(runStatus)
		if err != nil {
			log.Panic("Got error", err)
		}
		riakNode.running = true
		go riakNode.runLoop(child)
	}
}
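process_manager.NewProcessManager is handed HealthCheckFun and presumably polls it until the node reports healthy. A generic sketch of that polling pattern; the name and signature are illustrative, not the process_manager API:

// waitHealthy polls check every interval until it returns nil or the
// timeout elapses. Illustrative only.
func waitHealthy(check func() error, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("health check never passed: %v", err)
		}
		time.Sleep(interval)
	}
}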
Example #7
func (riakNode *RiakNode) Run() {
	var err error

	config := riakNode.configureRiak(riakNode.taskData)

	c := cepm.NewCPMd(0, riakNode.metadataManager)
	c.Background()
	riakNode.configureAdvanced(c.GetPort())

	args := []string{"console", "-noinput"}

	kernelDirs, err := filepath.Glob("root/riak/lib/kernel*")
	if err != nil || len(kernelDirs) == 0 {
		log.Fatal("Could not find kernel directory")
	}

	log.Infof("Found kernel dirs: %v", kernelDirs)

	err = cepm.InstallInto(fmt.Sprint(kernelDirs[0], "/ebin"))
	if err != nil {
		log.Panic(err)
	}
	if err := common.KillEPMD("root/riak"); err != nil {
		log.Fatal("Could not kill EPMd: ", err)
	}
	args = append(args, "-no_epmd")
	os.MkdirAll(fmt.Sprint(kernelDirs[0], "/priv"), 0777)
	ioutil.WriteFile(fmt.Sprint(kernelDirs[0], "/priv/cepmd_port"), []byte(fmt.Sprintf("%d.", c.GetPort())), 0777)

	HealthCheckFun := func() error {
		log.Info("Checking if Riak is started")
		data, err := ioutil.ReadFile("root/riak/log/console.log")
		if err != nil {
			return err
		}
		if bytes.Contains(data, []byte("Wait complete for service riak_kv")) {
			log.Info("Riak started, waiting 10 seconds to avoid race conditions (HACK)")
			time.Sleep(10 * time.Second)
			return nil
		}
		return errors.New("Riak KV not yet started")
	}

	wd, err := os.Getwd()
	if err != nil {
		log.Panic("Could not get wd: ", err)
	}
	chroot := filepath.Join(wd, "root")
	riakNode.pm, err = process_manager.NewProcessManager(func() {}, "/riak/bin/riak", args, HealthCheckFun, &chroot, riakNode.taskData.UseSuperChroot)

	if err != nil {
		log.Error("Could not start Riak: ", err)

		runStatus := &mesos.TaskStatus{
			TaskId: riakNode.taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_FAILED.Enum(),
		}
		_, err = riakNode.executor.Driver.SendStatusUpdate(runStatus)
		if err != nil {
			log.Panic("Got error", err)
		}
		// Shutdown:
		time.Sleep(15 * time.Minute)
		log.Info("Shutting down due to GC, after failing to bring up Riak node")
		riakNode.executor.Driver.Stop()
	} else {
		child := riakNode.getCoordinatedChild()
		riakNode.setCoordinatedData(child, config)

		rexPort := riakNode.taskData.HTTPPort
		tsd := common.TaskStatusData{
			RexPort: rexPort,
		}
		tsdBytes, err := tsd.Serialize()

		if err != nil {
			log.Panic("Could not serialize Riak Explorer data", err)
		}

		runStatus := &mesos.TaskStatus{
			TaskId: riakNode.taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_RUNNING.Enum(),
			Data:   tsdBytes,
		}
		_, err = riakNode.executor.Driver.SendStatusUpdate(runStatus)
		if err != nil {
			log.Panic("Got error", err)
		}
		riakNode.running = true
		go riakNode.runLoop(child)
	}
}
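Example #7 is Example #6 with the ZooKeeper bookkeeping factored out into getCoordinatedChild and setCoordinatedData. Reconstructed from the inline code in Example #6, getCoordinatedChild plausibly looks like the sketch below; the metamgr node return type is an assumption:

// Sketch only: ensures coordinator/coordinatedNodes exists in ZooKeeper and
// registers this task beneath it, mirroring the inline version in Example #6.
func (riakNode *RiakNode) getCoordinatedChild() *metamgr.ZkNode {
	rootNode := riakNode.metadataManager.GetRootNode()
	rootNode.CreateChildIfNotExists("coordinator")
	coordinator, err := rootNode.GetChild("coordinator")
	if err != nil {
		log.Panic(err)
	}
	coordinator.CreateChildIfNotExists("coordinatedNodes")
	coordinatedNodes, err := coordinator.GetChild("coordinatedNodes")
	if err != nil {
		log.Panic(err)
	}
	child, err := coordinatedNodes.MakeChild(riakNode.taskInfo.GetTaskId().GetValue(), true)
	if err != nil {
		log.Panic(err)
	}
	return child
}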