Example #1
//####################################//
// Code
//====================================//
// Init code
func init() {
	// FIXME init flags
	const (
		flagDef_Debug = false
		flagDescDebug = "   -debug   enable debug"
		flagDef_Quiet = false
		flagDescQuiet = "-q -quiet   suppress output"
	)
	flag.BoolVar(&flagDebug, "debug", flagDef_Debug, flagDescDebug)
	flag.BoolVar(&flagQuiet, "q", flagDef_Quiet, flagDescQuiet)
	flag.BoolVar(&flagQuiet, "quiet", flagDef_Quiet, flagDescQuiet)

	//----------------------------------//
	// Help
	// FIXME update help
	flag.Usage = func() {
		usage := `Usage: %s [options]
Options:
  ` + flag.Lookup("q").Usage + `
  ` + flag.Lookup("debug").Usage + `
  No more options for now.

MIT, BSD or something. There is no help.
`
		fmt.Fprintf(os.Stderr, usage, os.Args[0])
	}
	//----------------------------------//
}
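Note that this snippet relies on two package-level variables the excerpt omits; a minimal declaration (names assumed from the calls above) would be:

var (
	flagDebug bool
	flagQuiet bool
)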
Example #2
func TestRegisterFlags(t *testing.T) {
	c := &Config{
		DHTRouters:    "example.router.com:6060",
		MaxNodes:      2020,
		CleanupPeriod: time.Second,
		SavePeriod:    time.Second * 2,
		RateLimit:     999,
	}
	RegisterFlags(c)
	if flag.Lookup("routers").DefValue != c.DHTRouters {
		t.Fatal("Incorrect routers flag")
	}
	if flag.Lookup("maxNodes").DefValue != strconv.FormatInt(int64(c.MaxNodes), 10) {
		t.Fatal("Incorrect maxNodes flag")
	}
	if flag.Lookup("cleanupPeriod").DefValue != c.CleanupPeriod.String() {
		t.Fatal("Incorrect cleanupPeriod flag")
	}
	if flag.Lookup("savePeriod").DefValue != c.SavePeriod.String() {
		t.Fatal("Incorrect routers flag")
	}
	if flag.Lookup("rateLimit").DefValue != strconv.FormatInt(c.RateLimit, 10) {
		t.Fatal("Incorrect routers flag")
	}
}
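For reference, a RegisterFlags implementation consistent with this test could look like the sketch below (field types and usage strings are assumptions; this is not necessarily the project's actual code):

func RegisterFlags(c *Config) {
	// Each flag's default is the value already stored in the Config, which is
	// exactly what the test above checks via DefValue.
	flag.StringVar(&c.DHTRouters, "routers", c.DHTRouters, "comma-separated list of DHT routers")
	flag.IntVar(&c.MaxNodes, "maxNodes", c.MaxNodes, "maximum number of DHT nodes to track")
	flag.DurationVar(&c.CleanupPeriod, "cleanupPeriod", c.CleanupPeriod, "how often stale nodes are removed")
	flag.DurationVar(&c.SavePeriod, "savePeriod", c.SavePeriod, "how often the routing table is persisted")
	flag.Int64Var(&c.RateLimit, "rateLimit", c.RateLimit, "maximum packets per second")
}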
Example #3
func main() {
	flag.Parse()

	if *cliVersion {
		fmt.Println(flag.Lookup("version").Usage)
		exit(0)
		return
	}
	if *cliHelp {
		fmt.Println(flag.Lookup("help").Usage)
		exit(0)
		return
	}
	var res beatsone.BeatsOne
	var err error
	if *cliSchedule {
		res, err = beatsone.GetSchedule()
	} else {
		res, err = beatsone.GetNowPlaying()
	}
	if err != nil {
		fmt.Println(err)
		exit(1)
	}
	if *cliJSON {
		fmt.Println(res.JSONString())
	} else {
		fmt.Println(res.String())
	}
	exit(0)
	return
}
Example #4
func (p *GitParser) Setup() {
	executable := flag.Lookup("executable").Value.String()
	out := flag.Lookup("out").Value.String()

	if out == "<STDOUT>" {
		out = "REVISION.json"
	}

	hook := executable + " -out=\"" + out + "\"; # scm-status hook\r\n"

	hook_dir := strings.Join([]string{p.Dir(), ".git", "hooks"}, path_separator)

	filenames := []string{
		hook_dir + path_separator + "post-checkout",
		hook_dir + path_separator + "post-merge",
		hook_dir + path_separator + "post-commit",
	}

	for _, filename := range filenames {
		fp, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0775)
		if err != nil {
			continue // skip hook files that cannot be opened
		}

		_, _ = fp.WriteString(hook)

		fp.Close()
	}
}
Example #5
func main() {
	flag.Parse()

	if flagOne == "" {
		fmt.Fprintf(os.Stdout, "Usage of cody.guo ok %s:\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(1)
	}

	fmt.Println("参数数量:", flag.NFlag())
	oneFlag := flag.Lookup("one")

	fmt.Println(oneFlag.Name, oneFlag.Value)

	// fmt.Println(len(os.Args))

	if debug {
		fmt.Println("debug is on.")
	} else {
		fmt.Println("debug is off.")
	}

	fmt.Println(flagOne)

	debugFlag := flag.Lookup("d")
	fmt.Println(debugFlag.Name, debugFlag.Value)

}
Example #6
func fakeRequest() {
	flag.Lookup("alsologtostderr").Value.Set("true")
	flag.Lookup("log_dir").Value.Set("./log_dir")
	flag.Parse()
	glog.Infoln("begin fake..")
	// test_uid := "45529"
	host := "0.0.0.0"
	port := "12231"
	driver_trail_json := `[{"lat":"33.33066","lng":"121.284148","t":1472338663}]`
	driver_trail_form := url.Values{
		"session_id": []string{"test_uid"},
		"json":       []string{dirver_trail_json},
		"city":       []string{"上海"},
	}

	ticker := time.NewTicker(time.Second * 5)
	for t := range ticker.C {
		resp, err := http.PostForm(fmt.Sprintf("http://%s:%s/driver/trail", host, port), driver_trail_form)
		if err != nil {
			glog.Errorf("at %s, PostForm-err: %s\n", t, err)
			continue
		}
		resp_body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close() // close inside the loop; a defer here would pile up across iterations
		if err != nil {
			glog.Errorf("at %s, ioutil.ReadAll-err: %s\n", t, err)
			continue
		}
		glog.Infof("at %s, fake post, feedback is %s", t, string(resp_body))
	}
}
Example #7
func main() {
	// Initialise our configuration from flags

	var nodeName = flag.String("name", REQUIRED, "Node network name and port, e.g. localhost:3000")
	var gobName = flag.String("gob", "", "Alternative gob network name and port for clients, allowing clients to connect over a different physical interface to nodes.")
	var httpName = flag.String("http", "", "Network name and port for the http ExpVar to listen on.")
	var cborName = flag.String("cbor", "", "Network name and port for the CBOR RPC interface to listen on.")
	var nodePath = flag.String("path", REQUIRED, "Node root path for configuration and log files")
	var clusterID = flag.String("id", "", "Cluster ID that this node is part of")

	flag.Parse()

	if flag.Lookup("help") != nil || flag.Lookup("h") != nil {
		flag.PrintDefaults()
		return
	}

	if *nodeName == REQUIRED {
		log.Printf("name missing.\n")
		flag.PrintDefaults()
		return
	}

	if *nodePath == REQUIRED {
		log.Printf("path missing.\n")
		flag.PrintDefaults()
		return
	}

	// Create our server
	serverNode, err := server.NewServerNode(*nodeName, *gobName, *httpName, *cborName, *nodePath, *clusterID)

	if err != nil {
		log.Fatalf("Unable to start server due to errors.\n")
	}

	expvar.Publish("node", expvar.Func(serverNode.ExpVar))

	//dataLog := &memlog.MemoryLog{}

	// Start a listener to handle incoming requests from other peers
	err = serverNode.ListenConnections()
	if err != nil {
		log.Fatalf("Error starting listener: %v\n", err)
	}

	// Setup signal handling to catch interrupts.
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		os.Interrupt,
		os.Kill)
	go func() {
		<-sigc
		serverNode.RequestShutdown("Request to terminate process detected.")
	}()

	serverNode.WaitForShutdown()

}
Example #8
func main() {
	settings.Init()
	flag.Parse()

	if len(os.Args) == 1 {
		flag.Usage()
		return
	}

	if flag.Lookup("run") == nil && flag.Lookup("svcctl") == nil && flag.Lookup("help") == nil && flag.Lookup("console") == nil {
		flag.Usage()
		return
	}

	if *help {
		flag.Usage()
		return
	}

	LogFile, err := os.OpenFile(settings.Log.File, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0777)
	if err != nil {
		fmt.Println(err)
		return
	}

	p := &Program{
		Logger: log.New(LogFile, "[filesync]", log.Ldefault|log.Lmicroseconds),
	}

	s, err := svc.New(p, &svc.Config{
		Name:        "Filesync",
		DisplayName: "FileSync Service",
		Description: "Filesync is a simple tool to sync files between multiple directory pairs.",
		Arguments:   []string{"-run"},
	})

	if err != nil {
		fmt.Println(err.Error())
		return
	}

	if *run {
		if *console {
			p.Logger = log.New(os.Stdout, "[filesync]", log.Ldefault|log.Lmicroseconds)
			p.run()
			return
		}
		err := s.Run()
		fmt.Println("run with error: ", err)
		return
	}

	Actions := strings.Split(*controls, ",")
	for _, action := range Actions {
		err := svc.Control(s, action)
		fmt.Println(err)
	}

}
Example #9
func main() {
	flag.Parse()

	flagsOk := true
	if flag.Lookup("project").Value.String() == "" {
		fmt.Fprintf(os.Stderr, "Flag --project is required\n")
		flagsOk = false
	}

	var sourceFlagCount int
	if flag.Lookup("dataset").Value.String() != "" {
		sourceFlagCount++
	}
	if flag.Lookup("jobid").Value.String() != "" {
		sourceFlagCount++
	}
	if sourceFlagCount != 1 {
		fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n")
		flagsOk = false
	}

	if !flagsOk {
		os.Exit(1)
	}

	tableRE, err := regexp.Compile(*table)
	if err != nil {
		fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table)
		os.Exit(1)
	}

	httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)
	if err != nil {
		log.Fatalf("Creating http client: %v", err)
	}

	client, err := bigquery.NewClient(httpClient, *project)
	if err != nil {
		log.Fatalf("Creating bigquery client: %v", err)
	}

	if *jobID != "" {
		printQueryResults(client, *jobID)
		return
	}
	ds := client.Dataset(*dataset)
	var tables []*bigquery.Table
	tables, err = ds.ListTables(context.Background())
	if err != nil {
		log.Fatalf("Listing tables: %v", err)
	}
	for _, t := range tables {
		if tableRE.MatchString(t.TableID) {
			printTable(client, t)
		}
	}
}
Example #10
func main() {
	flag.Parse()

	flagsOk := true
	if flag.Lookup("project").Value.String() == "" {
		fmt.Fprintf(os.Stderr, "Flag --project is required\n")
		flagsOk = false
	}

	var sourceFlagCount int
	if flag.Lookup("dataset").Value.String() != "" {
		sourceFlagCount++
	}
	if flag.Lookup("jobid").Value.String() != "" {
		sourceFlagCount++
	}
	if sourceFlagCount != 1 {
		fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n")
		flagsOk = false
	}

	if !flagsOk {
		os.Exit(1)
	}

	ctx := context.Background()
	tableRE, err := regexp.Compile(*table)
	if err != nil {
		fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table)
		os.Exit(1)
	}

	client, err := bigquery.NewClient(ctx, *project)
	if err != nil {
		log.Fatalf("Creating bigquery client: %v", err)
	}

	if *jobID != "" {
		printQueryResults(ctx, client, *jobID)
		return
	}
	ds := client.Dataset(*dataset)
	tableIter := ds.Tables(ctx)
	for {
		t, err := tableIter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("Listing tables: %v", err)
		}
		if tableRE.MatchString(t.TableID) {
			printTable(ctx, client, t)
		}
	}
}
Example #11
func UpdateLoggingFlagsFromConfig(conf *Config) {
	err := flag.Lookup("v").Value.Set(strconv.Itoa(conf.Verbosity))
	if err != nil {
		glog.Errorf("Failed to apply config.Verbosity to flag.v: %v", err)
	}

	err = flag.Lookup("logtostderr").Value.Set("true")
	if err != nil {
		glog.Errorf("Failed to set flag.logtostderr to true: %v", err)
	}
}
Example #12
func logFlush() {
	// go run main.go -alsologtostderr -log_dir="./"
	logDir := flag.Lookup("log_dir")
	testFlag := flag.Lookup("log_dir")
	glog.Infoln("lookup before parse", logDir.Name, logDir.Value, testFlag)
	err := flag.Set("log_dir", "test_value")
	glog.Errorln("err: ", err)
	glog.Infoln("abc..")
	glog.Infof("abc..%d", 123)
	logDir = flag.Lookup("log_dir")
	testFlag = flag.Lookup("log_dir")
	glog.Infoln("lookup before parse", logDir.Name, logDir.Value, testFlag.Name, testFlag.Value)
	glog.Flush()
}
Example #13
File: llgo.go Project: pcc/llgo
func computeTriple() string {
	if *triple != "" {
		// Ensure os/arch aren't specified if triple is specified.
		//
		// This is an ugly way of telling whether or not -os or -arch were
		// specified. We can't just check the value, as it will have a default.
		archFlag := flag.Lookup("arch")
		osFlag := flag.Lookup("os")
		flag.Visit(func(f *flag.Flag) {
			switch f {
			case archFlag, osFlag:
				fmt.Fprintln(os.Stderr, tripleArchOsError)
				os.Exit(1)
			}
		})
		return *triple
	}

	// -arch is either an architecture name recognised by
	// the gc compiler, or an LLVM architecture name.
	targetArch := *arch
	if targetArch == "" {
		targetArch = runtime.GOARCH
	}
	switch targetArch {
	case "386":
		targetArch = "x86"
	case "amd64", "x86_64":
		targetArch = "x86-64"
	}

	// -os is either an OS name recognised by the gc
	// compiler, or an LLVM OS name.
	targetOS := *os_
	if targetOS == "" {
		targetOS = runtime.GOOS
	}
	switch targetOS {
	case "windows":
		targetOS = "win32"
	case "darwin":
		// Using darwin11 rather than just darwin enables TLS support,
		// making llgo-dist run without manually specifying a target
		// triple
		targetOS = "darwin11"
	}

	tripleArch := getTripleArchName(targetArch)
	return fmt.Sprintf("%s-unknown-%s", tripleArch, targetOS)
}
Example #14
// Check returns a non-nil error if the Config is unusable.
func (c Config) Check() error {
	if c.SourceBuffer <= 2 {
		return errors.New("-source-buffer must be greater than 2")
	}
	if c.FrameBytes < 1 {
		return errors.New("-frame-bytes must not be zero")
	}
	if c.Path == "" {
		return errors.New("-path must not be empty")
	}
	if c.CPUMax < 0 {
		return errors.New("-cpu-max must not be negative")
	}
	if c.CPUMax == 0 {
		c.CPUMax = runtime.NumCPU()
	}
	if c.ExecFlag && c.Path != flag.Lookup("path").DefValue && c.Path != "" {
		return errors.New("cannot combine -exec and -path")
	}
	if c.ExecFlag == (len(c.Args) == 0) {
		return errors.New("cannot use -exec without providing a command (or vice versa)")
	}
	if _, ok := Filters[c.FrameFilter]; !ok {
		haveFilters := []string{}
		for f := range Filters {
			haveFilters = append(haveFilters, "\""+f+"\"")
		}
		return fmt.Errorf("-frame-filter \"%s\" not supported; try one of %v", c.FrameFilter, haveFilters)
	}
	return nil
}
Example #15
func init() {
	// Override the default cAdvisor housekeeping interval.
	if f := flag.Lookup("housekeeping_interval"); f != nil {
		f.DefValue = defaultHousekeepingInterval.String()
		f.Value.Set(f.DefValue)
	}
}
Example #16
func mustLookup(flag_name string) *flag.Flag {
	val := flag.Lookup(flag_name)
	if val == nil {
		panic(fmt.Errorf("flag %#v doesn't exist", flag_name))
	}
	return val
}
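A hypothetical call site (the "log_dir" flag name is only an illustration) turns a missing flag registration into an immediate, descriptive panic instead of a later nil dereference:

func currentLogDir() string {
	// Panics via mustLookup if no "log_dir" flag was ever registered.
	return mustLookup("log_dir").Value.String()
}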
Example #17
func runDiscoveryServer(t *testing.T, serverType taas.ServerType, authType taas.AuthType) *taas.Server {
	// TODO: we can avoid setting the port manually when appc/spec gains
	// the ability to specify ports for discovery.
	// See https://github.com/appc/spec/pull/110
	//
	// httptest by default uses random ports. We override this via the
	// "httptest.serve" flag.
	//
	// As long as we set the port via the "httptest.serve" flag, we have
	// to use https rather than http because httptest.Start() would wait
	// forever in "select {}", see
	// https://golang.org/src/net/http/httptest/server.go?s=2768:2792#L92
	//
	// This means this test must:
	// - use https only
	// - ignore tls errors with --insecure-options=tls
	serverURL := flag.Lookup("httptest.serve")
	if serverURL == nil {
		panic("could not find the httptest.serve flag")
	}
	serverURL.Value.Set("127.0.0.1:443")
	// reset httptest.serve to "" so we don't influence other tests
	defer serverURL.Value.Set("")
	return runServer(t, serverType, authType)
}
Example #18
func GetStartCommand(kubernetesConfig KubernetesConfig) string {
	flagVals := make([]string, 0, len(constants.LogFlags))
	for _, logFlag := range constants.LogFlags {
		if logVal := gflag.Lookup(logFlag); logVal != nil && logVal.Value.String() != logVal.DefValue {
			flagVals = append(flagVals, fmt.Sprintf("--%s %s", logFlag, logVal.Value.String()))
		}
	}

	if kubernetesConfig.ContainerRuntime != "" {
		flagVals = append(flagVals, "--container-runtime="+kubernetesConfig.ContainerRuntime)
	}

	if kubernetesConfig.NetworkPlugin != "" {
		flagVals = append(flagVals, "--network-plugin="+kubernetesConfig.NetworkPlugin)
	}

	flags := strings.Join(flagVals, " ")

	return fmt.Sprintf(
		startCommandFmtStr,
		flags,
		kubernetesConfig.NodeIP,
		constants.RemoteLocalKubeErrPath,
		constants.RemoteLocalKubeOutPath,
		constants.LocalkubePIDPath,
	)
}
Example #19
func TestMain(m *testing.M) {
	flag.Parse()
	if flag.Lookup("test.short").Value.String() != "false" {
		quickCfg.MaxCount = 10
	}
	os.Exit(m.Run())
}
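The same intent can be expressed with the helper the testing package provides; a sketch under the same assumptions (the package-level quickCfg) looks like:

func TestMain(m *testing.M) {
	flag.Parse()
	// testing.Short reports whether -test.short was set, replacing the
	// string comparison against the flag's value.
	if testing.Short() {
		quickCfg.MaxCount = 10
	}
	os.Exit(m.Run())
}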
Example #20
// DEPRECATED_myDpmsControl detects motion and turns the screen of the external
// display on or off by running dpms shell commands.
func (m *Viki) DEPRECATED_myDpmsControl(c chan devicemanager.DeviceData) {

	log.Printf("starting user routine dpms control...")
	res := flag.Lookup("resource").Value.String()
	screenOn := false
	for {
		select {
		case got := <-c:
			d, _ := got.Data.(string)
			_, o := m.getObject(got.Object)

			// Got some motion.
			if o.checkTag("motion") && d == "On" && !screenOn {
				// Turn on screen.
				if err := exec.Command(res + "/dpmsoff.sh").Run(); err != nil {
					log.Printf("error running dpms off %s ", err)
					continue
				}
				screenOn = true
				time.AfterFunc(60*time.Minute, func() {
					// Turn off screen.
					if err := exec.Command(res + "/dpmson.sh").Run(); err != nil {
						log.Printf("error running dpms on %s", err)
						return
					}
					screenOn = false
				})
			}
		}
	}
}
Example #21
func main() {
	flag.Parse()

	flagsOk := true
	for _, f := range []string{"project", "dataset", "table", "bucket", "object"} {
		if flag.Lookup(f).Value.String() == "" {
			fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
			flagsOk = false
		}
	}
	if !flagsOk {
		os.Exit(1)
	}

	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, *project)
	if err != nil {
		log.Fatalf("Creating bigquery client: %v", err)
	}

	destTable := &bigquery.Table{
		ProjectID: *project,
		DatasetID: *dataset,
		TableID:   *table,
	}

	gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object))
	gcs.SkipLeadingRows = *skiprows

	// Load data from Google Cloud Storage into a BigQuery table.
	job, err := client.Copy(
		ctx, destTable, gcs,
		bigquery.MaxBadRecords(1),
		bigquery.AllowQuotedNewlines(),
		bigquery.WriteTruncate)

	if err != nil {
		log.Fatalf("Loading data: %v", err)
	}

	fmt.Printf("Job for data load operation: %+v\n", job)
	fmt.Printf("Waiting for job to complete.\n")

	for range time.Tick(*pollint) {
		status, err := job.Status(ctx)
		if err != nil {
			fmt.Printf("Failure determining status: %v", err)
			break
		}
		if !status.Done() {
			continue
		}
		if err := status.Err(); err == nil {
			fmt.Printf("Success\n")
		} else {
			fmt.Printf("Failure: %+v\n", err)
		}
		break
	}
}
Example #22
func main() {
	flag.Parse()

	flagsOk := true
	for _, f := range []string{"project", "dataset", "q"} {
		if flag.Lookup(f).Value.String() == "" {
			fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
			flagsOk = false
		}
	}
	if !flagsOk {
		os.Exit(1)
	}

	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, *project)
	if err != nil {
		log.Fatalf("Creating bigquery client: %v", err)
	}

	query := client.Query(*q)
	query.DefaultProjectID = *project
	query.DefaultDatasetID = *dataset
	query.WriteDisposition = bigquery.WriteTruncate

	if *dest != "" {
		query.Dst = client.Dataset(*dataset).Table(*dest)
	}

	// Query data.
	job, err := query.Run(ctx)

	if err != nil {
		log.Fatalf("Querying: %v", err)
	}

	fmt.Printf("Submitted query. Job ID: %s\n", job.ID())
	if !*wait {
		return
	}

	fmt.Printf("Waiting for job to complete.\n")

	for range time.Tick(*pollint) {
		status, err := job.Status(ctx)
		if err != nil {
			fmt.Printf("Failure determining status: %v", err)
			break
		}
		if !status.Done() {
			continue
		}
		if err := status.Err(); err == nil {
			fmt.Printf("Success\n")
		} else {
			fmt.Printf("Failure: %+v\n", err)
		}
		break
	}
}
Example #23
func prepareExecutorInfo(id string) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: execUri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d", execCmd, v)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Create mesos scheduler driver.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
	}
}
Example #24
func main() {
	defer common.LogPanic()
	common.Init()

	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}

	if *dryRun {
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}
	if *local {
		frontend.InitForTesting("http://localhost:8000/")
	} else {
		frontend.MustInit()
	}

	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
Example #25
// GetSubprocessFlags returns the list of flags to use to have subprocesses
// log in the same directory as the current process.
func GetSubprocessFlags() []string {
	logDir := flag.Lookup("log_dir")
	if logDir == nil {
		panic("the logging module doesn't specify a log_dir flag")
	}
	return []string{"-log_dir", logDir.Value.String()}
}
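A hypothetical caller (the executable path and the extra argument are assumptions for illustration) splices the returned flags into the child process invocation:

func startWorker() error {
	args := append([]string{"-role=worker"}, GetSubprocessFlags()...)
	// The child process logs into the same directory as the parent.
	return exec.Command("/usr/local/bin/worker", args...).Start()
}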
Example #26
func prepareExecutorInfo(gt net.Addr) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri := serveSelf()
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	nodeCommand := fmt.Sprintf("./executor -logtostderr=true -v=%d -node -tracerAddr %s", v, gt.String())
	log.V(2).Info("nodeCommand: ", nodeCommand)

	// Create mesos scheduler driver.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("visghs-node"),
		Source:     proto.String("visghs"),
		Command: &mesos.CommandInfo{
			Value: proto.String(nodeCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
Example #27
func Test(t *testing.T) {
	check.TestingT(t)
	if suitesRun != suitesRunExpected && flag.Lookup("check.g").Value.String() == "" {
		critical(fmt.Sprintf("Expected %d suites to run rather than %d",
			suitesRunExpected, suitesRun))
	}
}
Example #28
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d -slow_tasks=%v", executorCmd, v, *slowTasks)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Create mesos scheduler driver.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
Example #29
func TestDeletingImagePruner(t *testing.T) {
	flag.Lookup("v").Value.Set(fmt.Sprint(*logLevel))

	tests := map[string]struct {
		imageDeletionError error
	}{
		"no error": {},
		"delete error": {
			imageDeletionError: fmt.Errorf("foo"),
		},
	}

	for name, test := range tests {
		imageClient := testclient.Fake{}
		imageClient.SetErr(test.imageDeletionError)
		imagePruner := NewDeletingImagePruner(imageClient.Images())
		err := imagePruner.PruneImage(&imageapi.Image{ObjectMeta: kapi.ObjectMeta{Name: "id2"}})
		if test.imageDeletionError != nil {
			if e, a := test.imageDeletionError, err; e != a {
				t.Errorf("%s: err: expected %v, got %v", name, e, a)
			}
			continue
		}

		if e, a := 1, len(imageClient.Actions()); e != a {
			t.Errorf("%s: expected %d actions, got %d: %#v", name, e, a, imageClient.Actions())
			continue
		}

		if !imageClient.Actions()[0].Matches("delete", "images") {
			t.Errorf("%s: expected action %s, got %v", name, "delete-images", imageClient.Actions()[0])
		}
	}
}
Example #30
// startNode starts a Docker container to run testNode. It may be called in
// parallel to start many nodes at once, and thus should remain threadsafe.
func (l *LocalCluster) startNode(ctx context.Context, node *testNode) {
	cmd := []string{
		"start",
		"--ca-cert=/certs/ca.crt",
		"--cert=/certs/node.crt",
		"--key=/certs/node.key",
		"--host=" + node.nodeStr,
		"--verbosity=1",
	}

	// Forward the vmodule flag to the nodes.
	vmoduleFlag := flag.Lookup(logflags.VModuleName)
	if vmoduleFlag.Value.String() != "" {
		cmd = append(cmd, fmt.Sprintf("--%s=%s", vmoduleFlag.Name, vmoduleFlag.Value.String()))
	}

	for _, store := range node.stores {
		storeSpec := base.StoreSpec{
			Path:        store.dataStr,
			SizeInBytes: int64(store.config.MaxRanges) * maxRangeBytes,
		}
		cmd = append(cmd, fmt.Sprintf("--store=%s", storeSpec))
	}
	// Append --join flag for all nodes except first.
	if node.index > 0 {
		cmd = append(cmd, "--join="+net.JoinHostPort(l.Nodes[0].nodeStr, base.DefaultPort))
	}

	var locallogDir string
	if len(l.logDir) > 0 {
		dockerlogDir := "/logs/" + node.nodeStr
		locallogDir = filepath.Join(l.logDir, node.nodeStr)
		maybePanic(os.MkdirAll(locallogDir, 0777))
		cmd = append(
			cmd,
			"--alsologtostderr=ERROR",
			"--log-dir="+dockerlogDir)
	} else {
		cmd = append(cmd, "--alsologtostderr=INFO")
	}
	env := []string{
		"COCKROACH_SCAN_MAX_IDLE_TIME=200ms",
		"COCKROACH_CONSISTENCY_CHECK_PANIC_ON_FAILURE=true",
		"COCKROACH_SKIP_UPDATE_CHECK=1",
	}
	l.createRoach(ctx, node, l.vols, env, cmd...)
	maybePanic(node.Start(ctx))
	httpAddr := node.Addr(ctx, defaultHTTP)

	log.Infof(ctx, `*** started %[1]s ***
  ui:        %[2]s
  trace:     %[2]s/debug/requests
  logs:      %[3]s/cockroach.INFO
  pprof:     docker exec -it %[4]s /bin/bash -c 'go tool pprof /cockroach <(wget --no-check-certificate -qO- https://$(hostname):%[5]s/debug/pprof/heap)'
  cockroach: %[6]s

  cli-env:   COCKROACH_INSECURE=false COCKROACH_CA_CERT=%[7]s/ca.crt COCKROACH_CERT=%[7]s/node.crt COCKROACH_KEY=%[7]s/node.key COCKROACH_HOST=%s COCKROACH_PORT=%d`,
		node.Name(), "https://"+httpAddr.String(), locallogDir, node.Container.id[:5],
		base.DefaultHTTPPort, cmd, l.CertsDir, httpAddr.IP, httpAddr.Port)
}