Example #1
func Validate(c *cli.Context, startTime time.Time) {
	gossConfig := getGossConfig(c)
	sys := system.New(c)
	outputer := getOutputer(c)

	sleep := c.Duration("sleep")
	retryTimeout := c.Duration("retry-timeout")
	i := 1
	for {
		iStartTime := time.Now()
		out := validate(sys, gossConfig, c.Int("max-concurrent"))
		exitCode := outputer.Output(os.Stdout, out, iStartTime)
		if retryTimeout == 0 || exitCode == 0 {
			os.Exit(exitCode)
		}
		elapsed := time.Since(startTime)
		if elapsed+sleep > retryTimeout {
			color.Red("\nERROR: Timeout of %s reached before tests entered a passing state", retryTimeout)
			os.Exit(3)
		}
		color.Red("Retrying in %s (elapsed/timeout time: %.3fs/%s)\n\n\n", sleep, elapsed.Seconds(), retryTimeout)
		// Reset cache
		sys = system.New(c)
		time.Sleep(sleep)
		i++
		fmt.Printf("Attempt #%d:\n", i)
	}
}
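The sleep and retry-timeout values consumed above come from urfave/cli duration flags. A minimal sketch of how such flags could be wired up, assuming the v1 API this example uses (the defaults and usage strings are illustrative, not goss's actual values):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// Read back in Validate via c.Duration("sleep").
		cli.DurationFlag{Name: "sleep", Value: 1 * time.Second, Usage: "pause between retries"},
		// Zero disables the retry loop (see the retryTimeout == 0 check).
		cli.DurationFlag{Name: "retry-timeout", Value: 0, Usage: "give up after this long"},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println(c.Duration("sleep"), c.Duration("retry-timeout"))
		return nil
	}
	app.Run(os.Args)
}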
Example #2
// Simple wrapper to add multiple resources
func AutoAddResources(fileName string, keys []string, c *cli.Context) error {
	setStoreFormatFromFileName(fileName)
	config := util.Config{
		IgnoreList: c.GlobalStringSlice("exclude-attr"),
		Timeout:    int(c.Duration("timeout") / time.Millisecond),
	}

	var gossConfig GossConfig
	if _, err := os.Stat(fileName); err == nil {
		gossConfig = ReadJSON(fileName)
	} else {
		gossConfig = *NewGossConfig()
	}

	sys := system.New(c)

	for _, key := range keys {
		if err := AutoAddResource(fileName, gossConfig, key, c, config, sys); err != nil {
			return err
		}
	}
	WriteJSON(fileName, gossConfig)

	return nil
}
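The Timeout field shows the standard way to turn a time.Duration flag into an integer unit count: dividing a Duration by time.Millisecond yields whole milliseconds. In isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	// Duration is an int64 count of nanoseconds, so dividing by a unit
	// constant converts to that unit, truncating any remainder.
	fmt.Println(int(d / time.Millisecond)) // 1500
}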
Example #3
func cmdServe(c *cli.Context) error {
	testSvc := ble.NewService(lib.TestSvcUUID)
	testSvc.AddCharacteristic(lib.NewCountChar())
	testSvc.AddCharacteristic(lib.NewEchoChar())

	if err := ble.AddService(testSvc); err != nil {
		return errors.Wrap(err, "can't add service")
	}

	fmt.Printf("Serving GATT Server for %s...\n", c.Duration("tmo"))
	ctx := ble.WithSigHandler(context.WithTimeout(context.Background(), c.Duration("tmo")))
	return chkErr(ble.AdvertiseNameAndServices(ctx, "Gopher", testSvc.UUID))
}
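ble.WithSigHandler is handed the two values returned by context.WithTimeout; Go forwards a multi-valued call directly when the arities and types line up. A hypothetical stand-in, under the assumption that the helper takes the (ctx, cancel) pair and cancels on an interrupt signal:

package main

import (
	"context"
	"os"
	"os/signal"
	"time"
)

// withSigHandler is a sketch, not ble's implementation: it cancels the
// context on SIGINT so timed operations abort cleanly on Ctrl-C.
func withSigHandler(ctx context.Context, cancel func()) context.Context {
	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, os.Interrupt)
		<-ch
		cancel()
	}()
	return ctx
}

func main() {
	ctx := withSigHandler(context.WithTimeout(context.Background(), 5*time.Second))
	<-ctx.Done() // timeout or Ctrl-C, whichever comes first
}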
Example #4
func pollOAuthConfirmation(context *cli.Context, deviceCode string, interval int) (*http.Client, string) {
	config := oauth2.Config{
		ClientID:     context.String("gcp-oauth-client-id"),
		ClientSecret: context.String("gcp-oauth-client-secret"),
		Endpoint: oauth2.Endpoint{
			AuthURL:  lib.DefaultConfig.GCPOAuthAuthURL,
			TokenURL: lib.DefaultConfig.GCPOAuthTokenURL,
		},
		RedirectURL: gcp.RedirectURL,
		Scopes:      []string{gcp.ScopeCloudPrint},
	}

	for {
		time.Sleep(time.Duration(interval) * time.Second)

		form := url.Values{
			"client_id":     {context.String("gcp-oauth-client-id")},
			"client_secret": {context.String("gcp-oauth-client-secret")},
			"code":          {deviceCode},
			"grant_type":    {gcpOAuthGrantTypeDevice},
		}
		response, err := postWithRetry(gcpOAuthTokenPollURL, form)
		if err != nil {
			log.Fatalln(err)
		}

		var r struct {
			Error        string `json:"error"`
			AccessToken  string `json:"access_token"`
			ExpiresIn    int    `json:"expires_in"`
			RefreshToken string `json:"refresh_token"`
		}
		if err := json.NewDecoder(response.Body).Decode(&r); err != nil {
			log.Fatalln(err)
		}
		response.Body.Close()

		switch r.Error {
		case "":
			token := &oauth2.Token{RefreshToken: r.RefreshToken}
			client := config.Client(oauth2.NoContext, token)
			client.Timeout = context.Duration("gcp-api-timeout")
			return client, r.RefreshToken
		case "authorization_pending":
		case "slow_down":
			interval *= 2
		default:
			log.Fatalln(err)
		}
	}

	panic("unreachable")
}
Example #5
// getUserClientFromToken creates a user client with just a refresh token.
func getUserClientFromToken(context *cli.Context) *http.Client {
	config := &oauth2.Config{
		ClientID:     context.String("gcp-oauth-client-id"),
		ClientSecret: context.String("gcp-oauth-client-secret"),
		Endpoint: oauth2.Endpoint{
			AuthURL:  lib.DefaultConfig.GCPOAuthAuthURL,
			TokenURL: lib.DefaultConfig.GCPOAuthTokenURL,
		},
		RedirectURL: gcp.RedirectURL,
		Scopes:      []string{gcp.ScopeCloudPrint},
	}

	token := &oauth2.Token{RefreshToken: context.String("gcp-user-refresh-token")}
	client := config.Client(oauth2.NoContext, token)
	client.Timeout = context.Duration("gcp-api-timeout")

	return client
}
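oauth2.NoContext, used in both functions above, is deprecated in golang.org/x/oauth2; context.Background() is the drop-in replacement. A sketch of the same client construction with the modern call (the timeout literal stands in for the gcp-api-timeout flag):

// clientFromRefreshToken mirrors getUserClientFromToken without NoContext.
func clientFromRefreshToken(config *oauth2.Config, refreshToken string) *http.Client {
	token := &oauth2.Token{RefreshToken: refreshToken}
	client := config.Client(context.Background(), token)
	client.Timeout = 30 * time.Second // stand-in for context.Duration("gcp-api-timeout")
	return client
}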
Example #6
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Config: Config{
			Key:     c.String("ssh-key"),
			User:    c.String("user"),
			Host:    c.StringSlice("host"),
			Port:    c.Int("port"),
			Sleep:   c.Int("sleep"),
			Timeout: c.Duration("timeout"),
			Script:  c.StringSlice("script"),
		},
	}

	return plugin.Exec()
}
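Plugins in this style are normally driven by environment variables injected by the CI runner, which urfave/cli v1 supports through the EnvVar field on each flag. A hedged sketch of how the timeout flag above might be declared (the PLUGIN_* names are assumptions, not this plugin's documented variables):

app.Flags = []cli.Flag{
	cli.DurationFlag{
		Name:   "timeout",
		Usage:  "connection timeout",
		EnvVar: "PLUGIN_TIMEOUT", // hypothetical variable name
		Value:  60 * time.Second,
	},
	cli.StringSliceFlag{
		Name:   "host",
		Usage:  "target hosts",
		EnvVar: "PLUGIN_HOST",
	},
}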
Example #7
func runAgent(c *cli.Context) error {
	a := agent.Agent{
		DockerAddress: c.String("docker"),
		FusisAddress:  c.String("fusis-addr"),
		LabelFilter:   c.String("label-filter"),
		Interval:      c.Duration("interval"),
	}
	if a.FusisAddress == "" {
		return cli.NewExitError("Parameter --fusis-addr is mandatory", 1)
	}
	err := a.Init()
	if err != nil {
		return cli.NewExitError(err.Error(), 1)
	}
	handleSignals(&a)
	log.Print("Running agent...")
	a.Start()
	a.Wait()
	return nil
}
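cli.NewExitError pairs a message with an exit code; in urfave/cli v1, App.Run routes the action's error through HandleExitCoder, so returning the error both prints the message and sets the process exit status. A minimal sketch:

package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		// Printed to stderr; the process then exits with status 1.
		return cli.NewExitError("Parameter --fusis-addr is mandatory", 1)
	}
	app.Run(os.Args)
}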
Example #8
func ProxyAction(c *cli.Context) error {
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		err := HealthCheck("tcp://127.0.0.1:2379", 5*time.Second)

		if err == nil {
			fmt.Fprintf(w, "OK")
			if c.Bool("debug") {
				log.Println("OK")
			}
		} else {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			if c.Bool("debug") {
				log.Println(err.Error())
			}
		}
	})

	time.Sleep(c.Duration("wait"))
	// TODO (llparse): determine when raft index has caught up with other nodes in metadata
	return http.ListenAndServe(c.String("port"), nil)
}
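Because the handler registers on the default mux, it can be exercised without opening a socket via net/http/httptest. A sketch, assuming the HandleFunc call above has already run:

req := httptest.NewRequest("GET", "/health", nil)
rec := httptest.NewRecorder()
http.DefaultServeMux.ServeHTTP(rec, req)
fmt.Println(rec.Code) // 200 while etcd answers, 503 otherwise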
Example #9
func monitorConnector(context *cli.Context) {
	config, filename, err := lib.GetConfig(context)
	if err != nil {
		log.Fatalf("Failed to read config file: %s\n", err)
	}
	if filename == "" {
		fmt.Println("No config file was found, so using defaults")
	}

	if _, err := os.Stat(config.MonitorSocketFilename); err != nil {
		if !os.IsNotExist(err) {
			log.Fatalln(err)
		}
		log.Fatalf(
			"No connector is running, or the monitoring socket %s is mis-configured\n",
			config.MonitorSocketFilename)
	}

	timer := time.AfterFunc(context.Duration("monitor-timeout"), func() {
		log.Fatalf("Timeout after %s\n", context.Duration("monitor-timeout").String())
	})

	conn, err := net.DialTimeout("unix", config.MonitorSocketFilename, time.Second)
	if err != nil {
		log.Fatalf(
			"No connector is running, or it is not listening to socket %s\n",
			config.MonitorSocketFilename)
	}
	defer conn.Close()

	buf, err := ioutil.ReadAll(conn)
	if err != nil {
		log.Fatalln(err)
	}

	timer.Stop()

	fmt.Print(string(buf))
}
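The time.AfterFunc/Stop pair above is a watchdog: the timer terminates the process unless the socket read finishes first and disarms it. The pattern in isolation (doWork stands in for the DialTimeout and ReadAll calls):

timer := time.AfterFunc(5*time.Second, func() {
	log.Fatal("timed out waiting for the connector")
})
doWork()     // the guarded operation
timer.Stop() // disarm; the callback never fires once we get here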
Example #10
func Serve(c *cli.Context) {
	endpoint := c.String("endpoint")
	color.NoColor = true
	cache := cache.New(c.Duration("cache"), 30*time.Second)

	health := healthHandler{
		c:             c,
		gossConfig:    getGossConfig(c),
		sys:           system.New(c),
		outputer:      getOutputer(c),
		cache:         cache,
		gossMu:        &sync.Mutex{},
		maxConcurrent: c.Int("max-concurrent"),
	}
	if c.String("format") == "json" {
		health.contentType = "application/json"
	}
	http.Handle(endpoint, health)
	listenAddr := c.String("listen-addr")
	log.Printf("Starting to listen on: %s", listenAddr)
	log.Fatal(http.ListenAndServe(listenAddr, nil))
}
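The cache here appears to be github.com/patrickmn/go-cache, whose New takes a default expiration followed by a cleanup (janitor) interval, so results expire after c.Duration("cache") and expired entries are swept every 30 seconds. Basic usage for reference, assuming that package:

c := cache.New(5*time.Minute, 30*time.Second)
c.Set("result", out, cache.DefaultExpiration) // expires after five minutes
if v, found := c.Get("result"); found {
	_ = v // cache hit
}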
Example #11
func ProxyAction(c *cli.Context) error {
	SetLoggingLevel(c.Bool("debug"))

	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		err := HealthCheck("tcp://127.0.0.1:2379", 5*time.Second)

		if err == nil {
			fmt.Fprintf(w, "OK")
			log.Debug("HealthCheck succeeded")

		} else {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			log.WithFields(log.Fields{
				"error": err.Error(),
			}).Debug("HealthCheck failed")
		}
	})

	time.Sleep(c.Duration("wait"))
	// TODO (llparse): determine when raft index has caught up with other nodes in metadata
	return http.ListenAndServe(c.String("port"), nil)
}
Example #12
func RollingBackupAction(c *cli.Context) error {
	SetLoggingLevel(c.Bool("debug"))

	backupPeriod := c.Duration("period")
	retentionPeriod := c.Duration("retention")
	index := c.Int("index")

	log.WithFields(log.Fields{
		"period":    backupPeriod,
		"retention": retentionPeriod,
	}).Info("Initializing Rolling Backups")

	backupTicker := time.NewTicker(backupPeriod)
	for backupTime := range backupTicker.C {
		CreateBackup(backupTime, index)
		DeleteBackups(backupTime, retentionPeriod)
	}
	return nil
}
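One caveat with the loop above: time.NewTicker panics when the period is zero or negative, which a missing --period flag would produce, so a guard before creating the ticker is prudent (sketch):

if backupPeriod <= 0 {
	return fmt.Errorf("--period must be positive, got %s", backupPeriod)
}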
Example #13
func cmdConnect(c *cli.Context) error {
	curr.client = nil

	var cln ble.Client
	var err error

	ctx := ble.WithSigHandler(context.WithTimeout(context.Background(), c.Duration("tmo")))
	if c.String("addr") != "" {
		curr.addr = ble.NewAddr(c.String("addr"))
		fmt.Printf("Dialing to specified address: %s\n", curr.addr)
		cln, err = ble.Dial(ctx, curr.addr)
	} else if filter(c) != nil {
		fmt.Printf("Scanning with filter...\n")
		if cln, err = ble.Connect(ctx, filter(c)); err == nil {
			curr.addr = cln.Address()
			fmt.Printf("Connected to %s\n", curr.addr)

		}
	} else if curr.addr != nil {
		fmt.Printf("Dialing to implicit address: %s\n", curr.addr)
		cln, err = ble.Dial(ctx, curr.addr)
	} else {
		return fmt.Errorf("no filter specified, and cached peripheral address")
	}
	if err == nil {
		curr.client = cln
		curr.clients[cln.Address().String()] = cln
		// Only start the reaper once a connection exists; cln would be
		// nil here if Dial/Connect failed.
		go func() {
			<-cln.Disconnected()
			delete(curr.clients, cln.Address().String())
			curr.client = nil
			fmt.Printf("\n%s disconnected\n", cln.Address().String())
		}()
	}
	return err
}
Example #14
func manage(c *cli.Context) {
	var (
		tlsConfig *tls.Config
		err       error
	)

	// If either --tls or --tlsverify are specified, load the certificates.
	if c.Bool("tls") || c.Bool("tlsverify") {
		if !c.IsSet("tlscert") || !c.IsSet("tlskey") {
			log.Fatal("--tlscert and --tlskey must be provided when using --tls")
		}
		if c.Bool("tlsverify") && !c.IsSet("tlscacert") {
			log.Fatal("--tlscacert must be provided when using --tlsverify")
		}
		tlsConfig, err = loadTLSConfig(
			c.String("tlscacert"),
			c.String("tlscert"),
			c.String("tlskey"),
			c.Bool("tlsverify"))
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Otherwise, if neither --tls nor --tlsverify are specified, abort if
		// the other flags are passed as they will be ignored.
		if c.IsSet("tlscert") || c.IsSet("tlskey") || c.IsSet("tlscacert") {
			log.Fatal("--tlscert, --tlskey and --tlscacert require the use of either --tls or --tlsverify")
		}
	}

	refreshMinInterval := c.Duration("engine-refresh-min-interval")
	refreshMaxInterval := c.Duration("engine-refresh-max-interval")
	if refreshMinInterval <= 0 {
		log.Fatal("min refresh interval should be a positive number")
	}
	if refreshMaxInterval < refreshMinInterval {
		log.Fatal("max refresh interval cannot be less than min refresh interval")
	}
	// engine-refresh-retry is deprecated; its default is 3, so any other
	// value means the flag was set explicitly.
	refreshRetry := c.Int("engine-refresh-retry")
	if refreshRetry != 3 {
		log.Fatal("--engine-refresh-retry is deprecated. Use --engine-failure-retry")
	}
	failureRetry := c.Int("engine-failure-retry")
	if failureRetry <= 0 {
		log.Fatal("invalid failure retry count")
	}
	engineOpts := &cluster.EngineOpts{
		RefreshMinInterval: refreshMinInterval,
		RefreshMaxInterval: refreshMaxInterval,
		FailureRetry:       failureRetry,
	}

	uri := getDiscovery(c)
	if uri == "" {
		log.Fatalf("discovery required to manage a cluster. See '%s manage --help'.", c.App.Name)
	}
	discovery := createDiscovery(uri, c)
	s, err := strategy.New(c.String("strategy"))
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/urfave/cli/issues/160
	names := c.StringSlice("filter")
	if c.IsSet("filter") || c.IsSet("f") {
		names = names[DefaultFilterNumber:]
	}
	fs, err := filter.New(names)
	if err != nil {
		log.Fatal(err)
	}

	sched := scheduler.New(s, fs)
	var cl cluster.Cluster
	switch c.String("cluster-driver") {
	case "mesos-experimental":
		log.Warn("WARNING: the mesos driver is currently experimental, use at your own risks")
		cl, err = mesos.NewCluster(sched, tlsConfig, uri, c.StringSlice("cluster-opt"), engineOpts)
	case "swarm":
		cl, err = swarm.NewCluster(sched, tlsConfig, discovery, c.StringSlice("cluster-opt"), engineOpts)
	default:
		log.Fatalf("unsupported cluster %q", c.String("cluster-driver"))
	}
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/urfave/cli/issues/160
	hosts := c.StringSlice("host")
	if c.IsSet("host") || c.IsSet("H") {
		hosts = hosts[1:]
	}

	server := api.NewServer(hosts, tlsConfig)
	if c.Bool("replication") {
		addr := c.String("advertise")
		if addr == "" {
			log.Fatal("--advertise address must be provided when using --leader-election")
		}
		if !checkAddrFormat(addr) {
			log.Fatal("--advertise should be of the form ip:port or hostname:port")
		}
		leaderTTL, err := time.ParseDuration(c.String("replication-ttl"))
		if err != nil {
			log.Fatalf("invalid --replication-ttl: %v", err)
		}
		if leaderTTL <= 0 {
			log.Fatalf("--replication-ttl should be a positive number")
		}

		setupReplication(c, cl, server, discovery, addr, leaderTTL, tlsConfig)
	} else {
		server.SetHandler(api.NewPrimary(cl, tlsConfig, &statusHandler{cl, nil, nil}, c.GlobalBool("debug"), c.Bool("cors")))
		cluster.NewWatchdog(cl)
	}

	log.Fatal(server.ListenAndServe())
}
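Note that --replication-ttl, unlike the other intervals here, arrives as a string and is parsed by hand; time.ParseDuration accepts compound forms such as "30s" or "1m30s". For instance:

ttl, err := time.ParseDuration("1m30s")
fmt.Println(ttl, err) // 1m30s <nil>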
Example #15
func cmdScan(c *cli.Context) error {
	fmt.Printf("Scanning for %s...\n", c.Duration("tmo"))
	ctx := ble.WithSigHandler(context.WithTimeout(context.Background(), c.Duration("tmo")))
	return chkErr(ble.Scan(ctx, c.Bool("dup"), advHandler, filter(c)))
}
Example #16
func cmdAdv(c *cli.Context) error {
	fmt.Printf("Advertising for %s...\n", c.Duration("tmo"))
	ctx := ble.WithSigHandler(context.WithTimeout(context.Background(), c.Duration("tmo")))
	return chkErr(ble.AdvertiseNameAndServices(ctx, "Gopher"))
}
Example #17
//
// getFiles retrieve files from bucket
//
func getFiles(o *formatter, cx *cli.Context, cmd *cliCommand) error {
	var err error

	// step: get the command line options
	bucket := cx.String("bucket")
	directory := cx.String("output-dir")
	flatten := cx.Bool("flatten")
	recursive := cx.Bool("recursive")
	syncEnabled := cx.Bool("sync")
	syncInterval := cx.Duration("sync-interval")

	// step: validate the filter if any
	var filter *regexp.Regexp
	if filter, err = regexp.Compile(cx.String("filter")); err != nil {
		return fmt.Errorf("filter: %s is invalid, message: %s", cx.String("filter"), err)
	}

	// step: create the output directory if required
	if err = os.MkdirAll(directory, 0755); err != nil {
		return err
	}

	// step: create a signal to handle exits and a ticker for intervals
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	// a 1ns ticker fires almost immediately; it is replaced with the real
	// sync interval after the first pass (see below)
	tickerCh := time.NewTicker(1)
	exitCh := make(chan error, 1)
	firstTime := true

	// step: create a map for etags - used to maintain the etags of the files
	fileTags := make(map[string]string)

	for {
		select {
		case err = <-exitCh:
			return err
		case <-tickerCh.C:
			if firstTime {
				tickerCh.Stop()
				tickerCh = time.NewTicker(syncInterval)
				firstTime = false
			}
			// step: iterate the paths specified on the command line
			err := func() error {
				for _, bucketPath := range getPaths(cx) {
					path := strings.TrimPrefix(bucketPath, "/")
					// step: retrieve a list of files under this path
					list, err := cmd.listBucketKeys(bucket, path)
					if err != nil {
						o.fields(map[string]interface{}{
							"bucket": bucket,
							"path":   path,
							"error":  err.Error(),
						}).log("unable to retrieve a listing in bucket: %s, path: %s\n", bucket, path)

						return err
					}

					// step: iterate the files under the path
					for _, file := range list {
						keyName := strings.TrimPrefix(*file.Key, "/")
						// step: apply the filter and ignore everything we're not interested in
						if !filter.MatchString(keyName) {
							continue
						}
						// step: are we recursive? i.e. if not, check the file ends with the filename
						if !recursive && !strings.HasSuffix(path, keyName) {
							continue
						}

						// step: if we have download this file before, check the etag has changed
						if etag, found := fileTags[keyName]; found && etag == *file.ETag {
							continue // we can skip the file, nothing has changed
						}

						// step: are we flattening the files
						filename := fmt.Sprintf("%s/%s", directory, keyName)
						if flatten {
							filename = fmt.Sprintf("%s/%s", directory, filepath.Base(keyName))
						}

						// step: retrieve file and write the content to disk
						if err := processFile(filename, keyName, bucket, cmd); err != nil {
							o.fields(map[string]interface{}{
								"action":      "get",
								"bucket":      bucket,
								"destination": path,
								"error":       err.Error(),
							}).log("failed to retrieve file: %s, error: %s\n", keyName, err)

							return err
						}
						// step: update the filetags
						fileTags[keyName] = *file.ETag

						// step: add the log
						o.fields(map[string]interface{}{
							"action":      "get",
							"bucket":      bucket,
							"destination": filename,
							"etag":        file.ETag,
						}).log("retrieved the file: %s and wrote to: %s\n", keyName, filename)
					}
				}

				return nil
			}()
			// step: if we are not in a sync loop we can exit
			if !syncEnabled {
				exitCh <- err
			}
		case <-signalCh:
			o.log("exitting the synchronzition service")
			return nil
		}
	}
}
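The 1ns ticker above is a trick to make the first sync run immediately. An alternative that avoids swapping tickers mid-flight is to run once up front and then tick at the real interval (doSync stands in for the closure in the loop):

doSync()
ticker := time.NewTicker(syncInterval)
defer ticker.Stop()
for range ticker.C {
	doSync()
}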
Example #18
func runCert(ctx *cli.Context) error {
	if len(ctx.String("host")) == 0 {
		log.Fatal("Missing required --host parameter")
	}

	var priv interface{}
	var err error
	switch ctx.String("ecdsa-curve") {
	case "":
		priv, err = rsa.GenerateKey(rand.Reader, ctx.Int("rsa-bits"))
	case "P224":
		priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
	case "P256":
		priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	case "P384":
		priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	case "P521":
		priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	default:
		log.Fatalf("Unrecognized elliptic curve: %q", ctx.String("ecdsa-curve"))
	}
	if err != nil {
		log.Fatalf("Failed to generate private key: %s", err)
	}

	var notBefore time.Time
	if len(ctx.String("start-date")) == 0 {
		notBefore = time.Now()
	} else {
		notBefore, err = time.Parse("Jan 2 15:04:05 2006", ctx.String("start-date"))
		if err != nil {
			log.Fatalf("Failed to parse creation date: %s", err)
		}
	}

	notAfter := notBefore.Add(ctx.Duration("duration"))

	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		log.Fatalf("Failed to generate serial number: %s", err)
	}

	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{"Acme Co"},
			CommonName:   "Gogs",
		},
		NotBefore: notBefore,
		NotAfter:  notAfter,

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	hosts := strings.Split(ctx.String("host"), ",")
	for _, h := range hosts {
		if ip := net.ParseIP(h); ip != nil {
			template.IPAddresses = append(template.IPAddresses, ip)
		} else {
			template.DNSNames = append(template.DNSNames, h)
		}
	}

	if ctx.Bool("ca") {
		template.IsCA = true
		template.KeyUsage |= x509.KeyUsageCertSign
	}

	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
	if err != nil {
		log.Fatalf("Failed to create certificate: %s", err)
	}

	certOut, err := os.Create("cert.pem")
	if err != nil {
		log.Fatalf("Failed to open cert.pem for writing: %s", err)
	}
	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	certOut.Close()
	log.Println("Written cert.pem")

	keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Fatalf("Failed to open key.pem for writing: %v\n", err)
	}
	pem.Encode(keyOut, pemBlockForKey(priv))
	keyOut.Close()
	log.Println("Written key.pem")

	return nil
}