Example #1
func userGrantRevoke(c *cli.Context, grant bool) {
	roles := c.StringSlice("roles")
	if len(roles) == 0 {
		fmt.Fprintln(os.Stderr, "No roles specified; please use `--roles`")
		os.Exit(1)
	}

	ctx, cancel := contextWithTotalTimeout(c)
	defer cancel()

	api, user := mustUserAPIAndName(c)
	// Verify the user exists before granting or revoking roles; check the
	// returned error directly rather than testing the user value for nil.
	_, err := api.GetUser(ctx, user)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	if grant {
		_, err = api.GrantUser(ctx, user, roles)
	} else {
		_, err = api.RevokeUser(ctx, user, roles)
	}

	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	fmt.Printf("User %s updated\n", user)
}
Example #2
func runTemplate(c *cli.Context) error {
	data := NewData(c.StringSlice("datasource"))

	g := NewGomplate(data)
	g.RunTemplate(os.Stdin, os.Stdout)
	return nil
}
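These handlers are ordinary urfave/cli actions. For orientation, here is a minimal sketch of how such an action and its repeatable StringSlice flag are wired together (the app name and flag wiring are illustrative assumptions based on the urfave/cli v1 API, not the project's actual main):

package main

import (
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "gomplate" // hypothetical app name
	app.Flags = []cli.Flag{
		// A StringSliceFlag may be repeated on the command line; the
		// action reads every value back via c.StringSlice("datasource").
		cli.StringSliceFlag{
			Name:  "datasource, d",
			Usage: "data source in name=URL form (repeatable)",
		},
	}
	app.Action = runTemplate // the handler shown above
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}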
Example #3
func before(c *cli.Context) error {
	// Log level
	if logLevel, err := log.ParseLevel(c.String(LogLevelKey)); err != nil {
		log.Fatal("Failed to parse log level:", err)
	} else {
		log.SetLevel(logLevel)
	}

	if len(c.Args()) != 0 && c.Args().First() != "version" && !c.Bool(HelpKey) && !c.Bool(VersionKey) {
		if err := MachineWorkdir.Set(c.String(WorkdirKey)); err != nil {
			log.Fatalf("Failed to set MachineWorkdir: %s", err)
		}
		if MachineWorkdir.String() == "" {
			log.Fatalln("No Workdir specified!")
		}
	}
	MachineWorkdir.Freeze()

	if err := MachineConfigTypeID.Set(c.String(ConfigTypeIDParamKey)); err != nil {
		log.Fatalf("Failed to set MachineConfigTypeID: %s", err)
	}
	log.Debugf("MachineConfigTypeID: %s", MachineConfigTypeID)

	if err := MachineParamsAdditionalEnvs.Set(c.StringSlice(EnvironmentParamKey)); err != nil {
		log.Fatalf("Failed to set MachineParamsAdditionalEnvs: %s", err)
	}
	log.Debugf("MachineParamsAdditionalEnvs: %s", MachineParamsAdditionalEnvs)
	MachineParamsAdditionalEnvs.Freeze()

	return nil
}
Example #4
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Repo: Repo{
			Owner: c.String("repo.owner"),
			Name:  c.String("repo.name"),
		},
		Build: Build{
			Event: c.String("build.event"),
		},
		Commit: Commit{
			Ref: c.String("commit.ref"),
		},
		Config: Config{
			APIKey:     c.String("api-key"),
			Files:      c.StringSlice("files"),
			FileExists: c.String("file-exists"),
			Checksum:   c.StringSlice("checksum"),
			Draft:      c.Bool("draft"),
			BaseURL:    c.String("base-url"),
			UploadURL:  c.String("upload-url"),
		},
	}

	return plugin.Exec()
}
Example #5
func (cmd *Restart) Run(scope scope.Scope, c *cli.Context) {
	if err := net.VerifyLoginURL(cmd.network); err != nil {
		error_handler.ErrorExit(err)
	}
	parms := c.StringSlice("param")
	invoke(cmd.network, scope.Application, scope.Entity, restartEffector, parms)
}
Example #6
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Repo: Repo{
			Owner: c.String("repo.owner"),
			Name:  c.String("repo.name"),
		},
		Commit: Commit{
			Sha:    c.String("commit.sha"),
			Ref:    c.String("commit.ref"),
			Branch: c.String("commit.branch"),
			Author: c.String("commit.author"),
			Link:   c.String("commit.link"),
		},
		Build: Build{
			Number: c.Int("build.number"),
			Event:  c.String("build.event"),
			Status: c.String("build.status"),
			Deploy: c.String("build.deploy"),
			Link:   c.String("build.link"),
		},
		Config: Config{
			Webhook: c.StringSlice("webhook"),
		},
	}

	return plugin.Exec()
}
Example #7
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Repo: Repo{
			FullName: c.String("repo.fullname"),
			Owner:    c.String("repo.owner"),
			Name:     c.String("repo.name"),
			Link:     c.String("repo.link"),
		},
		Build: Build{
			Commit:    c.String("commit.sha"),
			Branch:    c.String("commit.branch"),
			Ref:       c.String("commit.ref"),
			Link:      c.String("commit.link"),
			Message:   c.String("commit.message"),
			Author:    c.String("commit.author.name"),
			Email:     c.String("commit.author.email"),
			Number:    c.Int("build.number"),
			Status:    c.String("build.status"),
			Event:     c.String("build.event"),
			Deploy:    c.String("build.deploy"),
			BuildLink: c.String("build.link"),
		},
		BuildLast: Build{
			Number: c.Int("prev.build.number"),
			Status: c.String("prev.build.status"),
			Commit: c.String("prev.commit.sha"),
		},
		Config: Config{
			Token:   c.String("token"),
			Channel: c.String("channel"),
			Mapping: c.String("mapping"),
			Success: MessageOptions{
				Username:         c.String("success_username"),
				Icon:             c.String("success_icon"),
				Template:         c.String("success_template"),
				ImageAttachments: c.StringSlice("success_image_attachments"),
			},
			Failure: MessageOptions{
				Username:         c.String("failure_username"),
				Icon:             c.String("failure_icon"),
				Template:         c.String("failure_template"),
				ImageAttachments: c.StringSlice("failure_image_attachments"),
			},
		},
	}

	return plugin.Exec()
}
Example #8
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Repos:  c.StringSlice("repositories"),
		Server: c.String("server"),
		Token:  c.String("token"),
		Fork:   c.Bool("fork"),
	}

	return plugin.Exec()
}
Example #9
func setEmptyNsMask(context *cli.Context, options *libcontainer.CriuOpts) error {
	var nsmask int

	for _, ns := range context.StringSlice("empty-ns") {
		f, exists := namespaceMapping[specs.NamespaceType(ns)]
		if !exists {
			return fmt.Errorf("namespace %q is not supported", ns)
		}
		nsmask |= f
	}

	options.EmptyNs = uint32(nsmask)
	return nil
}
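setEmptyNsMask reads from a package-level namespaceMapping table that is not shown in this snippet. A hedged sketch of its likely shape (the entries and constants are assumptions for illustration, not runc's exact table):

// Hypothetical mapping from OCI namespace names to CLONE_* bits; only
// namespaces supported by checkpoint/restore would actually appear here.
var namespaceMapping = map[specs.NamespaceType]int{
	specs.NetworkNamespace: syscall.CLONE_NEWNET,
}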
Example #10
func getDiscoveryOpt(c *cli.Context) map[string]string {
	// Process the store options
	options := map[string]string{}
	for _, option := range c.StringSlice("discovery-opt") {
		if !strings.Contains(option, "=") {
			log.Fatal("--discovery-opt must contain key=value strings")
		}
		kvpair := strings.SplitN(option, "=", 2)
		options[kvpair[0]] = kvpair[1]
	}
	if _, ok := options["kv.path"]; !ok {
		options["kv.path"] = "docker/swarm/nodes"
	}
	return options
}
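As a usage illustration with hypothetical flag values:

// --discovery-opt kv.cacertfile=/certs/ca.pem --discovery-opt kv.path=custom/nodes
// yields:
//   map[string]string{
//       "kv.cacertfile": "/certs/ca.pem",
//       "kv.path":       "custom/nodes",
//   }
// Omitting kv.path leaves the default "docker/swarm/nodes" in place.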
Example #11
// doMain is the main entry point for the cli action.
func doMain(c *cli.Context) error {

	var apache2 Apache2Plugin

	apache2.Host = c.String("http_host")
	apache2.Port = uint16(c.Int("http_port"))
	apache2.Path = c.String("status_page")
	apache2.Header = c.StringSlice("header")

	helper := mp.NewMackerelPlugin(apache2)
	helper.Tempfile = c.String("tempfile")

	helper.Run()
	return nil
}
Example #12
func (cmd *Entity) Run(scope scope.Scope, c *cli.Context) {
	if err := net.VerifyLoginURL(cmd.network); err != nil {
		error_handler.ErrorExit(err)
	}
	if c.NumFlags() > 0 && c.FlagNames()[0] == "children" {
		cmd.listentity(scope.Application, c.StringSlice("children")[0])
	} else if c.Args().Present() {
		cmd.show(scope.Application, c.Args().First())
	} else if scope.Entity == scope.Application {
		cmd.listapp(scope.Application)
	} else {
		cmd.listentity(scope.Application, scope.Entity)
	}
}
Example #13
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Config: Config{
			Key:     c.String("ssh-key"),
			User:    c.String("user"),
			Host:    c.StringSlice("host"),
			Port:    c.Int("port"),
			Sleep:   c.Int("sleep"),
			Timeout: c.Duration("timeout"),
			Script:  c.StringSlice("script"),
		},
	}

	return plugin.Exec()
}
Example #14
func run(c *cli.Context) error {
	logrus.WithFields(logrus.Fields{
		"Revision": revision,
	}).Info("Drone Terraform Plugin Version")

	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	remote := Remote{}
	if c.String("remote") != "" {
		if err := json.Unmarshal([]byte(c.String("remote")), &remote); err != nil {
			panic(err)
		}
	}

	var vars map[string]string
	if c.String("vars") != "" {
		if err := json.Unmarshal([]byte(c.String("vars")), &vars); err != nil {
			panic(err)
		}
	}
	var secrets map[string]string
	if c.String("secrets") != "" {
		if err := json.Unmarshal([]byte(c.String("secrets")), &secrets); err != nil {
			panic(err)
		}
	}

	plugin := Plugin{
		Config: Config{
			Remote:      remote,
			Plan:        c.Bool("plan"),
			Vars:        vars,
			Secrets:     secrets,
			Cacert:      c.String("ca_cert"),
			Sensitive:   c.Bool("sensitive"),
			RoleARN:     c.String("role_arn_to_assume"),
			RootDir:     c.String("root_dir"),
			Parallelism: c.Int("parallelism"),
			Targets:     c.StringSlice("targets"),
		},
	}

	return plugin.Exec()
}
Example #15
// doMain is the main entry point for the cli action.
func doMain(c *cli.Context) error {
	var linux LinuxPlugin

	typemap := map[string]bool{}
	types := c.StringSlice("type")
	// If no `type` is specified, fetch all metrics
	if len(types) == 0 {
		typemap["all"] = true
	} else {
		for _, t := range types {
			typemap[t] = true
		}
	}
	linux.Typemap = typemap
	helper := mp.NewMackerelPlugin(linux)
	helper.Tempfile = c.String("tempfile")

	helper.Run()
	return nil
}
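For reference, hypothetical invocations and the resulting Typemap (the binary name is illustrative):

// mackerel-plugin-linux --type cpu --type memory
//   -> linux.Typemap == map[string]bool{"cpu": true, "memory": true}
// mackerel-plugin-linux
//   -> linux.Typemap == map[string]bool{"all": true}, fetching all metrics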
Example #16
func handleImportSnap(c *cli.Context) error {
	d, err := ioutil.ReadFile(c.String("snap"))
	if err != nil {
		if c.String("snap") == "" {
			fmt.Printf("no snapshot file provided (use --snap)\n")
		} else {
			fmt.Printf("cannot read snapshot file %s\n", c.String("snap"))
		}
		os.Exit(1)
	}

	st := store.New()
	if err = st.Recovery(d); err != nil {
		handleError(ExitServerError, err)
	}

	wg := &sync.WaitGroup{}
	setc := make(chan set)
	concurrent := c.Int("c")
	fmt.Printf("starting to import snapshot %s with %d clients\n", c.String("snap"), concurrent)
	for i := 0; i < concurrent; i++ {
		go runSet(mustNewKeyAPI(c), setc, wg)
	}

	all, err := st.Get("/", true, true)
	if err != nil {
		handleError(ExitServerError, err)
	}
	n := copyKeys(all.Node, setc)

	hiddens := c.StringSlice("hidden")
	for _, h := range hiddens {
		allh, err := st.Get(h, true, true)
		if err != nil {
			handleError(ExitServerError, err)
		}
		n += copyKeys(allh.Node, setc)
	}
	close(setc)
	wg.Wait()
	fmt.Printf("finished importing %d keys\n", n)
	return nil
}
Example #17
// NewVisualisationURLOptions returns a new doarama.VisualisationURLOptions
// from c.
func NewVisualisationURLOptions(c *cli.Context) *doarama.VisualisationURLOptions {
	vuo := &doarama.VisualisationURLOptions{}
	if name := c.StringSlice("name"); name != nil {
		vuo.Names = name
	}
	if avatar := c.StringSlice("avatar"); avatar != nil {
		vuo.Avatars = avatar
	}
	if avatarBaseURL := c.String("avatarbaseurl"); avatarBaseURL != "" {
		vuo.AvatarBaseURL = avatarBaseURL
	}
	if fixedAspect := c.Bool("fixedaspect"); fixedAspect {
		vuo.FixedAspect = fixedAspect
	}
	if minimalView := c.Bool("minimalview"); minimalView {
		vuo.MinimalView = minimalView
	}
	if dzml := c.String("dzml"); dzml != "" {
		vuo.DZML = dzml
	}
	return vuo
}
Example #18
func run(c *cli.Context) error {
	if c.String("env-file") != "" {
		_ = godotenv.Load(c.String("env-file"))
	}

	plugin := Plugin{
		Dryrun: c.Bool("dry-run"),
		Login: Login{
			Registry: c.String("docker.registry"),
			Username: c.String("docker.username"),
			Password: c.String("docker.password"),
			Email:    c.String("docker.email"),
		},
		Build: Build{
			Name:       c.String("commit.sha"),
			Dockerfile: c.String("dockerfile"),
			Context:    c.String("context"),
			Tags:       c.StringSlice("tags"),
			Args:       c.StringSlice("args"),
			Repo:       c.String("repo"),
		},
		Daemon: Daemon{
			Registry:      c.String("docker.registry"),
			Mirror:        c.String("daemon.mirror"),
			StorageDriver: c.String("daemon.storage-driver"),
			StoragePath:   c.String("daemon.storage-path"),
			Insecure:      c.Bool("daemon.insecure"),
			Disabled:      c.Bool("daemon.off"),
			IPv6:          c.Bool("daemon.ipv6"),
			Debug:         c.Bool("daemon.debug"),
			Bip:           c.String("daemon.bip"),
			DNS:           c.StringSlice("daemon.dns"),
			MTU:           c.String("daemon.mtu"),
		},
	}

	return plugin.Exec()
}
Example #19
func parseSQLFromArgs(ctx *cli.Context) *sqlcommand.SQLCommand {
	var sqlText string
	if !ctx.IsSet("sql") {
		//Trying from stdin
		fi, err := os.Stdin.Stat()
		if err != nil {
			panic(err)
		}
		if fi.Mode()&os.ModeNamedPipe == 0 {
			return nil
		}
		bio := bufio.NewReader(os.Stdin)
		sqlText, err = bio.ReadString(0)
		if err != nil && err != io.EOF {
			panic(err)
		}
	} else {
		sqlText = ctx.String("sql")
	}
	//Prepare parameters
	colParams, e := parameters.GetInstance().All()
	if e != nil {
		panic(e)
	}
	params := colParams.Get()
	paramsArgs := ctx.StringSlice("param")
	for i := 0; i < len(paramsArgs); i++ {
		newparam, e := paramsreplace.Replace(paramsArgs[i], params)
		if e != nil {
			logger.Error.Println(e)
		} else {
			paramsArgs[i] = newparam
		}
	}
	return sqlcommand.New(sqlText, paramsArgs)
}
Example #20
func setupSpec(g *generate.Generator, context *cli.Context) error {
	if context.GlobalBool("host-specific") {
		g.HostSpecific = true
	}

	spec := g.Spec()

	if len(spec.Version) == 0 {
		g.SetVersion(rspec.Version)
	}

	if context.IsSet("hostname") {
		g.SetHostname(context.String("hostname"))
	}

	g.SetPlatformOS(context.String("os"))
	g.SetPlatformArch(context.String("arch"))

	if context.IsSet("label") {
		annotations := context.StringSlice("label")
		for _, s := range annotations {
			pair := strings.Split(s, "=")
			if len(pair) != 2 {
				return fmt.Errorf("incorrectly specified annotation: %s", s)
			}
			g.AddAnnotation(pair[0], pair[1])
		}
	}

	g.SetRootPath(context.String("rootfs"))

	if context.IsSet("read-only") {
		g.SetRootReadonly(context.Bool("read-only"))
	}

	if context.IsSet("uid") {
		g.SetProcessUID(uint32(context.Int("uid")))
	}

	if context.IsSet("gid") {
		g.SetProcessGID(uint32(context.Int("gid")))
	}

	if context.IsSet("selinux-label") {
		g.SetProcessSelinuxLabel(context.String("selinux-label"))
	}

	g.SetProcessCwd(context.String("cwd"))

	if context.IsSet("apparmor") {
		g.SetProcessApparmorProfile(context.String("apparmor"))
	}

	if context.IsSet("no-new-privileges") {
		g.SetProcessNoNewPrivileges(context.Bool("no-new-privileges"))
	}

	if context.IsSet("tty") {
		g.SetProcessTerminal(context.Bool("tty"))
	}

	if context.IsSet("args") {
		g.SetProcessArgs(context.StringSlice("args"))
	}

	if context.IsSet("env") {
		envs := context.StringSlice("env")
		for _, env := range envs {
			g.AddProcessEnv(env)
		}
	}

	if context.IsSet("groups") {
		groups := context.StringSlice("groups")
		for _, group := range groups {
			groupID, err := strconv.Atoi(group)
			if err != nil {
				return err
			}
			g.AddProcessAdditionalGid(uint32(groupID))
		}
	}

	if context.IsSet("cgroups-path") {
		g.SetLinuxCgroupsPath(context.String("cgroups-path"))
	}

	if context.IsSet("mount-label") {
		g.SetLinuxMountLabel(context.String("mount-label"))
	}

	if context.IsSet("sysctl") {
		sysctls := context.StringSlice("sysctl")
		for _, s := range sysctls {
			pair := strings.Split(s, "=")
			if len(pair) != 2 {
				return fmt.Errorf("incorrectly specified sysctl: %s", s)
			}
			g.AddLinuxSysctl(pair[0], pair[1])
		}
	}

	privileged := false
	if context.IsSet("privileged") {
		privileged = context.Bool("privileged")
	}
	g.SetupPrivileged(privileged)

	if context.IsSet("cap-add") {
		addCaps := context.StringSlice("cap-add")
		for _, capability := range addCaps {
			if err := g.AddProcessCapability(capability); err != nil {
				return err
			}
		}
	}

	if context.IsSet("cap-drop") {
		dropCaps := context.StringSlice("cap-drop")
		for _, capability := range dropCaps {
			if err := g.DropProcessCapability(capability); err != nil {
				return err
			}
		}
	}

	needsNewUser := false

	var uidMaps, gidMaps []string

	if context.IsSet("uidmappings") {
		uidMaps = context.StringSlice("uidmappings")
	}

	if context.IsSet("gidmappings") {
		gidMaps = context.StringSlice("gidmappings")
	}

	if len(uidMaps) > 0 || len(gidMaps) > 0 {
		needsNewUser = true
	}

	setupLinuxNamespaces(context, g, needsNewUser)

	if context.IsSet("tmpfs") {
		tmpfsSlice := context.StringSlice("tmpfs")
		for _, s := range tmpfsSlice {
			dest, options, err := parseTmpfsMount(s)
			if err != nil {
				return err
			}
			g.AddTmpfsMount(dest, options)
		}
	}

	mountCgroupOption := context.String("mount-cgroups")
	if err := g.AddCgroupsMount(mountCgroupOption); err != nil {
		return err
	}

	if context.IsSet("bind") {
		binds := context.StringSlice("bind")
		for _, bind := range binds {
			source, dest, options, err := parseBindMount(bind)
			if err != nil {
				return err
			}
			g.AddBindMount(source, dest, options)
		}
	}

	if context.IsSet("prestart") {
		preStartHooks := context.StringSlice("prestart")
		for _, hook := range preStartHooks {
			path, args := parseHook(hook)
			g.AddPreStartHook(path, args)
		}
	}

	if context.IsSet("poststop") {
		postStopHooks := context.StringSlice("poststop")
		for _, hook := range postStopHooks {
			path, args := parseHook(hook)
			g.AddPostStopHook(path, args)
		}
	}

	if context.IsSet("poststart") {
		postStartHooks := context.StringSlice("poststart")
		for _, hook := range postStartHooks {
			path, args := parseHook(hook)
			g.AddPostStartHook(path, args)
		}
	}

	if context.IsSet("root-propagation") {
		rp := context.String("root-propagation")
		if err := g.SetLinuxRootPropagation(rp); err != nil {
			return err
		}
	}

	for _, uidMap := range uidMaps {
		hid, cid, size, err := parseIDMapping(uidMap)
		if err != nil {
			return err
		}

		g.AddLinuxUIDMapping(hid, cid, size)
	}

	for _, gidMap := range gidMaps {
		hid, cid, size, err := parseIDMapping(gidMap)
		if err != nil {
			return err
		}

		g.AddLinuxGIDMapping(hid, cid, size)
	}

	if context.IsSet("disable-oom-kill") {
		g.SetLinuxResourcesDisableOOMKiller(context.Bool("disable-oom-kill"))
	}

	if context.IsSet("oom-score-adj") {
		g.SetLinuxResourcesOOMScoreAdj(context.Int("oom-score-adj"))
	}

	if context.IsSet("linux-cpu-shares") {
		g.SetLinuxResourcesCPUShares(context.Uint64("linux-cpu-shares"))
	}

	if context.IsSet("linux-cpu-period") {
		g.SetLinuxResourcesCPUPeriod(context.Uint64("linux-cpu-period"))
	}

	if context.IsSet("linux-cpu-quota") {
		g.SetLinuxResourcesCPUQuota(context.Uint64("linux-cpu-quota"))
	}

	if context.IsSet("linux-realtime-runtime") {
		g.SetLinuxResourcesCPURealtimeRuntime(context.Uint64("linux-realtime-runtime"))
	}

	if context.IsSet("linux-realtime-period") {
		g.SetLinuxResourcesCPURealtimePeriod(context.Uint64("linux-realtime-period"))
	}

	if context.IsSet("linux-cpus") {
		g.SetLinuxResourcesCPUCpus(context.String("linux-cpus"))
	}

	if context.IsSet("linux-mems") {
		g.SetLinuxResourcesCPUMems(context.String("linux-mems"))
	}

	if context.IsSet("linux-mem-limit") {
		g.SetLinuxResourcesMemoryLimit(context.Uint64("linux-mem-limit"))
	}

	if context.IsSet("linux-mem-reservation") {
		g.SetLinuxResourcesMemoryReservation(context.Uint64("linux-mem-reservation"))
	}

	if context.IsSet("linux-mem-swap") {
		g.SetLinuxResourcesMemorySwap(context.Uint64("linux-mem-swap"))
	}

	if context.IsSet("linux-mem-kernel-limit") {
		g.SetLinuxResourcesMemoryKernel(context.Uint64("linux-mem-kernel-limit"))
	}

	if context.IsSet("linux-mem-kernel-tcp") {
		g.SetLinuxResourcesMemoryKernelTCP(context.Uint64("linux-mem-kernel-tcp"))
	}

	if context.IsSet("linux-mem-swappiness") {
		g.SetLinuxResourcesMemorySwappiness(context.Uint64("linux-mem-swappiness"))
	}

	if context.IsSet("linux-pids-limit") {
		g.SetLinuxResourcesPidsLimit(context.Int64("linux-pids-limit"))
	}

	var sd string
	var sa, ss []string

	if context.IsSet("seccomp-default") {
		sd = context.String("seccomp-default")
	}

	if context.IsSet("seccomp-arch") {
		sa = context.StringSlice("seccomp-arch")
	}

	if context.IsSet("seccomp-syscalls") {
		ss = context.StringSlice("seccomp-syscalls")
	}

	if sd == "" && len(sa) == 0 && len(ss) == 0 {
		return nil
	}

	// Set the DefaultAction of seccomp
	if context.IsSet("seccomp-default") {
		if err := g.SetLinuxSeccompDefault(sd); err != nil {
			return err
		}
	}

	// Add the additional architectures permitted to be used for system calls
	if context.IsSet("seccomp-arch") {
		for _, arch := range sa {
			if err := g.AddLinuxSeccompArch(arch); err != nil {
				return err
			}
		}
	}

	// Set syscall restrict in Seccomp
	if context.IsSet("seccomp-syscalls") {
		for _, syscall := range ss {
			if err := g.AddLinuxSeccompSyscall(syscall); err != nil {
				return err
			}
		}
	}

	if context.IsSet("seccomp-allow") {
		seccompAllows := context.StringSlice("seccomp-allow")
		for _, s := range seccompAllows {
			g.AddLinuxSeccompSyscallAllow(s)
		}
	}

	if context.IsSet("seccomp-errno") {
		seccompErrnos := context.StringSlice("seccomp-errno")
		for _, s := range seccompErrnos {
			g.AddLinuxSeccompSyscallErrno(s)
		}
	}

	return nil
}
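The uidmappings/gidmappings loops above depend on a parseIDMapping helper that is not shown. Judging only from the return order (hid, cid, size), a colon-separated hostID:containerID:size form seems likely; the following is a hypothetical sketch under that assumption, not the tool's actual parser:

// parseIDMapping (hypothetical) splits "hostID:containerID:size" into
// its three numeric components.
func parseIDMapping(s string) (uint32, uint32, uint32, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return 0, 0, 0, fmt.Errorf("invalid ID mapping %q, expected hostID:containerID:size", s)
	}
	vals := make([]uint32, 3)
	for i, p := range parts {
		v, err := strconv.ParseUint(p, 10, 32)
		if err != nil {
			return 0, 0, 0, err
		}
		vals[i] = uint32(v)
	}
	return vals[0], vals[1], vals[2], nil
}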
Example #21
File: cli.go Project: igalic/mgmt
// run is the main run target.
func run(c *cli.Context) error {

	obj := &Main{}

	obj.Program = c.App.Name
	obj.Version = c.App.Version

	if h := c.String("hostname"); c.IsSet("hostname") && h != "" {
		obj.Hostname = &h
	}

	if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
		obj.Prefix = &s
	}
	obj.TmpPrefix = c.Bool("tmp-prefix")
	obj.AllowTmpPrefix = c.Bool("allow-tmp-prefix")

	if _ = c.String("code"); c.IsSet("code") {
		if obj.GAPI != nil {
			return fmt.Errorf("Can't combine code GAPI with existing GAPI.")
		}
		// TODO: implement DSL GAPI
		//obj.GAPI = &dsl.GAPI{
		//	Code: &s,
		//}
		return fmt.Errorf("The Code GAPI is not implemented yet!") // TODO: DSL
	}
	if y := c.String("yaml"); c.IsSet("yaml") {
		if obj.GAPI != nil {
			return fmt.Errorf("Can't combine YAML GAPI with existing GAPI.")
		}
		obj.GAPI = &yamlgraph.GAPI{
			File: &y,
		}
	}
	if p := c.String("puppet"); c.IsSet("puppet") {
		if obj.GAPI != nil {
			return fmt.Errorf("Can't combine puppet GAPI with existing GAPI.")
		}
		obj.GAPI = &puppet.GAPI{
			PuppetParam: &p,
			PuppetConf:  c.String("puppet-conf"),
		}
	}
	obj.Remotes = c.StringSlice("remote") // FIXME: GAPI-ify somehow?

	obj.NoWatch = c.Bool("no-watch")
	obj.Noop = c.Bool("noop")
	obj.Graphviz = c.String("graphviz")
	obj.GraphvizFilter = c.String("graphviz-filter")
	obj.ConvergedTimeout = c.Int("converged-timeout")
	obj.MaxRuntime = uint(c.Int("max-runtime"))

	obj.Seeds = c.StringSlice("seeds")
	obj.ClientURLs = c.StringSlice("client-urls")
	obj.ServerURLs = c.StringSlice("server-urls")
	obj.IdealClusterSize = c.Int("ideal-cluster-size")
	obj.NoServer = c.Bool("no-server")

	obj.CConns = uint16(c.Int("cconns"))
	obj.AllowInteractive = c.Bool("allow-interactive")
	obj.SSHPrivIDRsa = c.String("ssh-priv-id-rsa")
	obj.NoCaching = c.Bool("no-caching")
	obj.Depth = uint16(c.Int("depth"))

	if err := obj.Init(); err != nil {
		return err
	}

	// install the exit signal handler
	exit := make(chan struct{})
	defer close(exit)
	go func() {
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt) // catch ^C
		//signal.Notify(signals, os.Kill) // catch signals
		signal.Notify(signals, syscall.SIGTERM)

		select {
		case sig := <-signals: // any signal will do
			if sig == os.Interrupt {
				log.Println("Interrupted by ^C")
				obj.Exit(nil)
				return
			}
			log.Println("Interrupted by signal")
			obj.Exit(fmt.Errorf("Killed by %v", sig))
			return
		case <-exit:
			return
		}
	}()

	if err := obj.Run(); err != nil {
		return err
		//return cli.NewExitError(err.Error(), 1) // TODO: ?
		//return cli.NewExitError("", 1) // TODO: ?
	}
	return nil
}
Example #22
File: main.go Project: ffrank/mgmt
// run is the main run target.
func run(c *cli.Context) error {
	var start = time.Now().UnixNano()
	log.Printf("This is: %v, version: %v", program, version)
	log.Printf("Main: Start: %v", start)

	hostname, _ := os.Hostname()
	// allow passing in the hostname, instead of using --hostname
	if c.IsSet("file") {
		if config := gconfig.ParseConfigFromFile(c.String("file")); config != nil {
			if h := config.Hostname; h != "" {
				hostname = h
			}
		}
	}
	if c.IsSet("hostname") { // override by cli
		if h := c.String("hostname"); h != "" {
			hostname = h
		}
	}
	noop := c.Bool("noop")

	seeds, err := etcdtypes.NewURLs(
		util.FlattenListWithSplit(c.StringSlice("seeds"), []string{",", ";", " "}),
	)
	if err != nil && len(c.StringSlice("seeds")) > 0 {
		log.Printf("Main: Error: seeds didn't parse correctly!")
		return cli.NewExitError("", 1)
	}
	clientURLs, err := etcdtypes.NewURLs(
		util.FlattenListWithSplit(c.StringSlice("client-urls"), []string{",", ";", " "}),
	)
	if err != nil && len(c.StringSlice("client-urls")) > 0 {
		log.Printf("Main: Error: clientURLs didn't parse correctly!")
		return cli.NewExitError("", 1)
	}
	serverURLs, err := etcdtypes.NewURLs(
		util.FlattenListWithSplit(c.StringSlice("server-urls"), []string{",", ";", " "}),
	)
	if err != nil && len(c.StringSlice("server-urls")) > 0 {
		log.Printf("Main: Error: serverURLs didn't parse correctly!")
		return cli.NewExitError("", 1)
	}

	idealClusterSize := uint16(c.Int("ideal-cluster-size"))
	if idealClusterSize < 1 {
		log.Printf("Main: Error: idealClusterSize should be at least one!")
		return cli.NewExitError("", 1)
	}

	if c.IsSet("file") && c.IsSet("puppet") {
		log.Println("Main: Error: the --file and --puppet parameters cannot be used together!")
		return cli.NewExitError("", 1)
	}

	if c.Bool("no-server") && len(c.StringSlice("remote")) > 0 {
		// TODO: in this case, we won't be able to tunnel stuff back to
		// here, so if we're okay with every remote graph running in an
		// isolated mode, then this is okay. Improve on this if there's
		// someone who really wants to be able to do this.
		log.Println("Main: Error: the --no-server and --remote parameters cannot be used together!")
		return cli.NewExitError("", 1)
	}

	cConns := uint16(c.Int("cconns"))
	if cConns < 0 {
		log.Printf("Main: Error: --cconns should be at least zero!")
		return cli.NewExitError("", 1)
	}

	if c.IsSet("converged-timeout") && cConns > 0 && len(c.StringSlice("remote")) > c.Int("cconns") {
		log.Printf("Main: Error: combining --converged-timeout with more remotes than available connections will never converge!")
		return cli.NewExitError("", 1)
	}

	depth := uint16(c.Int("depth"))
	if depth < 0 { // user should not be using this argument manually
		log.Printf("Main: Error: negative values for --depth are not permitted!")
		return cli.NewExitError("", 1)
	}

	if c.IsSet("prefix") && c.Bool("tmp-prefix") {
		log.Println("Main: Error: combining --prefix and the request for a tmp prefix is illogical!")
		return cli.NewExitError("", 1)
	}
	if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
		prefix = s
	}

	// make sure the working directory prefix exists
	if c.Bool("tmp-prefix") || os.MkdirAll(prefix, 0770) != nil {
		if c.Bool("tmp-prefix") || c.Bool("allow-tmp-prefix") {
			if prefix, err = ioutil.TempDir("", program+"-"); err != nil {
				log.Printf("Main: Error: Can't create temporary prefix!")
				return cli.NewExitError("", 1)
			}
			log.Println("Main: Warning: Working prefix directory is temporary!")

		} else {
			log.Printf("Main: Error: Can't create prefix!")
			return cli.NewExitError("", 1)
		}
	}
	log.Printf("Main: Working prefix is: %s", prefix)

	var wg sync.WaitGroup
	exit := make(chan bool) // exit signal
	var G, fullGraph *pgraph.Graph

	// exit after `max-runtime` seconds for no reason at all...
	if i := c.Int("max-runtime"); i > 0 {
		go func() {
			time.Sleep(time.Duration(i) * time.Second)
			exit <- true
		}()
	}

	// setup converger
	converger := converger.NewConverger(
		c.Int("converged-timeout"),
		nil, // stateFn gets added in by EmbdEtcd
	)
	go converger.Loop(true) // main loop for converger, true to start paused

	// embedded etcd
	if len(seeds) == 0 {
		log.Printf("Main: Seeds: No seeds specified!")
	} else {
		log.Printf("Main: Seeds(%v): %v", len(seeds), seeds)
	}
	EmbdEtcd := etcd.NewEmbdEtcd(
		hostname,
		seeds,
		clientURLs,
		serverURLs,
		c.Bool("no-server"),
		idealClusterSize,
		prefix,
		converger,
	)
	if EmbdEtcd == nil {
		// TODO: verify EmbdEtcd is not nil below...
		log.Printf("Main: Etcd: Creation failed!")
		exit <- true
	} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
		log.Printf("Main: Etcd: Startup failed: %v", err)
		exit <- true
	}
	convergerStateFn := func(b bool) error {
		// exit if we are using the converged-timeout and we are the
		// root node. otherwise, if we are a child node in a remote
		// execution hierarchy, we should only notify our converged
		// state and wait for the parent to trigger the exit.
		if depth == 0 && c.Int("converged-timeout") >= 0 {
			if b {
				log.Printf("Converged for %d seconds, exiting!", c.Int("converged-timeout"))
				exit <- true // trigger an exit!
			}
			return nil
		}
		// send our individual state into etcd for others to see
		return etcd.EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
	}
	if EmbdEtcd != nil {
		converger.SetStateFn(convergerStateFn)
	}

	exitchan := make(chan struct{}) // exit on close
	go func() {
		startchan := make(chan struct{}) // start signal
		go func() { startchan <- struct{}{} }()
		file := c.String("file")
		var configchan chan bool
		var puppetchan <-chan time.Time
		if !c.Bool("no-watch") && c.IsSet("file") {
			configchan = ConfigWatch(file)
		} else if c.IsSet("puppet") {
			interval := puppet.PuppetInterval(c.String("puppet-conf"))
			puppetchan = time.Tick(time.Duration(interval) * time.Second)
		}
		log.Println("Etcd: Starting...")
		etcdchan := etcd.EtcdWatch(EmbdEtcd)
		first := true // first loop or not
		for {
			log.Println("Main: Waiting...")
			select {
			case <-startchan: // kick the loop once at start
				// pass

			case b := <-etcdchan:
				if !b { // ignore the message
					continue
				}
				// everything else passes through to cause a compile!

			case <-puppetchan:
				// nothing, just go on

			case msg := <-configchan:
				if c.Bool("no-watch") || !msg {
					continue // not ready to read config
				}
			// XXX: case compile_event: ...
			// ...
			case <-exitchan:
				return
			}

			var config *gconfig.GraphConfig
			if c.IsSet("file") {
				config = gconfig.ParseConfigFromFile(file)
			} else if c.IsSet("puppet") {
				config = puppet.ParseConfigFromPuppet(c.String("puppet"), c.String("puppet-conf"))
			}
			if config == nil {
				log.Printf("Config: Parse failure")
				continue
			}

			if config.Hostname != "" && config.Hostname != hostname {
				log.Printf("Config: Hostname changed, ignoring config!")
				continue
			}
			config.Hostname = hostname // set it in case it was ""

			// run graph vertex LOCK...
			if !first { // TODO: we can flatten this check out I think
				converger.Pause() // FIXME: add sync wait?
				G.Pause()         // sync
			}

			// build graph from yaml file on events (eg: from etcd)
			// we need the vertices to be paused to work on them
			if newFullgraph, err := config.NewGraphFromConfig(fullGraph, EmbdEtcd, noop); err == nil { // keep references to all original elements
				fullGraph = newFullgraph
			} else {
				log.Printf("Config: Error making new graph from config: %v", err)
				// unpause!
				if !first {
					G.Start(&wg, first) // sync
					converger.Start()   // after G.Start()
				}
				continue
			}

			G = fullGraph.Copy() // copy to active graph
			// XXX: do etcd transaction out here...
			G.AutoEdges() // add autoedges; modifies the graph
			G.AutoGroup() // run autogroup; modifies the graph
			// TODO: do we want to do a transitive reduction?

			log.Printf("Graph: %v", G) // show graph
			err := G.ExecGraphviz(c.String("graphviz-filter"), c.String("graphviz"))
			if err != nil {
				log.Printf("Graphviz: %v", err)
			} else {
				log.Printf("Graphviz: Successfully generated graph!")
			}
			G.AssociateData(converger)
			// G.Start(...) needs to be synchronous or wait,
			// because if half of the nodes are started and
			// some are not ready yet and the EtcdWatch
			// loops, we'll cause G.Pause(...) before we
			// even got going, thus causing nil pointer errors
			G.Start(&wg, first) // sync
			converger.Start()   // after G.Start()
			first = false
		}
	}()

	configWatcher := NewConfigWatcher()
	events := configWatcher.Events()
	if !c.Bool("no-watch") {
		configWatcher.Add(c.StringSlice("remote")...) // add all the files...
	} else {
		events = nil // signal that no-watch is true
	}

	// initialize the add watcher, which calls the f callback on map changes
	convergerCb := func(f func(map[string]bool) error) (func(), error) {
		return etcd.EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
	}

	// build remotes struct for remote ssh
	remotes := remote.NewRemotes(
		EmbdEtcd.LocalhostClientURLs().StringSlice(),
		[]string{etcd.DefaultClientURL},
		noop,
		c.StringSlice("remote"), // list of files
		events,                  // watch for file changes
		cConns,
		c.Bool("allow-interactive"),
		c.String("ssh-priv-id-rsa"),
		!c.Bool("no-caching"),
		depth,
		prefix,
		converger,
		convergerCb,
		program,
	)

	// TODO: is there any benefit to running the remotes above in the loop?
	// wait for etcd to be running before we remote in, which we do above!
	go remotes.Run()

	if !c.IsSet("file") && !c.IsSet("puppet") {
		converger.Start() // better start this for empty graphs
	}
	log.Println("Main: Running...")

	waitForSignal(exit) // pass in exit channel to watch

	log.Println("Destroy...")

	configWatcher.Close() // stop sending file changes to remotes
	remotes.Exit()        // tell all the remote connections to shutdown; waits!

	G.Exit() // tell all the children to exit

	// tell inner main loop to exit
	close(exitchan)

	// cleanup etcd main loop last so it can process everything first
	if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
		log.Printf("Etcd exited poorly with: %v", err)
	}

	if global.DEBUG {
		log.Printf("Graph: %v", G)
	}

	wg.Wait() // wait for primary go routines to exit

	// TODO: wait for each vertex to exit...
	log.Println("Goodbye!")
	return nil
}
Example #23
//
// handleCommand is a generic wrapper for handling commands, or more precisely their errors
//
func handleCommand(cx *cli.Context, options []string, cmd *cliCommand, method func(*formatter, *cli.Context, *cliCommand) error) error {
	// step: handle any panics in the command
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintf(os.Stderr, "[error] internal error occurred, message: %s", r)
			os.Exit(1)
		}
	}()

	// step: check the required options were specified
	for _, k := range options {
		items := strings.Split(k, ":")
		if len(items) != 3 {
			panic("invalid required option definition, SCOPE:NAME:TYPE")
		}
		name := items[1]

		//
		// @Fix the cli lib IsSet does not check if the option was set by a environment variable, the
		// issue https://github.com/urfave/cli/issues/294 highlights problem. As a consequence, we can't determine
		// if the variable is actually set. The hack below attempts to remedy it.
		//
		var invalid bool

		switch scope := items[0]; scope {
		case "g":
			switch t := items[2]; t {
			case "s":
				invalid = !cx.GlobalIsSet(name) && cx.String(name) == ""
			case "a":
				invalid = !cx.GlobalIsSet(name) && len(cx.GlobalStringSlice(name)) == 0
			}
			if invalid {
				printError("the global option: '%s' is required", name)
			}
		default:
			switch t := items[2]; t {
			case "s":
				invalid = !cx.IsSet(name) && cx.String(name) == ""
			case "a":
				invalid = !cx.IsSet(name) && len(cx.StringSlice(name)) == 0
			}
			if invalid {
				printError("the command option: '%s' is required", name)
			}
		}
	}

	// step: create a cli output
	writer, err := newFormatter(cx.GlobalString("format"), os.Stdout)
	if err != nil {
		printError("error: %s", err)
	}

	// step: call the command and handle any errors
	if err := method(writer, cx, cmd); err != nil {
		printError("operation failed, error: %s", err)
	}

	return nil
}
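Each entry of options encodes SCOPE:NAME:TYPE: scope "g" marks a global flag (anything else is command-level), and type "s" or "a" selects string versus string-slice validation. A hypothetical call (the flag names and the showParams handler are illustrative, not from the source):

// require the global string flag "format" and the command-level
// string-slice flag "param" before dispatching to the handler
err := handleCommand(cx, []string{"g:format:s", "c:param:a"}, cmd, showParams)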
Example #24
func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
	if path := context.String("process"); path != "" {
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		var p specs.Process
		if err := json.NewDecoder(f).Decode(&p); err != nil {
			return nil, err
		}
		return &p, validateProcessSpec(&p)
	}
	// process via cli flags
	if err := os.Chdir(bundle); err != nil {
		return nil, err
	}
	spec, err := loadSpec(specConfig)
	if err != nil {
		return nil, err
	}
	p := spec.Process
	p.Args = context.Args()[1:]
	// override the cwd, if passed
	if context.String("cwd") != "" {
		p.Cwd = context.String("cwd")
	}
	if ap := context.String("apparmor"); ap != "" {
		p.ApparmorProfile = ap
	}
	if l := context.String("process-label"); l != "" {
		p.SelinuxLabel = l
	}
	if caps := context.StringSlice("cap"); len(caps) > 0 {
		p.Capabilities = caps
	}
	// append the passed env variables
	for _, e := range context.StringSlice("env") {
		p.Env = append(p.Env, e)
	}
	// set the tty
	if context.IsSet("tty") {
		p.Terminal = context.Bool("tty")
	}
	if context.IsSet("no-new-privs") {
		p.NoNewPrivileges = context.Bool("no-new-privs")
	}
	// override the user, if passed
	if context.String("user") != "" {
		u := strings.SplitN(context.String("user"), ":", 2)
		if len(u) > 1 {
			gid, err := strconv.Atoi(u[1])
			if err != nil {
				return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
			}
			p.User.GID = uint32(gid)
		}
		uid, err := strconv.Atoi(u[0])
		if err != nil {
			return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
		}
		p.User.UID = uint32(uid)
	}
	return &p, nil
}
Example #25
func manage(c *cli.Context) {
	var (
		tlsConfig *tls.Config
		err       error
	)

	// If either --tls or --tlsverify are specified, load the certificates.
	if c.Bool("tls") || c.Bool("tlsverify") {
		if !c.IsSet("tlscert") || !c.IsSet("tlskey") {
			log.Fatal("--tlscert and --tlskey must be provided when using --tls")
		}
		if c.Bool("tlsverify") && !c.IsSet("tlscacert") {
			log.Fatal("--tlscacert must be provided when using --tlsverify")
		}
		tlsConfig, err = loadTLSConfig(
			c.String("tlscacert"),
			c.String("tlscert"),
			c.String("tlskey"),
			c.Bool("tlsverify"))
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Otherwise, if neither --tls nor --tlsverify are specified, abort if
		// the other flags are passed as they will be ignored.
		if c.IsSet("tlscert") || c.IsSet("tlskey") || c.IsSet("tlscacert") {
			log.Fatal("--tlscert, --tlskey and --tlscacert require the use of either --tls or --tlsverify")
		}
	}

	refreshMinInterval := c.Duration("engine-refresh-min-interval")
	refreshMaxInterval := c.Duration("engine-refresh-max-interval")
	if refreshMinInterval <= time.Duration(0)*time.Second {
		log.Fatal("min refresh interval should be a positive number")
	}
	if refreshMaxInterval < refreshMinInterval {
		log.Fatal("max refresh interval cannot be less than min refresh interval")
	}
	// engine-refresh-retry is deprecated
	refreshRetry := c.Int("engine-refresh-retry")
	if refreshRetry != 3 {
		log.Fatal("--engine-refresh-retry is deprecated. Use --engine-failure-retry")
	}
	failureRetry := c.Int("engine-failure-retry")
	if failureRetry <= 0 {
		log.Fatal("invalid failure retry count")
	}
	engineOpts := &cluster.EngineOpts{
		RefreshMinInterval: refreshMinInterval,
		RefreshMaxInterval: refreshMaxInterval,
		FailureRetry:       failureRetry,
	}

	uri := getDiscovery(c)
	if uri == "" {
		log.Fatalf("discovery required to manage a cluster. See '%s manage --help'.", c.App.Name)
	}
	discovery := createDiscovery(uri, c)
	s, err := strategy.New(c.String("strategy"))
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/urfave/cli/issues/160
	names := c.StringSlice("filter")
	if c.IsSet("filter") || c.IsSet("f") {
		names = names[DefaultFilterNumber:]
	}
	fs, err := filter.New(names)
	if err != nil {
		log.Fatal(err)
	}

	sched := scheduler.New(s, fs)
	var cl cluster.Cluster
	switch c.String("cluster-driver") {
	case "mesos-experimental":
		log.Warn("WARNING: the mesos driver is currently experimental, use at your own risks")
		cl, err = mesos.NewCluster(sched, tlsConfig, uri, c.StringSlice("cluster-opt"), engineOpts)
	case "swarm":
		cl, err = swarm.NewCluster(sched, tlsConfig, discovery, c.StringSlice("cluster-opt"), engineOpts)
	default:
		log.Fatalf("unsupported cluster %q", c.String("cluster-driver"))
	}
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/urfave/cli/issues/160
	hosts := c.StringSlice("host")
	if c.IsSet("host") || c.IsSet("H") {
		hosts = hosts[1:]
	}

	server := api.NewServer(hosts, tlsConfig)
	if c.Bool("replication") {
		addr := c.String("advertise")
		if addr == "" {
			log.Fatal("--advertise address must be provided when using --leader-election")
		}
		if !checkAddrFormat(addr) {
			log.Fatal("--advertise should be of the form ip:port or hostname:port")
		}
		leaderTTL, err := time.ParseDuration(c.String("replication-ttl"))
		if err != nil {
			log.Fatalf("invalid --replication-ttl: %v", err)
		}
		if leaderTTL <= time.Duration(0)*time.Second {
			log.Fatalf("--replication-ttl should be a positive number")
		}

		setupReplication(c, cl, server, discovery, addr, leaderTTL, tlsConfig)
	} else {
		server.SetHandler(api.NewPrimary(cl, tlsConfig, &statusHandler{cl, nil, nil}, c.GlobalBool("debug"), c.Bool("cors")))
		cluster.NewWatchdog(cl)
	}

	log.Fatal(server.ListenAndServe())
}