// runVerify checks every runner in the config file against the server and,
// when the "delete" flag is set, removes unverifiable runners from the
// config file and saves it back.
func runVerify(c *cli.Context) {
	config := common.NewConfig()
	err := config.LoadConfig(c.String("config"))
	if err != nil {
		// Fatalln exits the process; no return is needed after it.
		log.Fatalln(err)
	}

	// Keep only the runners the server still recognizes.
	runners := []*common.RunnerConfig{}
	for _, runner := range config.Runners {
		if common.VerifyRunner(runner.URL, runner.Token) {
			runners = append(runners, runner)
		}
	}

	// Without --delete this command is verify-only.
	if !c.Bool("delete") {
		return
	}

	// Nothing was filtered out, so the file does not need rewriting.
	if len(config.Runners) == len(runners) {
		return
	}

	config.Runners = runners

	// Persist the pruned runner list.
	err = config.SaveConfig(c.String("config"))
	if err != nil {
		log.Fatalln("Failed to update", c.String("config"), err)
	}
	log.Println("Updated", c.String("config"))
}
Example #2
0
// loadConfig reads the config file named by c.ConfigFile and, on success,
// stores the parsed result in c.config.
func (c *configOptions) loadConfig() error {
	cfg := common.NewConfig()
	if err := cfg.LoadConfig(c.ConfigFile); err != nil {
		return err
	}
	c.config = cfg
	return nil
}
// loadConfig reloads the runner configuration from mr.configFile, resetting
// the cached health state so runners are re-checked against the new config.
func (mr *MultiRunner) loadConfig() error {
	cfg := common.NewConfig()
	if err := cfg.LoadConfig(mr.configFile); err != nil {
		return err
	}

	mr.healthy = nil
	mr.config = cfg
	return nil
}
// loadConfig reloads the runner configuration from mr.configFile. When the
// MultiRunner was started with an explicit user, that user overrides the one
// in the file. Cached health state is reset so runners are re-checked.
func (mr *MultiRunner) loadConfig() error {
	cfg := common.NewConfig()
	if err := cfg.LoadConfig(mr.configFile); err != nil {
		return err
	}

	// Run build scripts as the user given on the command line, if any.
	if mr.user != "" {
		cfg.User = &mr.user
	}

	mr.healthy = nil
	mr.config = cfg
	return nil
}
// runUnregister deletes a runner (identified by --url and --token) from the
// server, then removes the matching entry from the local config file.
func runUnregister(c *cli.Context) {
	runner := common.RunnerConfig{
		URL:   c.String("url"),
		Token: c.String("token"),
	}

	if !common.DeleteRunner(runner.URL, runner.Token) {
		log.Fatalln("Failed to delete runner")
	}

	config := common.NewConfig()
	err := config.LoadConfig(c.String("config"))
	if err != nil {
		// The runner is already deleted server-side; warn instead of
		// silently returning so the stale local entry is not a mystery.
		log.Warningln("Failed to load", c.String("config"), err)
		return
	}

	// Rebuild the runner list without the entry that was just deleted.
	runners := []*common.RunnerConfig{}
	for _, otherRunner := range config.Runners {
		if otherRunner.Token == runner.Token && otherRunner.URL == runner.URL {
			continue
		}
		runners = append(runners, otherRunner)
	}

	// No matching entry was found; the file does not need rewriting.
	if len(config.Runners) == len(runners) {
		return
	}

	config.Runners = runners

	// Persist the pruned runner list.
	err = config.SaveConfig(c.String("config"))
	if err != nil {
		log.Fatalln("Failed to update", c.String("config"), err)
	}
	log.Println("Updated", c.String("config"))
}
Example #6
0
// runServiceInstall installs gitlab-runner as a system service. When run as
// root it requires an explicit --user; when --config is given, it validates
// the file and writes a default one if none existed yet.
func runServiceInstall(s service.Service, c *cli.Context) error {
	// Refuse to install a root-owned service without an explicit user.
	if os.Getuid() == 0 && c.String("user") == "" {
		log.Fatal("Please specify user that will run gitlab-runner service")
	}

	if configFile := c.String("config"); configFile != "" {
		// Validate any existing config before installing.
		config := common.NewConfig()
		if err := config.LoadConfig(configFile); err != nil {
			return err
		}

		// No file existed yet — write out the defaults.
		if !config.Loaded {
			if err := config.SaveConfig(configFile); err != nil {
				return err
			}
		}
	}
	return service.Control(s, "install")
}
// runRegister interactively registers a new runner: it prompts for the
// runner details, registers it with the server, asks executor-specific
// questions, then appends the runner to the config file and saves it.
// Registration helpers signal failure by panicking; the deferred recover
// below converts that into a fatal log message.
func runRegister(c *cli.Context) {
	s := RegistrationContext{
		Context:    c,
		config:     common.NewConfig(),
		configFile: c.String("config"),
		reader:     bufio.NewReader(os.Stdin),
	}

	// Outermost recover: turn any panic raised during registration into a
	// fatal error message instead of a raw stack trace.
	defer func() {
		if r := recover(); r != nil {
			log.Fatalf("FATAL ERROR: %v", r)
		}
	}()

	s.loadConfig()

	runnerConfig := s.askRunner()

	if !c.Bool("leave-runner") {
		// The runner is already registered server-side at this point. If a
		// later step panics, delete it again before the outer recover
		// reports the error (the re-panic hands control to that defer).
		defer func() {
			if r := recover(); r != nil {
				common.DeleteRunner(runnerConfig.URL, runnerConfig.Token)
				// pass panic to next defer
				panic(r)
			}
		}()

		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)

		// Likewise clean up the server-side registration if the user
		// interrupts the remaining prompts with Ctrl-C.
		go func() {
			s := <-signals
			common.DeleteRunner(runnerConfig.URL, runnerConfig.Token)
			log.Fatalf("RECEIVED SIGNAL: %v", s)
		}()
	}

	runnerConfig.Executor = s.askExecutor()
	limit := c.Int("limit")
	runnerConfig.Limit = &limit

	if s.config.Concurrent < limit {
		log.Warningf("Specified limit (%d) larger then current concurrent limit (%d). Concurrent limit will not be enlarged.", limit, s.config.Concurrent)
	}

	// Executor-specific questions: first the container/VM settings...
	switch runnerConfig.Executor {
	case "docker", "docker-ssh":
		s.askDocker(&runnerConfig)
	case "parallels":
		s.askParallels(&runnerConfig)
	}

	// ...then the SSH settings (docker-ssh and parallels tunnel over SSH).
	switch runnerConfig.Executor {
	case "ssh":
		s.askSSH(&runnerConfig, false)
	case "docker-ssh":
		s.askSSH(&runnerConfig, true)
	case "parallels":
		s.askSSH(&runnerConfig, true)
	}

	s.addRunner(&runnerConfig)
	s.saveConfig()

	log.Printf("Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!")
}
// runSingle runs builds for a single runner (configured entirely from CLI
// flags) in a loop until interrupted. On SIGINT/SIGTERM it stops fetching
// new builds, aborts the current one, and waits up to ShutdownTimeout for a
// clean exit; a second signal forces immediate exit.
func runSingle(c *cli.Context) {
	buildsDir := c.String("builds-dir")
	shell := c.String("shell")
	config := common.NewConfig()
	runner := common.RunnerConfig{
		URL:       c.String("url"),
		Token:     c.String("token"),
		Executor:  c.String("executor"),
		BuildsDir: &buildsDir,
		Shell:     &shell,
	}

	if len(runner.URL) == 0 {
		log.Fatalln("Missing URL")
	}
	if len(runner.Token) == 0 {
		log.Fatalln("Missing Token")
	}
	if len(runner.Executor) == 0 {
		log.Fatalln("Missing Executor")
	}

	go runServer(c.String("addr"))
	go runHerokuURL(c.String("heroku-url"))

	// signal.Notify does not block sending; an unbuffered channel can drop
	// the signal if we are not receiving at that instant, so buffer it.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)

	log.Println("Starting runner for", runner.URL, "with token", runner.ShortDescription(), "...")

	// NOTE(review): finished is written by the signal goroutine and read by
	// the build loop without synchronization — technically a data race.
	finished := false
	abortSignal := make(chan os.Signal)
	doneSignal := make(chan int, 1)

	go func() {
		interrupt := <-signals
		log.Warningln("Requested exit:", interrupt)
		finished = true

		// Repeatedly offer the interrupt so every select below (and the
		// running build's BuildAbort) can receive it.
		go func() {
			for {
				abortSignal <- interrupt
			}
		}()

		select {
		case newSignal := <-signals:
			log.Fatalln("forced exit:", newSignal)
		case <-time.After(common.ShutdownTimeout * time.Second):
			log.Fatalln("shutdown timedout")
		case <-doneSignal:
		}
	}()

	for !finished {
		buildData, healthy := common.GetBuild(runner)
		if !healthy {
			log.Println("Runner is not healthy!")
			// Back off, but wake immediately on shutdown.
			select {
			case <-time.After(common.NotHealthyCheckInterval * time.Second):
			case <-abortSignal:
			}
			continue
		}

		if buildData == nil {
			// No build available; poll again after the check interval.
			select {
			case <-time.After(common.CheckInterval * time.Second):
			case <-abortSignal:
			}
			continue
		}

		newBuild := common.Build{
			GetBuildResponse: *buildData,
			Runner:           &runner,
			BuildAbort:       abortSignal,
		}
		newBuild.AssignID()
		newBuild.Run(config)
	}

	doneSignal <- 0
}
// Execute runs builds for this single runner in a loop until interrupted.
// SIGQUIT requests a graceful quit (finish current builds); SIGINT/SIGTERM
// abort the current build and wait up to ShutdownTimeout; a further signal
// forces immediate exit.
func (r *RunSingleCommand) Execute(c *cli.Context) {
	if len(r.URL) == 0 {
		log.Fatalln("Missing URL")
	}
	if len(r.Token) == 0 {
		log.Fatalln("Missing Token")
	}
	if len(r.Executor) == 0 {
		log.Fatalln("Missing Executor")
	}

	config := common.NewConfig()
	// signal.Notify does not block sending; an unbuffered channel can drop
	// the signal if we are not receiving at that instant, so buffer it.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)

	log.Println("Starting runner for", r.URL, "with token", r.ShortDescription(), "...")

	// NOTE(review): finished is written by the signal goroutine and read by
	// the build loop without synchronization — technically a data race.
	finished := false
	abortSignal := make(chan os.Signal)
	doneSignal := make(chan int, 1)

	go func() {
		interrupt := <-signals
		finished = true

		// SIGQUIT means "stop taking new builds, let running ones finish";
		// keep waiting until a harder signal arrives.
		for interrupt == syscall.SIGQUIT {
			log.Warningln("Requested quit, waiting for builds to finish")
			interrupt = <-signals
		}

		log.Warningln("Requested exit:", interrupt)

		// Repeatedly offer the interrupt so every select below (and the
		// running build's BuildAbort) can receive it.
		go func() {
			for {
				abortSignal <- interrupt
			}
		}()

		select {
		case newSignal := <-signals:
			log.Fatalln("forced exit:", newSignal)
		case <-time.After(common.ShutdownTimeout * time.Second):
			log.Fatalln("shutdown timedout")
		case <-doneSignal:
		}
	}()

	for !finished {
		buildData, healthy := common.GetBuild(r.RunnerConfig)
		if !healthy {
			log.Println("Runner is not healthy!")
			// Back off, but wake immediately on shutdown.
			select {
			case <-time.After(common.NotHealthyCheckInterval * time.Second):
			case <-abortSignal:
			}
			continue
		}

		if buildData == nil {
			// No build available; poll again after the check interval.
			select {
			case <-time.After(common.CheckInterval * time.Second):
			case <-abortSignal:
			}
			continue
		}

		newBuild := common.Build{
			GetBuildResponse: *buildData,
			Runner:           &r.RunnerConfig,
			BuildAbort:       abortSignal,
		}
		newBuild.AssignID()
		newBuild.Run(config)
	}

	doneSignal <- 0
}