Example #1
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New(*sessionName)
	natsClient := diegonats.NewClient()
	clock := clock.NewClock()
	syncer := syncer.NewSyncer(clock, *syncInterval, natsClient, logger)

	initializeDropsonde(logger)

	natsClientRunner := diegonats.NewClientRunner(*natsAddresses, *natsUsername, *natsPassword, logger, natsClient)

	table := initializeRoutingTable()
	emitter := initializeNatsEmitter(natsClient, logger)
	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return watcher.NewWatcher(initializeBBSClient(logger), clock, table, emitter, syncer.Events(), logger).Run(signals, ready)
	})

	syncRunner := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return syncer.Run(signals, ready)
	})

	lockMaintainer := initializeLockMaintainer(logger, *consulCluster, *sessionName, *lockTTL, *lockRetryInterval, clock)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"nats-client", natsClientRunner},
		{"watcher", watcher},
		{"syncer", syncRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
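
The pattern above is the standard ifrit bootstrap: collect the components as grouper.Members, start them in order, wrap the group in sigmon so OS signals are forwarded, Invoke the result, and block on Wait. A minimal, self-contained sketch of that skeleton, with a trivial placeholder runner standing in for the real watcher, syncer, and lock-maintainer components, might look like this:

package main

import (
	"fmt"
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	// Placeholder runner: announce readiness, then wait to be told to stop.
	placeholder := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		return nil
	})

	members := grouper.Members{
		{Name: "placeholder", Runner: placeholder},
	}

	// Members start in order; on shutdown they are signaled in reverse order.
	group := grouper.NewOrdered(os.Interrupt, members)

	// sigmon forwards OS termination signals to the group.
	monitor := ifrit.Invoke(sigmon.New(group))

	if err := <-monitor.Wait(); err != nil {
		fmt.Fprintln(os.Stderr, "exited with failure:", err)
		os.Exit(1)
	}
}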
Example #2
func (maker ComponentMaker) Consul(argv ...string) ifrit.Runner {
	_, port, err := net.SplitHostPort(maker.Addresses.Consul)
	Expect(err).NotTo(HaveOccurred())
	httpPort, err := strconv.Atoi(port)
	Expect(err).NotTo(HaveOccurred())

	startingPort := httpPort - consulrunner.PortOffsetHTTP

	clusterRunner := consulrunner.NewClusterRunner(startingPort, 1, "http")
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		done := make(chan struct{})
		go func() {
			clusterRunner.Start()
			close(done)
		}()

		Eventually(done, 10).Should(BeClosed())

		close(ready)

		select {
		case <-signals:
			clusterRunner.Stop()
		}

		return nil
	})
}
Example #3
func (resource *resource) Put(ioConfig IOConfig, source atc.Source, params atc.Params, artifactSource ArtifactSource) VersionedSource {
	resourceDir := ResourcesDir("put")

	vs := &versionedSource{
		container:   resource.container,
		resourceDir: resourceDir,
	}

	vs.Runner = ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return resource.runScript(
			"/opt/resource/out",
			[]string{resourceDir},
			outRequest{
				Params: params,
				Source: source,
			},
			&vs.versionResult,
			ioConfig.Stderr,
			artifactSource,
			vs,
			true,
		).Run(signals, ready)
	})

	return vs
}
Example #4
File: server.go Project: savaki/tsa
func (server *registrarSSHServer) forwardTCPIP(
	logger lager.Logger,
	conn *ssh.ServerConn,
	listener net.Listener,
	forwardIP string,
	forwardPort uint32,
) ifrit.Process {
	return ifrit.Background(ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		go func() {
			<-signals

			listener.Close()
		}()

		close(ready)

		for {
			localConn, err := listener.Accept()
			if err != nil {
				logger.Error("failed-to-accept", err)
				break
			}

			go forwardLocalConn(logger, localConn, conn, forwardIP, forwardPort)
		}

		return nil
	}))
}
Example #5
File: radar.go Project: utako/atc
func (radar *Radar) Scanner(logger lager.Logger, resourceName string) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		ticker := time.NewTicker(radar.interval)
		defer ticker.Stop()

		close(ready)

		for {
			select {
			case <-signals:
				return nil

			case <-ticker.C:
				lock := radar.checkLock(radar.db.ScopedName(resourceName))
				resourceCheckingLock, err := radar.locker.AcquireWriteLockImmediately(lock)

				if err != nil {
					continue
				}

				err = radar.scan(logger.Session("tick"), resourceName)

				resourceCheckingLock.Release()

				if err != nil {
					return err
				}
			}
		}
	})
}
Example #6
func closeHub(hub event.Hub) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		hub.Close()
		return nil
	})
}
Example #7
func New(proxySignals <-chan os.Signal, runner ifrit.Runner) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		process := ifrit.Background(runner)
		<-process.Ready()
		close(ready)
		go forwardSignals(proxySignals, process)
		go forwardSignals(signals, process)
		return <-process.Wait()
	})
}
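
This wrapper lets a caller inject signals from an external channel in addition to the ones ifrit delivers. The forwardSignals helper is not part of the snippet; a plausible shape for that kind of helper (purely illustrative, assuming only the os package and the ifrit.Process interface) is a simple relay loop:

// Illustrative helper, not the package's actual code: relay every signal
// received on sigs to the given process until sigs is closed.
func forwardSignals(sigs <-chan os.Signal, process ifrit.Process) {
	for sig := range sigs {
		process.Signal(sig)
	}
}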
Example #8
func constructStopper(database db.DB) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		select {
		case <-signals:
			database.CancelWatches()
		}

		return nil
	})
}
Example #9
func constructStopper(stopChan chan struct{}) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		select {
		case <-signals:
			close(stopChan)
		}

		return nil
	})
}
Example #10
func (a *AllocationStore) RegistryPruner(logger lager.Logger, expirationTime time.Duration) ifrit.Runner {
	logger = logger.Session("allocation-store-pruner")

	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		ticker := a.clock.NewTicker(expirationTime / 2)
		defer ticker.Stop()

		close(ready)

		for {
			select {
			case <-signals:
				logger.Info("exiting-pruning-loop")
				return nil

			case <-ticker.C():
				logger.Debug("checking-for-expired-containers")
				expiredAllocations := []string{}

				a.lock.Lock()

				for guid, container := range a.allocated {
					if container.State != executor.StateReserved {
						// only prune reserved containers
						continue
					}

					lifespan := a.clock.Now().Sub(time.Unix(0, container.AllocatedAt))

					if lifespan >= expirationTime {
						logger.Info("reserved-container-expired", lager.Data{"guid": guid, "lifespan": lifespan})
						expiredAllocations = append(expiredAllocations, guid)
					}
				}

				if len(expiredAllocations) > 0 {
					logger.Info("reaping-expired-allocations", lager.Data{"num-reaped": len(expiredAllocations)})
				} else {
					logger.Info("no-expired-allocations-found")
				}

				for _, guid := range expiredAllocations {
					logger.Info("deleting-expired-container", lager.Data{"guid": guid})
					delete(a.allocated, guid)
				}

				a.lock.Unlock()
			}
		}

		return nil
	})
}
Example #11
func closeHub(logger lager.Logger, hub event.Hub) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		logger.Info("starting")
		defer logger.Info("finished")

		close(ready)
		logger.Info("started")

		<-signals
		logger.Info("shutting-down")
		hub.Close()

		return nil
	})
}
Example #12
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-watcher")
	initializeDropsonde(logger)

	lockMaintainer := initializeLockMaintainer(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)

	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		w, err := watcher.NewWatcher(logger, *eventHandlingWorkers, initializeBBSClient(logger), ccClient)
		if err != nil {
			return err
		}

		return w.Run(signals, ready)
	})

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"watcher", watcher},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #13
func NewRunner(
	logger lager.Logger,
	baggageCollector BaggageCollector,
	db RunnerDB,
	clock clock.Clock,
	interval time.Duration,
) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {

		close(ready)

		ticker := clock.NewTicker(interval)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C():
				leaseLogger := logger.Session("lease-invalidate-cache")
				leaseLogger.Info("tick")

				lease, leased, err := db.LeaseCacheInvalidation(leaseLogger, interval)

				if err != nil {
					leaseLogger.Error("failed-to-get-lease", err)
					break
				}

				if !leased {
					leaseLogger.Debug("did-not-get-lease")
					break
				}

				leaseLogger.Info("collecting-baggage")
				err = baggageCollector.Collect()
				if err != nil {
					leaseLogger.Error("failed-to-collect-baggage", err)
				}

				lease.Break()
			case <-signals:
				return nil
			}
		}
	})
}
Example #14
func onReady(runner ifrit.Runner, cb func()) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		process := ifrit.Background(runner)

		subExited := process.Wait()
		subReady := process.Ready()

		for {
			select {
			case <-subReady:
				cb()
				subReady = nil
			case err := <-subExited:
				return err
			case sig := <-signals:
				process.Signal(sig)
			}
		}
	})
}
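
A helper like onReady is typically used to trigger a side effect exactly once when the wrapped runner becomes ready, while continuing to proxy signals and the final exit error. A hypothetical usage sketch, where serverRunner and logger are assumed to exist in the surrounding code:

// Hypothetical usage: log once the wrapped server reports readiness.
runner := onReady(serverRunner, func() {
	logger.Info("server-ready")
})
process := ifrit.Invoke(runner)
defer process.Signal(os.Interrupt)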
Example #15
			taskDB = new(dbfakes.FakeTaskDB)
			taskDB.ResolvingTaskReturns(nil)
			taskDB.DeleteTaskReturns(nil)
		})

		simulateTaskCompleting := func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)
			task = model_helpers.NewValidTask("the-task-guid")
			task.CompletionCallbackUrl = callbackURL
			taskworkpool.HandleCompletedTask(logger, httpClient, taskDB, task)
			return nil
		}

		var process ifrit.Process
		JustBeforeEach(func() {
			process = ifrit.Invoke(ifrit.RunFunc(simulateTaskCompleting))
		})

		AfterEach(func() {
			ginkgomon.Kill(process)
		})

		Context("when the task has a completion callback URL", func() {
			BeforeEach(func() {
				Expect(taskDB.ResolvingTaskCallCount()).To(Equal(0))
			})

			It("marks the task as resolving", func() {
				statusCodes <- 200

				Eventually(taskDB.ResolvingTaskCallCount).Should(Equal(1))
Example #16
	Describe("Stop", func() {

		var runnerIndex int64
		var startOrder chan int64
		var stopOrder chan int64
		var receivedSignals chan os.Signal

		makeRunner := func(waitTime time.Duration) (ifrit.Runner, chan struct{}) {
			quickExit := make(chan struct{})
			return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
				index := atomic.AddInt64(&runnerIndex, 1)
				startOrder <- index
				close(ready)

				select {
				case <-quickExit:
				case <-signals:
				}
				time.Sleep(waitTime)
				stopOrder <- index

				return nil
			}), quickExit
		}

		makeSignalEchoRunner := func(waitTime time.Duration, name string) ifrit.Runner {
			return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
				close(ready)
				done := make(chan bool)
				go func() {
					time.Sleep(waitTime)
					done <- true
Example #17
func (store *GardenStore) runStepProcess(
	logger lager.Logger,
	step steps.Step,
	hasStartedRunning <-chan struct{},
	gardenContainer garden.Container,
	guid string,
) {
	process := ifrit.Invoke(ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		logger := logger.Session("run-step-process")
		logger.Info("started")
		defer logger.Info("finished")
		seqComplete := make(chan error)

		close(ready)

		go func() {
			seqComplete <- step.Perform()
		}()

		result := executor.ContainerRunResult{}

		toldToStop := false

	OUTER_LOOP:
		for {
			select {
			case <-signals:
				signals = nil
				toldToStop = true

				logger.Info("signaled")
				step.Cancel()

			case <-hasStartedRunning:
				hasStartedRunning = nil
				logger.Info("transitioning-to-running")
				err := store.transitionToRunning(logger, gardenContainer)
				if err != nil {
					logger.Error("failed-transitioning-to-running", err)
					result.Failed = true
					result.FailureReason = err.Error()
					break OUTER_LOOP
				}
				logger.Info("succeeded-transitioning-to-running")

			case err := <-seqComplete:
				if err == nil {
					logger.Info("step-finished-normally")
				} else if toldToStop {
					logger.Info("step-cancelled")
					result.Stopped = true
				} else {
					logger.Info("step-finished-with-error", lager.Data{"error": err.Error()})
					result.Failed = true
					result.FailureReason = err.Error()
				}

				break OUTER_LOOP
			}
		}

		logger.Info("transitioning-to-complete")
		err := store.transitionToComplete(logger, gardenContainer, result)
		if err != nil {
			logger.Error("failed-transitioning-to-complete", err)
			return nil
		}
		logger.Info("succeeded-transitioning-to-complete")

		return nil
	}))

	store.processesL.Lock()
	store.runningProcesses[guid] = process
	numProcesses := len(store.runningProcesses)
	store.processesL.Unlock()

	logger.Info("stored-step-process", lager.Data{"num-step-processes": numProcesses})
}
Example #18
				},
				{
					Name: "some-other-resource",
				},
			},
		}

		pipelineDB.ScopedNameStub = func(thing string) string {
			return "pipeline:" + thing
		}
		pipelineDB.GetConfigReturns(initialConfig, 1, nil)

		scannerFactory.ScannerStub = func(lager.Logger, string) ifrit.Runner {
			return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
				close(ready)
				<-signals
				return nil
			})
		}
	})

	JustBeforeEach(func() {
		process = ginkgomon.Invoke(NewRunner(
			lagertest.NewTestLogger("test"),
			noop,
			locker,
			scannerFactory,
			pipelineDB,
			syncInterval,
		))
	})
Example #19
				case <-signals:
					if eventSource != nil {
						err := eventSource.Close()
						if err != nil {
							logger.Error("failed-closing-event-source", err)
						}
					}
					return nil
				}
			}
		}

		var process ifrit.Process
		BeforeEach(func() {
			process = ifrit.Invoke(ifrit.RunFunc(eventCountRunner))
		})

		AfterEach(func() {
			ginkgomon.Kill(process)
		})

		Measure("data for benchmarks", func(b Benchmarker) {
			wg := sync.WaitGroup{}

			// start nsync
			go func() {
				defer GinkgoRecover()
				logger.Info("start-nsync-bulker-loop")
				defer logger.Info("finish-nsync-bulker-loop")
				wg.Add(1)
Example #20
import (
	"errors"
	"os"

	"github.com/tedsuo/ifrit"
)

type Ping struct{}

var PingerExitedFromPing = errors.New("pinger exited with a ping")
var PingerExitedFromSignal = errors.New("pinger exited with a signal")

type PingChan chan Ping

func (p PingChan) Run(sigChan <-chan os.Signal, ready chan<- struct{}) error {
	close(ready)
	select {
	case <-sigChan:
		return PingerExitedFromSignal
	case p <- Ping{}:
		return PingerExitedFromPing
	}
}

var NoReadyExitedNormally = errors.New("no ready exited normally")

var NoReadyRunner = ifrit.RunFunc(func(sigChan <-chan os.Signal, ready chan<- struct{}) error {
	return NoReadyExitedNormally
})
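
This fixture exercises both exit paths of the Runner contract: an external receive on the channel versus a signal. A minimal sketch of driving the pinger in a test, assuming only the standard ifrit API shown above:

pinger := make(PingChan)
process := ifrit.Invoke(pinger) // returns once ready is closed

<-pinger // receive the ping; Run returns PingerExitedFromPing
if err := <-process.Wait(); err != PingerExitedFromPing {
	panic(err)
}

// Signaling instead of receiving, e.g. process.Signal(os.Interrupt),
// would make Run return PingerExitedFromSignal.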
Example #21
func (radar *Radar) Scanner(logger lager.Logger, resourceName string) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		// do an immediate initial check
		var interval time.Duration = 0

		close(ready)

		for {
			timer := radar.clock.NewTimer(interval)

			var resourceConfig atc.ResourceConfig
			var resourceTypes atc.ResourceTypes

			select {
			case <-signals:
				timer.Stop()
				return nil

			case <-timer.C():
				var err error

				resourceConfig, resourceTypes, err = radar.getResourceConfig(logger, resourceName)
				if err != nil {
					return err
				}

				savedResource, err := radar.db.GetResource(resourceConfig.Name)
				if err != nil {
					return err
				}

				interval, err = radar.checkInterval(resourceConfig)
				if err != nil {
					setErr := radar.db.SetResourceCheckError(savedResource, err)
					if setErr != nil {
						logger.Error("failed-to-set-check-error", setErr)
					}

					return err
				}

				leaseLogger := logger.Session("lease", lager.Data{
					"resource": resourceName,
				})

				lease, leased, err := radar.db.LeaseResourceChecking(resourceName, interval, false)

				if err != nil {
					leaseLogger.Error("failed-to-get-lease", err, lager.Data{
						"resource": resourceName,
					})
					break
				}

				if !leased {
					leaseLogger.Debug("did-not-get-lease")
					break
				}

				err = radar.scan(logger.Session("tick"), resourceConfig, resourceTypes, savedResource)

				lease.Break()

				if err != nil {
					return err
				}
			}
		}
	})
}
Example #22
func (resource *resource) runScript(
	path string,
	args []string,
	input interface{},
	output interface{},
	logDest io.Writer,
	inputSource ArtifactSource,
	inputDestination ArtifactDestination,
	recoverable bool,
) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		request, err := json.Marshal(input)
		if err != nil {
			return err
		}

		if recoverable {
			result, err := resource.container.Property(resourceResultPropertyName)
			if err == nil {
				return json.Unmarshal([]byte(result), &output)
			}
		}

		stdout := new(bytes.Buffer)
		stderr := new(bytes.Buffer)

		processIO := garden.ProcessIO{
			Stdin:  bytes.NewBuffer(request),
			Stdout: stdout,
		}

		if logDest != nil {
			processIO.Stderr = logDest
		} else {
			processIO.Stderr = stderr
		}

		var process garden.Process

		var processIDProp string
		if recoverable {
			processIDProp, err = resource.container.Property(resourceProcessIDPropertyName)
			if err != nil {
				processIDProp = ""
			}
		}

		if processIDProp != "" {
			var processID uint32
			_, err = fmt.Sscanf(processIDProp, "%d", &processID)
			if err != nil {
				return err
			}

			process, err = resource.container.Attach(processID, processIO)
			if err != nil {
				return err
			}
		} else {
			if inputSource != nil {
				err := inputSource.StreamTo(inputDestination)
				if err != nil {
					return err
				}
			}

			process, err = resource.container.Run(garden.ProcessSpec{
				Path: path,
				Args: args,
				User: "******",
			}, processIO)
			if err != nil {
				return err
			}

			if recoverable {
				processIDValue := fmt.Sprintf("%d", process.ID())

				err := resource.container.SetProperty(resourceProcessIDPropertyName, processIDValue)
				if err != nil {
					return err
				}
			}
		}

		close(ready)

		statusCh := make(chan int, 1)
		errCh := make(chan error, 1)

		go func() {
			status, err := process.Wait()
			if err != nil {
				errCh <- err
			} else {
				statusCh <- status
			}
		}()

		select {
		case status := <-statusCh:
			if status != 0 {
				return ErrResourceScriptFailed{
					Path:       path,
					Args:       args,
					ExitStatus: status,

					Stderr: stderr.String(),
				}
			}

			if recoverable {
				err := resource.container.SetProperty(resourceResultPropertyName, stdout.String())
				if err != nil {
					return err
				}
			}

			return json.Unmarshal(stdout.Bytes(), output)

		case err := <-errCh:
			return err

		case <-signals:
			resource.container.Stop(false)
			return ErrAborted
		}
	})
}
Example #23
File: main.go Project: utako/atc
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)
	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})
	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)

	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,            // logger lager.Logger,
		webValidator,      // validator auth.Validator,
		pipelineDBFactory, // pipelineDBFactory db.PipelineDBFactory,

		configDB, // configDB db.ConfigDB,

		db, // buildsDB buildserver.BuildsDB,
		db, // workerDB workerserver.WorkerDB,
		db, // pipeDB pipes.PipeDB,
		db, // pipelinesDB db.PipelinesDB,

		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain, // drain <-chan struct{},

		engine,       // engine engine.Engine,
		workerClient, // workerClient worker.Client,

		sink, // sink *lager.ReconfigurableSink,

		*cliDownloadsDir, // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)

			<-signals

			close(drain)

			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)

	running := ifrit.Envoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
Example #24
	Describe("Stop", func() {

		var runnerIndex int64
		var startOrder chan int64
		var stopOrder chan int64

		makeRunner := func(waitTime time.Duration) (ifrit.Runner, chan struct{}) {
			quickExit := make(chan struct{})
			return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
				index := atomic.AddInt64(&runnerIndex, 1)
				startOrder <- index
				close(ready)

				select {
				case <-quickExit:
				case <-signals:
				}
				time.Sleep(waitTime)
				stopOrder <- index

				return nil
			}), quickExit
		}

		BeforeEach(func() {
			startOrder = make(chan int64, 3)
			stopOrder = make(chan int64, 3)

			r1, _ := makeRunner(0)
			r2, _ := makeRunner(30 * time.Millisecond)
			r3, _ := makeRunner(50 * time.Millisecond)
Example #25
func (sf *scanRunnerFactory) ScanResourceTypeRunner(logger lager.Logger, name string) ifrit.Runner {
	intervalRunner := NewIntervalRunner(logger, sf.clock, name, sf.resourceTypeScanner)
	return ifrit.RunFunc(intervalRunner.RunFunc)
}
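
ifrit.RunFunc is simply an adapter: any function with the signature func(<-chan os.Signal, chan<- struct{}) error satisfies ifrit.Runner once wrapped, which is why a method value like intervalRunner.RunFunc can be passed directly. A minimal, runnable sketch of that contract:

package main

import (
	"os"

	"github.com/tedsuo/ifrit"
)

func main() {
	// Any function matching the Runner signature can be adapted directly.
	noop := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready) // announce readiness
		<-signals    // block until asked to stop
		return nil
	})

	process := ifrit.Invoke(noop)
	process.Signal(os.Interrupt)
	<-process.Wait()
}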