func main() {
	flag.Parse()

	gclient := client.New(connection.New("tcp", "localhost:7777"))

	var container garden.Container

	containers, err := gclient.Containers(garden.Properties{})
	must(err)

	for _, c := range containers {
		if c.Handle() == *containerHandle {
			container = c
			break
		}
	}

	if container == nil {
		panic("Container not found!")
	}

	process, err := container.Attach(uint32(*processId), garden.ProcessIO{})
	must(err)

	switch *signalType {
	case "term":
		fmt.Println("Signalling term")
		must(process.Signal(garden.SignalTerminate))
	case "kill":
		fmt.Println("Signalling kill")
		must(process.Signal(garden.SignalKill))
	}
}
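// The snippet above relies on a must helper that is not part of this excerpt.
// A minimal sketch of what it is assumed to look like (illustrative only):
func must(err error) {
	if err != nil {
		panic(err)
	}
}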
func (provider *dbProvider) Workers() ([]Worker, error) {
	workerInfos, err := provider.db.Workers()
	if err != nil {
		return nil, err
	}

	tikTok := clock.NewClock()

	workers := make([]Worker, len(workerInfos))
	for i, info := range workerInfos {
		workerLog := provider.logger.Session("worker-connection", lager.Data{
			"addr": info.Addr,
		})

		gardenConn := RetryableConnection{
			Logger:     workerLog,
			Connection: gconn.NewWithLogger("tcp", info.Addr, workerLog.Session("garden-connection")),
			Sleeper:    tikTok,
			RetryPolicy: ExponentialRetryPolicy{
				Timeout: 5 * time.Minute,
			},
		}

		workers[i] = NewGardenWorker(
			gclient.New(gardenConn),
			tikTok,
			info.ActiveContainers,
			info.ResourceTypes,
			info.Platform,
			info.Tags,
		)
	}

	return workers, nil
}
func start(creator RunnerCreator, network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))

	r := &RunningGarden{
		GraphRoot: GraphRoot,
		GraphPath: graphPath,
		tmpdir:    tmpDir,
		logger:    lagertest.NewTestLogger("garden-runner"),
		Client:    client.New(connection.New(network, addr)),
	}

	c := cmd(tmpDir, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)
	r.process = ifrit.Invoke(creator.Create(c))
	r.Pid = c.Process.Pid

	return r
}
func ContainerCheck(gardenAddr string, processes []ps.Process) error {
	var errMsg string

	stdout, _, err := RunCommand(`
		$proc = Get-CimInstance Win32_Process -Filter "name = 'containerizer.exe'"
		$result = Invoke-CimMethod -InputObject $proc -MethodName GetOwner
		$result.User
	`)
	if err != nil {
		return err
	}

	if strings.HasPrefix(stdout, "SYSTEM") {
		errMsg = batchLogonMessage
	} else {
		errMsg = localLogonMessage
	}

	client := gclient.New(gconnection.New("tcp", gardenAddr))

	container, err := client.Create(garden.ContainerSpec{})
	if container != nil {
		defer client.Destroy(container.Handle())
	}
	if err != nil {
		if err.Error() == logonFailure {
			return errors.New("Failed to create container\n" + errMsg)
		}
		return errors.New("Failed to create container\n" + err.Error())
	}

	return nil
}
func NewGardenClient() *FakeGardenClient {
	connection := new(fakes.FakeConnection)

	return &FakeGardenClient{
		Connection: connection,

		Client: client.New(connection),
	}
}
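// Illustrative test usage of the fake above, assuming counterfeiter-style stub
// methods (e.g. PingReturns) on fakes.FakeConnection and a gomega test context;
// those names are assumptions, not confirmed by this excerpt.
fakeClient := NewGardenClient()
fakeClient.Connection.PingReturns(errors.New("garden is down"))
Expect(fakeClient.Client.Ping()).To(MatchError("garden is down"))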
func start(network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	Expect(os.MkdirAll(tmpDir, 0755)).To(Succeed())

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))
	stateDirPath := filepath.Join(tmpDir, "state")
	depotPath := filepath.Join(tmpDir, "containers")
	snapshotsPath := filepath.Join(tmpDir, "snapshots")

	if err := os.MkdirAll(stateDirPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(depotPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(snapshotsPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	MustMountTmpfs(graphPath)

	r := &RunningGarden{
		GraphRoot:     GraphRoot,
		GraphPath:     graphPath,
		StateDirPath:  stateDirPath,
		DepotPath:     depotPath,
		SnapshotsPath: snapshotsPath,

		tmpdir: tmpDir,
		logger: lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(stateDirPath, depotPath, snapshotsPath, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)

	r.runner = ginkgomon.New(ginkgomon.Config{
		Name:              "garden-linux",
		Command:           c,
		AnsiColorCode:     "31m",
		StartCheck:        "garden-linux.started",
		StartCheckTimeout: 30 * time.Second,
	})
	r.process = ifrit.Invoke(r.runner)
	r.Pid = c.Process.Pid

	return r
}
func (server *registrarSSHServer) heartbeatWorker(logger lager.Logger, worker atc.Worker, channel ssh.Channel) ifrit.Process {
	return ifrit.Background(tsa.NewHeartbeater(
		logger,
		server.heartbeatInterval,
		gclient.New(gconn.New("tcp", worker.Addr)),
		server.atcEndpoint,
		worker,
		channel,
	))
}
func New() *FakeClient {
	connection := fake_connection.New()

	return &FakeClient{
		Connection: connection,

		Client: client.New(&FakeConnectionProvider{
			Connection: connection,
		}),
	}
}
func main() {
	gardenClient := client.New(connection.New("tcp", "127.0.0.1:7777"))

	_ = gardenClient.Destroy("foo")

	foo, err := gardenClient.Create(garden.ContainerSpec{Handle: "foo"})
	failIf(err, "Create")

	err = foo.NetOut(garden.NetOutRule{
		Protocol: garden.ProtocolICMP,
		Networks: []garden.IPRange{garden.IPRangeFromIP(net.ParseIP("8.8.8.8"))},
	})
	failIf(err, "NetOut")

	restartGarden()
}
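// failIf and restartGarden are defined elsewhere; a plausible failIf is
// sketched here purely for illustration (assumes the standard log package):
func failIf(err error, action string) {
	if err != nil {
		log.Fatalf("%s failed: %s", action, err)
	}
}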
func (provider *dbProvider) newGardenWorker(tikTok clock.Clock, info db.WorkerInfo) Worker {
	workerLog := provider.logger.Session("worker-connection", lager.Data{
		"addr": info.GardenAddr,
	})

	gardenConn := NewRetryableConnection(
		workerLog,
		tikTok,
		provider.retryPolicy,
		NewGardenConnectionFactory(
			provider.db,
			provider.dialer,
			provider.logger.Session("garden-connection"),
			info.Name,
			info.GardenAddr,
		),
	)

	var bClient baggageclaim.Client
	if info.BaggageclaimURL != "" {
		bClient = bclient.New(info.BaggageclaimURL)
	}

	volumeFactory := NewVolumeFactory(
		provider.logger.Session("volume-factory"),
		provider.db,
		tikTok,
	)

	return NewGardenWorker(
		gclient.New(gardenConn),
		bClient,
		volumeFactory,
		provider.db,
		provider,
		tikTok,
		info.ActiveContainers,
		info.ResourceTypes,
		info.Platform,
		info.Tags,
		info.Name,
	)
}
func (provider *dbProvider) newGardenWorker(tikTok clock.Clock, savedWorker db.SavedWorker) Worker {
	workerLog := provider.logger.Session("worker-connection", lager.Data{
		"addr": savedWorker.GardenAddr,
	})

	gardenConn := NewRetryableConnection(
		workerLog,
		tikTok,
		provider.retryPolicy,
		NewGardenConnectionFactory(
			provider.db,
			provider.dialer,
			provider.logger.Session("garden-connection"),
			savedWorker.Name,
			savedWorker.GardenAddr,
		),
	)

	var bClient baggageclaim.Client
	if savedWorker.BaggageclaimURL != "" {
		bClient = bclient.New(savedWorker.BaggageclaimURL)
	}

	volumeFactory := NewVolumeFactory(
		provider.db,
		tikTok,
	)

	return NewGardenWorker(
		gclient.New(gardenConn),
		bClient,
		volumeFactory,
		provider.imageFetcher,
		provider.db,
		provider,
		tikTok,
		savedWorker.ActiveContainers,
		savedWorker.ResourceTypes,
		savedWorker.Platform,
		savedWorker.Tags,
		savedWorker.Name,
	)
}
func main() {
	gardenClient := client.New(connection.New("tcp", "127.0.0.1:7777"))

	_ = gardenClient.Destroy("foo")

	container, err := gardenClient.Create(garden.ContainerSpec{
		Handle:     "foo",
		Env:        []string{"LANG=en_GB.iso885915"},
		RootFSPath: "docker:///debian#8",
	})
	failIf(err, "Create")

	var output bytes.Buffer
	process, err := container.Run(garden.ProcessSpec{
		Path: "sh",
		Args: []string{"-c", "echo $LANG"},
	}, garden.ProcessIO{Stdout: &output})
	failIf(err, "Run")

	process.Wait()

	fmt.Println(output.String())
}
func Start(bin, iodaemonBin, nstarBin string, argv ...string) *RunningGarden {
	network := "unix"
	addr := fmt.Sprintf("/tmp/garden_%d.sock", GinkgoParallelNode())
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))
	depotDir := filepath.Join(tmpDir, "containers")

	MustMountTmpfs(graphPath)

	r := &RunningGarden{
		DepotDir: depotDir,

		GraphRoot: GraphRoot,
		GraphPath: graphPath,

		tmpdir: tmpDir,
		logger: lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(tmpDir, depotDir, graphPath, network, addr, bin, iodaemonBin, nstarBin, TarPath, RootFSPath, argv...)
	r.process = ifrit.Invoke(&ginkgomon.Runner{
		Name:              "guardian",
		Command:           c,
		AnsiColorCode:     "31m",
		StartCheck:        "guardian.started",
		StartCheckTimeout: 30 * time.Second,
	})
	r.Pid = c.Process.Pid

	return r
}
	fakeBackend.ContainersStub = func(garden.Properties) ([]garden.Container, error) {
		return (<-stubs)()
	}
})

JustBeforeEach(func() {
	err := json.NewEncoder(sshStdin).Encode(workerPayload)
	Ω(err).ShouldNot(HaveOccurred())
})

It("forwards garden API calls through the tunnel", func() {
	registration := <-registered
	addr := registration.worker.Addr

	client := gclient.New(gconn.New("tcp", addr))

	fakeBackend.CreateReturns(new(gfakes.FakeContainer), nil)

	_, err := client.Create(garden.ContainerSpec{})
	Ω(err).ShouldNot(HaveOccurred())

	Ω(fakeBackend.CreateCallCount()).Should(Equal(1))
})

It("continuously registers it with the ATC as long as it works", func() {
	a := time.Now()

	registration := <-registered
	Ω(registration.ttl).Should(Equal(2 * heartbeatInterval))

	// shortcut for equality w/out checking addr
func GardenContainers(gardenAddr string, gardenNetwork string, raw bool, out io.Writer) error {
	client := client.New(connection.New(gardenNetwork, gardenAddr))

	containers, err := client.Containers(nil)
	if err != nil {
		return err
	}

	workPool, err := workpool.NewWorkPool(32)
	if err != nil {
		return err
	}

	lock := &sync.Mutex{}

	wg := &sync.WaitGroup{}
	wg.Add(len(containers))

	containerInfos := []ContainerInfo{}
	for _, container := range containers {
		container := container

		workPool.Submit(func() {
			defer wg.Done()

			info, err := container.Info()
			if err != nil {
				say.Println(1, say.Red("Failed to fetch container info: %s\n", container.Handle()))
				return
			}

			metrics, err := container.Metrics()
			if err != nil {
				say.Println(1, say.Red("Failed to fetch container metrics: %s\n", container.Handle()))
				return
			}

			lock.Lock()
			defer lock.Unlock()

			containerInfos = append(containerInfos, ContainerInfo{
				container.Handle(),
				info,
				metrics,
			})
		})
	}
	wg.Wait()

	if raw {
		encoded, err := json.MarshalIndent(containerInfos, "", " ")
		if err != nil {
			return err
		}

		out.Write(encoded)
		return nil
	}

	if len(containerInfos) == 0 {
		say.Println(0, say.Red("No Containers"))
	}

	for _, containerInfo := range containerInfos {
		printContainer(out, containerInfo)
	}

	return nil
}
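// Usage sketch (address and network are assumed values): print a human-readable
// report of every container on a local garden server to stdout.
if err := GardenContainers("127.0.0.1:7777", "tcp", false, os.Stdout); err != nil {
	say.Println(0, say.Red("failed to list containers: %s", err))
}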
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)

	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})

	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)

	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,                      // logger lager.Logger,
		webValidator,                // validator auth.Validator,
		pipelineDBFactory,           // pipelineDBFactory db.PipelineDBFactory,
		configDB,                    // configDB db.ConfigDB,
		db,                          // buildsDB buildserver.BuildsDB,
		db,                          // workerDB workerserver.WorkerDB,
		db,                          // pipeDB pipes.PipeDB,
		db,                          // pipelinesDB db.PipelinesDB,
		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain,                       // drain <-chan struct{},
		engine,                      // engine engine.Engine,
		workerClient,                // workerClient worker.Client,
		sink,                        // sink *lager.ReconfigurableSink,
		*cliDownloadsDir,            // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)
			<-signals
			close(drain)
			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)

	running := ifrit.Envoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
	apiServer = server.New(
		"unix",
		socketPath,
		serverContainerGraceTime,
		serverBackend,
		logger,
	)

	err = apiServer.Start()
	Ω(err).ShouldNot(HaveOccurred())

	isRunning = true

	Eventually(ErrorDialing("unix", socketPath)).ShouldNot(HaveOccurred())

	apiClient = client.New(connection.New("unix", socketPath))
})

AfterEach(func() {
	if isRunning {
		apiServer.Stop()
	}

	if tmpdir != "" {
		os.RemoveAll(tmpdir)
	}
})

Context("and the client sends a PingRequest", func() {
	Context("and the backend ping succeeds", func() {
		It("does not error", func() {
			Ω(apiClient.Ping()).ShouldNot(HaveOccurred())
func Initialize(logger lager.Logger, config Configuration, clock clock.Clock) (executor.Client, grouper.Members, error) {
	gardenClient := GardenClient.New(GardenConnection.New(config.GardenNetwork, config.GardenAddr))

	err := waitForGarden(logger, gardenClient, clock)
	if err != nil {
		return nil, nil, err
	}

	containersFetcher := &executorContainers{
		gardenClient: gardenClient,
		owner:        config.ContainerOwnerName,
	}

	destroyContainers(gardenClient, containersFetcher, logger)

	workDir := setupWorkDir(logger, config.TempDir)

	transformer := initializeTransformer(
		logger,
		config.CachePath,
		workDir,
		config.MaxCacheSizeInBytes,
		uint(config.MaxConcurrentDownloads),
		maxConcurrentUploads,
		config.SkipCertVerify,
		config.ExportNetworkEnvVars,
		clock,
	)

	hub := event.NewHub()

	gardenStore, err := gardenstore.NewGardenStore(
		gardenClient,
		config.ContainerOwnerName,
		config.ContainerMaxCpuShares,
		config.ContainerInodeLimit,
		config.HealthyMonitoringInterval,
		config.UnhealthyMonitoringInterval,
		transformer,
		clock,
		hub,
		config.HealthCheckWorkPoolSize,
	)
	if err != nil {
		return nil, grouper.Members{}, err
	}

	allocationStore := allocationstore.NewAllocationStore(clock, hub)

	workPoolSettings := executor.WorkPoolSettings{
		CreateWorkPoolSize:  config.CreateWorkPoolSize,
		DeleteWorkPoolSize:  config.DeleteWorkPoolSize,
		ReadWorkPoolSize:    config.ReadWorkPoolSize,
		MetricsWorkPoolSize: config.MetricsWorkPoolSize,
	}

	depotClientProvider, err := depot.NewClientProvider(
		fetchCapacity(logger, gardenClient, config),
		allocationStore,
		gardenStore,
		hub,
		keyed_lock.NewLockManager(),
		workPoolSettings,
	)
	if err != nil {
		return nil, grouper.Members{}, err
	}

	metricsLogger := logger.Session("metrics-reporter")
	containerMetricsLogger := logger.Session("container-metrics-reporter")

	return depotClientProvider.WithLogger(logger),
		grouper.Members{
			{"metrics-reporter", &metrics.Reporter{
				ExecutorSource: depotClientProvider.WithLogger(metricsLogger),
				Interval:       metricsReportInterval,
				Logger:         metricsLogger,
			}},
			{"hub-closer", closeHub(hub)},
			{"registry-pruner", allocationStore.RegistryPruner(logger, config.RegistryPruningInterval)},
			{"container-metrics-reporter", containermetrics.NewStatsReporter(
				containerMetricsLogger,
				containerMetricsReportInterval,
				clock,
				depotClientProvider.WithLogger(containerMetricsLogger),
			)},
		}, nil
}
. "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" ) func TestGardenAcceptance(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Garden Acceptance Suite") } var gardenClient client.Client var hostIP = "10.244.16.6" var _ = BeforeSuite(func() { gardenClient = client.New(connection.New("tcp", hostIP+":7777")) }) var _ = BeforeEach(func() { destroyAllContainers(gardenClient) }) var _ = AfterEach(func() { destroyAllContainers(gardenClient) }) var lsProcessSpec = garden.ProcessSpec{User: "******", Path: "ls", Args: []string{"-l", "/"}} var silentProcessIO = garden.ProcessIO{Stdout: GinkgoWriter, Stderr: GinkgoWriter} func recordedProcessIO(buffer *gbytes.Buffer) garden.ProcessIO { return garden.ProcessIO{
func (r *Runner) NewClient() warden.Client {
	return client.New(&connection.Info{
		Network: r.Network,
		Addr:    r.Addr,
	})
}
func client(c *cli.Context) garden.Client {
	target := c.GlobalString("target")
	return gclient.New(gconn.New("tcp", target))
}
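// Illustrative wiring only: a codegangsta/cli-style app exposing the global
// -target flag that client(c) reads. The flag name comes from the snippet
// above; everything else (command name, default value) is assumed.
app := cli.NewApp()
app.Flags = []cli.Flag{
	cli.StringFlag{Name: "target", Value: "localhost:7777", Usage: "garden server address"},
}
app.Commands = []cli.Command{
	{
		Name: "ping",
		Action: func(c *cli.Context) {
			if err := client(c).Ping(); err != nil {
				fmt.Println("ping failed:", err)
			}
		},
	},
}
app.Run(os.Args)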
func (r *Runner) NewClient() client.Client {
	return client.New(connection.New(r.network, r.addr))
}
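// Short usage sketch, assuming the runner has already started a garden server
// listening on r.network/r.addr:
gardenClient := r.NewClient()
Expect(gardenClient.Ping()).To(Succeed())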
var _ = Describe("Concurrent container creation", func() { BeforeEach(func() { runtime.GOMAXPROCS(runtime.NumCPU()) }) Measure("multiple concurrent creates", func(b Benchmarker) { handles := []string{} b.Time("concurrent creations", func() { chans := []chan string{} for i := 0; i < creates; i++ { ch := make(chan string, 1) go func(c chan string, index int) { defer GinkgoRecover() client := gclient.New(connection.New("tcp", "localhost:7777")) b.Time("create-"+strconv.Itoa(index), func() { ctr, err := client.Create(garden.ContainerSpec{}) Expect(err).ToNot(HaveOccurred()) c <- ctr.Handle() }) }(ch, i) chans = append(chans, ch) } for _, ch := range chans { handle := <-ch if handle != "" { handles = append(handles, handle) }
func (maker ComponentMaker) GardenClient() garden.Client {
	return gardenclient.New(gardenconnection.New("tcp", maker.Addresses.GardenLinux))
}
	info, err := container.Info()
	Expect(err).ToNot(HaveOccurred())

	nestedGardenAddress := fmt.Sprintf("%s:7778", info.ContainerIP)
	Eventually(nestedServerOutput, "60s").Should(gbytes.Say("garden-linux.started"))

	return container, nestedGardenAddress
}

It("can start a nested garden-linux and run a container inside it", func() {
	container, nestedGardenAddress := startNestedGarden()
	defer func() {
		Expect(client.Destroy(container.Handle())).To(Succeed())
	}()

	nestedClient := gclient.New(gconn.New("tcp", nestedGardenAddress))

	nestedContainer, err := nestedClient.Create(garden.ContainerSpec{})
	Expect(err).ToNot(HaveOccurred())

	nestedOutput := gbytes.NewBuffer()
	_, err = nestedContainer.Run(garden.ProcessSpec{
		User: "******",
		Path: "/bin/echo",
		Args: []string{
			"I am nested!",
		},
	}, garden.ProcessIO{Stdout: nestedOutput, Stderr: nestedOutput})
	Expect(err).ToNot(HaveOccurred())

	Eventually(nestedOutput, "60s").Should(gbytes.Say("I am nested!"))
})
func main() {
	const (
		tunnelPort     = 10
		tunnelID       = 101
		bridgeName     = "ovs-bridge"
		tunnelPortName = "remote-tun"
	)

	var (
		containerIP            string
		containerMAC           string
		containerIPAddressMask = 24
		containerMTU           = 1400
		cleanup                bool
		containerOVSPort       int
		remoteContainerIP      string
		remoteContainerMAC     string
		remoteHostIP           string
	)

	flag.BoolVar(&debug, "debug", false, "print executed commands for debug")
	flag.StringVar(&containerIP, "containerIP", "", "create a container with this IP address on the OVS network")
	flag.StringVar(&containerMAC, "containerMAC", "", "use this MAC address on the OVS network for the container")
	flag.StringVar(&remoteHostIP, "remoteHostIP", "", "create an OVS tunnel to a remote host")
	flag.StringVar(&remoteContainerIP, "remoteContainerIP", "", "the IP address of the garden container on the remote host")
	flag.StringVar(&remoteContainerMAC, "remoteContainerMAC", "", "the MAC address of the garden container on the remote host")
	flag.IntVar(&containerOVSPort, "containerOVSPort", 0, "port number to attach to container on OVS switch")
	flag.BoolVar(&cleanup, "cleanup", false, "delete all containers")
	flag.Parse()

	if err := os.MkdirAll("/var/run/netns", 0644); err != nil {
		panic(err)
	}

	gardenClient := client.New(connection.New("tcp", "localhost:7777"))

	if cleanup {
		cleanupContainers(gardenClient)
		destroyTunnel(bridgeName)
		return
	}

	if containerIP != "" {
		if containerOVSPort == 0 {
			panic("need to set OVS port number for container")
		}
		if containerMAC == "" {
			panic("need to set MAC address for container on OVS network")
		}

		err := createContainerAndSetupOVS(gardenClient, containerMAC, containerIP, containerIPAddressMask, containerMTU, bridgeName, containerOVSPort, tunnelID, tunnelPort)
		if err != nil {
			panic(err)
		}
		return
	}

	if remoteHostIP != "" {
		err := setupTunnel(bridgeName, tunnelPortName, remoteHostIP, tunnelPort)
		if err != nil {
			panic(err)
		}
		return
	}

	if remoteContainerIP != "" {
		if remoteContainerMAC == "" {
			panic("need to set MAC for remote container")
		}

		err := addFlow(remoteContainerIP, remoteContainerMAC, bridgeName, tunnelPort, tunnelID)
		if err != nil {
			panic(err)
		}
		return
	}

	listContainers(gardenClient)
}