func main() { if reexec.Init() { return } var insecureRegistries vars.StringList flag.Var( &insecureRegistries, "insecureDockerRegistry", "Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)", ) var persistentImages vars.StringList flag.Var( &persistentImages, "persistentImage", "Image which should never be garbage collected. (Can be specified multiple times)", ) var dnsServers vars.StringList flag.Var( &dnsServers, "dnsServer", "DNS server IP address to use instead of automatically determined servers. (Can be specified multiple times)", ) cf_debug_server.AddFlags(flag.CommandLine) cf_lager.AddFlags(flag.CommandLine) flag.Parse() runtime.GOMAXPROCS(runtime.NumCPU()) logger, reconfigurableSink := cf_lager.New("garden-linux") initializeDropsonde(logger) if *binPath == "" { missing("-bin") } if *stateDirPath == "" { missing("-stateDir") } if *depotPath == "" { missing("-depot") } if len(*tag) > 2 { println("-tag parameter must be less than 3 characters long") println() flag.Usage() return } _, dynamicRange, err := net.ParseCIDR(*networkPool) if err != nil { logger.Fatal("failed-to-parse-network-pool", err) } subnetPool, err := subnets.NewSubnets(dynamicRange) if err != nil { logger.Fatal("failed-to-create-subnet-pool", err) } portPoolState, err := port_pool.LoadState(path.Join(*stateDirPath, "port_pool.json")) if err != nil { logger.Error("failed-to-parse-pool-state", err) } // TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1) portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize), portPoolState) if err != nil { logger.Fatal("invalid pool range", err) } useKernelLogging := true switch *iptablesLogMethod { case "nflog": useKernelLogging = false case "kernel": /* noop */ default: println("-iptablesLogMethod value not recognized") println() flag.Usage() return } config := sysconfig.NewConfig(*tag, *allowHostAccess, dnsServers.List) 
runner := sysconfig.NewRunner(config, linux_command_runner.New()) if err := os.MkdirAll(*graphRoot, 0755); err != nil { logger.Fatal("failed-to-create-graph-directory", err) } dockerGraphDriver, err := selectGraphDriver(logger, *graphDriverName, *graphRoot) if err != nil { logger.Fatal("failed-to-construct-graph-driver", err) } backingStoresPath := filepath.Join(*graphRoot, "backing_stores") if err := os.MkdirAll(backingStoresPath, 0660); err != nil { logger.Fatal("failed-to-mkdir-backing-stores", err) } quotaedGraphDriver := "aed_aufs.QuotaedDriver{ GraphDriver: dockerGraphDriver, Unmount: quotaed_aufs.Unmount, BackingStoreMgr: "aed_aufs.BackingStore{ RootPath: backingStoresPath, Logger: logger.Session("backing-store-mgr"), }, LoopMounter: "aed_aufs.Loop{ Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil), Logger: logger.Session("loop-mounter"), }, Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil), RootPath: *graphRoot, Logger: logger.Session("quotaed-driver"), } metricsProvider := metrics.NewMetrics(logger, backingStoresPath, *depotPath) if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" { metrics.StartDebugServer(dbgAddr, reconfigurableSink, metricsProvider) } dockerGraph, err := graph.NewGraph(*graphRoot, quotaedGraphDriver) if err != nil { logger.Fatal("failed-to-construct-graph", err) } var cake layercake.Cake = &layercake.Docker{ Graph: dockerGraph, Driver: quotaedGraphDriver, } if cake.DriverName() == "aufs" { cake = &layercake.AufsCake{ Cake: cake, Runner: runner, GraphRoot: *graphRoot, } } repo := container_repository.New() retainer := cleaner.NewRetainer() repoFetcher := &repository_fetcher.Retryable{ RepositoryFetcher: &repository_fetcher.CompositeFetcher{ LocalFetcher: &repository_fetcher.Local{ Cake: cake, DefaultRootFSPath: *rootFSPath, IDProvider: repository_fetcher.LayerIDProvider{}, }, RemoteFetcher: repository_fetcher.NewRemote( logger, *dockerRegistry, cake, 
distclient.NewDialer(insecureRegistries.List), repository_fetcher.VerifyFunc(repository_fetcher.Verify), ), }, Logger: logger, } maxId := uint32(sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID())) mappingList := rootfs_provider.MappingList{ { ContainerID: 0, HostID: maxId, Size: 1, }, { ContainerID: 1, HostID: 1, Size: maxId - 1, }, } rootFSNamespacer := &rootfs_provider.UidNamespacer{ Logger: logger, Translator: rootfs_provider.NewUidTranslator( mappingList, // uid mappingList, // gid ), } cleaner := cleaner.NewOvenCleaner( retainer, cleaner.NewThreshold(int64(*graphCleanupThreshold)*1024*1024), ) layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer) cakeOrdinator := rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator, nil, cleaner) imageRetainer := &repository_fetcher.ImageRetainer{ GraphRetainer: retainer, DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{}, DockerImageIDFetcher: repoFetcher, NamespaceCacheKey: rootFSNamespacer.CacheKey(), Logger: logger, } // spawn off in a go function to avoid blocking startup // worst case is if an image is immediately created and deleted faster than // we can retain it we'll garbage collect it when we shouldn't. This // is an OK trade-off for not having garden startup block on dockerhub. go imageRetainer.Retain(persistentImages.List) rootfsCleaner := &linux_backend.RootFSCleaner{ FilePaths: []string{ "/tmp", "/proc", "/sys", "/dev", "/etc", "/etc/config", "/etc/hostname", "/etc/hosts", "/etc/resolv.conf", }, } if *externalIP == "" { ip, err := localip.LocalIP() if err != nil { panic("couldn't determine local IP to use for -externalIP parameter. 
You can use the -externalIP flag to pass an external IP") } externalIP = &ip } parsedExternalIP := net.ParseIP(*externalIP) if parsedExternalIP == nil { panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP)) } var quotaManager linux_container.QuotaManager = "a_manager.AUFSQuotaManager{ BaseSizer: quota_manager.NewAUFSBaseSizer(cake), DiffSizer: "a_manager.AUFSDiffSizer{quotaedGraphDriver}, } ipTablesMgr := createIPTablesManager(config, runner, logger) injector := &provider{ useKernelLogging: useKernelLogging, chainPrefix: config.IPTables.Filter.InstancePrefix, runner: runner, log: logger, portPool: portPool, ipTablesMgr: ipTablesMgr, sysconfig: config, quotaManager: quotaManager, } currentContainerVersion, err := semver.Make(CurrentContainerVersion) if err != nil { logger.Fatal("failed-to-parse-container-version", err) } pool := resource_pool.New( logger, *binPath, *depotPath, config, cakeOrdinator, rootfsCleaner, mappingList, parsedExternalIP, *mtu, subnetPool, bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}), ipTablesMgr, injector, iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")), portPool, strings.Split(*denyNetworks, ","), strings.Split(*allowNetworks, ","), runner, quotaManager, currentContainerVersion, system.MkdirChowner{}, ) systemInfo := sysinfo.NewProvider(*depotPath) backend := linux_backend.New(logger, pool, repo, injector, systemInfo, layercake.GraphPath(*graphRoot), *snapshotsPath, int(*maxContainers)) err = backend.Setup() if err != nil { logger.Fatal("failed-to-set-up-backend", err) } graceTime := *containerGraceTime gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger) err = gardenServer.Start() if err != nil { logger.Fatal("failed-to-start-server", err) } clock := clock.NewClock() metronNotifier := metrics.NewPeriodicMetronNotifier(logger, metricsProvider, *metricsEmissionInterval, clock) 
metronNotifier.Start() signals := make(chan os.Signal, 1) go func() { <-signals portPoolState = portPool.RefreshState() port_pool.SaveState(path.Join(*stateDirPath, "port_pool.json"), portPoolState) gardenServer.Stop() metronNotifier.Stop() os.Exit(0) }() signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) logger.Info("started", lager.Data{ "network": *listenNetwork, "addr": *listenAddr, }) select {} }
// NOTE(review): this is the tail of a Ginkgo suite whose Describe/BeforeEach
// opening lies outside this view; tokens are unchanged, only comments added.

// Create two fake backing-store files so BackingStores() has something to count.
Expect(ioutil.WriteFile(
	filepath.Join(backingStorePath, "bs-1"),
	[]byte("test"),
	0660,
)).To(Succeed())
Expect(ioutil.WriteFile(
	filepath.Join(backingStorePath, "bs-2"),
	[]byte("test"),
	0660,
)).To(Succeed())

// Create a depot with three container directories for DepotDirs() to count.
// NOTE(review): 0660 on directories lacks the search bit — presumably fine
// because these dirs are only counted, never entered; confirm.
depotPath, err = ioutil.TempDir("", "depotDirs")
Expect(err).NotTo(HaveOccurred())
Expect(os.Mkdir(filepath.Join(depotPath, "depot-1"), 0660)).To(Succeed())
Expect(os.Mkdir(filepath.Join(depotPath, "depot-2"), 0660)).To(Succeed())
Expect(os.Mkdir(filepath.Join(depotPath, "depot-3"), 0660)).To(Succeed())
// NOTE(review): redundant — err has not been reassigned since the check above.
Expect(err).ToNot(HaveOccurred())

logger := lagertest.NewTestLogger("test")
m = metrics.NewMetrics(logger, backingStorePath, depotPath)
})

AfterEach(func() {
	// Clean up the temp fixtures created in BeforeEach.
	Expect(os.RemoveAll(depotPath)).To(Succeed())
	Expect(os.RemoveAll(backingStorePath)).To(Succeed())
})

It("should report the number of loop devices, backing store files and depotDirs", func() {
	Expect(m.NumCPU()).To(Equal(runtime.NumCPU()))
	// Goroutine count is inherently racy; allow a tolerance of 2.
	Expect(m.NumGoroutine()).To(BeNumerically("~", runtime.NumGoroutine(), 2))
	Expect(m.LoopDevices()).NotTo(BeNil())
	Expect(m.BackingStores()).To(Equal(2))
	Expect(m.DepotDirs()).To(Equal(3))
})
})