// New returns a new DB object.
func New() *DB {
	db := &DB{l: newLogger()}

	cli := gentleman.New()
	// Retry failed requests up to three times, backing off exponentially from 100ms.
	cli.Use(retry.New(retrier.New(retrier.ExponentialBackoff(3, 100*time.Millisecond), nil)))
	// Rewrite every outgoing request to point at the configured database URL.
	cli.UseRequest(func(ctx *context.Context, h context.Handler) {
		u, err := url.Parse(db.url)
		if err != nil {
			h.Error(ctx, err)
			return
		}
		ctx.Request.URL.Scheme = u.Scheme
		ctx.Request.URL.Host = u.Host
		ctx.Request.URL.Path = db.dbPath()
		h.Next(ctx)
	})
	// Attach basic-auth credentials to every request.
	cli.UseRequest(func(ctx *context.Context, h context.Handler) {
		ctx.Request.SetBasicAuth(db.username, db.password)
		h.Next(ctx)
	})
	db.conn = cli
	return db
}
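A hedged sketch of how the client wired up above might be exercised; the `ping` helper is illustrative and not from the source. The URL and auth middleware fill in scheme, host, path, and credentials on every send, and the retry plugin re-sends failed requests per the exponential-backoff retrier:

// ping is a hypothetical health check using the client built in New.
func (db *DB) ping() error {
	// The request middleware and retry plugin run on every Send.
	res, err := db.conn.Request().Method("GET").Send()
	if err != nil {
		return err
	}
	if !res.Ok {
		return fmt.Errorf("db: unexpected status %d", res.StatusCode)
	}
	return nil
}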
func readPid(pidFilePath string) (int, error) {
	retrier := retrier.New(retrier.ConstantBackoff(20, 500*time.Millisecond), nil)

	var (
		pid int = -1
		err error
	)
	retrier.Run(func() error {
		pid, err = parsePid(pidFilePath)
		return err
	})

	return pid, err
}
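Every call site in these examples follows the same go-resiliency pattern: build a backoff schedule, wrap it in a retrier, and hand the fallible work to Run, which returns nil on the first success or the last error once the schedule is exhausted. A minimal self-contained sketch (the counting logic is illustrative, not from the source):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/retrier"
)

func main() {
	// Up to 5 retries after the initial attempt, sleeping 100ms between tries.
	// The nil classifier treats every non-nil error as retryable.
	r := retrier.New(retrier.ConstantBackoff(5, 100*time.Millisecond), nil)

	attempts := 0
	err := r.Run(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried
		}
		return nil // success stops the retry loop
	})
	fmt.Println(attempts, err) // 3 <nil>
}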
func (r *RunningGarden) Cleanup() {
	// unmount aufs since the docker graph driver leaves this around,
	// otherwise the following commands might fail
	retry := retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil)

	err := retry.Run(func() error {
		if err := os.RemoveAll(path.Join(r.GraphPath, "aufs")); err == nil {
			return nil // if we can remove it, it's already unmounted
		}

		if err := syscall.Unmount(path.Join(r.GraphPath, "aufs"), MNT_DETACH); err != nil {
			r.logger.Error("failed-unmount-attempt", err)
			return err
		}

		return nil
	})
	if err != nil {
		r.logger.Error("failed-to-unmount", err)
	}

	MustUnmountTmpfs(r.GraphPath)

	// In kernel version 3.19.0-51-generic the code below results in
	// hanging the running VM. We are not deleting the node-X directories. They
	// are empty and the next test will re-use them. We will stick with that
	// workaround until we can test on a newer kernel that will hopefully not
	// have this bug.
	//
	// if err := os.RemoveAll(r.GraphPath); err != nil {
	// 	r.logger.Error("remove-graph", err)
	// }

	r.logger.Info("cleanup-tempdirs")
	if err := os.RemoveAll(r.Tmpdir); err != nil {
		r.logger.Error("cleanup-tempdirs-failed", err, lager.Data{"tmpdir": r.Tmpdir})
	} else {
		r.logger.Info("tempdirs-removed")
	}
}
func (cmd *GuardianCommand) wireContainerizer(log lager.Logger, depotPath, dadooPath, runcPath, nstarPath, tarPath, defaultRootFSPath, appArmorProfile string, properties gardener.PropertyManager) *rundmc.Containerizer {
	depot := depot.New(depotPath)

	commandRunner := linux_command_runner.New()
	chrootMkdir := bundlerules.ChrootMkdir{
		Command:       preparerootfs.Command,
		CommandRunner: commandRunner,
	}

	pidFileReader := &dadoo.PidFileReader{
		Clock:         clock.NewClock(),
		Timeout:       10 * time.Second,
		SleepInterval: time.Millisecond * 100,
	}

	runcrunner := runrunc.New(
		commandRunner,
		runrunc.NewLogRunner(commandRunner, runrunc.LogDir(os.TempDir()).GenerateLogFile),
		goci.RuncBinary(runcPath),
		dadooPath,
		runcPath,
		runrunc.NewExecPreparer(&goci.BndlLoader{}, runrunc.LookupFunc(runrunc.LookupUser), chrootMkdir, NonRootMaxCaps),
		dadoo.NewExecRunner(
			dadooPath,
			runcPath,
			cmd.wireUidGenerator(),
			pidFileReader,
			linux_command_runner.New()),
	)

	mounts := []specs.Mount{
		{Type: "sysfs", Source: "sysfs", Destination: "/sys", Options: []string{"nosuid", "noexec", "nodev", "ro"}},
		{Type: "tmpfs", Source: "tmpfs", Destination: "/dev/shm"},
		{Type: "devpts", Source: "devpts", Destination: "/dev/pts", Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"}},
		{Type: "bind", Source: cmd.Bin.Init.Path(), Destination: "/tmp/garden-init", Options: []string{"bind"}},
	}

	privilegedMounts := append(mounts,
		specs.Mount{Type: "proc", Source: "proc", Destination: "/proc", Options: []string{"nosuid", "noexec", "nodev"}},
	)

	unprivilegedMounts := append(mounts,
		specs.Mount{Type: "proc", Source: "proc", Destination: "/proc", Options: []string{"nosuid", "noexec", "nodev"}},
	)

	rwm := "rwm"
	character := "c"
	var majorMinor = func(i int64) *int64 {
		return &i
	}

	var worldReadWrite os.FileMode = 0666
	fuseDevice := specs.LinuxDevice{
		Path:     "/dev/fuse",
		Type:     "c",
		Major:    10,
		Minor:    229,
		FileMode: &worldReadWrite,
	}

	denyAll := specs.LinuxDeviceCgroup{Allow: false, Access: &rwm}
	allowedDevices := []specs.LinuxDeviceCgroup{
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(3), Allow: true}, // /dev/null
		{Access: &rwm, Type: &character, Major: majorMinor(5), Minor: majorMinor(0), Allow: true}, // /dev/tty
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(8), Allow: true}, // /dev/random
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(9), Allow: true}, // /dev/urandom
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(5), Allow: true}, // /dev/zero
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(7), Allow: true}, // /dev/full
		{Access: &rwm, Type: &character, Major: majorMinor(fuseDevice.Major), Minor: majorMinor(fuseDevice.Minor), Allow: true},
	}

	baseProcess := specs.Process{
		Capabilities: UnprivilegedMaxCaps,
		Args:         []string{"/tmp/garden-init"},
		Cwd:          "/",
	}

	baseBundle := goci.Bundle().
		WithNamespaces(PrivilegedContainerNamespaces...).
		WithResources(&specs.LinuxResources{Devices: append([]specs.LinuxDeviceCgroup{denyAll}, allowedDevices...)}).
		WithRootFS(defaultRootFSPath).
		WithDevices(fuseDevice).
		WithProcess(baseProcess)

	unprivilegedBundle := baseBundle.
		WithNamespace(goci.UserNamespace).
		WithUIDMappings(idMappings...).
		WithGIDMappings(idMappings...).
		WithMounts(unprivilegedMounts...).
		WithMaskedPaths(defaultMaskedPaths())

	unprivilegedBundle.Spec.Linux.Seccomp = seccomp
	if appArmorProfile != "" {
		unprivilegedBundle.Spec.Process.ApparmorProfile = appArmorProfile
	}

	privilegedBundle := baseBundle.
		WithMounts(privilegedMounts...).
		WithCapabilities(PrivilegedMaxCaps...)

	template := &rundmc.BundleTemplate{
		Rules: []rundmc.BundlerRule{
			bundlerules.Base{
				PrivilegedBase:   privilegedBundle,
				UnprivilegedBase: unprivilegedBundle,
			},
			bundlerules.RootFS{
				ContainerRootUID: idMappings.Map(0),
				ContainerRootGID: idMappings.Map(0),
				MkdirChown:       chrootMkdir,
			},
			bundlerules.Limits{},
			bundlerules.BindMounts{},
			bundlerules.Env{},
			bundlerules.Hostname{},
		},
	}

	log.Info("base-bundles", lager.Data{
		"privileged":   privilegedBundle,
		"unprivileged": unprivilegedBundle,
	})

	eventStore := rundmc.NewEventStore(properties)
	stateStore := rundmc.NewStateStore(properties)

	nstar := rundmc.NewNstarRunner(nstarPath, tarPath, linux_command_runner.New())
	// Stopper retries cgroup-based stops up to 10 times, 1s apart.
	stopper := stopper.New(stopper.NewRuncStateCgroupPathResolver("/run/runc"), nil, retrier.New(retrier.ConstantBackoff(10, 1*time.Second), nil))
	return rundmc.New(depot, template, runcrunner, &goci.BndlLoader{}, nstar, stopper, eventStore, stateStore)
}
func (cmd *GuardianCommand) wireVolumeCreator(logger lager.Logger, graphRoot string, insecureRegistries, persistentImages []string) gardener.VolumeCreator {
	if graphRoot == "" {
		return gardener.NoopVolumeCreator{}
	}

	if cmd.Bin.ImagePlugin.Path() != "" {
		defaultRootFS, err := url.Parse(cmd.Containers.DefaultRootFSDir.Path())
		if err != nil {
			logger.Fatal("failed-to-parse-default-rootfs", err)
		}
		return imageplugin.New(cmd.Bin.ImagePlugin.Path(), linux_command_runner.New(), defaultRootFS, idMappings)
	}

	logger = logger.Session("volume-creator", lager.Data{"graphRoot": graphRoot})
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger}

	if err := os.MkdirAll(graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	dockerGraph, err := graph.NewGraph(graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: graphRoot,
		}
	}

	repoFetcher := repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: cmd.Containers.DefaultRootFSDir.Path(),
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				cmd.Docker.Registry,
				cake,
				distclient.NewDialer(insecureRegistries),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Translator: rootfs_provider.NewUidTranslator(
			idMappings, // uid
			idMappings, // gid
		),
	}

	retainer := cleaner.NewRetainer()
	ovenCleaner := cleaner.NewOvenCleaner(retainer,
		cleaner.NewThreshold(int64(cmd.Graph.CleanupThresholdInMegabytes)*1024*1024),
	)

	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)

	quotaManager := &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{
			AUFSDiffPathFinder: quotaedGraphDriver,
		},
	}

	return rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator,
		rootfs_provider.NewMetricsAdapter(quotaManager.GetUsage, quotaedGraphDriver.GetMntPath), ovenCleaner)
}
package devices

import (
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/docker/libcontainer/netlink"
	"github.com/eapache/go-resiliency/retrier"
)

// netlink is not thread-safe, all calls to netlink should be guarded by this mutex
var netlinkMu *sync.Mutex = new(sync.Mutex)

var retry = retrier.New(retrier.ExponentialBackoff(6, 10*time.Millisecond), nil)

type Bridge struct{}

// Create creates a bridge device and returns the interface.
// If the device already exists, returns the existing interface.
func (Bridge) Create(name string, ip net.IP, subnet *net.IPNet) (intf *net.Interface, err error) {
	netlinkMu.Lock()
	defer netlinkMu.Unlock()

	if intf, err = idempotentlyCreateBridge(name); err != nil {
		return nil, err
	}

	if err = netlink.NetworkLinkAddIp(intf, ip, subnet); err != nil && err.Error() != "file exists" {
		return nil, fmt.Errorf("devices: add IP to bridge: %v", err)
	}

	return intf, nil
}
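All of the examples here pass nil as the retrier's second argument, which retries on any non-nil error. go-resiliency also ships classifiers for finer control; a minimal sketch using a whitelist, where errTemporary is a hypothetical sentinel defined only for illustration:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/retrier"
)

// errTemporary is a hypothetical sentinel error, not from the source.
var errTemporary = errors.New("temporary failure")

func main() {
	// Only errTemporary is retried; any other error is returned immediately.
	r := retrier.New(
		retrier.ExponentialBackoff(6, 10*time.Millisecond),
		retrier.WhitelistClassifier{errTemporary},
	)

	err := r.Run(func() error {
		return errors.New("permanent failure") // not whitelisted: fails fast
	})
	fmt.Println(err) // permanent failure, after a single attempt
}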
func main() {
	if reexec.Init() {
		return
	}

	var insecureRegistries vars.StringList
	flag.Var(
		&insecureRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	var persistentImages vars.StringList
	flag.Var(
		&persistentImages,
		"persistentImage",
		"Image which should never be garbage collected. (Can be specified multiple times)",
	)

	var dnsServers vars.StringList
	flag.Var(
		&dnsServers,
		"dnsServer",
		"DNS server IP address to use instead of automatically determined servers. (Can be specified multiple times)",
	)

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	logger, reconfigurableSink := cf_lager.New("garden-linux")
	initializeDropsonde(logger)

	if *binPath == "" {
		missing("-bin")
	}
	if *stateDirPath == "" {
		missing("-stateDir")
	}
	if *depotPath == "" {
		missing("-depot")
	}
	if len(*tag) > 2 {
		println("-tag parameter must be less than 3 characters long")
		println()
		flag.Usage()
		return
	}

	_, dynamicRange, err := net.ParseCIDR(*networkPool)
	if err != nil {
		logger.Fatal("failed-to-parse-network-pool", err)
	}

	subnetPool, err := subnets.NewSubnets(dynamicRange)
	if err != nil {
		logger.Fatal("failed-to-create-subnet-pool", err)
	}

	portPoolState, err := port_pool.LoadState(path.Join(*stateDirPath, "port_pool.json"))
	if err != nil {
		logger.Error("failed-to-parse-pool-state", err)
	}

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize), portPoolState)
	if err != nil {
		logger.Fatal("invalid pool range", err)
	}

	useKernelLogging := true
	switch *iptablesLogMethod {
	case "nflog":
		useKernelLogging = false
	case "kernel":
		/* noop */
	default:
		println("-iptablesLogMethod value not recognized")
		println()
		flag.Usage()
		return
	}

	config := sysconfig.NewConfig(*tag, *allowHostAccess, dnsServers.List)
	runner := sysconfig.NewRunner(config, linux_command_runner.New())

	if err := os.MkdirAll(*graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := selectGraphDriver(logger, *graphDriverName, *graphRoot)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(*graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: *graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	metricsProvider := metrics.NewMetrics(logger, backingStoresPath, *depotPath)

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		metrics.StartDebugServer(dbgAddr, reconfigurableSink, metricsProvider)
	}

	dockerGraph, err := graph.NewGraph(*graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: *graphRoot,
		}
	}

	repo := container_repository.New()
	retainer := cleaner.NewRetainer()

	repoFetcher := &repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: *rootFSPath,
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				*dockerRegistry,
				cake,
				distclient.NewDialer(insecureRegistries.List),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	maxId := uint32(sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID()))
	mappingList := rootfs_provider.MappingList{
		{
			ContainerID: 0,
			HostID:      maxId,
			Size:        1,
		},
		{
			ContainerID: 1,
			HostID:      1,
			Size:        maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	cleaner := cleaner.NewOvenCleaner(
		retainer,
		cleaner.NewThreshold(int64(*graphCleanupThreshold)*1024*1024),
	)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)
	cakeOrdinator := rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator, nil, cleaner)
	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages.List)

	rootfsCleaner := &linux_backend.RootFSCleaner{
		FilePaths: []string{
			"/tmp",
			"/proc",
			"/sys",
			"/dev",
			"/etc",
			"/etc/config",
			"/etc/hostname",
			"/etc/hosts",
			"/etc/resolv.conf",
		},
	}

	if *externalIP == "" {
		ip, err := localip.LocalIP()
		if err != nil {
			panic("couldn't determine local IP to use for -externalIP parameter. You can use the -externalIP flag to pass an external IP")
		}
		externalIP = &ip
	}

	parsedExternalIP := net.ParseIP(*externalIP)
	if parsedExternalIP == nil {
		panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP))
	}

	var quotaManager linux_container.QuotaManager = &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{quotaedGraphDriver},
	}

	ipTablesMgr := createIPTablesManager(config, runner, logger)
	injector := &provider{
		useKernelLogging: useKernelLogging,
		chainPrefix:      config.IPTables.Filter.InstancePrefix,
		runner:           runner,
		log:              logger,
		portPool:         portPool,
		ipTablesMgr:      ipTablesMgr,
		sysconfig:        config,
		quotaManager:     quotaManager,
	}

	currentContainerVersion, err := semver.Make(CurrentContainerVersion)
	if err != nil {
		logger.Fatal("failed-to-parse-container-version", err)
	}

	pool := resource_pool.New(
		logger,
		*binPath,
		*depotPath,
		config,
		cakeOrdinator,
		rootfsCleaner,
		mappingList,
		parsedExternalIP,
		*mtu,
		subnetPool,
		bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}),
		ipTablesMgr,
		injector,
		iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")),
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
		currentContainerVersion,
		system.MkdirChowner{},
	)

	systemInfo := sysinfo.NewProvider(*depotPath)

	backend := linux_backend.New(logger, pool, repo, injector, systemInfo, layercake.GraphPath(*graphRoot), *snapshotsPath, int(*maxContainers))

	err = backend.Setup()
	if err != nil {
		logger.Fatal("failed-to-set-up-backend", err)
	}

	graceTime := *containerGraceTime

	gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	clock := clock.NewClock()
	metronNotifier := metrics.NewPeriodicMetronNotifier(logger, metricsProvider, *metricsEmissionInterval, clock)
	metronNotifier.Start()

	signals := make(chan os.Signal, 1)
	go func() {
		<-signals
		portPoolState = portPool.RefreshState()
		port_pool.SaveState(path.Join(*stateDirPath, "port_pool.json"), portPoolState)

		gardenServer.Stop()
		metronNotifier.Stop()
		os.Exit(0)
	}()
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
	Expect(err).NotTo(HaveOccurred())
	Expect(syscall.Mount("tmpfs", root, "tmpfs", 0, "")).To(Succeed())

	driver, err = graphdriver.GetDriver("aufs", root, nil)
	Expect(err).NotTo(HaveOccurred())

	driver = &quotaedaufs.QuotaedDriver{
		GraphDriver: driver,
		Unmount:     quotaedaufs.Unmount,
		BackingStoreMgr: &quotaedaufs.BackingStore{
			RootPath: backingStoreRoot,
			Logger:   lagertest.NewTestLogger("test"),
		},
		LoopMounter: &quotaedaufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  lagertest.NewTestLogger("test"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: root,
		Logger:   lagertest.NewTestLogger("test"),
	}

	graph, err := graph.NewGraph(root, driver)
	Expect(err).NotTo(HaveOccurred())

	cake = &layercake.Docker{
		Graph:  graph,
		Driver: driver,
	}
})
			Eventually(nc).Should(gbytes.Say("hallo"))
		}

		return err
	}).ShouldNot(HaveOccurred())
})

Describe("domain name resolution", func() {
	itCanResolve := func(domainName string) {
		defer func() {
			err := gardenClient.Destroy(container.Handle())
			Expect(err).NotTo(HaveOccurred())
		}()

		output := gbytes.NewBuffer()
		err := retrier.New(retrier.ConstantBackoff(30, 2*time.Second), nil).Run(func() error {
			proc, err := container.Run(garden.ProcessSpec{
				// We are using ping here rather than nslookup as we saw some
				// flakey behaviour with nslookup on our local concourse machines.
				// We're testing on the output of ping, which reports "bad address"
				// if it is unable to resolve a domain.
				Path: "ping",
				Args: []string{"-c", "1", domainName},
				User: "******",
			}, garden.ProcessIO{Stdout: output, Stderr: output})
			Expect(err).NotTo(HaveOccurred())

			_, err = proc.Wait()
			return err
		})