Example #1
func wireContainerizer(log lager.Logger, depotPath, iodaemonPath, nstarPath, tarPath, defaultRootFSPath string) *rundmc.Containerizer {
	depot := depot.New(depotPath)

	startChecker := rundmc.StartChecker{Expect: "Pid 1 Running", Timeout: 3 * time.Second}
	stateChecker := rundmc.StateChecker{StateFileDir: OciStateDir}

	commandRunner := linux_command_runner.New()

	runcrunner := runrunc.New(
		process_tracker.New(path.Join(os.TempDir(), fmt.Sprintf("garden-%s", *tag), "processes"), iodaemonPath, commandRunner),
		commandRunner,
		wireUidGenerator(),
		goci.RuncBinary("runc"),
		&goci.BndlLoader{},
		runrunc.LookupFunc(runrunc.LookupUser),
	)

	baseBundle := goci.Bundle().
		WithNamespaces(PrivilegedContainerNamespaces...).
		WithResources(&specs.Resources{}).
		WithMounts(
			goci.Mount{Name: "proc", Type: "proc", Source: "proc", Destination: "/proc"},
			goci.Mount{Name: "tmp", Type: "tmpfs", Source: "tmpfs", Destination: "/tmp"},
		).WithRootFS(defaultRootFSPath).
		WithProcess(goci.Process("/bin/sh", "-c", `echo "Pid 1 Running"; read x`)).
		WithDevices(specs.Device{Path: "/dev/null", Type: 'c', Major: 1, Minor: 3, UID: 0, GID: 0, Permissions: "rwm", FileMode: 0666})

	nstar := rundmc.NewNstarRunner(nstarPath, tarPath, linux_command_runner.New())
	return rundmc.New(depot, &rundmc.BundleTemplate{Bndl: baseBundle}, runcrunner, startChecker, stateChecker, nstar)
}
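
Every wire* function in these examples ultimately builds on linux_command_runner.New(), a command_runner.CommandRunner that shells out on the host; the logging.Runner decorator seen throughout wraps it so each command and its duration get logged (as the test in Example #16 asserts). Below is a minimal, hedged sketch of the bare building block, with import paths taken from the snippets in this listing; note that older revisions of the package take a debug flag, e.g. New(false).

package main

import (
	"os/exec"

	"github.com/cloudfoundry/gunk/command_runner/linux_command_runner"
)

func main() {
	// The bare runner just execs the command on the host and reports its error.
	runner := linux_command_runner.New()
	if err := runner.Run(exec.Command("echo", "hello")); err != nil {
		panic(err)
	}
}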
Example #2
func (cmd *GuardianCommand) wireNetworker(log lager.Logger, propManager kawasaki.ConfigStore, portPool *ports.PortPool) (gardener.Networker, gardener.Starter, error) {
	externalIP, err := defaultExternalIP(cmd.Network.ExternalIP)
	if err != nil {
		return nil, nil, err
	}

	dnsServers := make([]net.IP, len(cmd.Network.DNSServers))
	for i, ip := range cmd.Network.DNSServers {
		dnsServers[i] = ip.IP()
	}

	if cmd.Network.Plugin.Path() != "" {
		resolvConfigurer := &kawasaki.ResolvConfigurer{
			HostsFileCompiler:  &dns.HostsFileCompiler{},
			ResolvFileCompiler: &dns.ResolvFileCompiler{},
			FileWriter:         &dns.RootfsWriter{},
			IDMapReader:        &kawasaki.RootIdMapReader{},
		}
		externalNetworker := netplugin.New(
			linux_command_runner.New(),
			propManager,
			externalIP,
			dnsServers,
			resolvConfigurer,
			cmd.Network.Plugin.Path(),
			cmd.Network.PluginExtraArgs,
		)
		return externalNetworker, externalNetworker, nil
	}

	var denyNetworksList []string
	for _, network := range cmd.Network.DenyNetworks {
		denyNetworksList = append(denyNetworksList, network.String())
	}

	interfacePrefix := fmt.Sprintf("w%s", cmd.Server.Tag)
	chainPrefix := fmt.Sprintf("w-%s-", cmd.Server.Tag)
	idGenerator := kawasaki.NewSequentialIDGenerator(time.Now().UnixNano())
	iptRunner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: log.Session("iptables-runner")}
	locksmith := &locksmithpkg.FileSystem{}
	ipTables := iptables.New(cmd.Bin.IPTables.Path(), cmd.Bin.IPTablesRestore.Path(), iptRunner, locksmith, chainPrefix)
	ipTablesStarter := iptables.NewStarter(ipTables, cmd.Network.AllowHostAccess, interfacePrefix, denyNetworksList, cmd.Containers.DestroyContainersOnStartup)
	ruleTranslator := iptables.NewRuleTranslator()

	networker := kawasaki.New(
		kawasaki.SpecParserFunc(kawasaki.ParseSpec),
		subnets.NewPool(cmd.Network.Pool.CIDR()),
		kawasaki.NewConfigCreator(idGenerator, interfacePrefix, chainPrefix, externalIP, dnsServers, cmd.Network.Mtu),
		propManager,
		factory.NewDefaultConfigurer(ipTables),
		portPool,
		iptables.NewPortForwarder(ipTables),
		iptables.NewFirewallOpener(ruleTranslator, ipTables),
	)

	return networker, ipTablesStarter, nil
}
Example #3
func wireStarter(logger lager.Logger, iptablesMgr *iptables.Manager) gardener.Starter {
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger.Session("runner")}

	return &StartAll{starters: []gardener.Starter{
		rundmc.NewStarter(logger, mustOpen("/proc/cgroups"), path.Join(os.TempDir(), fmt.Sprintf("cgroups-%s", *tag)), runner),
		iptablesMgr,
	}}
}
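
The StartAll value returned here is a composite: it treats a slice of gardener.Starter values as a single starter. Its definition is not part of these snippets; a plausible sketch is shown below, with a local Starter interface standing in for gardener.Starter, whose single Start() error method is an assumption rather than a quoted definition.

// Starter stands in for gardener.Starter, which these snippets never show;
// the single Start() error method is an assumption.
type Starter interface {
	Start() error
}

type StartAll struct {
	starters []Starter
}

// Start runs each wrapped starter in order and fails fast on the first error.
func (s *StartAll) Start() error {
	for _, starter := range s.starters {
		if err := starter.Start(); err != nil {
			return err
		}
	}
	return nil
}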
Example #4
func wireNetworker(
	log lager.Logger,
	tag string,
	networkPoolCIDR *net.IPNet,
	externalIP net.IP,
	iptablesMgr kawasaki.IPTablesConfigurer,
	interfacePrefix string,
	chainPrefix string,
	propManager *properties.Manager,
	networkModulePath string) gardener.Networker {
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: log.Session("network-runner")}

	hostConfigurer := &configure.Host{
		Veth:   &devices.VethCreator{},
		Link:   &devices.Link{Name: "guardian"},
		Bridge: &devices.Bridge{},
		Logger: log.Session("network-host-configurer"),
	}

	containerCfgApplier := &configure.Container{
		Logger: log.Session("network-container-configurer"),
		Link:   &devices.Link{Name: "guardian"},
	}

	idGenerator := kawasaki.NewSequentialIDGenerator(time.Now().UnixNano())
	portPool, err := ports.NewPool(uint32(*portPoolStart), uint32(*portPoolSize), ports.State{})
	if err != nil {
		log.Fatal("invalid pool range", err)
	}

	switch networkModulePath {
	case "":
		return kawasaki.New(
			kawasaki.NewManager(runner, "/var/run/netns"),
			kawasaki.SpecParserFunc(kawasaki.ParseSpec),
			subnets.NewPool(networkPoolCIDR),
			kawasaki.NewConfigCreator(idGenerator, interfacePrefix, chainPrefix, externalIP),
			kawasaki.NewConfigurer(
				hostConfigurer,
				containerCfgApplier,
				iptablesMgr,
				&netns.Execer{},
			),
			propManager,
			iptables.NewPortForwarder(runner),
			portPool,
		)
	default:
		if _, err := os.Stat(networkModulePath); err != nil {
			log.Fatal("failed-to-stat-network-module", err)
			return nil
		}
		return gardener.ForeignNetworkAdaptor{
			ForeignNetworker: genclient.New(networkModulePath),
		}
	}
}
Example #5
File: main.go  Project: glyn/pango
func wireContainerizer(depotPath, iodaemonPath, defaultRootFSPath string) *rundmc.Containerizer {
	depot := depot.New(depotPath)

	startCheck := rundmc.StartChecker{Expect: "Pid 1 Running", Timeout: 3 * time.Second}

	runcrunner := runrunc.New(
		process_tracker.New(path.Join(os.TempDir(), fmt.Sprintf("garden-%s", *tag), "processes"), iodaemonPath, linux_command_runner.New()),
		linux_command_runner.New(),
		wireUidGenerator(),
		goci.RuncBinary("runc"),
	)

	baseBundle := goci.Bundle().
		WithNamespaces(PrivilegedContainerNamespaces...).
		WithResources(&specs.Resources{}).
		WithMounts(goci.Mount{Name: "proc", Type: "proc", Source: "proc", Destination: "/proc"}).
		WithRootFS(defaultRootFSPath).
		WithProcess(goci.Process("/bin/sh", "-c", `echo "Pid 1 Running"; read x`))

	return rundmc.New(depot, &rundmc.BundleTemplate{baseBundle}, runcrunner, startCheck)
}
Example #6
func main() {
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()
	logger, _ := cf_lager.New("hook")

	oldWd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	cwd := path.Dir(os.Args[0])
	os.Chdir(cwd)
	defer os.Chdir(oldWd)

	config, err := process.EnvFromFile("../etc/config")
	if err != nil {
		panic(fmt.Sprintf("error reading config file in hook: %s", err))
	}
	runner := &logging.Runner{linux_command_runner.New(), logger}
	configurer := network.NewConfigurer(logger.Session("linux_backend: hook.CHILD_AFTER_PIVOT"))
	linux_backend.RegisterHooks(hook.DefaultHookSet, runner, config, configurer)

	hook.Main(os.Args[1:])
}
Example #7
func wireIptables(logger lager.Logger, tag string, allowHostAccess bool, interfacePrefix, chainPrefix string) *iptables.Manager {
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger.Session("iptables-runner")}

	filterConfig := iptables.FilterConfig{
		AllowHostAccess: allowHostAccess,
		InputChain:      fmt.Sprintf("w-%s-input", tag),
		ForwardChain:    fmt.Sprintf("w-%s-forward", tag),
		DefaultChain:    fmt.Sprintf("w-%s-default", tag),
	}

	natConfig := iptables.NATConfig{
		PreroutingChain:  fmt.Sprintf("w-%s-prerouting", tag),
		PostroutingChain: fmt.Sprintf("w-%s-postrouting", tag),
	}

	return iptables.NewManager(
		filterConfig,
		natConfig,
		interfacePrefix,
		chainPrefix,
		runner,
		logger,
	)
}
Example #8
func (cmd *GuardianCommand) wireVolumeCreator(logger lager.Logger, graphRoot string, insecureRegistries, persistentImages []string) gardener.VolumeCreator {
	if graphRoot == "" {
		return gardener.NoopVolumeCreator{}
	}

	if cmd.Bin.ImagePlugin.Path() != "" {
		defaultRootFS, err := url.Parse(cmd.Containers.DefaultRootFSDir.Path())
		if err != nil {
			logger.Fatal("failed-to-parse-default-rootfs", err)
		}
		return imageplugin.New(cmd.Bin.ImagePlugin.Path(), linux_command_runner.New(), defaultRootFS, idMappings)
	}

	logger = logger.Session("volume-creator", lager.Data{"graphRoot": graphRoot})
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger}

	if err := os.MkdirAll(graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	dockerGraph, err := graph.NewGraph(graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: graphRoot,
		}
	}

	repoFetcher := repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: cmd.Containers.DefaultRootFSDir.Path(),
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				cmd.Docker.Registry,
				cake,
				distclient.NewDialer(insecureRegistries),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Translator: rootfs_provider.NewUidTranslator(
			idMappings, // uid
			idMappings, // gid
		),
	}

	retainer := cleaner.NewRetainer()
	ovenCleaner := cleaner.NewOvenCleaner(retainer,
		cleaner.NewThreshold(int64(cmd.Graph.CleanupThresholdInMegabytes)*1024*1024),
	)

	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)

	quotaManager := &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{
			AUFSDiffPathFinder: quotaedGraphDriver,
		},
	}

	return rootfs_provider.NewCakeOrdinator(cake,
		repoFetcher,
		layerCreator,
		rootfs_provider.NewMetricsAdapter(quotaManager.GetUsage, quotaedGraphDriver.GetMntPath),
		ovenCleaner)
}
Example #9
func (cmd *GuardianCommand) wireRunDMCStarter(logger lager.Logger) gardener.Starter {
	var cgroupsMountpoint string
	if cmd.Server.Tag != "" {
		cgroupsMountpoint = filepath.Join(os.TempDir(), fmt.Sprintf("cgroups-%s", cmd.Server.Tag))
	} else {
		cgroupsMountpoint = "/sys/fs/cgroup"
	}

	return rundmc.NewStarter(logger, mustOpen("/proc/cgroups"), mustOpen("/proc/self/cgroup"), cgroupsMountpoint, linux_command_runner.New())
}
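
Examples #3, #9 and #14 all pass mustOpen("/proc/cgroups") (and here also /proc/self/cgroup) into rundmc.NewStarter, but the helper itself never appears in these snippets. A plausible implementation, offered only as an assumption, opens the file and panics so that mis-wiring fails at startup:

import (
	"fmt"
	"os"
)

// mustOpen is assumed, not taken from the project: it returns the opened
// file (which satisfies io.ReadCloser) or aborts start-up if the path
// cannot be read.
func mustOpen(path string) *os.File {
	file, err := os.Open(path)
	if err != nil {
		panic(fmt.Sprintf("mustOpen %s: %s", path, err))
	}
	return file
}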
Example #10
func main() {
	if reexec.Init() {
		return
	}

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	logger, reconfigurableSink := cf_lager.New("garden-linux")
	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		debug.Run(dbgAddr, reconfigurableSink)
	}

	initializeDropsonde(logger)

	if *binPath == "" {
		missing("-bin")
	}

	if *depotPath == "" {
		missing("-depot")
	}

	if len(*tag) > 2 {
		println("-tag parameter must be less than 3 characters long")
		println()
		flag.Usage()
		return
	}

	_, dynamicRange, err := net.ParseCIDR(*networkPool)
	if err != nil {
		logger.Fatal("failed-to-parse-network-pool", err)
	}

	subnetPool, err := subnets.NewSubnets(dynamicRange)
	if err != nil {
		logger.Fatal("failed-to-create-subnet-pool", err)
	}

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize))
	if err != nil {
		logger.Fatal("invalid pool range", err)
	}

	useKernelLogging := true
	switch *iptablesLogMethod {
	case "nflog":
		useKernelLogging = false
	case "kernel":
		/* noop */
	default:
		println("-iptablesLogMethod value not recognized")
		println()
		flag.Usage()
		return
	}

	config := sysconfig.NewConfig(*tag, *allowHostAccess)

	runner := sysconfig.NewRunner(config, linux_command_runner.New())

	if err := os.MkdirAll(*graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(*graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	dockerGraph, err := graph.NewGraph(*graphRoot, dockerGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	graphMountPoint := mountPoint(logger, *graphRoot)

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: dockerGraphDriver,
	}

	if cake.DriverName() == "btrfs" {
		cake = &layercake.BtrfsCleaningCake{
			Cake:            cake,
			Runner:          runner,
			BtrfsMountPoint: graphMountPoint,
			RemoveAll:       os.RemoveAll,
			Logger:          logger.Session("btrfs-cleanup"),
		}
	}

	retainer := layercake.NewRetainer()
	cake = &layercake.OvenCleaner{
		Cake:     cake,
		Retainer: retainer,
		Logger:   logger.Session("oven-cleaner"),
	}

	lock := repository_fetcher.NewFetchLock()
	repoFetcher := repository_fetcher.Retryable{
		repository_fetcher.NewRemote(
			repository_fetcher.NewRepositoryProvider(
				*dockerRegistry,
				strings.Split(*insecureRegistries, ","),
			),
			cake,
			map[registry.APIVersion]repository_fetcher.VersionedFetcher{
				registry.APIVersion1: &repository_fetcher.RemoteV1Fetcher{
					Cake:      cake,
					Retainer:  retainer,
					GraphLock: lock,
				},
				registry.APIVersion2: &repository_fetcher.RemoteV2Fetcher{
					Cake:      cake,
					Retainer:  retainer,
					GraphLock: lock,
				},
			},
			repository_fetcher.EndpointPinger{},
		),
	}

	maxId := sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID())
	mappingList := rootfs_provider.MappingList{
		{
			FromID: 0,
			ToID:   maxId,
			Size:   1,
		},
		{
			FromID: 1,
			ToID:   1,
			Size:   maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	remoteRootFSProvider, err := rootfs_provider.NewDocker(fmt.Sprintf("docker-remote-%s", cake.DriverName()),
		repoFetcher, cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer, clock.NewClock())
	if err != nil {
		logger.Fatal("failed-to-construct-docker-rootfs-provider", err)
	}

	localRootFSProvider, err := rootfs_provider.NewDocker(fmt.Sprintf("docker-local-%s", cake.DriverName()),
		&repository_fetcher.Local{
			Cake:              cake,
			DefaultRootFSPath: *rootFSPath,
			IDProvider:        repository_fetcher.LayerIDProvider{},
		}, cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer, clock.NewClock())
	if err != nil {
		logger.Fatal("failed-to-construct-warden-rootfs-provider", err)
	}

	rootFSProviders := map[string]rootfs_provider.RootFSProvider{
		"":       localRootFSProvider,
		"docker": remoteRootFSProvider,
	}

	if *externalIP == "" {
		ip, err := localip.LocalIP()
		if err != nil {
			panic("couldn't determine local IP to use for -externalIP parameter. You can use the -externalIP flag to pass an external IP")
		}

		externalIP = &ip
	}

	parsedExternalIP := net.ParseIP(*externalIP)
	if parsedExternalIP == nil {
		panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP))
	}

	var quotaManager linux_container.QuotaManager = quota_manager.DisabledQuotaManager{}
	if !*disableQuotas {
		quotaManager = &quota_manager.BtrfsQuotaManager{
			Runner:     runner,
			MountPoint: graphMountPoint,
		}
	}

	injector := &provider{
		useKernelLogging: useKernelLogging,
		chainPrefix:      config.IPTables.Filter.InstancePrefix,
		runner:           runner,
		log:              logger,
		portPool:         portPool,
		sysconfig:        config,
		quotaManager:     quotaManager,
	}

	currentContainerVersion, err := semver.Make(CurrentContainerVersion)
	if err != nil {
		logger.Fatal("failed-to-parse-container-version", err)
	}

	pool := resource_pool.New(
		logger,
		*binPath,
		*depotPath,
		config,
		rootFSProviders,
		cake,
		mappingList,
		parsedExternalIP,
		*mtu,
		subnetPool,
		bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}),
		injector,
		iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")),
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
		currentContainerVersion,
	)

	systemInfo := sysinfo.NewProvider(*depotPath)

	backend := linux_backend.New(logger, pool, container_repository.New(), injector, systemInfo, *snapshotsPath, int(*maxContainers))

	err = backend.Setup()
	if err != nil {
		logger.Fatal("failed-to-set-up-backend", err)
	}

	graceTime := *containerGraceTime

	gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
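
Example #10 ends with a shutdown choreography that the later main functions (#11, #13, #17) repeat: a buffered signal channel, a goroutine that stops the server (and in #17 persists the port-pool state) before exiting, signal.Notify for SIGINT/SIGTERM/SIGHUP, and a final select {} that blocks forever. Stripped of the Garden-specific pieces, the pattern is just the following standard-library sketch, with the server stop replaced by a log line:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		log.Println("stopping...")
		// stop servers / flush state here before exiting
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	log.Println("started")
	select {}
}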
Example #11
func main() {
	flag.Parse()

	maxProcs := runtime.NumCPU()
	prevMaxProcs := runtime.GOMAXPROCS(maxProcs)

	log.Println("set GOMAXPROCS to", maxProcs, "was", prevMaxProcs)

	if *binPath == "" {
		log.Fatalln("must specify -bin with linux backend")
	}

	if *depotPath == "" {
		log.Fatalln("must specify -depot with linux backend")
	}

	if *rootFSPath == "" {
		log.Fatalln("must specify -rootfs with linux backend")
	}

	uidPool := uid_pool.New(uint32(*uidPoolStart), uint32(*uidPoolSize))

	_, ipNet, err := net.ParseCIDR(*networkPool)
	if err != nil {
		log.Fatalln("error parsing CIDR:", err)
	}

	networkPool := network_pool.New(ipNet)

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize))

	runner := linux_command_runner.New(*debug)

	quotaManager, err := quota_manager.New(*depotPath, *binPath, runner)
	if err != nil {
		log.Fatalln("error creating quota manager:", err)
	}

	if *disableQuotas {
		quotaManager.Disable()
	}

	graphDriver, err := graphdriver.New(*graphRoot)
	if err != nil {
		log.Fatalln("error constructing graph driver:", err)
	}

	graph, err := graph.NewGraph(*graphRoot, graphDriver)
	if err != nil {
		log.Fatalln("error constructing graph:", err)
	}

	reg, err := registry.NewRegistry(nil, nil, *dockerRegistry)
	if err != nil {
		log.Fatalln(err)
	}

	pool := container_pool.New(
		*binPath,
		*depotPath,
		*rootFSPath,
		repository_fetcher.Retryable{repository_fetcher.New(reg, graph)},
		graphDriver,
		uidPool,
		networkPool,
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
	)

	systemInfo := system_info.NewProvider(*depotPath)

	backend := linux_backend.New(pool, systemInfo, *snapshotsPath)

	log.Println("setting up backend")

	err = backend.Setup()
	if err != nil {
		log.Fatalln("failed to set up backend:", err)
	}

	log.Println("starting server; listening with", *listenNetwork, "on", *listenAddr)

	graceTime := *containerGraceTime

	wardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend)

	err = wardenServer.Start()
	if err != nil {
		log.Fatalln("failed to start:", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		log.Println("stopping...")
		wardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	select {}
}
Example #12
func main() {
	libPath := flag.String("lib", "./lib", "Directory containing hooks")
	rootFsPath := flag.String("root", "", "Directory that will become root in the new mount namespace")
	runPath := flag.String("run", "./run", "Directory where server socket is placed")
	userNsFlag := flag.String("userns", "enabled", "If specified, use user namespacing")
	title := flag.String("title", "", "")
	flag.Parse()

	if *rootFsPath == "" {
		missing("--root")
	}

	binPath, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		fmt.Fprintf(os.Stderr, "wshd: obtain absolute path: %s", err)
		os.Exit(6)
	}

	socketPath := path.Join(*runPath, "wshd.sock")

	privileged := false
	if *userNsFlag == "" || *userNsFlag == "disabled" {
		privileged = true
	}

	containerReader, hostWriter, err := os.Pipe()
	if err != nil {
		fmt.Fprintf(os.Stderr, "wshd: create pipe: %s", err)
		os.Exit(5)
	}

	hostReader, containerWriter, err := os.Pipe()
	if err != nil {
		fmt.Fprintf(os.Stderr, "wshd: create pipe: %s", err)
		os.Exit(4)
	}

	sync := &containerizer.PipeSynchronizer{
		Reader: hostReader,
		Writer: hostWriter,
	}

	listener, err := unix_socket.NewListenerFromPath(socketPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "wshd: create listener: %s", err)
		os.Exit(8)
	}

	socketFile, err := listener.File()
	if err != nil {
		fmt.Fprintf(os.Stderr, "wshd: obtain listener file: %s", err)
		os.Exit(9)
	}

	beforeCloneInitializer := &system.Initializer{Steps: []system.StepRunner{
		&containerizer.FuncStep{
			(&container_daemon.RlimitsManager{}).Init,
		},
	}}

	maxUID := sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID())
	cz := containerizer.Containerizer{
		BeforeCloneInitializer: beforeCloneInitializer,
		InitBinPath:            path.Join(binPath, "initc"),
		InitArgs: []string{
			"--root", *rootFsPath,
			"--config", path.Join(*libPath, "../etc/config"),
			"--title", *title,
		},
		Execer: &system.NamespacingExecer{
			CommandRunner: linux_command_runner.New(),
			ExtraFiles:    []*os.File{containerReader, containerWriter, socketFile},
			Privileged:    privileged,
			MaxUID:        maxUID,
		},
		Signaller: sync,
		Waiter:    sync,
		// Temporary until we merge the hook scripts functionality in Golang
		CommandRunner: linux_command_runner.New(),
		LibPath:       *libPath,
		RootfsPath:    *rootFsPath,
	}

	err = cz.Create()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create container: %s", err)
		os.Exit(2)
	}
}
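
The two os.Pipe calls above give the host process and the container process one read end and one write end each, and containerizer.PipeSynchronizer uses them to block until the other side signals. The sketch below shows that underlying mechanism with nothing but the standard library; it mirrors the idea rather than the actual containerizer code.

package main

import (
	"fmt"
	"os"
)

func main() {
	// One pipe is enough to demonstrate the signalling: the "child" writes a
	// byte when it is ready and the "host" blocks on the read end until then.
	hostReader, childWriter, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	go func() {
		// Stand-in for the cloned container process.
		childWriter.Write([]byte{0})
		childWriter.Close()
	}()

	buf := make([]byte, 1)
	if _, err := hostReader.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println("child signalled readiness")
}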
Example #13
File: main.go  Project: julz/guardian
func main() {
	depotDir := flag.String(
		"depot",
		"/var/vcap/data/garden-runc/depot",
		"the depot directory to store containers in",
	)

	listenNetwork := flag.String(
		"listenNetwork",
		"tcp",
		"how to listen on the address (unix, tcp, etc.)",
	)

	listenAddr := flag.String(
		"listenAddr",
		"0.0.0.0:7777",
		"address to listen on",
	)

	containerGraceTime := flag.Duration(
		"containerGraceTime",
		0,
		"time after which to destroy idle containers",
	)

	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, _ := cf_lager.New("garden-runc")
	runner := &logging.Runner{
		CommandRunner: linux_command_runner.New(),
		Logger:        logger,
	}

	iodaemonBin, err := gexec.Build("github.com/cloudfoundry-incubator/garden-linux/iodaemon/cmd/iodaemon")
	if err != nil {
		panic(err)
	}

	gdnr := &gardener.Gardener{
		Volumizer: &gardenshed.Shed{},
		Containerizer: &rundmc.Containerizer{
			Repo: rundmc.Depot{
				Dir: *depotDir,
				ActualContainerProvider: &rundmc.RuncContainerFactory{
					Tracker: process_tracker.New("/tmp", iodaemonBin, runner),
				},
			},
		},
	}

	server := server.New(*listenNetwork, *listenAddr, *containerGraceTime, gdnr, logger)
	if err := server.Start(); err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		server.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	select {}
}
Example #14
File: main.go  Project: glyn/pango
func wireStarter() *rundmc.Starter {
	runner := &log.Runner{CommandRunner: linux_command_runner.New(), Logger: log.Session("runner")}
	return rundmc.NewStarter(mustOpen("/proc/cgroups"), path.Join(os.TempDir(), fmt.Sprintf("cgroups-%s", *tag)), runner)
}
Example #15
func (cmd *GuardianCommand) wireContainerizer(log lager.Logger, depotPath, dadooPath, runcPath, nstarPath, tarPath, defaultRootFSPath, appArmorProfile string, properties gardener.PropertyManager) *rundmc.Containerizer {
	depot := depot.New(depotPath)

	commandRunner := linux_command_runner.New()
	chrootMkdir := bundlerules.ChrootMkdir{
		Command:       preparerootfs.Command,
		CommandRunner: commandRunner,
	}

	pidFileReader := &dadoo.PidFileReader{
		Clock:         clock.NewClock(),
		Timeout:       10 * time.Second,
		SleepInterval: time.Millisecond * 100,
	}

	runcrunner := runrunc.New(
		commandRunner,
		runrunc.NewLogRunner(commandRunner, runrunc.LogDir(os.TempDir()).GenerateLogFile),
		goci.RuncBinary(runcPath),
		dadooPath,
		runcPath,
		runrunc.NewExecPreparer(&goci.BndlLoader{}, runrunc.LookupFunc(runrunc.LookupUser), chrootMkdir, NonRootMaxCaps),
		dadoo.NewExecRunner(
			dadooPath,
			runcPath,
			cmd.wireUidGenerator(),
			pidFileReader,
			linux_command_runner.New()),
	)

	mounts := []specs.Mount{
		{Type: "sysfs", Source: "sysfs", Destination: "/sys", Options: []string{"nosuid", "noexec", "nodev", "ro"}},
		{Type: "tmpfs", Source: "tmpfs", Destination: "/dev/shm"},
		{Type: "devpts", Source: "devpts", Destination: "/dev/pts",
			Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"}},
		{Type: "bind", Source: cmd.Bin.Init.Path(), Destination: "/tmp/garden-init", Options: []string{"bind"}},
	}

	privilegedMounts := append(mounts,
		specs.Mount{Type: "proc", Source: "proc", Destination: "/proc", Options: []string{"nosuid", "noexec", "nodev"}},
	)

	unprivilegedMounts := append(mounts,
		specs.Mount{Type: "proc", Source: "proc", Destination: "/proc", Options: []string{"nosuid", "noexec", "nodev"}},
	)

	rwm := "rwm"
	character := "c"
	var majorMinor = func(i int64) *int64 {
		return &i
	}

	var worldReadWrite os.FileMode = 0666
	fuseDevice := specs.LinuxDevice{
		Path:     "/dev/fuse",
		Type:     "c",
		Major:    10,
		Minor:    229,
		FileMode: &worldReadWrite,
	}

	denyAll := specs.LinuxDeviceCgroup{Allow: false, Access: &rwm}
	allowedDevices := []specs.LinuxDeviceCgroup{
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(3), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(5), Minor: majorMinor(0), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(8), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(9), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(5), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(1), Minor: majorMinor(7), Allow: true},
		{Access: &rwm, Type: &character, Major: majorMinor(fuseDevice.Major), Minor: majorMinor(fuseDevice.Minor), Allow: true},
	}

	baseProcess := specs.Process{
		Capabilities: UnprivilegedMaxCaps,
		Args:         []string{"/tmp/garden-init"},
		Cwd:          "/",
	}

	baseBundle := goci.Bundle().
		WithNamespaces(PrivilegedContainerNamespaces...).
		WithResources(&specs.LinuxResources{Devices: append([]specs.LinuxDeviceCgroup{denyAll}, allowedDevices...)}).
		WithRootFS(defaultRootFSPath).
		WithDevices(fuseDevice).
		WithProcess(baseProcess)

	unprivilegedBundle := baseBundle.
		WithNamespace(goci.UserNamespace).
		WithUIDMappings(idMappings...).
		WithGIDMappings(idMappings...).
		WithMounts(unprivilegedMounts...).
		WithMaskedPaths(defaultMaskedPaths())

	unprivilegedBundle.Spec.Linux.Seccomp = seccomp
	if appArmorProfile != "" {
		unprivilegedBundle.Spec.Process.ApparmorProfile = appArmorProfile
	}

	privilegedBundle := baseBundle.
		WithMounts(privilegedMounts...).
		WithCapabilities(PrivilegedMaxCaps...)

	template := &rundmc.BundleTemplate{
		Rules: []rundmc.BundlerRule{
			bundlerules.Base{
				PrivilegedBase:   privilegedBundle,
				UnprivilegedBase: unprivilegedBundle,
			},
			bundlerules.RootFS{
				ContainerRootUID: idMappings.Map(0),
				ContainerRootGID: idMappings.Map(0),
				MkdirChown:       chrootMkdir,
			},
			bundlerules.Limits{},
			bundlerules.BindMounts{},
			bundlerules.Env{},
			bundlerules.Hostname{},
		},
	}

	log.Info("base-bundles", lager.Data{
		"privileged":   privilegedBundle,
		"unprivileged": unprivilegedBundle,
	})

	eventStore := rundmc.NewEventStore(properties)
	stateStore := rundmc.NewStateStore(properties)

	nstar := rundmc.NewNstarRunner(nstarPath, tarPath, linux_command_runner.New())
	stopper := stopper.New(stopper.NewRuncStateCgroupPathResolver("/run/runc"), nil, retrier.New(retrier.ConstantBackoff(10, 1*time.Second), nil))
	return rundmc.New(depot, template, runcrunner, &goci.BndlLoader{}, nstar, stopper, eventStore, stateStore)
}
Example #16
	"github.com/cloudfoundry/gunk/command_runner/linux_command_runner"
	"github.com/pivotal-golang/lager"
	"github.com/pivotal-golang/lager/lagertest"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Logging Runner", func() {
	var innerRunner command_runner.CommandRunner
	var logger *lagertest.TestLogger

	var runner *logging.Runner

	BeforeEach(func() {
		innerRunner = linux_command_runner.New()
		logger = lagertest.NewTestLogger("test")
	})

	JustBeforeEach(func() {
		runner = &logging.Runner{
			CommandRunner: innerRunner,
			Logger:        logger,
		}
	})

	It("logs the duration it took to run the command", func() {
		err := runner.Run(exec.Command("sleep", "1"))
		Expect(err).ToNot(HaveOccurred())

		Expect(logger.TestSink.Logs()).To(HaveLen(2))
Example #17
func main() {
	if reexec.Init() {
		return
	}

	var insecureRegistries vars.StringList
	flag.Var(
		&insecureRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	var persistentImages vars.StringList
	flag.Var(
		&persistentImages,
		"persistentImage",
		"Image which should never be garbage collected. (Can be specified multiple times)",
	)

	var dnsServers vars.StringList
	flag.Var(
		&dnsServers,
		"dnsServer",
		"DNS server IP address to use instead of automatically determined servers. (Can be specified multiple times)",
	)

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	logger, reconfigurableSink := cf_lager.New("garden-linux")
	initializeDropsonde(logger)

	if *binPath == "" {
		missing("-bin")
	}

	if *stateDirPath == "" {
		missing("-stateDir")
	}

	if *depotPath == "" {
		missing("-depot")
	}

	if len(*tag) > 2 {
		println("-tag parameter must be less than 3 characters long")
		println()
		flag.Usage()
		return
	}

	_, dynamicRange, err := net.ParseCIDR(*networkPool)
	if err != nil {
		logger.Fatal("failed-to-parse-network-pool", err)
	}

	subnetPool, err := subnets.NewSubnets(dynamicRange)
	if err != nil {
		logger.Fatal("failed-to-create-subnet-pool", err)
	}

	portPoolState, err := port_pool.LoadState(path.Join(*stateDirPath, "port_pool.json"))
	if err != nil {
		logger.Error("failed-to-parse-pool-state", err)
	}

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize), portPoolState)
	if err != nil {
		logger.Fatal("invalid pool range", err)
	}

	useKernelLogging := true
	switch *iptablesLogMethod {
	case "nflog":
		useKernelLogging = false
	case "kernel":
		/* noop */
	default:
		println("-iptablesLogMethod value not recognized")
		println()
		flag.Usage()
		return
	}

	config := sysconfig.NewConfig(*tag, *allowHostAccess, dnsServers.List)

	runner := sysconfig.NewRunner(config, linux_command_runner.New())

	if err := os.MkdirAll(*graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := selectGraphDriver(logger, *graphDriverName, *graphRoot)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(*graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: *graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	metricsProvider := metrics.NewMetrics(logger, backingStoresPath, *depotPath)

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		metrics.StartDebugServer(dbgAddr, reconfigurableSink, metricsProvider)
	}

	dockerGraph, err := graph.NewGraph(*graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: *graphRoot,
		}
	}

	repo := container_repository.New()
	retainer := cleaner.NewRetainer()

	repoFetcher := &repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: *rootFSPath,
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				*dockerRegistry,
				cake,
				distclient.NewDialer(insecureRegistries.List),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	maxId := uint32(sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID()))
	mappingList := rootfs_provider.MappingList{
		{
			ContainerID: 0,
			HostID:      maxId,
			Size:        1,
		},
		{
			ContainerID: 1,
			HostID:      1,
			Size:        maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	cleaner := cleaner.NewOvenCleaner(
		retainer,
		cleaner.NewThreshold(int64(*graphCleanupThreshold)*1024*1024),
	)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)
	cakeOrdinator := rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator, nil, cleaner)

	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages.List)

	rootfsCleaner := &linux_backend.RootFSCleaner{
		FilePaths: []string{
			"/tmp", "/proc", "/sys", "/dev", "/etc", "/etc/config", "/etc/hostname",
			"/etc/hosts", "/etc/resolv.conf",
		},
	}

	if *externalIP == "" {
		ip, err := localip.LocalIP()
		if err != nil {
			panic("couldn't determine local IP to use for -externalIP parameter. You can use the -externalIP flag to pass an external IP")
		}

		externalIP = &ip
	}

	parsedExternalIP := net.ParseIP(*externalIP)
	if parsedExternalIP == nil {
		panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP))
	}

	var quotaManager linux_container.QuotaManager = &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{quotaedGraphDriver},
	}

	ipTablesMgr := createIPTablesManager(config, runner, logger)
	injector := &provider{
		useKernelLogging: useKernelLogging,
		chainPrefix:      config.IPTables.Filter.InstancePrefix,
		runner:           runner,
		log:              logger,
		portPool:         portPool,
		ipTablesMgr:      ipTablesMgr,
		sysconfig:        config,
		quotaManager:     quotaManager,
	}

	currentContainerVersion, err := semver.Make(CurrentContainerVersion)
	if err != nil {
		logger.Fatal("failed-to-parse-container-version", err)
	}

	pool := resource_pool.New(
		logger,
		*binPath,
		*depotPath,
		config,
		cakeOrdinator,
		rootfsCleaner,
		mappingList,
		parsedExternalIP,
		*mtu,
		subnetPool,
		bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}),
		ipTablesMgr,
		injector,
		iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")),
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
		currentContainerVersion,
		system.MkdirChowner{},
	)

	systemInfo := sysinfo.NewProvider(*depotPath)

	backend := linux_backend.New(logger, pool, repo, injector, systemInfo, layercake.GraphPath(*graphRoot), *snapshotsPath, int(*maxContainers))

	err = backend.Setup()
	if err != nil {
		logger.Fatal("failed-to-set-up-backend", err)
	}

	graceTime := *containerGraceTime

	gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	clock := clock.NewClock()
	metronNotifier := metrics.NewPeriodicMetronNotifier(logger, metricsProvider, *metricsEmissionInterval, clock)
	metronNotifier.Start()

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals

		portPoolState = portPool.RefreshState()
		port_pool.SaveState(path.Join(*stateDirPath, "port_pool.json"), portPoolState)

		gardenServer.Stop()
		metronNotifier.Stop()

		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
Example #18
var _ = Describe("Process tracker", func() {
	var (
		processTracker process_tracker.ProcessTracker
		tmpdir         string
	)

	BeforeEach(func() {
		var err error

		tmpdir, err = ioutil.TempDir("", "process-tracker-tests")
		Expect(err).ToNot(HaveOccurred())

		err = os.MkdirAll(filepath.Join(tmpdir, "bin"), 0755)
		Expect(err).ToNot(HaveOccurred())

		processTracker = process_tracker.New(tmpdir, iodaemonBin, linux_command_runner.New())
	})

	AfterEach(func() {
		os.RemoveAll(tmpdir)
	})

	Describe("Running processes", func() {
		It("runs the process and returns its exit code", func() {
			cmd := exec.Command("bash", "-c", "exit 42")

			process, err := processTracker.Run("555", cmd, garden.ProcessIO{}, nil, nil)
			Expect(err).NotTo(HaveOccurred())

			status, err := process.Wait()
			Expect(err).ToNot(HaveOccurred())
Example #19
package linux_command_runner_test

import (
	"os"
	"os/exec"
	"syscall"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/gunk/command_runner/linux_command_runner"
)

var _ = Describe("Running commands", func() {
	It("runs the command and returns nil", func() {
		runner := linux_command_runner.New()

		cmd := exec.Command("ls")
		Expect(cmd.ProcessState).To(BeNil())

		err := runner.Run(cmd)
		Expect(err).ToNot(HaveOccurred())

		Expect(cmd.ProcessState).ToNot(BeNil())
	})

	Context("when the command fails", func() {
		It("returns an error", func() {
			runner := linux_command_runner.New()

			err := runner.Run(exec.Command("/bin/bash", "-c", "exit 1"))
Example #20
func wireVolumeCreator(logger lager.Logger, graphRoot string) *rootfs_provider.CakeOrdinator {
	logger = logger.Session("volume-creator", lager.Data{"graphRoot": graphRoot})
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger}

	if err := os.MkdirAll(graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	graphRetrier := &retrier.Retrier{
		Timeout:         100 * time.Second,
		PollingInterval: 500 * time.Millisecond,
		Clock:           clock.NewClock(),
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: graphRetrier,
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  graphRetrier,
		RootPath: graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	dockerGraph, err := graph.NewGraph(graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: graphRoot,
		}
	}

	repoFetcher := &repository_fetcher.CompositeFetcher{
		LocalFetcher: &repository_fetcher.Local{
			Cake:              cake,
			DefaultRootFSPath: *rootFSPath,
			IDProvider:        repository_fetcher.LayerIDProvider{},
		},
	}

	maxId := sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID())
	mappingList := rootfs_provider.MappingList{
		{
			FromID: 0,
			ToID:   maxId,
			Size:   1,
		},
		{
			FromID: 1,
			ToID:   1,
			Size:   maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	layerCreator := rootfs_provider.NewLayerCreator(
		cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)

	cakeOrdinator := rootfs_provider.NewCakeOrdinator(
		cake, repoFetcher, layerCreator, nil, logger.Session("cake-ordinator"),
	)
	return cakeOrdinator
}
Example #21
package linux_command_runner_test

import (
	"os"
	"os/exec"
	"syscall"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/cloudfoundry/gunk/command_runner/linux_command_runner"
)

var _ = Describe("Running commands", func() {
	It("runs the command and returns nil", func() {
		runner := linux_command_runner.New(false)

		cmd := &exec.Cmd{Path: "ls"}
		Expect(cmd.ProcessState).To(BeNil())

		err := runner.Run(cmd)
		Expect(err).ToNot(HaveOccurred())

		Expect(cmd.ProcessState).ToNot(BeNil())
	})

	It("wires in debugging to stdout/stderr", func() {
		runner := linux_command_runner.New(true)

		cmd := &exec.Cmd{
			Path: "/bin/bash",
Example #22
	BeforeEach(func() {
		var err error

		tmpdir, err = ioutil.TempDir("", "process-tracker-tests")
		Expect(err).ToNot(HaveOccurred())

		err = os.MkdirAll(filepath.Join(tmpdir, "bin"), 0755)
		Expect(err).ToNot(HaveOccurred())

		err = copyFile(iodaemonBin, filepath.Join(tmpdir, "bin", "iodaemon"))
		Expect(err).ToNot(HaveOccurred())

		signaller = &process_tracker.LinkSignaller{}

		processTracker = process_tracker.New(tmpdir, linux_command_runner.New())
	})

	AfterEach(func() {
		os.RemoveAll(tmpdir)
	})

	Describe("Running processes", func() {
		It("runs the process and returns its exit code", func() {
			cmd := exec.Command("bash", "-c", "exit 42")

			process, err := processTracker.Run(555, cmd, garden.ProcessIO{}, nil, signaller)
			Expect(err).NotTo(HaveOccurred())

			status, err := process.Wait()
			Expect(err).ToNot(HaveOccurred())
Example #23
		containerHandle = fmt.Sprintf("h-%d", GinkgoParallelNode())
		iptablesChain = ""
		externalIP = nil
		containerIP = nil
	})

	JustBeforeEach(func() {
		spec = &kawasaki.PortForwarderSpec{
			IPTableChain: iptablesChain,
			ExternalIP:   externalIP,
			ContainerIP:  containerIP,
			FromPort:     externalPort,
			ToPort:       containerPort,
		}

		forwarder = iptables.NewPortForwarder(linux_command_runner.New())
	})

	Context("when NetworkConfig is valid", func() {
		BeforeEach(func() {
			externalIP = net.ParseIP("127.0.0.1")
			containerIP = net.ParseIP("127.0.0.2")
			iptablesChain = fmt.Sprintf("chain-%s", containerHandle)

			createChainCmd := exec.Command("iptables", "-w", "-t", "nat", "-N", iptablesChain)
			Expect(createChainCmd.Run()).To(Succeed())
		})

		AfterEach(func() {
			// clean up rules created by PortForwarder
			deleteRuleCmd := exec.Command(