Example #1
func (cmd *WorkerCommand) houdiniRunner(logger lager.Logger, platform string) (atc.Worker, ifrit.Runner, error) {
	depotDir := filepath.Join(cmd.WorkDir, "containers")

	err := os.MkdirAll(depotDir, 0755)
	if err != nil {
		return atc.Worker{}, nil, fmt.Errorf("failed to create depot dir: %s", err)
	}

	backend := houdini.NewBackend(depotDir)

	server := server.New(
		"tcp",
		cmd.bindAddr(),
		0,
		backend,
		logger,
	)

	worker := atc.Worker{
		Platform: platform,
		Tags:     cmd.Tags,
	}

	worker.Name, err = cmd.workerName()
	if err != nil {
		return atc.Worker{}, nil, err
	}

	return worker, gardenServerRunner{logger, server}, nil
}
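This example does not start the server itself; it returns the worker metadata together with a gardenServerRunner that wraps the server for supervision (ifrit-style). The actual runner type is not shown on this page; the sketch below is a hypothetical adapter in that spirit, assuming ifrit's Runner contract (Run(signals <-chan os.Signal, ready chan<- struct{}) error) and the import paths used elsewhere in these examples.

import (
	"os"

	"github.com/cloudfoundry-incubator/garden/server"
	"github.com/pivotal-golang/lager"
)

// Hypothetical sketch: adapts *server.GardenServer to an ifrit-style Runner
// by starting the server, signalling readiness, and stopping on the first
// signal. The real Concourse type may differ.
type gardenServerRunner struct {
	logger lager.Logger
	server *server.GardenServer
}

func (runner gardenServerRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	if err := runner.server.Start(); err != nil {
		return err
	}

	runner.logger.Info("garden-server-started")
	close(ready)

	<-signals

	runner.server.Stop()
	runner.logger.Info("garden-server-stopped")

	return nil
}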
Example #2
func main() {
	defaultListNetwork := "unix"
	defaultListAddr := "/tmp/garden.sock"
	if os.Getenv("PORT") != "" {
		defaultListNetwork = "tcp"
		defaultListAddr = "0.0.0.0:" + os.Getenv("PORT")
	}
	var listenNetwork = flag.String(
		"listenNetwork",
		defaultListNetwork,
		"how to listen on the address (unix, tcp, etc.)",
	)
	var listenAddr = flag.String(
		"listenAddr",
		defaultListAddr,
		"address to listen on",
	)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, _ := cf_lager.New("garden-windows")

	initializeDropsonde(logger)

	url, err := url.Parse(*containerizerURL)
	if err != nil {
		logger.Fatal("Could not parse containerizer url", err, lager.Data{
			"containerizerURL": containerizerURL,
		})
	}
	client := dotnet.NewClient(logger, url)
	netBackend, err := backend.NewDotNetBackend(client, logger)
	if err != nil {
		logger.Fatal("Server Failed to Start", err)
		os.Exit(1)
	}

	gardenServer := server.New(*listenNetwork, *listenAddr, *containerGraceTime, netBackend, logger)
	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("Server Failed to Start", err)
		os.Exit(1)
	}

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	select {}
}
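Apart from how the backend is wired, most of the main functions on this page follow the same lifecycle: build a backend, call server.New(listenNetwork, listenAddr, containerGraceTime, backend, logger), Start it, log the listen address, and Stop on SIGINT/SIGTERM/SIGHUP. Below is a stripped-down sketch of that skeleton; the fakes.FakeBackend stand-in, the listen address, and the import paths are assumptions for illustration, not a drop-in program.

package main

import (
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/cloudfoundry-incubator/garden/fakes"
	"github.com/cloudfoundry-incubator/garden/server"
	"github.com/pivotal-golang/lager"
)

func main() {
	logger := lager.NewLogger("garden-example")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	// Any garden.Backend implementation can go here; the fake is a placeholder.
	backend := new(fakes.FakeBackend)

	// network, address, container grace time, backend, logger
	gardenServer := server.New("tcp", "0.0.0.0:7777", 5*time.Minute, backend, logger)

	if err := gardenServer.Start(); err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	logger.Info("started", lager.Data{"network": "tcp", "addr": "0.0.0.0:7777"})

	// Stop cleanly on the usual termination signals.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	<-signals

	gardenServer.Stop()
}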
Example #3
File: main.go Project: glyn/pango
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	l, _ := cf_lager.New("guardian")
	log.SetLogger(l)

	if *depotPath == "" {
		missing("-depot")
	}

	if *iodaemonBin == "" {
		missing("-iodaemonBin")
	}

	resolvedRootFSPath, err := filepath.EvalSymlinks(*rootFSPath)
	if err != nil {
		panic(err)
	}

	backend := &gardener.Gardener{
		UidGenerator:  wireUidGenerator(),
		Starter:       wireStarter(),
		Networker:     wireNetworker(),
		Containerizer: wireContainerizer(*depotPath, *iodaemonBin, resolvedRootFSPath),
	}

	gardenServer := server.New(*listenNetwork, *listenAddr, *graceTime, backend, log.Session("api"))

	err = gardenServer.Start()
	if err != nil {
		log.Fatal("failed-to-start-server", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	log.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
Example #4
File: main.go Project: cromega/garden-lame
func main() {
	logger := lager.NewLogger("garden-lame")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	client := NewLameClient(logger)
	backend := lameBackend{logger: logger, Client: client}
	srv := server.New("tcp", ":3000", 5*time.Minute, &backend, logger)
	srv.Start()

	exit := make(chan os.Signal)
	signal.Notify(exit, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	<-exit

	srv.Stop()
}
Example #5
func main() {
	flag.Parse()

	logger := lager.NewLogger("houdini")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	depot, err := filepath.Abs(*containersDir)
	if err != nil {
		logger.Fatal("failed-to-determine-depot-dir", err)
	}

	backend := houdini.NewBackend(depot)

	gardenServer := server.New(*listenNetwork, *listenAddr, *containerGraceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	err = http.ListenAndServe(debugListenAddr, nil)
	if err != nil {
		logger.Fatal("failed-to-start-debug-server", err)
	}

	select {}
}
Example #6
File: main.go Project: digideskio/guardian
func main() {
	if reexec.Init() {
		return
	}

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, _ := cf_lager.New("guardian")

	if *depotPath == "" {
		missing("-depot")
	}

	if *iodaemonBin == "" {
		missing("-iodaemonBin")
	}

	if *nstarBin == "" {
		missing("-nstarBin")
	}

	if *tarBin == "" {
		missing("-tarBin")
	}

	resolvedRootFSPath, err := filepath.EvalSymlinks(*rootFSPath)
	if err != nil {
		panic(err)
	}

	_, networkPoolCIDR, err := net.ParseCIDR(*networkPool)
	if err != nil {
		panic(err)
	}

	interfacePrefix := fmt.Sprintf("w%s", *tag)
	chainPrefix := fmt.Sprintf("w-%s-instance", *tag)
	iptablesMgr := wireIptables(logger, *tag, *allowHostAccess, interfacePrefix, chainPrefix)
	externalIPAddr, err := parseExternalIP(*externalIP)
	if err != nil {
		panic(err)
	}

	sysInfoProvider := sysinfo.NewProvider(*depotPath)

	propManager := properties.NewManager()

	backend := &gardener.Gardener{
		SysInfoProvider: sysInfoProvider,
		UidGenerator:    wireUidGenerator(),
		Starter:         wireStarter(logger, iptablesMgr),
		Networker:       wireNetworker(logger, *tag, networkPoolCIDR, externalIPAddr, iptablesMgr, interfacePrefix, chainPrefix, propManager, *networkModulePath),
		VolumeCreator:   wireVolumeCreator(logger, *graphRoot),
		Containerizer:   wireContainerizer(logger, *depotPath, *iodaemonBin, *nstarBin, *tarBin, resolvedRootFSPath),
		Logger:          logger,
		PropertyManager: propManager,
	}

	gardenServer := server.New(*listenNetwork, *listenAddr, *graceTime, backend, logger.Session("api"))

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
Example #7
		workerServer          *server.GardenServer
		provider              WorkerProvider

		workers    []Worker
		workersErr error
	)

	BeforeEach(func() {
		fakeDB = new(fakes.FakeWorkerDB)
		logger = lagertest.NewTestLogger("test")
		worker = new(gfakes.FakeBackend)

		workerAddr = fmt.Sprintf("0.0.0.0:%d", 8888+GinkgoParallelNode())
		workerBaggageclaimURL = "http://1.2.3.4:7788"

		workerServer = server.New("tcp", workerAddr, 0, worker, logger)
		err := workerServer.Start()
		Expect(err).NotTo(HaveOccurred())

		provider = NewDBWorkerProvider(logger, fakeDB, nil, immediateRetryPolicy{})
	})

	AfterEach(func() {
		workerServer.Stop()

		Eventually(func() error {
			conn, err := net.Dial("tcp", workerAddr)
			if err == nil {
				conn.Close()
			}
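The AfterEach above (cut off in this excerpt) waits for the listener to go away by polling net.Dial inside Eventually; Examples #8, #14 and #15 use an ErrorDialing helper for the mirror-image wait at startup. A minimal sketch of such a helper is shown below, as an assumption about its shape rather than the suites' actual definition.

import "net"

// ErrorDialing returns a poll function suitable for Gomega's Eventually:
// it reports the error from dialing the given address and closes any
// connection that does succeed. (Illustrative sketch only.)
func ErrorDialing(network, addr string) func() error {
	return func() error {
		conn, err := net.Dial(network, addr)
		if err == nil {
			conn.Close()
		}

		return err
	}
}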
Example #8
	BeforeEach(func() {
		logger = lagertest.NewTestLogger("test")

		var err error
		tmpdir, err = ioutil.TempDir(os.TempDir(), "api-server-test")
		Ω(err).ShouldNot(HaveOccurred())

		socketPath = path.Join(tmpdir, "api.sock")
		serverBackend = new(fakes.FakeBackend)
		serverContainerGraceTime = 42 * time.Second

		apiServer = server.New(
			"unix",
			socketPath,
			serverContainerGraceTime,
			serverBackend,
			logger,
		)

		err = apiServer.Start()
		Ω(err).ShouldNot(HaveOccurred())

		isRunning = true

		Eventually(ErrorDialing("unix", socketPath)).ShouldNot(HaveOccurred())

		apiClient = client.New(connection.New("unix", socketPath))
	})

	AfterEach(func() {
Example #9
File: main.go Project: timani/garden-linux
func main() {
	if reexec.Init() {
		return
	}

	var insecureRegistries vars.StringList
	flag.Var(
		&insecureRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	var persistentImages vars.StringList
	flag.Var(
		&persistentImages,
		"persistentImage",
		"Image which should never be garbage collected. (Can be specified multiple times)",
	)

	var dnsServers vars.StringList
	flag.Var(
		&dnsServers,
		"dnsServer",
		"DNS server IP address to use instead of automatically determined servers. (Can be specified multiple times)",
	)

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	logger, reconfigurableSink := cf_lager.New("garden-linux")
	initializeDropsonde(logger)

	if *binPath == "" {
		missing("-bin")
	}

	if *stateDirPath == "" {
		missing("-stateDir")
	}

	if *depotPath == "" {
		missing("-depot")
	}

	if len(*tag) > 2 {
		println("-tag parameter must be less than 3 characters long")
		println()
		flag.Usage()
		return
	}

	_, dynamicRange, err := net.ParseCIDR(*networkPool)
	if err != nil {
		logger.Fatal("failed-to-parse-network-pool", err)
	}

	subnetPool, err := subnets.NewSubnets(dynamicRange)
	if err != nil {
		logger.Fatal("failed-to-create-subnet-pool", err)
	}

	portPoolState, err := port_pool.LoadState(path.Join(*stateDirPath, "port_pool.json"))
	if err != nil {
		logger.Error("failed-to-parse-pool-state", err)
	}

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize), portPoolState)
	if err != nil {
		logger.Fatal("invalid pool range", err)
	}

	useKernelLogging := true
	switch *iptablesLogMethod {
	case "nflog":
		useKernelLogging = false
	case "kernel":
		/* noop */
	default:
		println("-iptablesLogMethod value not recognized")
		println()
		flag.Usage()
		return
	}

	config := sysconfig.NewConfig(*tag, *allowHostAccess, dnsServers.List)

	runner := sysconfig.NewRunner(config, linux_command_runner.New())

	if err := os.MkdirAll(*graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := selectGraphDriver(logger, *graphDriverName, *graphRoot)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(*graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: *graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	metricsProvider := metrics.NewMetrics(logger, backingStoresPath, *depotPath)

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		metrics.StartDebugServer(dbgAddr, reconfigurableSink, metricsProvider)
	}

	dockerGraph, err := graph.NewGraph(*graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: *graphRoot,
		}
	}

	repo := container_repository.New()
	retainer := cleaner.NewRetainer()

	repoFetcher := &repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: *rootFSPath,
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				*dockerRegistry,
				cake,
				distclient.NewDialer(insecureRegistries.List),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	maxId := uint32(sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID()))
	mappingList := rootfs_provider.MappingList{
		{
			ContainerID: 0,
			HostID:      maxId,
			Size:        1,
		},
		{
			ContainerID: 1,
			HostID:      1,
			Size:        maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	cleaner := cleaner.NewOvenCleaner(
		retainer,
		cleaner.NewThreshold(int64(*graphCleanupThreshold)*1024*1024),
	)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)
	cakeOrdinator := rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator, nil, cleaner)

	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages.List)

	rootfsCleaner := &linux_backend.RootFSCleaner{
		FilePaths: []string{
			"/tmp", "/proc", "/sys", "/dev", "/etc", "/etc/config", "/etc/hostname",
			"/etc/hosts", "/etc/resolv.conf",
		},
	}

	if *externalIP == "" {
		ip, err := localip.LocalIP()
		if err != nil {
			panic("couldn't determine local IP to use for -externalIP parameter. You can use the -externalIP flag to pass an external IP")
		}

		externalIP = &ip
	}

	parsedExternalIP := net.ParseIP(*externalIP)
	if parsedExternalIP == nil {
		panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP))
	}

	var quotaManager linux_container.QuotaManager = &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{quotaedGraphDriver},
	}

	ipTablesMgr := createIPTablesManager(config, runner, logger)
	injector := &provider{
		useKernelLogging: useKernelLogging,
		chainPrefix:      config.IPTables.Filter.InstancePrefix,
		runner:           runner,
		log:              logger,
		portPool:         portPool,
		ipTablesMgr:      ipTablesMgr,
		sysconfig:        config,
		quotaManager:     quotaManager,
	}

	currentContainerVersion, err := semver.Make(CurrentContainerVersion)
	if err != nil {
		logger.Fatal("failed-to-parse-container-version", err)
	}

	pool := resource_pool.New(
		logger,
		*binPath,
		*depotPath,
		config,
		cakeOrdinator,
		rootfsCleaner,
		mappingList,
		parsedExternalIP,
		*mtu,
		subnetPool,
		bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}),
		ipTablesMgr,
		injector,
		iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")),
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
		currentContainerVersion,
		system.MkdirChowner{},
	)

	systemInfo := sysinfo.NewProvider(*depotPath)

	backend := linux_backend.New(logger, pool, repo, injector, systemInfo, layercake.GraphPath(*graphRoot), *snapshotsPath, int(*maxContainers))

	err = backend.Setup()
	if err != nil {
		logger.Fatal("failed-to-set-up-backend", err)
	}

	graceTime := *containerGraceTime

	gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	clock := clock.NewClock()
	metronNotifier := metrics.NewPeriodicMetronNotifier(logger, metricsProvider, *metricsEmissionInterval, clock)
	metronNotifier.Start()

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals

		portPoolState = portPool.RefreshState()
		port_pool.SaveState(path.Join(*stateDirPath, "port_pool.json"), portPoolState)

		gardenServer.Stop()
		metronNotifier.Stop()

		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
Example #10
File: main.go Project: vito/warden-docker
func main() {
	flag.Parse()

	maxProcs := runtime.NumCPU()
	prevMaxProcs := runtime.GOMAXPROCS(maxProcs)

	log.Println("set GOMAXPROCS to", maxProcs, "was", prevMaxProcs)

	if *binPath == "" {
		log.Fatalln("must specify -bin with linux backend")
	}

	if *depotPath == "" {
		log.Fatalln("must specify -depot with linux backend")
	}

	if *rootFSPath == "" {
		log.Fatalln("must specify -rootfs with linux backend")
	}

	uidPool := uid_pool.New(uint32(*uidPoolStart), uint32(*uidPoolSize))

	_, ipNet, err := net.ParseCIDR(*networkPool)
	if err != nil {
		log.Fatalln("error parsing CIDR:", err)
	}

	networkPool := network_pool.New(ipNet)

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize))

	runner := linux_command_runner.New(*debug)

	quotaManager, err := quota_manager.New(*depotPath, *binPath, runner)
	if err != nil {
		log.Fatalln("error creating quota manager:", err)
	}

	if *disableQuotas {
		quotaManager.Disable()
	}

	graphDriver, err := graphdriver.New(*graphRoot)
	if err != nil {
		log.Fatalln("error constructing graph driver:", err)
	}

	graph, err := graph.NewGraph(*graphRoot, graphDriver)
	if err != nil {
		log.Fatalln("error constructing graph:", err)
	}

	reg, err := registry.NewRegistry(nil, nil, *dockerRegistry)
	if err != nil {
		log.Fatalln(err)
	}

	pool := container_pool.New(
		*binPath,
		*depotPath,
		*rootFSPath,
		repository_fetcher.Retryable{repository_fetcher.New(reg, graph)},
		graphDriver,
		uidPool,
		networkPool,
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
	)

	systemInfo := system_info.NewProvider(*depotPath)

	backend := linux_backend.New(pool, systemInfo, *snapshotsPath)

	log.Println("setting up backend")

	err = backend.Setup()
	if err != nil {
		log.Fatalln("failed to set up backend:", err)
	}

	log.Println("starting server; listening with", *listenNetwork, "on", *listenAddr)

	graceTime := *containerGraceTime

	wardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend)

	err = wardenServer.Start()
	if err != nil {
		log.Fatalln("failed to start:", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		log.Println("stopping...")
		wardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	select {}
}
Example #11
		workers    []Worker
		workersErr error
	)

	BeforeEach(func() {
		fakeDB = new(fakes.FakeWorkerDB)

		logger = lagertest.NewTestLogger("test")

		workerA = new(gfakes.FakeBackend)
		workerB = new(gfakes.FakeBackend)

		workerAAddr = fmt.Sprintf("0.0.0.0:%d", 8888+GinkgoParallelNode())
		workerBAddr = fmt.Sprintf("0.0.0.0:%d", 9999+GinkgoParallelNode())

		workerAServer = server.New("tcp", workerAAddr, 0, workerA, logger)
		workerBServer = server.New("tcp", workerBAddr, 0, workerB, logger)

		err := workerAServer.Start()
		Ω(err).ShouldNot(HaveOccurred())

		err = workerBServer.Start()
		Ω(err).ShouldNot(HaveOccurred())

		provider = NewDBWorkerProvider(fakeDB, logger)
	})

	JustBeforeEach(func() {
		workers, workersErr = provider.Workers()
	})
Example #12
File: main_test.go Project: savaki/tsa
			userKnownHostsFile string

			userKey    string
			userKeyPub string
		)

		BeforeEach(func() {
			tsaPort = 9800 + GinkgoParallelNode()

			gardenPort := 9001 + GinkgoParallelNode()
			gardenAddr = fmt.Sprintf("127.0.0.1:%d", gardenPort)

			fakeBackend = new(gfakes.FakeBackend)

			gardenServer = gserver.New("tcp", gardenAddr, 0, fakeBackend, lagertest.NewTestLogger("garden"))
			err := gardenServer.Start()
			Ω(err).ShouldNot(HaveOccurred())

			atcServer = ghttp.NewServer()

			hostKey, hostKeyPub = generateSSHKeypair()

			userKnownHosts, err := ioutil.TempFile("", "known-hosts")
			Ω(err).ShouldNot(HaveOccurred())

			defer userKnownHosts.Close()

			userKnownHostsFile = userKnownHosts.Name()

			_, err = fmt.Fprintf(userKnownHosts, "[127.0.0.1]:%d ", tsaPort)
Example #13
File: main.go Project: julz/guardian
func main() {
	depotDir := flag.String(
		"depot",
		"/var/vcap/data/garden-runc/depot",
		"the depot directory to store containers in",
	)

	listenNetwork := flag.String(
		"listenNetwork",
		"tcp",
		"how to listen on the address (unix, tcp, etc.)",
	)

	listenAddr := flag.String(
		"listenAddr",
		"0.0.0.0:7777",
		"address to listen on",
	)

	containerGraceTime := flag.Duration(
		"containerGraceTime",
		0,
		"time after which to destroy idle containers",
	)

	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, _ := cf_lager.New("garden-runc")
	runner := &logging.Runner{
		CommandRunner: linux_command_runner.New(),
		Logger:        logger,
	}

	iodaemonBin, err := gexec.Build("github.com/cloudfoundry-incubator/garden-linux/iodaemon/cmd/iodaemon")
	if err != nil {
		panic(err)
	}

	gdnr := &gardener.Gardener{
		Volumizer: &gardenshed.Shed{},
		Containerizer: &rundmc.Containerizer{
			Repo: rundmc.Depot{
				Dir: *depotDir,
				ActualContainerProvider: &rundmc.RuncContainerFactory{
					Tracker: process_tracker.New("/tmp", iodaemonBin, runner),
				},
			},
		},
	}

	server := server.New(*listenNetwork, *listenAddr, *containerGraceTime, gdnr, logger)
	if err := server.Start(); err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		server.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	select {}
}
Example #14
	protocol "github.com/cloudfoundry-incubator/garden/protocol"
	"github.com/cloudfoundry-incubator/garden/server"
	"github.com/cloudfoundry-incubator/garden/transport"
	"github.com/cloudfoundry-incubator/garden/warden"
	"github.com/cloudfoundry-incubator/garden/warden/fake_backend"
)

var _ = Describe("The Warden server", func() {
	Context("when passed a socket", func() {
		It("listens on the given socket path and chmods it to 0777", func() {
			tmpdir, err := ioutil.TempDir(os.TempDir(), "warden-server-test")
			Expect(err).ToNot(HaveOccurred())

			socketPath := path.Join(tmpdir, "warden.sock")

			wardenServer := server.New("unix", socketPath, 0, fake_backend.New())

			err = wardenServer.Start()
			Expect(err).ToNot(HaveOccurred())

			Eventually(ErrorDialing("unix", socketPath)).ShouldNot(HaveOccurred())

			stat, err := os.Stat(socketPath)
			Expect(err).ToNot(HaveOccurred())

			Expect(int(stat.Mode() & 0777)).To(Equal(0777))
		})

		It("deletes the socket file if it is already there", func() {
			tmpdir, err := ioutil.TempDir(os.TempDir(), "warden-server-test")
			Expect(err).ToNot(HaveOccurred())
Example #15
	AfterEach(func() {
		if tmpdir != "" {
			os.RemoveAll(tmpdir)
		}
	})

	Context("when passed a socket", func() {
		It("listens on the given socket path and chmods it to 0777", func() {
			var err error
			tmpdir, err = ioutil.TempDir(os.TempDir(), "api-server-test")
			Ω(err).ShouldNot(HaveOccurred())

			socketPath := path.Join(tmpdir, "api.sock")

			apiServer := server.New("unix", socketPath, 0, new(fakes.FakeBackend), logger)

			err = apiServer.Start()
			Ω(err).ShouldNot(HaveOccurred())

			Eventually(ErrorDialing("unix", socketPath)).ShouldNot(HaveOccurred())

			stat, err := os.Stat(socketPath)
			Ω(err).ShouldNot(HaveOccurred())

			Ω(int(stat.Mode() & 0777)).Should(Equal(0777))
		})

		It("deletes the socket file if it is already there", func() {
			var err error
			tmpdir, err = ioutil.TempDir(os.TempDir(), "api-server-test")
Example #16
func main() {
	if reexec.Init() {
		return
	}

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	logger, reconfigurableSink := cf_lager.New("garden-linux")
	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		debug.Run(dbgAddr, reconfigurableSink)
	}

	initializeDropsonde(logger)

	if *binPath == "" {
		missing("-bin")
	}

	if *depotPath == "" {
		missing("-depot")
	}

	if len(*tag) > 2 {
		println("-tag parameter must be less than 3 characters long")
		println()
		flag.Usage()
		return
	}

	_, dynamicRange, err := net.ParseCIDR(*networkPool)
	if err != nil {
		logger.Fatal("failed-to-parse-network-pool", err)
	}

	subnetPool, err := subnets.NewSubnets(dynamicRange)
	if err != nil {
		logger.Fatal("failed-to-create-subnet-pool", err)
	}

	// TODO: use /proc/sys/net/ipv4/ip_local_port_range by default (end + 1)
	portPool, err := port_pool.New(uint32(*portPoolStart), uint32(*portPoolSize))
	if err != nil {
		logger.Fatal("invalid pool range", err)
	}

	useKernelLogging := true
	switch *iptablesLogMethod {
	case "nflog":
		useKernelLogging = false
	case "kernel":
		/* noop */
	default:
		println("-iptablesLogMethod value not recognized")
		println()
		flag.Usage()
		return
	}

	config := sysconfig.NewConfig(*tag, *allowHostAccess)

	runner := sysconfig.NewRunner(config, linux_command_runner.New())

	if err := os.MkdirAll(*graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(*graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	dockerGraph, err := graph.NewGraph(*graphRoot, dockerGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	graphMountPoint := mountPoint(logger, *graphRoot)

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: dockerGraphDriver,
	}

	if cake.DriverName() == "btrfs" {
		cake = &layercake.BtrfsCleaningCake{
			Cake:            cake,
			Runner:          runner,
			BtrfsMountPoint: graphMountPoint,
			RemoveAll:       os.RemoveAll,
			Logger:          logger.Session("btrfs-cleanup"),
		}
	}

	retainer := layercake.NewRetainer()
	cake = &layercake.OvenCleaner{
		Cake:     cake,
		Retainer: retainer,
		Logger:   logger.Session("oven-cleaner"),
	}

	lock := repository_fetcher.NewFetchLock()
	repoFetcher := repository_fetcher.Retryable{
		repository_fetcher.NewRemote(
			repository_fetcher.NewRepositoryProvider(
				*dockerRegistry,
				strings.Split(*insecureRegistries, ","),
			),
			cake,
			map[registry.APIVersion]repository_fetcher.VersionedFetcher{
				registry.APIVersion1: &repository_fetcher.RemoteV1Fetcher{
					Cake:      cake,
					Retainer:  retainer,
					GraphLock: lock,
				},
				registry.APIVersion2: &repository_fetcher.RemoteV2Fetcher{
					Cake:      cake,
					Retainer:  retainer,
					GraphLock: lock,
				},
			},
			repository_fetcher.EndpointPinger{},
		),
	}

	maxId := sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID())
	mappingList := rootfs_provider.MappingList{
		{
			FromID: 0,
			ToID:   maxId,
			Size:   1,
		},
		{
			FromID: 1,
			ToID:   1,
			Size:   maxId - 1,
		},
	}

	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Logger: logger,
		Translator: rootfs_provider.NewUidTranslator(
			mappingList, // uid
			mappingList, // gid
		),
	}

	remoteRootFSProvider, err := rootfs_provider.NewDocker(fmt.Sprintf("docker-remote-%s", cake.DriverName()),
		repoFetcher, cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer, clock.NewClock())
	if err != nil {
		logger.Fatal("failed-to-construct-docker-rootfs-provider", err)
	}

	localRootFSProvider, err := rootfs_provider.NewDocker(fmt.Sprintf("docker-local-%s", cake.DriverName()),
		&repository_fetcher.Local{
			Cake:              cake,
			DefaultRootFSPath: *rootFSPath,
			IDProvider:        repository_fetcher.LayerIDProvider{},
		}, cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer, clock.NewClock())
	if err != nil {
		logger.Fatal("failed-to-construct-warden-rootfs-provider", err)
	}

	rootFSProviders := map[string]rootfs_provider.RootFSProvider{
		"":       localRootFSProvider,
		"docker": remoteRootFSProvider,
	}

	if *externalIP == "" {
		ip, err := localip.LocalIP()
		if err != nil {
			panic("couldn't determine local IP to use for -externalIP parameter. You can use the -externalIP flag to pass an external IP")
		}

		externalIP = &ip
	}

	parsedExternalIP := net.ParseIP(*externalIP)
	if parsedExternalIP == nil {
		panic(fmt.Sprintf("Value of -externalIP %s could not be converted to an IP", *externalIP))
	}

	var quotaManager linux_container.QuotaManager = quota_manager.DisabledQuotaManager{}
	if !*disableQuotas {
		quotaManager = &quota_manager.BtrfsQuotaManager{
			Runner:     runner,
			MountPoint: graphMountPoint,
		}
	}

	injector := &provider{
		useKernelLogging: useKernelLogging,
		chainPrefix:      config.IPTables.Filter.InstancePrefix,
		runner:           runner,
		log:              logger,
		portPool:         portPool,
		sysconfig:        config,
		quotaManager:     quotaManager,
	}

	currentContainerVersion, err := semver.Make(CurrentContainerVersion)
	if err != nil {
		logger.Fatal("failed-to-parse-container-version", err)
	}

	pool := resource_pool.New(
		logger,
		*binPath,
		*depotPath,
		config,
		rootFSProviders,
		cake,
		mappingList,
		parsedExternalIP,
		*mtu,
		subnetPool,
		bridgemgr.New("w"+config.Tag+"b-", &devices.Bridge{}, &devices.Link{}),
		injector,
		iptables.NewGlobalChain(config.IPTables.Filter.DefaultChain, runner, logger.Session("global-chain")),
		portPool,
		strings.Split(*denyNetworks, ","),
		strings.Split(*allowNetworks, ","),
		runner,
		quotaManager,
		currentContainerVersion,
	)

	systemInfo := sysinfo.NewProvider(*depotPath)

	backend := linux_backend.New(logger, pool, container_repository.New(), injector, systemInfo, *snapshotsPath, int(*maxContainers))

	err = backend.Setup()
	if err != nil {
		logger.Fatal("failed-to-set-up-backend", err)
	}

	graceTime := *containerGraceTime

	gardenServer := server.New(*listenNetwork, *listenAddr, graceTime, backend, logger)

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	signals := make(chan os.Signal, 1)

	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()

	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}