Go usage examples of etcd.SetLogger, the go-etcd client library's hook for directing its internal logging, collected from open-source projects.

Example #1
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	endpoint := "unix:///var/run/docker.sock"
	dockerClient, err := docker.NewClient(endpoint)
	if err != nil {
		glog.Fatal("Couldn't connnect to docker.")
	}

	hostname := []byte(*hostnameOverride)
	if string(hostname) == "" {
		// Note: We use exec here instead of os.Hostname() because we
		// want the FQDN, and this is the easiest way to get it.
		hostname, err = exec.Command("hostname", "-f").Output()
		if err != nil {
			glog.Fatalf("Couldn't determine hostname: %v", err)
		}
	}

	my_kubelet := kubelet.Kubelet{
		Hostname:           string(hostname),
		DockerClient:       dockerClient,
		FileCheckFrequency: *fileCheckFrequency,
		SyncFrequency:      *syncFrequency,
		HTTPCheckFrequency: *httpCheckFrequency,
	}
	my_kubelet.RunKubelet(*config, *manifestUrl, *etcdServers, *address, *port)
}
Example #2
func main() {
	flag.Parse()
	rand.Seed(time.Now().UTC().UnixNano())

	// Set up logger for etcd client
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	endpoint := "unix:///var/run/docker.sock"
	dockerClient, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal("Couldn't connnect to docker.")
	}

	hostname := []byte(*hostnameOverride)
	if string(hostname) == "" {
		hostname, err = exec.Command("hostname", "-f").Output()
		if err != nil {
			log.Fatalf("Couldn't determine hostname: %v", err)
		}
	}

	my_kubelet := kubelet.Kubelet{
		Hostname:           string(hostname),
		DockerClient:       dockerClient,
		FileCheckFrequency: *fileCheckFrequency,
		SyncFrequency:      *syncFrequency,
		HTTPCheckFrequency: *httpCheckFrequency,
	}
	my_kubelet.RunKubelet(*file, *manifestUrl, *etcdServers, *address, *port)
}
Example #3
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	glog.Infof("Using configuration file %s and etcd_servers %s", *config_file, *etcd_servers)

	proxyConfig := config.NewServiceConfig()

	// Create a configuration source that handles configuration from etcd.
	etcdClient := etcd.NewClient([]string{*etcd_servers})
	config.NewConfigSourceEtcd(etcdClient,
		proxyConfig.GetServiceConfigurationChannel("etcd"),
		proxyConfig.GetEndpointsConfigurationChannel("etcd"))

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*config_file,
		proxyConfig.GetServiceConfigurationChannel("file"),
		proxyConfig.GetEndpointsConfigurationChannel("file"))

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	proxyConfig.RegisterServiceHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	proxyConfig.RegisterEndpointsHandler(loadBalancer)

	// Just loop forever for now...
	select {}

}
Example #4
File: proxy.go  Project: nvdnkpr/kubernetes
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if *master != "" {
		glog.Infof("Using api calls to get config %v", *master)
		//TODO: add auth info
		client, err := client.New(*master, nil)
		if err != nil {
			glog.Fatalf("Invalid -master: %v", err)
		}
		config.NewSourceAPI(
			client,
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	}

	// Create a configuration source that handles configuration from etcd.
	if len(etcdServerList) > 0 && *master == "" {
		glog.Infof("Using etcd servers %v", etcdServerList)

		// Set up logger for etcd client
		etcd.SetLogger(util.NewLogger("etcd "))
		etcdClient := etcd.NewClient(etcdServerList)
		config.NewConfigSourceEtcd(etcdClient,
			serviceConfig.Channel("etcd"),
			endpointsConfig.Channel("etcd"))
	}

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*configFile,
		serviceConfig.Channel("file"),
		endpointsConfig.Channel("file"))
	glog.Infof("Using configuration file %s", *configFile)

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
Example #5
func main() {
	flag.Parse()

	// Set up logger for etcd client
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	// Start each component in its own goroutine; the select below blocks forever.
	go api_server()
	go fake_kubelet()
	go controller_manager()

	log.Printf("All components started.\nMaster running at: http://%s:%d\nKubelet running at: http://%s:%d\n",
		*master_address, *master_port,
		*kubelet_address, *kubelet_port)
	select {}
}
Example #6
// New returns a new SkyDNS server.
func New(backend Backend, config *Config) *server {
	if config.Verbose {
		log.Print("set go-etcd logger output to os.Stdout")
		etcd.SetLogger(log.New(os.Stdout, "go-etcd: ", log.LstdFlags))
	}

	return &server{
		backend: backend,
		config:  config,

		group:        new(sync.WaitGroup),
		scache:       cache.New(config.SCache, 0),
		rcache:       cache.New(config.RCache, config.RCacheTtl),
		dnsUDPclient: &dns.Client{Net: "udp", ReadTimeout: 2 * config.ReadTimeout, WriteTimeout: 2 * config.ReadTimeout, SingleInflight: true},
		dnsTCPclient: &dns.Client{Net: "tcp", ReadTimeout: 2 * config.ReadTimeout, WriteTimeout: 2 * config.ReadTimeout, SingleInflight: true},
	}
}
Example #7
func main() {
	flag.Parse()

	if len(*etcd_servers) == 0 || len(*master) == 0 {
		log.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
	}

	// Set up logger for etcd client
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	// Build the replication manager against etcd and the master's API endpoint.
	controllerManager := controller.MakeReplicationManager(etcd.NewClient([]string{*etcd_servers}),
		client.Client{
			Host: "http://" + *master,
		})

	controllerManager.Run(10 * time.Second)
	select {}
}
Example #8
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	if len(*etcdServers) == 0 || len(*master) == 0 {
		glog.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
	}

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	controllerManager := controller.MakeReplicationManager(
		etcd.NewClient([]string{*etcdServers}),
		client.New("http://"+*master, nil))

	controllerManager.Run(10 * time.Second)
	select {}
}
Example #9
func main() {
	flag.Parse()

	if len(*etcd_servers) == 0 || len(*master) == 0 {
		log.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
	}

	// Set up logger for etcd client
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	controllerManager := registry.MakeReplicationManager(etcd.NewClient([]string{*etcd_servers}),
		client.Client{
			Host: "http://" + *master,
		})

	go util.Forever(func() { controllerManager.Synchronize() }, 20*time.Second)
	go util.Forever(func() { controllerManager.WatchControllers() }, 20*time.Second)
	select {}
}
Example #10
func main() {
	flag.Parse()
	rand.Seed(time.Now().UTC().UnixNano())

	// Set up logger for etcd client
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	endpoint := "unix:///var/run/docker.sock"
	dockerClient, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal("Couldn't connnect to docker.")
	}

	my_kubelet := kubelet.Kubelet{
		DockerClient:       dockerClient,
		FileCheckFrequency: *fileCheckFrequency,
		SyncFrequency:      *syncFrequency,
		HTTPCheckFrequency: *httpCheckFrequency,
	}
	my_kubelet.RunKubelet(*file, *manifest_url, *etcd_servers, *address, *port)
}
Example #11
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	glog.Infof("Using configuration file %s and etcd_servers %v", *configFile, etcdServerList)

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// Create a configuration source that handles configuration from etcd.
	etcdClient := etcd.NewClient(etcdServerList)
	config.NewConfigSourceEtcd(etcdClient,
		serviceConfig.Channel("etcd"),
		endpointsConfig.Channel("etcd"))

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*configFile,
		serviceConfig.Channel("file"),
		endpointsConfig.Channel("file"))

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
Example #12
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if net.IP(bindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(bindAddress), iptables.New(exec.New(), protocol))
	if proxier == nil {
		glog.Fatalf("failed to create proxier, aborting")
	}
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// define api config source
	if clientConfig.Host != "" {
		glog.Infof("Using api calls to get config %v", clientConfig.Host)
		client, err := client.New(clientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	} else {

		var etcdClient *etcd.Client

		// Set up etcd client
		if len(etcdServerList) > 0 {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			etcdClient = etcd.NewClient(etcdServerList)
		} else if *etcdConfigFile != "" {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			var err error
			etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)

			if err != nil {
				glog.Fatalf("Error with etcd config file: %v", err)
			}
		}

		// Create a configuration source that handles configuration from etcd.
		if etcdClient != nil {
			glog.Infof("Using etcd servers %v", etcdClient.GetCluster())

			config.NewConfigSourceEtcd(etcdClient,
				serviceConfig.Channel("etcd"),
				endpointsConfig.Channel("etcd"))
		}
	}

	if *healthz_port > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(bindAddress.String()+":"+strconv.Itoa(*healthz_port), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
}
Example #13
File: etcd.go  Project: nivertech/dkron
func init() {
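	// Route go-etcd's internal logging through the application's log writer.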
	etcdc.SetLogger(stdlog.New(log.Writer(), "go-etcd", stdlog.LstdFlags))
}
Example #14
File: etcd.go  Project: Xmagicer/origin
func logEtcd() {
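	// Send go-etcd's internal messages to stderr with a "go-etcd" prefix.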
	etcd.SetLogger(log.New(os.Stderr, "go-etcd", log.LstdFlags))
}
Example #15
File: util.go  Project: nhr/kubernetes
// TODO: Split this up?
func SetupLogging() {
	etcd.SetLogger(util.NewLogger("etcd "))
	// Log the events locally too.
	record.StartLogging(glog.Infof)
}
Example #16
func init() {
	flag.Parse()
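	// Install an etcd client logger only when the etcdDebug flag is set.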
	if *etcdDebug {
		etcd_client.SetLogger(log.New(os.Stderr, "etcd_client: ", 0))
	}
}
Example #17
func (l *EtcdLocator) Init(app *Application, config interface{}) (err error) {
	conf := config.(*EtcdLocatorConf)
	l.logger = app.Logger()
	l.metrics = app.Metrics()

	if l.refreshInterval, err = time.ParseDuration(conf.RefreshInterval); err != nil {
		l.logger.Panic("etcd", "Could not parse refreshInterval",
			LogFields{"error": err.Error(),
				"refreshInterval": conf.RefreshInterval})
		return err
	}
	// default time for the server to be "live"
	if l.defaultTTL, err = time.ParseDuration(conf.DefaultTTL); err != nil {
		l.logger.Panic("etcd",
			"Could not parse etcd default TTL",
			LogFields{"value": conf.DefaultTTL, "error": err.Error()})
		return err
	}
	if l.defaultTTL < minTTL {
		l.logger.Panic("etcd",
			"default TTL too short",
			LogFields{"value": conf.DefaultTTL})
		return ErrMinTTL
	}
	if l.retryDelay, err = time.ParseDuration(conf.RetryDelay); err != nil {
		l.logger.Panic("etcd",
			"Could not parse etcd 'retryDelay'",
			LogFields{"value": conf.RetryDelay, "error": err.Error()})
		return err
	}
	if l.maxJitter, err = time.ParseDuration(conf.MaxJitter); err != nil {
		l.logger.Panic("etcd",
			"Could not parse etcd 'maxJitter'",
			LogFields{"value": conf.MaxJitter, "error": err.Error()})
		return err
	}
	if l.maxDelay, err = time.ParseDuration(conf.MaxDelay); err != nil {
		l.logger.Panic("etcd",
			"Could not parse etcd 'maxDelay'",
			LogFields{"value": conf.MaxDelay, "error": err.Error()})
		return err
	}
	l.maxRetries = conf.MaxRetries

	l.serverList = conf.Servers
	l.dir = path.Clean(conf.Dir)

	// Use the hostname and port of the current server as the etcd key.
	l.url = app.Router().URL()
	uri, err := url.ParseRequestURI(l.url)
	if err != nil {
		l.logger.Panic("etcd", "Error parsing router URL", LogFields{
			"error": err.Error(), "url": l.url})
		return err
	}
	if len(uri.Host) > 0 {
		l.key = path.Join(l.dir, uri.Host)
	}

	if l.logger.ShouldLog(INFO) {
		l.logger.Info("etcd", "connecting to etcd servers",
			LogFields{"list": strings.Join(l.serverList, ";")})
	}
	etcd.SetLogger(log.New(&LogWriter{l.logger, "etcd", DEBUG}, "", 0))
	l.client = etcd.NewClient(l.serverList)
	l.client.CheckRetry = l.checkRetry

	// create the push hosts directory (if not already there)
	if _, err = l.client.CreateDir(l.dir, 0); err != nil {
		if !IsEtcdKeyExist(err) {
			l.logger.Panic("etcd", "etcd createDir error", LogFields{
				"error": err.Error()})
			return err
		}
	}
	if err = l.Register(); err != nil {
		l.logger.Panic("etcd", "Could not register with etcd",
			LogFields{"error": err.Error()})
		return err
	}
	if l.contacts, err = l.getServers(); err != nil {
		l.logger.Panic("etcd", "Could not fetch contact list",
			LogFields{"error": err.Error()})
		return err
	}

	l.closeWait.Add(2)
	go l.registerLoop()
	go l.fetchLoop()
	return nil
}
Example #18
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	etcd.SetLogger(util.NewLogger("etcd "))

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient tools.EtcdClient
	if len(etcdServerList) > 0 {
		glog.Infof("Watching for etcd configs at %v", etcdServerList)
		etcdClient = etcd.NewClient(etcdServerList)
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, 30*time.Second, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		cadvisorClient,
		etcdClient,
		*rootDirectory)

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// resynchronize periodically
	// TODO: make this part of PodConfig so that it is only delivered after syncFrequency has elapsed without
	// an update
	go util.Forever(cfg.Sync, *syncFrequency)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), http.DefaultServeMux, *address, *port)
		}, 0)
	}

	// runs forever
	select {}
}
Example #19
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	if *runonce {
		exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
		if len(etcdServerList) > 0 {
			glog.Fatalf(exclusiveFlag, "--etcd_servers")
		}
		if *enableServer {
			glog.Infof("--runonce is set, disabling server")
			*enableServer = false
		}
	}

	etcd.SetLogger(util.NewLogger("etcd "))

	// Log the events locally too.
	record.StartLogging(glog.Infof)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient *etcd.Client
	if len(etcdServerList) > 0 {
		etcdClient = etcd.NewClient(etcdServerList)
	} else if *etcdConfigFile != "" {
		var err error
		etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
		if err != nil {
			glog.Fatalf("Error with etcd config file: %v", err)
		}
	}

	if etcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		etcdClient,
		*rootDirectory,
		*networkContainerImage,
		*syncFrequency,
		float32(*registryPullQPS),
		*registryBurst,
		*minimumGCAge,
		*maxContainerCount)

	k.BirthCry()

	go func() {
		util.Forever(func() {
			err := k.GarbageCollectContainers()
			if err != nil {
				glog.Errorf("Garbage collect failed: %v", err)
			}
		}, time.Minute*1)
	}()

	go func() {
		defer util.HandleCrash()
		// TODO: Monitor this connection, reconnect if needed?
		glog.V(1).Infof("Trying to create cadvisor client.")
		cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
		if err != nil {
			glog.Errorf("Error on creating cadvisor client: %v", err)
			return
		}
		glog.V(1).Infof("Successfully created cadvisor client.")
		k.SetCadvisorClient(cadvisorClient)
	}()

	// TODO: These should probably become more plugin-ish: register a factory func
	// in each checker's init(), iterate those here.
	health.AddHealthChecker(health.NewExecHealthChecker(k))
	health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker(&health.TCPHealthChecker{})

	// process pods and exit.
	if *runonce {
		if _, err := k.RunOnce(cfg.Updates()); err != nil {
			glog.Fatalf("--runonce failed: %v", err)
		}
		return
	}

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
		}, 0)
	}

	// runs forever
	select {}
}
Example #20
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if clientConfig.Host != "" {
		glog.Infof("Using api calls to get config %v", clientConfig.Host)
		client, err := client.New(clientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	} else {

		var etcdClient *etcd.Client

		// Set up etcd client
		if len(etcdServerList) > 0 {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			etcdClient = etcd.NewClient(etcdServerList)
		} else if *etcdConfigFile != "" {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			var err error
			etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)

			if err != nil {
				glog.Fatalf("Error with etcd config file: %v", err)
			}
		}

		// Create a configuration source that handles configuration from etcd.
		if etcdClient != nil {
			glog.Infof("Using etcd servers %v", etcdClient.GetCluster())

			config.NewConfigSourceEtcd(etcdClient,
				serviceConfig.Channel("etcd"),
				endpointsConfig.Channel("etcd"))
		}
	}

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(bindAddress), iptables.New(exec.New()))
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	proxier.SyncLoop()
}
Example #21
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("long-running integration test")
	}

	repoInfos := getUserRepos(t, *repoOwner)

	type nodeInfo struct {
		cmd     *exec.Cmd
		baseURL string
	}
	nodes := map[string]*nodeInfo{}

	repos := map[repoInfo]vcs.Repository{}
	var reposMu sync.Mutex

	if *etcdDebugLog {
		etcd_client.SetLogger(log.New(os.Stderr, "etcd: ", 0))
	}

	withEtcd(t, func(etcdConfig *config.Config, ec *etcd_client.Client) {
		defer func() {
			if *waitBeforeExit {
				log.Printf("\n\nTest run ended. Ctrl-C to exit.")
				select {}
			}
		}()

		b := datad.NewEtcdBackend("/datad/vcs", ec)
		cc := NewClient(datad.NewClient(b), nil)

		if err := exec.Command("go", "install", "sourcegraph.com/sourcegraph/vcsstore/cmd/vcsstore").Run(); err != nil {
			t.Fatal(err)
		}

		killNode := func(name string, ni *nodeInfo) {
			if ni != nil && ni.cmd != nil {
				ni.cmd.Process.Kill()
				ni.cmd = nil
				delete(nodes, name)
			}
		}

		// Start the nodes and vcsstore servers.
		for i := 0; i < *numNodes; i++ {
			n := 6000 + i
			nodeName := fmt.Sprintf("127.0.0.1:%d", n)
			storageDir := fmt.Sprintf("/tmp/test-vcsstore%d", n)
			cmd := exec.Command("vcsstore", "-v", "-etcd="+etcdConfig.Addr, "-s="+storageDir, "serve", "-datad", "-d", fmt.Sprintf("-http=:%d", n), "-datad-node-name="+nodeName)
			nodes[nodeName] = &nodeInfo{cmd: cmd, baseURL: "http://" + nodeName}
			cmd.Stdout, cmd.Stderr = os.Stderr, os.Stderr
			err := cmd.Start()
			if err != nil {
				t.Fatalf("error starting %v: %s", cmd.Args, err)
			}
			log.Printf("Launched node %s with storage dir %s (%v).", nodeName, storageDir, cmd.Args)
			defer func() {
				killNode(nodeName, nodes[nodeName])
			}()
		}

		// Wait for servers.
		time.Sleep(400 * time.Millisecond)

		// Clone the repositories.
		cloneStart := time.Now()
		var wg sync.WaitGroup
		for _, ri_ := range repoInfos {
			ri := ri_
			wg.Add(1)
			go func() {
				defer wg.Done()
				log.Printf("cloning %v...", ri)
				repo, err := cc.Repository(ri.vcsType, mustParseURL(ri.cloneURL))
				if err != nil {
					t.Errorf("clone %v failed: %s", ri, err)
					return
				}
				err = repo.(vcsclient.RepositoryCloneUpdater).CloneOrUpdate(vcs.RemoteOpts{})
				if err != nil {
					t.Errorf("remote clone %v failed: %s", ri, err)
					return
				}

				reposMu.Lock()
				defer reposMu.Unlock()
				repos[ri] = repo
			}()
		}
		wg.Wait()
		t.Logf("Cloned %d repositories in %s.", len(repoInfos), time.Since(cloneStart))

		performRepoOps := func() error {
			par := parallel.NewRun(1) // keep at 1, libgit2 has concurrency segfaults :(
			// Perform some operations on the repos.
			for ri_, repo_ := range repos {
				ri, repo := ri_, repo_ // copy the loop variables; the par.Do closures may run after the loop advances
				par.Do(func() error {
					commitID, err := repo.ResolveBranch("master")
					if err != nil {
						return fmt.Errorf("repo %v: resolve branch 'master' failed: %s", ri, err)
					}
					commits, _, err := repo.Commits(vcs.CommitsOptions{Head: commitID})
					if err != nil {
						return fmt.Errorf("repo %v: commit log of 'master' failed: %s", ri, err)
					}
					if len(commits) == 0 {
						return fmt.Errorf("repo %v: commit log has 0 entries", ri)
					}
					fs, err := repo.FileSystem(commitID)
					if err != nil {
						return fmt.Errorf("repo %v: filesystem at 'master' failed: %s", ri, err)
					}
					entries, err := fs.ReadDir("/")
					if err != nil {
						return fmt.Errorf("repo %v: readdir '/' at 'master' failed: %s", ri, err)
					}
					_ = entries
					return nil
				})
			}
			return par.Wait()
		}

		opsStart := time.Now()
		err := performRepoOps()
		if err != nil {
			t.Fatalf("before killing any nodes, got error in repo ops: %s", err)
		}
		log.Printf("\n\n\nPerformed various operations on %d repositories in %s.\n\n\n", len(repos), time.Since(opsStart))

		if *numNodes <= 1 {
			t.Fatal("can't test cluster resilience with only 1 node")
		}

		// Kill half of all nodes to test resilience.
		for i := 0; i < *numNodes/2; i++ {
			for name, ni := range nodes {
				reg := datad.NewRegistry(b)
				regKeys, err := reg.KeysForNode(name)
				if err != nil {
					t.Fatal(err)
				}
				killNode(name, ni)
				log.Printf("\n\n\nKilled node %s. Before killing, it had registered keys: %v. Expect to see failures related to these keys in the next set of operations we perform.\n\n\n", name, regKeys)
				break
			}
		}
		time.Sleep(time.Millisecond * 300)
		killedTime := time.Now()

		// After killing nodes, run the same set of operations. We expect some
		// KeyTransportErrors here because the cluster detects that some nodes
		// are down but hasn't had time yet to fetch the data to other nodes.
		//
		// Keep running the operations until we get no more errors.

		log.Printf("\n\n\nAfter killing some nodes, we're going to keep performing VCS ops on the cluster until it fully heals itself and we see no more errors. (We expect some errors until it heals itself.)\n\n\n")

		try := 0
		for {
			err := performRepoOps()
			if err != nil {
				log.Printf("\n\n\nTry #%d (%s): got error %+v\n\n\n", try, time.Since(killedTime), err)
				try++
				time.Sleep(2000 * time.Millisecond)
				continue
			}

			log.Printf("\n\n\nTry #%d: SUCCESS. The cluster healed itself after %s.\n\n\n", try, time.Since(killedTime))
			break
		}
	})
}
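
The common thread in all of these examples is to install a *log.Logger via etcd.SetLogger before constructing the etcd client, so the library's internal messages carry a recognizable prefix and destination. A minimal self-contained sketch of that pattern (assuming the github.com/coreos/go-etcd/etcd import path these projects use; the endpoint and the Get call are illustrative):

package main

import (
	"log"
	"os"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	// Install the logger before creating the client so that every internal
	// go-etcd message is prefixed with "etcd " and written to stderr.
	etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))

	client := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	if _, err := client.Get("/", false, false); err != nil {
		log.Printf("etcd get failed: %v", err)
	}
}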