Example #1
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	util.InitFlags()

	// create a client for Kubelet to communicate with API server.
	cl, err := createClientFromFile(config.KubeconfigPath)
	if err != nil {
		glog.Fatal("Failed to create a Client. Exiting.")
	}
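	// Fake cadvisor and Docker clients let the hollow kubelet exercise the real
	// kubelet control loops without touching actual containers.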
	cadvisorInterface := new(cadvisor.Fake)

	fakeDockerClient := &dockertools.FakeDockerClient{}
	fakeDockerClient.VersionInfo = docker.Env{"ApiVersion=1.18"}
	fakeDockerClient.ContainerMap = make(map[string]*docker.Container)
	fakeDockerClient.EnableSleep = true

	hollowKubelet := kubemark.NewHollowKubelet(
		config.NodeName,
		cl,
		cadvisorInterface,
		fakeDockerClient,
		config.KubeletPort,
		config.KubeletReadOnlyPort,
	)
	hollowKubelet.Run()
}
Example #2
func main() {
	// TODO: replace by non k8s code / logs
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	if *argVersion {
		fmt.Println(VERSION)
		os.Exit(0)
	}
	glog.Infof("Running version %s", VERSION)

	kv := kube2vulcand{etcdMutationTimeout: *argEtcdMutationTimeout}

	var err error
	// TODO: Validate input flags.
	if kv.etcdClient, err = newEtcdClient(*argEtcdServer); err != nil {
		glog.Fatalf("Failed to create etcd client - %v", err)
	}

	kubeClient, err := newKubeClient()
	if err != nil {
		glog.Fatalf("Failed to create a kubernetes client: %v", err)
	}

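	// Watch Ingress objects through the extensions API; the handlers registered on kv
	// mirror changes into etcd for vulcand, and the empty select below blocks forever.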
	kv.ingressesStore = watchForIngresses(kubeClient.ExtensionsClient, &kv)

	select {}
}
Example #3
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	util.InitFlags()

	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}

	// create a client to communicate with API server.
	cl, err := createClientFromFile(config.KubeconfigPath)
	if err != nil {
		glog.Fatal("Failed to create a Client. Exiting.")
	}
	clientset := clientset.FromUnversionedClient(cl)

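	// Depending on the requested morph, run either a hollow kubelet or a hollow
	// kube-proxy, both backed by fakes.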
	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()

		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
		fakeDockerClient.EnableSleep = true

		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
		)
		hollowKubelet.Run()
	}

	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

		iptInterface := fakeiptables.NewFake()

		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
Example #4
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	//s.Run(pflag.CommandLine.Args())
}
Example #5
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	glog.V(2).Infof("*** Run Kubeturbo service ***")

	s := app.NewVMTServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
Example #6
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #7
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #8
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	s := controllermanager.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
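Examples #4 through #8 (and #11) share the same bootstrap skeleton: construct a component server, register its flags on pflag.CommandLine, initialize flags and logs, honor the version flag, then hand control to Run. The sketch below distills that pattern into a self-contained program; the Server type, its flags, and the plain pflag.Parse()/version handling are illustrative stand-ins for the Kubernetes servers and the util/verflag helpers (logging setup via util.InitLogs/FlushLogs is omitted), not the real implementations.

package main

import (
	"fmt"
	"os"
	"runtime"

	"github.com/spf13/pflag"
)

// Server stands in for a component such as KubeletServer or CMServer.
type Server struct {
	port    int
	version bool
}

// AddFlags registers the component's flags on the given flag set.
func (s *Server) AddFlags(fs *pflag.FlagSet) {
	fs.IntVar(&s.port, "port", 10250, "port to serve on")
	fs.BoolVar(&s.version, "version", false, "print version information and quit")
}

// Run would normally block, serving until an error occurs.
func (s *Server) Run(args []string) error {
	fmt.Printf("serving on port %d, extra args: %v\n", s.port, args)
	return nil
}

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	s := &Server{}
	s.AddFlags(pflag.CommandLine)
	pflag.Parse() // the examples call util.InitFlags(), which wraps this

	// The examples call verflag.PrintAndExitIfRequested(); a plain flag does the same job here.
	if s.version {
		fmt.Println("v0.0.0")
		os.Exit(0)
	}

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}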
Example #9
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)
	util.InitFlags()

	// create a client for Kubelet to communicate with API server.
	cl, err := createClientFromFile(kubeconfigPath)
	if err != nil {
		glog.Fatal("Failed to create a Client. Exiting.")
	}
	cadvisorInterface := new(cadvisor.Fake)

	testRootDir := makeTempDirOrDie("hollow-kubelet.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for hollow-kubelet", testRootDir)
	fakeDockerClient.VersionInfo = docker.Env{"ApiVersion=1.18"}
	fakeDockerClient.ContainerMap = make(map[string]*docker.Container)
	fakeDockerClient.EnableSleep = true
	kcfg := kubeletapp.SimpleKubelet(
		cl,
		&fakeDockerClient,
		nodeName,
		testRootDir,
		"",        /* manifest-url */
		"0.0.0.0", /* bind address */
		uint(kubeletPort),
		uint(kubeletReadOnlyPort),
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil, /* tls-options */
		cadvisorInterface,
		configFilePath,
		nil, /* cloud-provider */
		kubecontainer.FakeOS{}, /* os-interface */
		20*time.Second,         /* FileCheckFrequency */
		20*time.Second,         /* HTTPCheckFrequency */
		1*time.Minute,          /* MinimumGCAge */
		10*time.Second,         /* NodeStatusUpdateFrequency */
		10*time.Second,         /* SyncFrequency */
		40,                     /* MaxPods */
	)
	kubeletapp.RunKubelet(kcfg)

	select {}
}
Example #10
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
Example #11
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := options.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #12
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)
	util.InitFlags()

	// create a client for Kubelet to communicate with API server.
	cl := client.NewOrDie(&client.Config{Host: fmt.Sprintf("http://%v:%v", apiServer, serverPort), Version: latest.GroupOrDie("").Version})
	cadvisorInterface := new(cadvisor.Fake)

	testRootDir := makeTempDirOrDie("hollow-kubelet.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for hollow-kubelet", testRootDir)
	fakeDockerClient.VersionInfo = docker.Env{"ApiVersion=1.18"}
	kcfg := kubeletapp.SimpleKubelet(
		cl,
		&fakeDockerClient,
		nodeName,
		testRootDir,
		"",        /* manifest-url */
		"0.0.0.0", /* bind address */
		uint(kubeletPort),
		uint(kubeletReadOnlyPort),
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil, /* tls-options */
		cadvisorInterface,
		configFilePath,
		nil, /* cloud-provider */
		kubecontainer.FakeOS{}, /* os-interface */
		20*time.Second,         /* FileCheckFrequency */
		20*time.Second,         /* HTTPCheckFrequency */
		1*time.Minute,          /* MinimumGCAge */
		10*time.Second,         /* NodeStatusUpdateFrequency */
		10*time.Second,         /* SyncFrequency */
		40,                     /* MaxPods */
	)
	kubeletapp.RunKubelet(kcfg, nil)

	select {}
}
Example #13
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	config := app.NewProxyConfig()
	config.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s, err := app.NewProxyServerDefault(config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	if err = s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #14
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	builder := builder.NewSimulatorBuilder()
	builder.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	builder.Init(pflag.CommandLine.Args())
	simulator, err := builder.Build()
	if err != nil {
		glog.Errorf("error getting simulator: %s", err)
		return
	}

	action := simulator.Action()
	namespace := simulator.Namespace()

	// The simulator currently supports the move, get, and provision actions.
	actor := vmtaction.NewKubeActor(simulator.KubeClient(), simulator.Etcd())
	if action == "move" || action == "Move " {
		podName := simulator.PodToMove()
		destinationNode := simulator.Destination()
		podIdentifier := namespace + "/" + podName
		actor.MovePod(podIdentifier, destinationNode, -1)
		return
	} else if action == "get" {
		actor.GetAllNodes()
		return
	} else if action == "provision" {
		podLabel := simulator.Label()
		newReplicas, _ := strconv.Atoi(simulator.NewReplica())
		actor.UpdateReplicas(podLabel, namespace, newReplicas)
		return
	}
}
Example #15
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	//               1 pod infra container + 2 containers from the URL on first Kubelet +
	//               1 pod infra container + 2 containers from the URL on second Kubelet +
	//               1 pod infra container + 1 container from the service test,
	//               for a total of 12 containers created.

	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}