func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		// Print the error via a format verb (passing err.Error() as the format
		// string would misbehave if the message contains '%').
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	config := app.NewProxyConfig()
	config.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s, err := app.NewProxyServerDefault(config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	if err = s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
// Run the server. This will pick the appropriate server and run it.
func (hk *HyperKube) Run(args []string) error {
	// If we are called directly, parse all flags up to the first real
	// argument. That should be the server to run.
	baseCommand := path.Base(args[0])
	serverName := baseCommand
	if serverName == hk.Name {
		args = args[1:]

		baseFlags := hk.Flags()
		baseFlags.SetInterspersed(false) // Only parse flags up to the next real command
		err := baseFlags.Parse(args)
		if err != nil || hk.helpFlagVal {
			if err != nil {
				hk.Println("Error:", err)
			}
			hk.Usage()
			return err
		}

		verflag.PrintAndExitIfRequested()

		args = baseFlags.Args()
		if len(args) > 0 && len(args[0]) > 0 {
			serverName = args[0]
			baseCommand = baseCommand + " " + serverName
			args = args[1:]
		} else {
			err = errors.New("No server specified")
			hk.Printf("Error: %v\n\n", err)
			hk.Usage()
			return err
		}
	}

	s, err := hk.FindServer(serverName)
	if err != nil {
		hk.Printf("Error: %v\n\n", err)
		hk.Usage()
		return err
	}

	s.Flags().AddFlagSet(hk.Flags())
	err = s.Flags().Parse(args)
	if err != nil || hk.helpFlagVal {
		if err != nil {
			hk.Printf("Error: %v\n\n", err)
		}
		s.Usage()
		return err
	}

	verflag.PrintAndExitIfRequested()

	util.InitLogs()
	defer util.FlushLogs()

	err = s.Run(s, s.Flags().Args())
	if err != nil {
		hk.Println("Error:", err)
	}

	return err
}
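// For context, a minimal sketch of how a hyperkube-style entry point might
// register servers and hand control to Run. This is illustrative only: the
// constructor names (NewKubeAPIServer, NewScheduler, etc.) and the RunToExit
// helper are assumptions, not taken from this source.
func main() {
	hk := HyperKube{
		Name: "hyperkube",
		Long: "An all-in-one binary that can run any of the Kubernetes servers.",
	}

	// Each registered server becomes a valid first argument to Run.
	hk.AddServer(NewKubeAPIServer())
	hk.AddServer(NewKubeControllerManager())
	hk.AddServer(NewScheduler())
	hk.AddServer(NewKubelet())
	hk.AddServer(NewKubeProxy())

	// Assumed wrapper: calls hk.Run(os.Args) and exits non-zero on error.
	hk.RunToExit(os.Args)
}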
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.GroupAndVersion()})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config.
	kubeClient.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServerURL, Version: testapi.Extensions.GroupAndVersion()})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	//               1 pod infra container + 2 containers from the URL on first Kubelet +
	//               1 pod infra container + 2 containers from the URL on second Kubelet +
	//               1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	if *reportDir != "" {
		if err := os.MkdirAll(*reportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		}
		defer CoreDump(*reportDir)
	}

	if testContext.Provider == "" {
		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
	}

	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)
		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable skipped tests unless they are explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	c, err := loadClient()
	if err != nil {
		glog.Fatal("Error loading client: ", err)
	}

	// Delete any namespaces except default and kube-system. This ensures no
	// lingering resources are left over from a previous test run.
	if testContext.CleanStart {
		deleted, err := deleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault})
		if err != nil {
			t.Errorf("Error deleting orphaned namespaces: %v", err)
		}
		glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
		if err := waitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil {
			glog.Fatalf("Failed to delete orphaned namespaces %v: %v", deleted, err)
		}
	}

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
		t.Errorf("Error waiting for all pods to be running and ready: %v", err)
		return
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}