func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	if testContext.Provider == "" {
		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
	}
	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)
		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("QingYuanClusterTag=%s\n", cloudConfig.ClusterTag)
		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable density test unless it's explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
		glog.Fatalf("Error waiting for all pods to be running and ready: %v", err)
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins.
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
		failReport := &failReporter{}
		r = append(r, failReport)
		defer func() {
			if failReport.failed {
				coreDump(*reportDir)
			}
		}()
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "QingYuan e2e suite", r)
}
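// The failReporter used above is not shown in this listing. Below is a minimal
// sketch of what it might look like, assuming Ginkgo v1's Reporter interface
// (github.com/onsi/ginkgo/types and .../config): it only records whether any
// spec failed, so the deferred coreDump call above can decide whether to
// collect cluster logs. This is an illustration, not the project's actual file.
type failReporter struct {
	failed bool
}

func (f *failReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {}
func (f *failReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                             {}
func (f *failReporter) SpecWillRun(specSummary *types.SpecSummary)                                     {}

func (f *failReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	if specSummary.Failed() {
		f.failed = true
	}
}

func (f *failReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}
func (f *failReporter) SpecSuiteDidEnd(summary *types.SuiteSummary)       {}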
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperqing.Nil(), pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewProxyServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := controllermanager.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) util.InitFlags() util.InitLogs() defer util.FlushLogs() glog.Infof("Creating etcd client pointing to %v", *etcdServer) etcdClient, err := tools.NewEtcdClientStartServerIfNecessary(*etcdServer) if err != nil { glog.Fatalf("Failed to connect to etcd: %v", err) } address := net.ParseIP(*addr) startComponents(etcdClient, newApiClient(address, *port), address, *port) glog.Infof("QingYuan API Server is up and running on http://%s:%d", *addr, *port) select {} }
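// newApiClient is referenced above but not shown in this listing. A minimal
// sketch of one plausible implementation, reusing the client package seen
// elsewhere in this code (client.NewOrDie / client.Config): it points an
// unauthenticated client at the API server just started on addr:port. The
// exact signature here is an assumption, not the project's actual helper.
func newApiClient(addr net.IP, port int) *client.Client {
	apiServerURL := fmt.Sprintf("http://%s:%d", addr, port)
	return client.NewOrDie(&client.Config{Host: apiServerURL})
}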
// Run the server. This will pick the appropriate server and run it.
func (hk *HyperQing) Run(args []string) error {
	// If we are called directly, parse all flags up to the first real
	// argument. That should be the server to run.
	baseCommand := path.Base(args[0])
	serverName := baseCommand
	if serverName == hk.Name {
		args = args[1:]

		baseFlags := hk.Flags()
		baseFlags.SetInterspersed(false) // Only parse flags up to the next real command
		err := baseFlags.Parse(args)
		if err != nil || hk.helpFlagVal {
			if err != nil {
				hk.Println("Error:", err)
			}
			hk.Usage()
			return err
		}

		verflag.PrintAndExitIfRequested()

		args = baseFlags.Args()
		if len(args) > 0 && len(args[0]) > 0 {
			serverName = args[0]
			baseCommand = baseCommand + " " + serverName
			args = args[1:]
		} else {
			err = errors.New("No server specified")
			hk.Printf("Error: %v\n\n", err)
			hk.Usage()
			return err
		}
	}

	s, err := hk.FindServer(serverName)
	if err != nil {
		hk.Printf("Error: %v\n\n", err)
		hk.Usage()
		return err
	}

	util.AddPFlagSetToPFlagSet(hk.Flags(), s.Flags())
	err = s.Flags().Parse(args)
	if err != nil || hk.helpFlagVal {
		if err != nil {
			hk.Printf("Error: %v\n\n", err)
		}
		s.Usage()
		return err
	}

	verflag.PrintAndExitIfRequested()

	util.InitLogs()
	defer util.FlushLogs()

	err = s.Run(s, s.Flags().Args())
	if err != nil {
		hk.Println("Error:", err)
	}

	return err
}
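// Run is typically invoked from the all-in-one binary's entrypoint. A minimal,
// hypothetical sketch of such a main, assuming a HyperQing value whose
// registered servers are resolved via FindServer as in Run above. NewHyperQing
// and AddServer are illustrative names for the construction/registration step,
// not the project's actual API; only hk.Run and os.Args are taken from the code.
func main() {
	hk := NewHyperQing("hyperqing")    // hypothetical constructor; sets hk.Name
	hk.AddServer(NewSchedulerServer()) // hypothetical registration of a sub-server
	if err := hk.Run(os.Args); err != nil {
		os.Exit(1)
	}
}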
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	qingClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel.
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(qingClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that qinglet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so qinglet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12 unique containers in total:
	// 2 pod infra containers + 2 containers from the replication controller +
	// 1 pod infra container + 2 containers from the URL on the first Qinglet +
	// 1 pod infra container + 2 containers from the URL on the second Qinglet +
	// 1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(qingClient)
}
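// ServeCachedManifestFile is used above to hand each fake Qinglet a manifest
// URL, but its body is not part of this listing. A minimal sketch of one way
// to implement it, assuming net/http and net/http/httptest: it serves the
// given manifest contents from an in-process HTTP server and returns the URL
// to fetch them from. This is an illustration, not the project's actual code.
func ServeCachedManifestFile(contents string) (servingAddress string) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/manifest" {
			w.Write([]byte(contents))
			return
		}
		http.NotFound(w, r)
	}))
	return server.URL + "/manifest"
}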