// create a cluster and run test
func runTest(t *Test, pltfrm string) error {
	var err error
	var cluster platform.Cluster

	if pltfrm == "qemu" {
		cluster, err = platform.NewQemuCluster(*QemuImage)
	} else if pltfrm == "gce" {
		cluster, err = platform.NewGCECluster(GCEOpts())
	} else {
		// set err instead of only logging, so we never proceed with a
		// nil cluster and crash on the deferred Destroy below
		err = fmt.Errorf("invalid platform %q", pltfrm)
	}

	if err != nil {
		return fmt.Errorf("Cluster failed: %v", err)
	}
	defer func() {
		if err := cluster.Destroy(); err != nil {
			plog.Errorf("cluster.Destroy(): %v", err)
		}
	}()

	url, err := cluster.GetDiscoveryURL(t.ClusterSize)
	if err != nil {
		return fmt.Errorf("Failed to create discovery endpoint: %v", err)
	}

	cfgs := makeConfigs(url, t.CloudConfig, t.ClusterSize)

	for i := 0; i < t.ClusterSize; i++ {
		_, err := cluster.NewMachine(cfgs[i])
		if err != nil {
			return fmt.Errorf("Cluster failed starting machine: %v", err)
		}
		plog.Infof("%v instance up", pltfrm)
	}

	// pass along all registered native functions
	var names []string
	for k := range t.NativeFuncs {
		names = append(names, k)
	}

	// Cluster -> TestCluster
	tcluster := platform.TestCluster{t.Name, names, cluster}

	// drop kolet binary on machines
	if t.NativeFuncs != nil {
		err = scpKolet(tcluster)
		if err != nil {
			return fmt.Errorf("dropping kolet binary: %v", err)
		}
	}

	// run test
	err = t.Run(tcluster)
	return err
}
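// makeConfigs is called above but not shown in this section. A minimal
// sketch of what it might look like, assuming the cloud-config carries a
// $discovery placeholder that is substituted with the freshly created
// discovery URL; the real helper may differ. Uses the standard strings
// package.
func makeConfigs(url, cfg string, csize int) []string {
	cfg = strings.Replace(cfg, "$discovery", url, -1)

	// every machine in the cluster gets the same substituted config
	var cfgs []string
	for i := 0; i < csize; i++ {
		cfgs = append(cfgs, cfg)
	}
	return cfgs
}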
func runBootchart(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		fmt.Fprintf(os.Stderr, "No args accepted\n")
		os.Exit(2)
	}

	var (
		cluster platform.Cluster
		err     error
	)
	if kolaPlatform == "qemu" {
		cluster, err = platform.NewQemuCluster(kola.QEMUOptions)
	} else if kolaPlatform == "gce" {
		cluster, err = platform.NewGCECluster(kola.GCEOptions)
	} else if kolaPlatform == "aws" {
		cluster, err = platform.NewAWSCluster(kola.AWSOptions)
	} else {
		// set err instead of only printing, so we exit via the error
		// check below rather than dereferencing a nil cluster
		err = fmt.Errorf("invalid platform %q", kolaPlatform)
	}
	if err != nil {
		fmt.Fprintf(os.Stderr, "Cluster failed: %v\n", err)
		os.Exit(1)
	}
	defer cluster.Destroy()

	m, err := cluster.NewMachine("")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Machine failed: %v\n", err)
		os.Exit(1)
	}
	defer m.Destroy()

	ssh, err := m.SSHSession()
	if err != nil {
		fmt.Fprintf(os.Stderr, "SSH failed: %v\n", err)
		os.Exit(1)
	}

	ssh.Stdout = os.Stdout
	ssh.Stderr = os.Stderr
	if err = ssh.Run("systemd-analyze plot"); err != nil {
		fmt.Fprintf(os.Stderr, "SSH failed: %v\n", err)
		os.Exit(1)
	}
}
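// systemd-analyze plot emits an SVG on stdout, which runBootchart simply
// forwards to the terminal. A hedged variant that captures it to a local
// file instead; saveBootchart and the bootchart.svg name are illustrative
// only, and it assumes SSHSession returns an x/crypto/ssh-style session
// with Close, Run, and assignable Stdout/Stderr, as the usage above
// suggests.
func saveBootchart(m platform.Machine) error {
	ssh, err := m.SSHSession()
	if err != nil {
		return err
	}
	defer ssh.Close()

	f, err := os.Create("bootchart.svg")
	if err != nil {
		return err
	}
	defer f.Close()

	// write the SVG to the file, keep remote errors on our stderr
	ssh.Stdout = f
	ssh.Stderr = os.Stderr
	return ssh.Run("systemd-analyze plot")
}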
func runSpawn(cmd *cobra.Command, args []string) {
	var userdata []byte
	var err error
	var cluster platform.Cluster

	if spawnUserData != "" {
		userdata, err = ioutil.ReadFile(spawnUserData)
		if err != nil {
			die("Reading userdata failed: %v", err)
		}
	}

	switch kolaPlatform {
	case "qemu":
		cluster, err = platform.NewQemuCluster(kola.QEMUOptions)
	case "gce":
		cluster, err = platform.NewGCECluster(kola.GCEOptions)
	case "aws":
		cluster, err = platform.NewAWSCluster(kola.AWSOptions)
	default:
		err = fmt.Errorf("invalid platform %q", kolaPlatform)
	}
	if err != nil {
		die("Cluster failed: %v", err)
	}

	mach, err := cluster.NewMachine(string(userdata))
	if err != nil {
		die("Spawning instance failed: %v", err)
	}

	if spawnRemove {
		defer mach.Destroy()
	}

	if spawnShell {
		if err := platform.Manhole(mach); err != nil {
			die("Manhole failed: %v", err)
		}
	}
}
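// spawnUserData, spawnRemove, and spawnShell above are package-level flag
// targets. One plausible way they might be wired up with cobra; cmdSpawn
// and root are hypothetical names, and the flag names and defaults are
// assumptions, not the project's actual definitions.
func init() {
	cmdSpawn.Flags().StringVar(&spawnUserData, "userdata", "", "path to a cloud-config to pass to the instance")
	cmdSpawn.Flags().BoolVar(&spawnRemove, "remove", true, "destroy the machine after the command exits")
	cmdSpawn.Flags().BoolVar(&spawnShell, "shell", false, "attach an interactive shell to the machine")
	root.AddCommand(cmdSpawn)
}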
func runQemu(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		fmt.Fprintf(os.Stderr, "No args accepted\n")
		os.Exit(2)
	}

	cluster, err := platform.NewQemuCluster(kola.QEMUOptions)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Cluster failed: %v\n", err)
		os.Exit(1)
	}
	defer cluster.Destroy()

	m, err := cluster.NewMachine("")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Machine failed: %v\n", err)
		os.Exit(1)
	}

	out, err := m.SSH("uname -a")
	if err != nil {
		fmt.Fprintf(os.Stderr, "SSH failed: %v\n", err)
		os.Exit(1)
	}
	if len(out) != 0 {
		fmt.Fprintf(os.Stdout, "SSH: %s\n", out)
	}

	err = m.Destroy()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Destroy failed: %v\n", err)
		os.Exit(1)
	}

	if len(cluster.Machines()) != 0 {
		fmt.Fprintf(os.Stderr, "Cluster not empty.\n")
		os.Exit(1)
	}

	fmt.Printf("QEMU successful!\n")
}
func runQemu(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		fmt.Fprintf(os.Stderr, "No args accepted\n")
		os.Exit(2)
	}

	cluster, err := platform.NewQemuCluster(kola.QEMUOptions)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Cluster failed: %v\n", err)
		os.Exit(1)
	}
	defer cluster.Destroy()

	m, err := cluster.NewMachine("")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Machine failed: %v\n", err)
		os.Exit(1)
	}

	// check the in-process SSH client
	out, err := m.SSH("uname -a")
	if err != nil {
		fmt.Fprintf(os.Stderr, "SSH failed: %v\n", err)
		os.Exit(1)
	}
	if len(out) != 0 {
		fmt.Fprintf(os.Stdout, "SSH: %s\n", out)
	}

	// check the external ssh binary against the same machine
	ssh := cluster.NewCommand("ssh", "-l", "core",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-o", "BatchMode=yes",
		m.IP(), "uptime")
	out, err = ssh.Output()
	if err != nil {
		fmt.Fprintf(os.Stderr, "SSH command failed: %v\n", err)
		os.Exit(1)
	}
	if len(out) != 0 {
		fmt.Fprintf(os.Stdout, "SSH command: %s\n", out)
	} else {
		fmt.Fprintf(os.Stderr, "SSH command produced no output.\n")
		os.Exit(1)
	}

	err = m.Destroy()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Destroy failed: %v\n", err)
		os.Exit(1)
	}

	if len(cluster.Machines()) != 0 {
		fmt.Fprintf(os.Stderr, "Cluster not empty.\n")
		os.Exit(1)
	}

	fmt.Printf("QEMU successful!\n")
}
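// runQemu above exercises two SSH paths: the in-process client (m.SSH)
// and the external ssh binary via cluster.NewCommand, with the option
// list spelled out inline. A minimal sketch that wraps the duplicated
// option list in a helper; externalSSH is a hypothetical name, and it
// assumes NewCommand returns an exec.Cmd-style value, as the Output()
// call above suggests.
func externalSSH(cluster platform.Cluster, m platform.Machine, command string) ([]byte, error) {
	ssh := cluster.NewCommand("ssh", "-l", "core",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-o", "BatchMode=yes",
		m.IP(), command)
	return ssh.Output()
}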
// create a cluster and run test
func RunTest(t *Test, pltfrm string) error {
	var err error
	var cluster platform.Cluster

	switch pltfrm {
	case "qemu":
		cluster, err = platform.NewQemuCluster(QEMUOptions)
	case "gce":
		cluster, err = platform.NewGCECluster(GCEOptions)
	case "aws":
		cluster, err = platform.NewAWSCluster(AWSOptions)
	default:
		err = fmt.Errorf("invalid platform %q", pltfrm)
	}
	if err != nil {
		return fmt.Errorf("Cluster failed: %v", err)
	}
	defer func() {
		if err := cluster.Destroy(); err != nil {
			plog.Errorf("cluster.Destroy(): %v", err)
		}
	}()

	url, err := cluster.GetDiscoveryURL(t.ClusterSize)
	if err != nil {
		return fmt.Errorf("Failed to create discovery endpoint: %v", err)
	}

	cfgs := makeConfigs(url, t.CloudConfig, t.ClusterSize)

	if t.ClusterSize > 0 {
		_, err := platform.NewMachines(cluster, cfgs)
		if err != nil {
			return fmt.Errorf("Cluster failed starting machines: %v", err)
		}
	}

	// pass along all registered native functions
	var names []string
	for k := range t.NativeFuncs {
		names = append(names, k)
	}

	// copy the global test options to prevent unsafe access if tests
	// ever become parallel
	tempTestOptions := make(map[string]string)
	for k, v := range testOptions {
		tempTestOptions[k] = v
	}

	// Cluster -> TestCluster
	tcluster := platform.TestCluster{
		Name:        t.Name,
		NativeFuncs: names,
		Options:     tempTestOptions,
		Cluster:     cluster,
	}

	// drop kolet binary on machines
	if t.NativeFuncs != nil {
		err = scpKolet(tcluster)
		if err != nil {
			return fmt.Errorf("dropping kolet binary: %v", err)
		}
	}

	// run test
	err = t.Run(tcluster)

	// give some time for the remote journal to be flushed so it can be read
	// before we run the deferred machine destruction
	if err != nil {
		time.Sleep(10 * time.Second)
	}

	return err
}
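// The platform switch above is repeated across several commands in this
// section (runTest, runBootchart, runSpawn, RunTest). A hedged sketch of
// a shared constructor that would collapse the duplication; newCluster is
// a hypothetical name, and the option globals are the ones already used
// in RunTest above.
func newCluster(pltfrm string) (platform.Cluster, error) {
	switch pltfrm {
	case "qemu":
		return platform.NewQemuCluster(QEMUOptions)
	case "gce":
		return platform.NewGCECluster(GCEOptions)
	case "aws":
		return platform.NewAWSCluster(AWSOptions)
	default:
		return nil, fmt.Errorf("invalid platform %q", pltfrm)
	}
}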
func runUpdatePayload(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		plog.Fatal("No args accepted")
	}

	plog.Info("Generating update payload")

	// check for update file, generate if it doesn't exist
	version := "latest"
	dir := sdk.BuildImageDir(version)
	payload := "coreos_production_update.gz"

	_, err := os.Stat(filepath.Join(dir, payload))
	if err != nil {
		err = sdkomaha.GenerateFullUpdate(version, true)
		if err != nil {
			plog.Fatalf("Building full update failed: %v", err)
		}
	}

	plog.Info("Bringing up test harness cluster")

	cluster, err := platform.NewQemuCluster(kola.QEMUOptions)
	if err != nil {
		plog.Fatalf("Cluster failed: %v", err)
	}
	defer cluster.Destroy()
	// assert the concrete type only after the error check, so a failed
	// constructor cannot panic the assertion on a nil interface
	qc := cluster.(*platform.QEMUCluster)

	svc := &updateServer{
		updatePath: dir,
		payload:    payload,
	}

	qc.OmahaServer.Updater = svc

	// tell omaha server to handle file requests for the images dir
	qc.OmahaServer.Mux.Handle(dir+"/", http.StripPrefix(dir+"/", http.FileServer(http.Dir(dir))))

	_, port, err := net.SplitHostPort(qc.OmahaServer.Addr().String())
	if err != nil {
		plog.Errorf("SplitHostPort failed: %v", err)
		return
	}

	tmplVals := map[string]string{
		"Server": fmt.Sprintf("10.0.0.1:%s", port),
	}

	tmpl := template.Must(template.New("userdata").Parse(userdata))
	buf := new(bytes.Buffer)
	err = tmpl.Execute(buf, tmplVals)
	if err != nil {
		plog.Errorf("Template execution failed: %v", err)
		return
	}

	plog.Infof("Spawning test machine")

	m, err := cluster.NewMachine(buf.String())
	if err != nil {
		plog.Errorf("Machine failed: %v", err)
		return
	}

	plog.Info("Checking for boot from USR-A partition")

	// check that we are on USR-A.
	if err := checkUsrPartition(m, []string{"PARTUUID=" + sdk.USRAUUID.String(), "PARTLABEL=USR-A"}); err != nil {
		plog.Errorf("Did not find USR-A partition: %v", err)
		return
	}

	plog.Infof("Triggering update_engine")

	// trigger update, monitor the progress.
	out, err := m.SSH("update_engine_client -check_for_update")
	if err != nil {
		plog.Errorf("Executing update_engine_client failed: %v: %v", out, err)
		return
	}

	checker := func() error {
		envs, err := m.SSH("update_engine_client -status 2>/dev/null")
		if err != nil {
			return err
		}

		em := splitNewlineEnv(string(envs))
		if em["CURRENT_OP"] != "UPDATE_STATUS_UPDATED_NEED_REBOOT" {
			return fmt.Errorf("have not arrived in reboot state: currently at %s", em["CURRENT_OP"])
		}

		return nil
	}

	if err := util.Retry(12, 10*time.Second, checker); err != nil {
		plog.Errorf("Applying update payload failed: %v", err)
		return
	}

	plog.Info("Rebooting test machine")

	// reboot it
	if err := platform.Reboot(m); err != nil {
		plog.Errorf("Rebooting machine failed: %v", err)
		return
	}

	plog.Info("Checking for boot from USR-B partition")

	// check that we are on USR-B now.
	if err := checkUsrPartition(m, []string{"PARTUUID=" + sdk.USRBUUID.String(), "PARTLABEL=USR-B"}); err != nil {
		plog.Errorf("Did not find USR-B partition: %v", err)
		return
	}

	plog.Info("Update complete!")
}
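// splitNewlineEnv is used in the checker above to parse the output of
// update_engine_client -status, which prints KEY=VALUE pairs one per
// line. A minimal sketch of such a parser, assuming that output shape;
// the real helper may differ. Uses the standard strings package.
func splitNewlineEnv(envs string) map[string]string {
	m := make(map[string]string)
	for _, line := range strings.Split(strings.TrimSpace(envs), "\n") {
		// split on the first '=' only, so values may contain '='
		kv := strings.SplitN(line, "=", 2)
		if len(kv) == 2 {
			m[kv[0]] = kv[1]
		}
	}
	return m
}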