// scpKolet searches for a kolet binary and copies it to the machine. func scpKolet(t platform.TestCluster) error { // TODO: determine the GOARCH for the remote machine mArch := "amd64" for _, d := range []string{ ".", filepath.Dir(os.Args[0]), filepath.Join("/usr/lib/kola", mArch), } { kolet := filepath.Join(d, "kolet") if _, err := os.Stat(kolet); err == nil { return t.DropFile(kolet) } } return fmt.Errorf("Unable to locate kolet binary for %s", mArch) }
// RollingUpgrade drops two etcd binaries onto every machine in the cluster,
// brings the cluster up on the first (older) binary, then performs a rolling
// upgrade to the second binary one machine at a time, verifying cluster
// health, previously-written keys, and (optionally) the reported etcd
// version after each step. Binaries and versions come from the package-level
// etcdBin/etcdBin2/etcdVersion/etcdVersion2 settings, configured via flags.
func RollingUpgrade(cluster platform.TestCluster) error {
	replaceDefaultsWithFlags(options.Opts)

	// cluster size, used to judge health checks below
	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(etcdBin); err != nil {
		return err
	}
	// TODO(pb): skip this test if binaries aren't available once we
	// have a meaningful way to do so.
	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(etcdBin2); err != nil {
		return err
	}

	// replace existing etcd2 binary with the starting (2.0.12) binary
	plog.Info("replacing etcd with 2.0.12")
	etcdPath := filepath.Join(dropPath, filepath.Base(etcdBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, etcdPath); err != nil {
			return err
		}
	}

	// start the 2.0 cluster and wait until every member reports healthy
	plog.Info("starting 2.0 cluster")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	// optionally confirm each member reports the expected starting version
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes; mapSet accumulates everything written
	// so far and is re-verified throughout the upgrade
	mapSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// Rolling replacement: for each machine in turn, verify the key set,
	// stop etcd, swap in the upgraded binary, write more keys while the
	// member is down, restart it, and re-check cluster health. Also tests
	// that writes succeed with one member down.
	plog.Info("rolling upgrade to 2.1")
	etcdPath2 := filepath.Join(dropPath, filepath.Base(etcdBin2))
	for i, m := range cluster.Machines() {
		// check current value set
		if err := checkKeys(cluster, mapSet); err != nil {
			return err
		}
		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, etcdPath2); err != nil {
			return err
		}
		// set some values while running down a node and update set
		tempSet, err := setKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)
		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}
		// note: this m intentionally shadows the outer loop variable
		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}
	}

	// set some more values after the full upgrade and fold them in
	tempSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check that all values written during the upgrade survived
	if err := checkKeys(cluster, mapSet); err != nil {
		return err
	}

	// optionally check every member now reports the upgraded (2.1) version
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion2); err != nil {
				return err
			}
		}
	}
	return nil
}
// RollingUpgrade drops two etcd binaries onto every machine in the cluster,
// brings the cluster up on the first binary, then performs a rolling upgrade
// to the second binary one machine at a time, verifying cluster health,
// previously-written keys, and (when a version is configured) the reported
// etcd version after each step. Binaries and expected versions are taken
// from the cluster's test Options rather than package-level flags.
func RollingUpgrade(cluster platform.TestCluster) error {
	var (
		firstVersion  = cluster.Options["EtcdUpgradeVersion"]
		secondVersion = cluster.Options["EtcdUpgradeVersion2"]
		firstBin      = cluster.Options["EtcdUpgradeBin"]
		secondBin     = cluster.Options["EtcdUpgradeBin2"]
	)

	// cluster size, used to judge health checks below
	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(firstBin); err != nil {
		return err
	}
	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(secondBin); err != nil {
		return err
	}

	// replace existing etcd2 binary with the starting binary
	// NOTE(review): the log message hard-codes "2.0.12" but the binary now
	// comes from Options — message may be stale relative to configuration.
	plog.Info("replacing etcd with 2.0.12")
	firstPath := filepath.Join(dropPath, filepath.Base(firstBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, firstPath); err != nil {
			return err
		}
	}

	// start the cluster on the first binary and wait until every member
	// reports healthy
	plog.Info("starting 2.0 cluster")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	// version check is skipped when no expected version was configured
	if firstVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, firstVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes; mapSet accumulates everything written
	// so far and is re-verified throughout the upgrade
	mapSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// Rolling replacement: for each machine in turn, verify the key set,
	// stop etcd, swap in the upgraded binary, write more keys while the
	// member is down, restart it, and re-check cluster health. Also tests
	// that writes succeed with one member down.
	plog.Info("rolling upgrade to 2.1")
	secondPath := filepath.Join(dropPath, filepath.Base(secondBin))
	for i, m := range cluster.Machines() {
		// check current value set
		if err := CheckKeys(cluster, mapSet, true); err != nil {
			return err
		}
		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, secondPath); err != nil {
			return err
		}
		// set some values while running down a node and update set
		tempSet, err := SetKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)
		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}
		// note: this m intentionally shadows the outer loop variable
		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}
	}

	// set some more values after the full upgrade and fold them in
	tempSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check that all values written during the upgrade survived
	if err := CheckKeys(cluster, mapSet, true); err != nil {
		return err
	}

	// confirm every member now reports the upgraded version, when configured
	if secondVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, secondVersion); err != nil {
				return err
			}
		}
	}
	return nil
}