Example #1
func OmahaPing(c platform.TestCluster) error {
	qc, ok := c.Cluster.(*platform.QEMUCluster)
	if !ok {
		return errors.New("test only works in qemu")
	}

	omahaserver := qc.LocalCluster.OmahaServer

	svc := &pingServer{
		ping: make(chan struct{}),
	}

	omahaserver.Updater = svc

	m := c.Machines()[0]

	out, err := m.SSH("update_engine_client -check_for_update")
	if err != nil {
		return fmt.Errorf("failed to execute update_engine_client -check_for_update: %v: %v", out, err)
	}

	tc := time.After(30 * time.Second)

	select {
	case <-tc:
		platform.Manhole(m)
		return errors.New("timed out waiting for omaha ping")
	case <-svc.ping:
	}

	return nil
}
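The pingServer type is defined elsewhere in the test package. A minimal sketch of what it plausibly looks like, assuming the go-omaha Updater interface can be satisfied by embedding omaha.UpdaterStub and overriding a ping callback (the method signature below is an assumption, not taken from this example):

// Hypothetical sketch of pingServer: embed the no-op updater and signal on
// the channel whenever the Omaha server receives a ping. The Ping signature
// is an assumption about the go-omaha Updater interface.
type pingServer struct {
	omaha.UpdaterStub

	ping chan struct{}
}

func (ps *pingServer) Ping(req *omaha.Request, app *omaha.AppRequest) {
	ps.ping <- struct{}{}
}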
Example #2
// VerityVerify asserts that the filesystem mounted on /usr matches the
// dm-verity hash that is embedded in the CoreOS kernel.
func VerityVerify(c platform.TestCluster) error {
	m := c.Machines()[0]

	// extract verity hash from kernel
	hash, err := m.SSH("dd if=/boot/coreos/vmlinuz-a skip=64 count=64 bs=1 2>/dev/null")
	if err != nil {
		return fmt.Errorf("failed to extract verity hash from kernel: %v: %v", hash, err)
	}

	// find /usr dev
	usrdev, err := m.SSH("findmnt -no SOURCE /usr")
	if err != nil {
		return fmt.Errorf("failed to find device for /usr: %v: %v", usrdev, err)
	}

	// figure out partition size for hash dev offset
	offset, err := m.SSH("sudo e2size " + string(usrdev))
	if err != nil {
		return fmt.Errorf("failed to find /usr partition size: %v: %v", offset, err)
	}

	offset = bytes.TrimSpace(offset)
	veritycmd := fmt.Sprintf("sudo veritysetup verify --verbose --hash-offset=%s %s %s %s", offset, usrdev, usrdev, hash)

	verify, err := m.SSH(veritycmd)
	if err != nil {
		return fmt.Errorf("verity hash verification on %s failed: %v: %v", usrdev, verify, err)
	}

	return nil
}
Example #3
// run internet-based tests
func InternetTests(c platform.TestCluster) error {
	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #4
func testRoot(c platform.TestCluster, fs string) error {
	m := c.Machines()[0]

	out, err := m.SSH("findmnt --noheadings --output FSTYPE --target /")
	if err != nil {
		return fmt.Errorf("failed to run findmnt: %s: %v", out, err)
	}

	if string(out) != fs {
		return fmt.Errorf("root wasn't correctly reformatted:\n%s", out)
	}

	return nil
}
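testRoot is a shared helper; the registered test is presumably a thin wrapper that pins the expected filesystem type, along the lines of this illustrative example (the wrapper name and the "btrfs" value are hypothetical):

// Hypothetical wrapper: assert that / was reformatted to btrfs.
func btrfsRoot(c platform.TestCluster) error {
	return testRoot(c, "btrfs")
}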
Example #5
func setHostname(c platform.TestCluster) error {
	m := c.Machines()[0]

	out, err := m.SSH("hostnamectl")
	if err != nil {
		return fmt.Errorf("failed to run hostnamectl: %s: %v", out, err)
	}

	if !strings.Contains(string(out), "Static hostname: core1") {
		return fmt.Errorf("hostname wasn't set correctly:\n%s", out)
	}

	return nil
}
Example #6
// run clustering-based tests
func ClusterTests(c platform.TestCluster) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range c.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// wait for etcd to come up
	if err := etcd.GetClusterHealth(c.Machines()[0], len(c.Machines())); err != nil {
		return err
	}

	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #7
func verify(c platform.TestCluster, keys ...string) error {
	m := c.Machines()[0]

	out, err := m.SSH("cat /run/metadata/coreos")
	if err != nil {
		return fmt.Errorf("failed to cat /run/metadata/coreos: %s: %v", out, err)
	}

	for _, key := range keys {
		if !strings.Contains(string(out), key) {
			return fmt.Errorf("%q wasn't found in %q", key, string(out))
		}
	}

	return nil
}
Example #8
// run clustering-based tests
func ClusterTests(c platform.TestCluster) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range c.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// make sure etcd is up and running
	var keyMap map[string]string
	var retryFuncs []func() error

	retryFuncs = append(retryFuncs, func() error {
		var err error
		keyMap, err = etcd.SetKeys(c, 3)
		if err != nil {
			return err
		}
		return nil
	})
	retryFuncs = append(retryFuncs, func() error {
		if err := etcd.CheckKeys(c, keyMap, true); err != nil {
			return err
		}
		return nil
	})
	for _, retry := range retryFuncs {
		if err := util.Retry(5, 5*time.Second, retry); err != nil {
			return fmt.Errorf("etcd failed health check: %v", err)
		}
	}

	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #9
func verifyAWS(c platform.TestCluster) error {
	m := c.Machines()[0]

	out, err := m.SSH("coreos-metadata --version")
	if err != nil {
		return fmt.Errorf("failed to cat /run/metadata/coreos: %s: %v", out, err)
	}

	versionStr := strings.TrimPrefix(string(out), "coreos-metadata v")
	version, err := semver.NewVersion(versionStr)
	if err != nil {
		return fmt.Errorf("failed to parse coreos-metadata version: %v", err)
	}

	if version.LessThan(semver.Version{Minor: 3}) {
		return verify(c, "COREOS_IPV4_LOCAL", "COREOS_IPV4_PUBLIC", "COREOS_HOSTNAME")
	} else {
		return verify(c, "COREOS_EC2_IPV4_LOCAL", "COREOS_EC2_IPV4_PUBLIC", "COREOS_EC2_HOSTNAME")
	}
}
Example #10
File: rolling.go  Project: hanscj1/mantle
func RollingUpgrade(cluster platform.TestCluster) error {
	replaceDefaultsWithFlags(options.Opts)

	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(etcdBin); err != nil {
		return err
	}
	// TODO(pb): skip this test if binaries aren't available once we
	// have a meaningful way to do so.

	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(etcdBin2); err != nil {
		return err
	}

	// replace existing etcd2 binary with 2.0.12
	plog.Info("replacing etcd with 2.0.12")
	etcdPath := filepath.Join(dropPath, filepath.Base(etcdBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, etcdPath); err != nil {
			return err
		}
	}

	// start 2.0 cluster
	plog.Info("starting 2.0 cluster")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes
	mapSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// rolling replacement, checking cluster health and version after each
	// replaced binary. Also test setting keys while a member is down.
	plog.Info("rolling upgrade to 2.1")
	etcdPath2 := filepath.Join(dropPath, filepath.Base(etcdBin2))
	for i, m := range cluster.Machines() {

		// check current value set
		if err := checkKeys(cluster, mapSet); err != nil {
			return err
		}

		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, etcdPath2); err != nil {
			return err
		}

		// set some values while running down a node and update set
		tempSet, err := setKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)

		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}

		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}

	}
	// set some more values
	tempSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check all values written correctly
	if err := checkKeys(cluster, mapSet); err != nil {
		return err
	}

	// check version is now 2.1
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion2); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #11
File: rolling.go  Project: chancez/mantle
func RollingUpgrade(cluster platform.TestCluster) error {
	var (
		firstVersion  = cluster.Options["EtcdUpgradeVersion"]
		secondVersion = cluster.Options["EtcdUpgradeVersion2"]
		firstBin      = cluster.Options["EtcdUpgradeBin"]
		secondBin     = cluster.Options["EtcdUpgradeBin2"]
	)

	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(firstBin); err != nil {
		return err
	}

	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(secondBin); err != nil {
		return err
	}

	// replace existing etcd2 binary with 2.0.12
	plog.Info("replacing etcd with 2.0.12")
	firstPath := filepath.Join(dropPath, filepath.Base(firstBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, firstPath); err != nil {
			return err
		}
	}

	// start 2.0 cluster
	plog.Info("starting 2.0 cluster")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	if firstVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, firstVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes
	mapSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// rolling replacement, checking cluster health and version after each
	// replaced binary. Also test setting keys while a member is down.
	plog.Info("rolling upgrade to 2.1")
	secondPath := filepath.Join(dropPath, filepath.Base(secondBin))
	for i, m := range cluster.Machines() {

		// check current value set
		if err := CheckKeys(cluster, mapSet, true); err != nil {
			return err
		}

		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, secondPath); err != nil {
			return err
		}

		// set some values while running down a node and update set
		tempSet, err := SetKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)

		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}

		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}

	}
	// set some more values
	tempSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check all values written correctly
	if err := CheckKeys(cluster, mapSet, true); err != nil {
		return err
	}

	// check version is now 2.1
	if secondVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, secondVersion); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #12
// vxlan tests that flannel can send packets using the vxlan backend.
func vxlan(c platform.TestCluster) error {
	machs := c.Machines()
	return util.Retry(12, 10*time.Second, func() error { return ping(machs[0], machs[2], "flannel.1") })
}
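The ping helper used above is defined elsewhere in the package. A rough sketch, assuming it looks up the peer's IPv4 address on the named flannel interface and then pings it from the source machine over SSH (the command strings and error wording are illustrative, not the project's code):

// Hypothetical sketch of the ping helper: resolve the target's address on
// the given interface, then ping it once from the source machine.
func ping(from, to platform.Machine, iface string) error {
	// find the target's IPv4 address on the flannel interface
	out, err := to.SSH(fmt.Sprintf("ip -4 -o addr show %s | awk '{print $4}' | cut -d/ -f1", iface))
	if err != nil {
		return fmt.Errorf("failed to get %s address: %s: %v", iface, out, err)
	}
	addr := strings.TrimSpace(string(out))

	// ping it once from the source machine
	out, err = from.SSH("ping -c 1 -W 5 " + addr)
	if err != nil {
		return fmt.Errorf("ping to %s over %s failed: %s: %v", addr, iface, out, err)
	}
	return nil
}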