Example #1
func OmahaPing(c platform.TestCluster) error {
	qc, ok := c.Cluster.(*platform.QEMUCluster)
	if !ok {
		return errors.New("test only works in qemu")
	}

	omahaserver := qc.LocalCluster.OmahaServer

	svc := &pingServer{
		ping: make(chan struct{}),
	}

	omahaserver.Updater = svc

	m := c.Machines()[0]

	out, err := m.SSH("update_engine_client -check_for_update")
	if err != nil {
		return fmt.Errorf("failed to execute update_engine_client -check_for_update: %v: %v", out, err)
	}

	tc := time.After(30 * time.Second)

	select {
	case <-tc:
		platform.Manhole(m)
		return errors.New("timed out waiting for omaha ping")
	case <-svc.ping:
	}

	return nil
}
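For reference, a minimal sketch of the pingServer type this test assumes; the embedded stub and the Ping signature follow the general shape of the go-omaha Updater interface, but the exact method set here is an assumption.

// pingServer signals the test whenever the omaha server receives a ping.
type pingServer struct {
	omaha.UpdaterStub // assumed no-op stub satisfying the rest of the Updater interface
	ping chan struct{}
}

func (ps *pingServer) Ping(req *omaha.Request, app *omaha.AppRequest) {
	ps.ping <- struct{}{}
}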
Example #2
// Test that timesyncd starts using the local NTP server
func NTP(c platform.TestCluster) error {
	m, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m.Destroy()

	out, err := m.SSH("networkctl status eth0")
	if err != nil {
		return fmt.Errorf("networkctl: %v", err)
	}
	if !bytes.Contains(out, []byte("NTP: 10.0.0.1")) {
		return fmt.Errorf("Bad network config:\n%s", out)
	}

	plog.Info("Waiting for systemd-timesyncd.service")
	for i := 0; i < 60; i++ {
		out, err = m.SSH("systemctl status systemd-timesyncd.service")
		if err != nil {
			return fmt.Errorf("systemctl: %v", err)
		}

		if bytes.Contains(out, []byte(`Status: "Using Time Server 10.0.0.1:123 (10.0.0.1)."`)) {
			plog.Info("systemd-timesyncd.service is working!")
			return nil
		}

		time.Sleep(time.Second)
	}

	return fmt.Errorf("Bad status:\n%s", out)
}
Example #3
// VerityVerify asserts that the filesystem mounted on /usr matches the
// dm-verity hash that is embedded in the CoreOS kernel.
func VerityVerify(c platform.TestCluster) error {
	m := c.Machines()[0]

	// extract verity hash from kernel
	hash, err := m.SSH("dd if=/boot/coreos/vmlinuz-a skip=64 count=64 bs=1 2>/dev/null")
	if err != nil {
		return fmt.Errorf("failed to extract verity hash from kernel: %v: %v", hash, err)
	}

	// find /usr dev
	usrdev, err := m.SSH("findmnt -no SOURCE /usr")
	if err != nil {
		return fmt.Errorf("failed to find device for /usr: %v: %v", usrdev, err)
	}

	// figure out partition size for hash dev offset
	offset, err := m.SSH("sudo e2size " + string(usrdev))
	if err != nil {
		return fmt.Errorf("failed to find /usr partition size: %v: %v", offset, err)
	}

	offset = bytes.TrimSpace(offset)
	veritycmd := fmt.Sprintf("sudo veritysetup verify --verbose --hash-offset=%s %s %s %s", offset, usrdev, usrdev, hash)

	verify, err := m.SSH(veritycmd)
	if err != nil {
		return fmt.Errorf("verity hash verification on %s failed: %v: %v", usrdev, verify, err)
	}

	return nil
}
Example #4
// Start a multi-node cluster from the official Kubernetes 1.0 guides.
// Once up, do a couple of basic smoke checks. See:
// http://kubernetes.io/v1.0/docs/getting-started-guides/coreos/coreos_multinode_cluster.html
func MultiNodeSmoke(c platform.TestCluster) error {
	const clusterSize = 3

	// spawn master
	master, err := c.NewMachine(masterConfig)
	if err != nil {
		return err
	}

	// get master private IP and place into nodeConfig
	nodeConfig = strings.Replace(nodeConfig, "<master-private-ip>", master.PrivateIP(), -1)
	var nodeConfigs []string
	for i := 0; i < clusterSize-1; i++ {
		nodeConfigs = append(nodeConfigs, nodeConfig)
	}

	// spawn nodes
	nodes, err := platform.NewMachines(c, nodeConfigs)
	if err != nil {
		return err
	}

	// get kubectl in master
	_, err = master.SSH("wget -q https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl")
	if err != nil {
		return err
	}
	_, err = master.SSH("chmod +x kubectl")
	if err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		return nodeCheck(master, nodes)
	}
	if err := util.Retry(10, 5*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, nodes); err != nil {
		return err
	}

	// See http://kubernetes.io/v1.0/docs/user-guide/secrets/. Also
	// ensures https://github.com/coreos/bugs/issues/447 does not recur.
	if err = secretCheck(master, nodes); err != nil {
		return err
	}

	return nil
}
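A hypothetical sketch of the nodeCheck helper used above: it lists registered nodes with the kubectl binary fetched earlier and compares the count against the expected node set; the real helper may check more than the count.

func nodeCheck(master platform.Machine, nodes []platform.Machine) error {
	out, err := master.SSH("./kubectl get nodes")
	if err != nil {
		return err
	}
	// one header line, then one line per registered node
	got := len(strings.Split(strings.TrimSpace(string(out)), "\n")) - 1
	if got != len(nodes) {
		return fmt.Errorf("expected %d nodes, kubectl reported %d", len(nodes), got)
	}
	return nil
}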
Example #5
func setHostname(c platform.TestCluster) error {
	m := c.Machines()[0]

	out, err := m.SSH("hostnamectl")
	if err != nil {
		return fmt.Errorf("failed to run hostnamectl: %s: %v", out, err)
	}

	if !strings.Contains(string(out), "Static hostname: core1") {
		return fmt.Errorf("hostname wasn't set correctly:\n%s", out)
	}

	return nil
}
Example #6
// JournalRemote tests that systemd-journal-remote can read log entries from
// a systemd-journal-gatewayd server.
func JournalRemote(c platform.TestCluster) error {
	// start gatewayd and log a message
	gateway, err := c.NewMachine(gatewayconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer gateway.Destroy()

	// log a unique message on gatewayd machine
	msg := "supercalifragilisticexpialidocious"
	out, err := gateway.SSH("logger " + msg)
	if err != nil {
		return fmt.Errorf("logger: %v: %v", out, err)
	}

	// spawn a machine to read from gatewayd
	collector, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer collector.Destroy()

	// collect logs from gatewayd machine
	cmd := fmt.Sprintf("sudo systemd-run --unit systemd-journal-remote-client /usr/lib/systemd/systemd-journal-remote --url http://%s:19531", gateway.PrivateIP())
	out, err = collector.SSH(cmd)
	if err != nil {
		return fmt.Errorf("failed to start systemd-journal-remote: %v: %v", out, err)
	}

	// find the message on the collector
	journalReader := func() error {
		cmd = fmt.Sprintf("sudo journalctl _HOSTNAME=%s -t core --file /var/log/journal/remote/remote-%s:19531.journal", gatewayconf.Hostname, gateway.PrivateIP())
		out, err = collector.SSH(cmd)
		if err != nil {
			return fmt.Errorf("journalctl: %v: %v", out, err)
		}

		if !strings.Contains(string(out), msg) {
			return fmt.Errorf("journal missing entry: expected %q got %q", msg, out)
		}

		return nil
	}

	if err := util.Retry(5, 2*time.Second, journalReader); err != nil {
		return err
	}

	return nil
}
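A plausible gatewayconf for this test, assuming all it needs is to start the systemd-journal-gatewayd socket and pin the hostname referenced by the journalctl query; the real config may carry more.

var gatewayconf = config.CloudConfig{
	CoreOS: config.CoreOS{
		Units: []config.Unit{
			config.Unit{
				Name:    "systemd-journal-gatewayd.socket",
				Command: "start",
			},
		},
	},
	Hostname: "gateway",
}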
Example #7
func testRoot(c platform.TestCluster, fs string) error {
	m := c.Machines()[0]

	out, err := m.SSH("findmnt --noheadings --output FSTYPE --target /")
	if err != nil {
		return fmt.Errorf("failed to run findmnt: %s: %v", out, err)
	}

	if string(out) != fs {
		return fmt.Errorf("root wasn't correctly reformatted:\n%s", out)
	}

	return nil
}
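testRoot is a parameterized helper; a hypothetical wrapper shows how it might be registered for a specific filesystem:

func btrfsRoot(c platform.TestCluster) error {
	return testRoot(c, "btrfs")
}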
Example #8
// Test to make sure rkt install works.
func Install(c platform.TestCluster) error {
	mach, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer mach.Destroy()

	cmd := "sudo rkt install"
	output, err := mach.SSH(cmd)
	if err != nil {
		return fmt.Errorf("failed to run %q: %s: %s", cmd, err, output)
	}

	return nil
}
Example #9
// scpKolet searches for a kolet binary and copies it to the machine.
func scpKolet(t platform.TestCluster) error {
	// TODO: determine the GOARCH for the remote machine
	mArch := "amd64"
	for _, d := range []string{
		".",
		filepath.Dir(os.Args[0]),
		filepath.Join("/usr/lib/kola", mArch),
	} {
		kolet := filepath.Join(d, "kolet")
		if _, err := os.Stat(kolet); err == nil {
			return t.DropFile(kolet)
		}
	}
	return fmt.Errorf("Unable to locate kolet binary for %s", mArch)
}
Example #10
func verify(c platform.TestCluster, keys ...string) error {
	m := c.Machines()[0]

	out, err := m.SSH("cat /run/metadata/coreos")
	if err != nil {
		return fmt.Errorf("failed to cat /run/metadata/coreos: %s: %v", out, err)
	}

	for _, key := range keys {
		if !strings.Contains(string(out), key) {
			return fmt.Errorf("%q wasn't found in %q", key, string(out))
		}
	}

	return nil
}
Example #11
func verifyAWS(c platform.TestCluster) error {
	m := c.Machines()[0]

	out, err := m.SSH("coreos-metadata --version")
	if err != nil {
		return fmt.Errorf("failed to cat /run/metadata/coreos: %s: %v", out, err)
	}

	versionStr := strings.TrimPrefix(string(out), "coreos-metadata v")
	version, err := semver.NewVersion(versionStr)
	if err != nil {
		return fmt.Errorf("failed to parse coreos-metadata version: %v", err)
	}

	if version.LessThan(semver.Version{Minor: 3}) {
		return verify(c, "COREOS_IPV4_LOCAL", "COREOS_IPV4_PUBLIC", "COREOS_HOSTNAME")
	} else {
		return verify(c, "COREOS_EC2_IPV4_LOCAL", "COREOS_EC2_IPV4_PUBLIC", "COREOS_EC2_HOSTNAME")
	}
}
Example #12
// Test that timesyncd starts using the local NTP server
func NTP(c platform.TestCluster) error {
	m, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m.Destroy()

	out, err := m.SSH("networkctl status eth0")
	if err != nil {
		return fmt.Errorf("networkctl: %v", err)
	}
	if !bytes.Contains(out, []byte("NTP: 10.0.0.1")) {
		return fmt.Errorf("Bad network config:\n%s", out)
	}

	plog.Info("Waiting for systemd-timesyncd.service")

	checker := func() error {
		out, err = m.SSH("systemctl status systemd-timesyncd.service")
		if err != nil {
			return fmt.Errorf("systemctl: %v", err)
		}

		if !bytes.Contains(out, []byte(`Status: "Synchronized to time server 10.0.0.1:123 (10.0.0.1)."`)) {
			return fmt.Errorf("unexpected systemd-timesyncd status: %v", out)
		}

		plog.Info("systemd-timesyncd.service is working!")
		return nil
	}

	err = util.Retry(60, 1*time.Second, checker)
	if err != nil {
		return err
	}

	return nil
}
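This test (and the hand-rolled 60x1s polling loop in Example #2) leans on util.Retry; a minimal sketch of its assumed semantics, retrying up to attempts times with a fixed delay between failures:

func Retry(attempts int, delay time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}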
Example #13
// run clustering based tests
func ClusterTests(c platform.TestCluster) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range c.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// make sure etcd is up and running
	var keyMap map[string]string
	var retryFuncs []func() error

	retryFuncs = append(retryFuncs, func() error {
		var err error
		keyMap, err = etcd.SetKeys(c, 3)
		return err
	})
	retryFuncs = append(retryFuncs, func() error {
		return etcd.CheckKeys(c, keyMap, true)
	})
	for _, retry := range retryFuncs {
		if err := util.Retry(5, 5*time.Second, retry); err != nil {
			return fmt.Errorf("etcd failed health check: %v", err)
		}
	}

	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #14
// run internet based tests
func InternetTests(c platform.TestCluster) error {
	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #15
// Test fleet running through an etcd2 proxy.
func Proxy(c platform.TestCluster) error {
	var err error
	masterconf.CoreOS.Etcd2.Discovery, err = c.GetDiscoveryURL(1)
	if err != nil {
		return fmt.Errorf("Cluster.GetDiscoveryURL: %s", err)
	}
	master, err := c.NewMachine(masterconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer master.Destroy()

	proxyconf.CoreOS.Etcd2.Discovery = masterconf.CoreOS.Etcd2.Discovery
	proxy, err := c.NewMachine(proxyconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer proxy.Destroy()

	err = platform.InstallFile(strings.NewReader(fleetunit), proxy, "/home/core/hello.service")
	if err != nil {
		return fmt.Errorf("InstallFile: %s", err)
	}

	// settling...
	fleetStart := func() error {
		_, err = proxy.SSH("fleetctl start /home/core/hello.service")
		if err != nil {
			return fmt.Errorf("fleetctl start: %s", err)
		}
		return nil
	}
	if err := util.Retry(5, 5*time.Second, fleetStart); err != nil {
		return fmt.Errorf("fleetctl start failed: %v", err)
	}

	var status []byte

	fleetList := func() error {
		status, err = proxy.SSH("fleetctl list-units -l -fields active -no-legend")
		if err != nil {
			return fmt.Errorf("fleetctl list-units: %s", err)
		}

		if !bytes.Equal(status, []byte("active")) {
			return fmt.Errorf("unit not active")
		}

		return nil
	}

	if err := util.Retry(5, 1*time.Second, fleetList); err != nil {
		return fmt.Errorf("fleetctl list-units failed: %v", err)
	}

	return nil
}
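A hypothetical proxyconf for this test: an etcd2 instance in proxy mode plus fleet, so fleetctl on the proxy machine reaches the cluster through the local proxy. Field names follow the coreos-cloudinit config package; the real config may differ.

var proxyconf = config.CloudConfig{
	CoreOS: config.CoreOS{
		Etcd2: config.Etcd2{
			Proxy:            "on",
			ListenClientURLs: "http://0.0.0.0:2379",
		},
		Units: []config.Unit{
			config.Unit{Name: "etcd2.service", Command: "start"},
			config.Unit{Name: "fleet.service", Command: "start"},
		},
	},
}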
Example #16
func RollingUpgrade(cluster platform.TestCluster) error {
	replaceDefaultsWithFlags(options.Opts)

	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(etcdBin); err != nil {
		return err
	}
	// TODO(pb): skip this test if binaries aren't available once we
	// have a meaningful way to do so.

	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(etcdBin2); err != nil {
		return err
	}

	// replace existing etcd2 binary with 2.0.12
	plog.Info("replacing etcd with 2.0.12")
	etcdPath := filepath.Join(dropPath, filepath.Base(etcdBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, etcdPath); err != nil {
			return err
		}
	}

	// start 2.0 cluster
	plog.Info("starting 2.0 cluster")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes
	mapSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// rolling replacement, checking cluster health and
	// version after each replaced binary
	plog.Info("rolling upgrade to 2.1")
	etcdPath2 := filepath.Join(dropPath, filepath.Base(etcdBin2))
	for i, m := range cluster.Machines() {

		// check current value set
		if err := checkKeys(cluster, mapSet); err != nil {
			return err
		}

		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, etcdPath2); err != nil {
			return err
		}

		// set some values while running down a node and update set
		tempSet, err := setKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)

		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}

		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}

	}
	// set some more values
	tempSet, err := setKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check all values written correctly
	if err := checkKeys(cluster, mapSet); err != nil {
		return err
	}

	// check version is now 2.1
	if !skipVersionCheck {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, etcdVersion2); err != nil {
				return err
			}
		}
	}

	return nil
}
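Hypothetical sketches of the startEtcd2 and stopEtcd2 helpers assumed above, treating etcd2 as an ordinary systemd unit; the real implementations may differ.

func startEtcd2(m platform.Machine) error {
	_, err := m.SSH("sudo systemctl start etcd2.service")
	return err
}

func stopEtcd2(m platform.Machine) error {
	_, err := m.SSH("sudo systemctl stop etcd2.service")
	return err
}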
Example #17
// vxlan tests that flannel can send packets using the VXLAN backend.
func vxlan(c platform.TestCluster) error {
	machs := c.Machines()
	return util.Retry(12, 10*time.Second, func() error { return ping(machs[0], machs[2], "flannel.1") })
}
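A hypothetical sketch of the ping helper assumed above: look up the target's IPv4 address on the named interface, then ping it from the source machine.

func ping(from, to platform.Machine, iface string) error {
	addr, err := to.SSH(fmt.Sprintf("ip -4 -o addr show %s | awk '{print $4}' | cut -d/ -f1", iface))
	if err != nil {
		return fmt.Errorf("failed to find %s address: %v", iface, err)
	}
	out, err := from.SSH(fmt.Sprintf("ping -c 3 %s", bytes.TrimSpace(addr)))
	if err != nil {
		return fmt.Errorf("ping over %s failed: %v: %v", iface, out, err)
	}
	return nil
}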
Example #18
func testNFS(c platform.TestCluster, nfsversion int) error {
	m1, err := c.NewMachine(nfsserverconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m1.Destroy()

	plog.Info("NFS server booted.")

	/* poke a file in /tmp */
	tmp, err := m1.SSH("mktemp")
	if err != nil {
		return fmt.Errorf("Machine.SSH: %s", err)
	}

	plog.Infof("Test file %q created on server.", tmp)

	c2 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "mnt.mount",
					Command: "start",
					Content: fmt.Sprintf(mounttmpl, m1.PrivateIP(), nfsversion),
				},
			},
		},
		Hostname: "nfs2",
	}

	m2, err := c.NewMachine(c2.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m2.Destroy()

	plog.Info("NFS client booted.")

	plog.Info("Waiting for NFS mount on client...")

	checkmount := func() error {
		status, err := m2.SSH("systemctl is-active mnt.mount")
		if err != nil || string(status) != "active" {
			return fmt.Errorf("mnt.mount status is %q: %v", status, err)
		}

		plog.Info("Got NFS mount.")
		return nil
	}

	if err = util.Retry(10, 3*time.Second, checkmount); err != nil {
		return err
	}

	_, err = m2.SSH(fmt.Sprintf("stat /mnt/%s", path.Base(string(tmp))))
	if err != nil {
		return fmt.Errorf("file %q does not exist", tmp)
	}

	return nil
}
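A hypothetical mounttmpl along the lines of the nfstmpl unit in Example #21, extended with an nfsvers option so the test can pin the protocol version; the real template may differ.

var mounttmpl = `[Unit]
Description=NFS Client
After=network-online.target
Requires=network-online.target

[Mount]
What=%s:/tmp
Where=/mnt
Type=nfs
Options=defaults,noexec,nfsvers=%d
`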
Example #19
// run clustering based tests
func ClusterTests(c platform.TestCluster) error {
	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range c.Machines() {
			if err := platform.StreamJournal(m); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// wait for etcd to come up
	if err := etcd.GetClusterHealth(c.Machines()[0], len(c.Machines())); err != nil {
		return err
	}

	tests := c.ListNativeFunctions()
	for _, name := range tests {
		plog.Noticef("running %v...", name)
		err := c.RunNative(name, c.Machines()[0])
		if err != nil {
			return err
		}
	}
	return nil
}
Example #20
func RollingUpgrade(cluster platform.TestCluster) error {
	var (
		firstVersion  = cluster.Options["EtcdUpgradeVersion"]
		secondVersion = cluster.Options["EtcdUpgradeVersion2"]
		firstBin      = cluster.Options["EtcdUpgradeBin"]
		secondBin     = cluster.Options["EtcdUpgradeBin2"]
	)

	csize := len(cluster.Machines())

	if plog.LevelAt(capnslog.DEBUG) {
		// get journalctl -f from all machines before starting
		for _, m := range cluster.Machines() {
			if err := m.StartJournal(); err != nil {
				return fmt.Errorf("failed to start journal: %v", err)
			}
		}
	}

	// drop in starting etcd binary
	plog.Debug("adding files to cluster")
	if err := cluster.DropFile(firstBin); err != nil {
		return err
	}

	// drop in etcd binary to upgrade to
	if err := cluster.DropFile(secondBin); err != nil {
		return err
	}

	// replace existing etcd2 binary with the starting version
	plog.Info("replacing etcd with the starting binary")
	firstPath := filepath.Join(dropPath, filepath.Base(firstBin))
	for _, m := range cluster.Machines() {
		if err := replaceEtcd2Bin(m, firstPath); err != nil {
			return err
		}
	}

	// start cluster on the first etcd version
	plog.Info("starting cluster on the first etcd version")
	for _, m := range cluster.Machines() {
		if err := startEtcd2(m); err != nil {
			return err
		}
	}
	for _, m := range cluster.Machines() {
		if err := getClusterHealth(m, csize); err != nil {
			return err
		}
	}
	if firstVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, firstVersion); err != nil {
				return err
			}
		}
	}

	// set some values on all nodes
	mapSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}

	// rolling replacement, checking cluster health and
	// version after each replaced binary
	plog.Info("rolling upgrade to the second etcd version")
	secondPath := filepath.Join(dropPath, filepath.Base(secondBin))
	for i, m := range cluster.Machines() {

		// check current value set
		if err := CheckKeys(cluster, mapSet, true); err != nil {
			return err
		}

		plog.Infof("stopping instance %v", i)
		if err := stopEtcd2(m); err != nil {
			return err
		}
		if err := replaceEtcd2Bin(m, secondPath); err != nil {
			return err
		}

		// set some values while running down a node and update set
		tempSet, err := SetKeys(cluster, settingSize)
		if err != nil {
			return err
		}
		mapCopy(mapSet, tempSet)

		plog.Infof("starting instance %v with upgraded binary", i)
		if err := startEtcd2(m); err != nil {
			return err
		}

		for _, m := range cluster.Machines() {
			if err := getClusterHealth(m, csize); err != nil {
				return err
			}
		}

	}
	// set some more values
	tempSet, err := SetKeys(cluster, settingSize)
	if err != nil {
		return err
	}
	mapCopy(mapSet, tempSet)

	// final check all values written correctly
	if err := CheckKeys(cluster, mapSet, true); err != nil {
		return err
	}

	// check version is now 2.1
	if secondVersion != "" {
		for _, m := range cluster.Machines() {
			if err := checkEtcdVersion(cluster, m, secondVersion); err != nil {
				return err
			}
		}
	}

	return nil
}
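A minimal sketch of the mapCopy helper used above to merge each round of newly written keys into the running set:

func mapCopy(dst, src map[string]string) {
	for k, v := range src {
		dst[k] = v
	}
}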
Example #21
// Test that the kernel NFS server and client work within CoreOS.
func NFS(c platform.TestCluster) error {
	/* server machine */
	c1 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "rpcbind.service",
					Command: "start",
				},
				config.Unit{
					Name:    "rpc-statd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "rpc-mountd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "nfsd.service",
					Command: "start",
				},
			},
		},
		WriteFiles: []config.File{
			config.File{
				Content: "/tmp	*(ro,insecure,all_squash,no_subtree_check,fsid=0)",
				Path: "/etc/exports",
			},
		},
		Hostname: "nfs1",
	}

	m1, err := c.NewMachine(c1.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m1.Destroy()

	plog.Info("NFS server booted.")

	/* poke a file in /tmp */
	tmp, err := m1.SSH("mktemp")
	if err != nil {
		return fmt.Errorf("Machine.SSH: %s", err)
	}

	plog.Infof("Test file %q created on server.", tmp)

	/* client machine */

	nfstmpl := `[Unit]
Description=NFS Client
After=network-online.target
Requires=network-online.target
After=rpc-statd.service
Requires=rpc-statd.service

[Mount]
What=%s:/tmp
Where=/mnt
Type=nfs
Options=defaults,noexec
`

	c2 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "rpc-statd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "mnt.mount",
					Command: "start",
					Content: fmt.Sprintf(nfstmpl, m1.IP()),
				},
			},
		},
		Hostname: "nfs2",
	}

	m2, err := c.NewMachine(c2.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m2.Destroy()

	plog.Info("NFS client booted.")

	var lsmnt []byte

	plog.Info("Waiting for NFS mount on client...")

	/* there's probably a better way to check the mount */
	for i := 0; i < 5; i++ {
		lsmnt, _ = m2.SSH("ls /mnt")

		if len(lsmnt) != 0 {
			plog.Info("Got NFS mount.")
			break
		}

		time.Sleep(1 * time.Second)
	}

	if len(lsmnt) == 0 {
		return fmt.Errorf("Client /mnt is empty.")
	}

	if !bytes.Contains(lsmnt, []byte(path.Base(string(tmp)))) {
		return fmt.Errorf("Client /mnt did not contain file %q from server /tmp -- /mnt: %s", tmp, lsmnt)
	}

	return nil
}
Example #22
// Start a multi-node cluster from the official CoreOS guides on manual
// installation. Once up, do a couple of basic smoke checks. See:
// https://coreos.com/kubernetes/docs/latest/getting-started.html
func CoreOSBasic(c platform.TestCluster, version string) error {
	// start single-node etcd
	etcdNode, err := c.NewMachine(etcdConfig)
	if err != nil {
		return err
	}

	if err := etcd.GetClusterHealth(etcdNode, 1); err != nil {
		return err
	}

	master, err := c.NewMachine("")
	if err != nil {
		return err
	}

	options := map[string]string{
		"HYPERKUBE_ACI":       "quay.io/coreos/hyperkube",
		"MASTER_HOST":         master.PrivateIP(),
		"ETCD_ENDPOINTS":      fmt.Sprintf("http://%v:2379", etcdNode.PrivateIP()),
		"CONTROLLER_ENDPOINT": fmt.Sprintf("https://%v:443", master.PrivateIP()),
		"K8S_SERVICE_IP":      "10.3.0.1",
		"K8S_VER":             version,
		"KUBELET_PATH":        "/usr/lib/coreos/kubelet-wrapper",
	}

	// generate TLS assets on master
	if err := generateMasterTLSAssets(master, options); err != nil {
		return err
	}

	// create 3 worker nodes
	workerConfigs := []string{"", "", ""}
	workers, err := platform.NewMachines(c, workerConfigs)
	if err != nil {
		return err
	}

	// generate TLS assets on workers by transferring the CA from the master
	if err := generateWorkerTLSAssets(master, workers); err != nil {
		return err
	}

	// configure nodes via generic install scripts
	if err := runInstallScript(master, controllerInstallScript, options); err != nil {
		return fmt.Errorf("Installing controller: %v", err)
	}

	for _, worker := range workers {
		if err := runInstallScript(worker, workerInstallScript, options); err != nil {
			return fmt.Errorf("Installing worker: %v", err)
		}
	}

	// configure kubectl
	if err := configureKubectl(master, master.PrivateIP(), version); err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		return nodeCheck(master, workers)
	}
	if err := util.Retry(15, 10*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, workers); err != nil {
		return err
	}

	// See http://kubernetes.io/v1.0/docs/user-guide/secrets/. Also
	// ensures https://github.com/coreos/bugs/issues/447 does not recur.
	if err = secretCheck(master, workers); err != nil {
		return err
	}

	return nil
}