Example #1
// Test that timesyncd starts using the local NTP server
func NTP(c platform.TestCluster) error {
	m, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m.Destroy()

	out, err := m.SSH("networkctl status eth0")
	if err != nil {
		return fmt.Errorf("networkctl: %v", err)
	}
	if !bytes.Contains(out, []byte("NTP: 10.0.0.1")) {
		return fmt.Errorf("Bad network config:\n%s", out)
	}

	plog.Info("Waiting for systemd-timesyncd.service")
	for i := 0; i < 60; i++ {
		out, err = m.SSH("systemctl status systemd-timesyncd.service")
		if err != nil {
			return fmt.Errorf("systemctl: %v", err)
		}

		if bytes.Contains(out, []byte(`Status: "Using Time Server 10.0.0.1:123 (10.0.0.1)."`)) {
			plog.Info("systemd-timesyncd.service is working!")
			return nil
		}

		time.Sleep(time.Second)
	}

	return fmt.Errorf("Bad status:\n%s", out)
}
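Later examples replace this hand-rolled polling loop with a util.Retry helper (Example #6 rewrites this same check that way). Its implementation is not part of this listing; a minimal sketch that matches the call shape used below (attempt count, delay, retried function) and needs only the standard time package could look like this, though the real util package may differ:

// Retry is a hypothetical sketch of the helper used in the later examples:
// it calls f up to attempts times, sleeping delay between failed attempts,
// and returns the last error if f never succeeds.
func Retry(attempts int, delay time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}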
Example #2
// Start a multi-node cluster from the official Kubernetes 1.0 guides. Once
// up, do a couple of basic smoke checks. See:
// http://kubernetes.io/v1.0/docs/getting-started-guides/coreos/coreos_multinode_cluster.html
func MultiNodeSmoke(c platform.TestCluster) error {
	const clusterSize = 3

	// spawn master
	master, err := c.NewMachine(masterConfig)
	if err != nil {
		return err
	}

	// get the master's private IP and substitute it into nodeConfig
	nodeConfig = strings.Replace(nodeConfig, "<master-private-ip>", master.PrivateIP(), -1)
	var nodeConfigs []string
	for i := 0; i < clusterSize-1; i++ {
		nodeConfigs = append(nodeConfigs, nodeConfig)
	}

	// spawn nodes
	nodes, err := platform.NewMachines(c, nodeConfigs)
	if err != nil {
		return err
	}

	// download kubectl onto the master
	_, err = master.SSH("wget -q https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl")
	if err != nil {
		return err
	}
	_, err = master.SSH("chmod +x kubectl")
	if err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		return nodeCheck(master, nodes)
	}
	if err := util.Retry(10, 5*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, nodes); err != nil {
		return err
	}

	// See http://kubernetes.io/v1.0/docs/user-guide/secrets/. Also ensures
	// https://github.com/coreos/bugs/issues/447 does not recur.
	if err = secretCheck(master, nodes); err != nil {
		return err
	}

	return nil
}
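The masterConfig and nodeConfig cloud-configs and the nodeCheck, nginxCheck, and secretCheck helpers are defined elsewhere in the test package. Purely as an illustration, a nodeCheck-style helper could list registered nodes with the kubectl binary fetched above and compare the count against the spawned workers; the sshRunner interface, the exact kubectl invocation, and the parsing below are assumptions, not the harness's real API:

// sshRunner is a hypothetical stand-in for the harness's machine type; only
// the SSH method used throughout these examples is assumed.
type sshRunner interface {
	SSH(cmd string) ([]byte, error)
}

// nodeCheck (sketch) verifies that kubectl on the master reports one entry
// per worker node.
func nodeCheck(master sshRunner, nodes []sshRunner) error {
	out, err := master.SSH("./kubectl get nodes --no-headers")
	if err != nil {
		return fmt.Errorf("kubectl get nodes: %v", err)
	}
	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
	if len(lines) != len(nodes) {
		return fmt.Errorf("expected %d nodes, kubectl reported %d:\n%s", len(nodes), len(lines), out)
	}
	return nil
}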
Example #3
// Test fleet running through an etcd2 proxy.
func Proxy(c platform.TestCluster) error {
	masterconf.CoreOS.Etcd2.Discovery, _ = c.GetDiscoveryURL(1)
	master, err := c.NewMachine(masterconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer master.Destroy()

	proxyconf.CoreOS.Etcd2.Discovery = masterconf.CoreOS.Etcd2.Discovery
	proxy, err := c.NewMachine(proxyconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer proxy.Destroy()

	err = platform.InstallFile(strings.NewReader(fleetunit), proxy, "/home/core/hello.service")
	if err != nil {
		return fmt.Errorf("InstallFile: %s", err)
	}

	// give fleet a moment to settle; retry the start a few times
	fleetStart := func() error {
		_, err = proxy.SSH("fleetctl start /home/core/hello.service")
		if err != nil {
			return fmt.Errorf("fleetctl start: %s", err)
		}
		return nil
	}
	if err := util.Retry(5, 5*time.Second, fleetStart); err != nil {
		return fmt.Errorf("fleetctl start failed: %v", err)
	}

	var status []byte

	fleetList := func() error {
		status, err = proxy.SSH("fleetctl list-units -l -fields active -no-legend")
		if err != nil {
			return fmt.Errorf("fleetctl list-units: %s", err)
		}

		if !bytes.Equal(status, []byte("active")) {
			return fmt.Errorf("unit not active")
		}

		return nil
	}

	if err := util.Retry(5, 1*time.Second, fleetList); err != nil {
		return fmt.Errorf("fleetctl list-units failed: %v", err)
	}

	return nil
}
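The masterconf and proxyconf cloud-configs and the fleetunit string are defined alongside this test; presumably the proxy machine runs etcd2 in proxy mode against the same discovery URL and starts fleet. For illustration, hello.service only needs to be a unit that fleet can schedule and keep active; a plausible, hypothetical definition:

// fleetunit is a hypothetical example; the real test's unit file may differ.
const fleetunit = `[Unit]
Description=Hello World

[Service]
ExecStart=/bin/sh -c "while true; do echo hello; sleep 1; done"
`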
Example #4
// JournalRemote tests that systemd-journal-remote can read log entries from
// a systemd-journal-gatewayd server.
func JournalRemote(c platform.TestCluster) error {
	// start gatewayd and log a message
	gateway, err := c.NewMachine(gatewayconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer gateway.Destroy()

	// log a unique message on gatewayd machine
	msg := "supercalifragilisticexpialidocious"
	out, err := gateway.SSH("logger " + msg)
	if err != nil {
		return fmt.Errorf("logger: %v: %v", out, err)
	}

	// spawn a machine to read from gatewayd
	collector, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer collector.Destroy()

	// collect logs from gatewayd machine
	cmd := fmt.Sprintf("sudo systemd-run --unit systemd-journal-remote-client /usr/lib/systemd/systemd-journal-remote --url http://%s:19531", gateway.PrivateIP())
	out, err = collector.SSH(cmd)
	if err != nil {
		return fmt.Errorf("failed to start systemd-journal-remote: %v: %v", out, err)
	}

	// find the message on the collector
	journalReader := func() error {
		cmd = fmt.Sprintf("sudo journalctl _HOSTNAME=%s -t core --file /var/log/journal/remote/remote-%s:19531.journal", gatewayconf.Hostname, gateway.PrivateIP())
		out, err = collector.SSH(cmd)
		if err != nil {
			return fmt.Errorf("journalctl: %v: %v", out, err)
		}

		if !strings.Contains(string(out), msg) {
			return fmt.Errorf("journal missing entry: expected %q got %q", msg, out)
		}

		return nil
	}

	if err := util.Retry(5, 2*time.Second, journalReader); err != nil {
		return err
	}

	return nil
}
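gatewayconf is defined elsewhere in the test file. Reusing the config.CloudConfig structure shown in the NFS example further down, it plausibly just starts the systemd-journal-gatewayd socket and sets the hostname that the journalctl _HOSTNAME match depends on; the actual definition may differ:

// gatewayconf sketch, hypothetical, based on what the test above relies on.
var gatewayconf = config.CloudConfig{
	CoreOS: config.CoreOS{
		Units: []config.Unit{
			config.Unit{
				Name:    "systemd-journal-gatewayd.socket",
				Command: "start",
			},
		},
	},
	Hostname: "gateway",
}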
Example #5
// Test to make sure rkt install works.
func Install(c platform.TestCluster) error {
	mach, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer mach.Destroy()

	cmd := "sudo rkt install"
	output, err := mach.SSH(cmd)
	if err != nil {
		return fmt.Errorf("failed to run %q: %s: %s", cmd, err, output)
	}

	return nil
}
Example #6
// Test that timesyncd starts using the local NTP server
func NTP(c platform.TestCluster) error {
	m, err := c.NewMachine("")
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}
	defer m.Destroy()

	out, err := m.SSH("networkctl status eth0")
	if err != nil {
		return fmt.Errorf("networkctl: %v", err)
	}
	if !bytes.Contains(out, []byte("NTP: 10.0.0.1")) {
		return fmt.Errorf("Bad network config:\n%s", out)
	}

	plog.Info("Waiting for systemd-timesyncd.service")

	checker := func() error {
		out, err = m.SSH("systemctl status systemd-timesyncd.service")
		if err != nil {
			return fmt.Errorf("systemctl: %v", err)
		}

		if !bytes.Contains(out, []byte(`Status: "Synchronized to time server 10.0.0.1:123 (10.0.0.1)."`)) {
			return fmt.Errorf("unexpected systemd-timesyncd status: %v", out)
		}

		plog.Info("systemd-timesyncd.service is working!")
		return nil
	}

	err = util.Retry(60, 1*time.Second, checker)
	if err != nil {
		return err
	}

	return nil
}
Example #7
// Test that the kernel NFS server and client work within CoreOS.
func NFS(c platform.TestCluster) error {
	/* server machine */
	c1 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "rpcbind.service",
					Command: "start",
				},
				config.Unit{
					Name:    "rpc-statd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "rpc-mountd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "nfsd.service",
					Command: "start",
				},
			},
		},
		WriteFiles: []config.File{
			config.File{
				Content: "/tmp	*(ro,insecure,all_squash,no_subtree_check,fsid=0)",
				Path: "/etc/exports",
			},
		},
		Hostname: "nfs1",
	}

	m1, err := c.NewMachine(c1.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m1.Destroy()

	plog.Info("NFS server booted.")

	/* poke a file in /tmp */
	tmp, err := m1.SSH("mktemp")
	if err != nil {
		return fmt.Errorf("Machine.SSH: %s", err)
	}

	plog.Infof("Test file %q created on server.", tmp)

	/* client machine */

	nfstmpl := `[Unit]
Description=NFS Client
After=network-online.target
Requires=network-online.target
After=rpc-statd.service
Requires=rpc-statd.service

[Mount]
What=%s:/tmp
Where=/mnt
Type=nfs
Options=defaults,noexec
`

	c2 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "rpc-statd.service",
					Command: "start",
				},
				config.Unit{
					Name:    "mnt.mount",
					Command: "start",
					Content: fmt.Sprintf(nfstmpl, m1.IP()),
				},
			},
		},
		Hostname: "nfs2",
	}

	m2, err := c.NewMachine(c2.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m2.Destroy()

	plog.Info("NFS client booted.")

	var lsmnt []byte

	plog.Info("Waiting for NFS mount on client...")

	/* there's probably a better way to check the mount */
	for i := 0; i < 5; i++ {
		lsmnt, _ = m2.SSH("ls /mnt")

		if len(lsmnt) != 0 {
			plog.Info("Got NFS mount.")
			break
		}

		time.Sleep(1 * time.Second)
	}

	if len(lsmnt) == 0 {
		return fmt.Errorf("Client /mnt is empty.")
	}

	if !bytes.Contains(lsmnt, []byte(path.Base(string(tmp)))) {
		return fmt.Errorf("Client /mnt did not contain file %q from server /tmp -- /mnt: %s", tmp, lsmnt)
	}

	return nil
}
Example #8
// Start a multi-node cluster from the official CoreOS guides on manual
// installation. Once up, do a couple of basic smoke checks. See:
// https://coreos.com/kubernetes/docs/latest/getting-started.html
func CoreOSBasic(c platform.TestCluster, version string) error {
	// start single-node etcd
	etcdNode, err := c.NewMachine(etcdConfig)
	if err != nil {
		return err
	}

	if err := etcd.GetClusterHealth(etcdNode, 1); err != nil {
		return err
	}

	master, err := c.NewMachine("")
	if err != nil {
		return err
	}

	options := map[string]string{
		"HYPERKUBE_ACI":       "quay.io/coreos/hyperkube",
		"MASTER_HOST":         master.PrivateIP(),
		"ETCD_ENDPOINTS":      fmt.Sprintf("http://%v:2379", etcdNode.PrivateIP()),
		"CONTROLLER_ENDPOINT": fmt.Sprintf("https://%v:443", master.PrivateIP()),
		"K8S_SERVICE_IP":      "10.3.0.1",
		"K8S_VER":             version,
		"KUBELET_PATH":        "/usr/lib/coreos/kubelet-wrapper",
	}

	// generate TLS assets on master
	if err := generateMasterTLSAssets(master, options); err != nil {
		return err
	}

	// create 3 worker nodes
	workerConfigs := []string{"", "", ""}
	workers, err := platform.NewMachines(c, workerConfigs)
	if err != nil {
		return err
	}

	// generate TLS assets on workers by transferring the CA from the master
	if err := generateWorkerTLSAssets(master, workers); err != nil {
		return err
	}

	// configure nodes via generic install scripts
	if err := runInstallScript(master, controllerInstallScript, options); err != nil {
		return fmt.Errorf("Installing controller: %v", err)
	}

	for _, worker := range workers {
		if err := runInstallScript(worker, workerInstallScript, options); err != nil {
			return fmt.Errorf("Installing worker: %v", err)
		}
	}

	// configure kubectl
	if err := configureKubectl(master, master.PrivateIP(), version); err != nil {
		return err
	}

	// check that all nodes appear in kubectl
	f := func() error {
		return nodeCheck(master, workers)
	}
	if err := util.Retry(15, 10*time.Second, f); err != nil {
		return err
	}

	// start nginx pod and curl endpoint
	if err = nginxCheck(master, workers); err != nil {
		return err
	}

	// See http://kubernetes.io/v1.0/docs/user-guide/secrets/. Also ensures
	// https://github.com/coreos/bugs/issues/447 does not recur.
	if err = secretCheck(master, workers); err != nil {
		return err
	}

	return nil
}
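The etcdConfig cloud-config and the generateMasterTLSAssets, generateWorkerTLSAssets, runInstallScript, nodeCheck, nginxCheck, secretCheck, and configureKubectl helpers live elsewhere in the test package. As a rough sketch of what configureKubectl might do, following the linked CoreOS guide and reusing the hypothetical sshRunner interface from the nodeCheck sketch above (the file names, paths, and context names are assumptions):

// configureKubectl (sketch) downloads the matching kubectl release on the
// master and points it at the API server using the generated TLS assets.
func configureKubectl(master sshRunner, masterIP, version string) error {
	cmds := []string{
		fmt.Sprintf("wget -q https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/amd64/kubectl", version),
		"chmod +x kubectl",
		fmt.Sprintf("./kubectl config set-cluster default-cluster --server=https://%s --certificate-authority=ca.pem", masterIP),
		"./kubectl config set-credentials default-admin --certificate-authority=ca.pem --client-key=admin-key.pem --client-certificate=admin.pem",
		"./kubectl config set-context default-system --cluster=default-cluster --user=default-admin",
		"./kubectl config use-context default-system",
	}
	for _, cmd := range cmds {
		if out, err := master.SSH(cmd); err != nil {
			return fmt.Errorf("%q failed: %v: %s", cmd, err, out)
		}
	}
	return nil
}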
Example #9
func testNFS(c platform.TestCluster, nfsversion int) error {
	m1, err := c.NewMachine(nfsserverconf.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m1.Destroy()

	plog.Info("NFS server booted.")

	/* poke a file in /tmp */
	tmp, err := m1.SSH("mktemp")
	if err != nil {
		return fmt.Errorf("Machine.SSH: %s", err)
	}

	plog.Infof("Test file %q created on server.", tmp)

	c2 := config.CloudConfig{
		CoreOS: config.CoreOS{
			Units: []config.Unit{
				config.Unit{
					Name:    "mnt.mount",
					Command: "start",
					Content: fmt.Sprintf(mounttmpl, m1.PrivateIP(), nfsversion),
				},
			},
		},
		Hostname: "nfs2",
	}

	m2, err := c.NewMachine(c2.String())
	if err != nil {
		return fmt.Errorf("Cluster.NewMachine: %s", err)
	}

	defer m2.Destroy()

	plog.Info("NFS client booted.")

	plog.Info("Waiting for NFS mount on client...")

	checkmount := func() error {
		status, err := m2.SSH("systemctl is-active mnt.mount")
		if err != nil || string(status) != "active" {
			return fmt.Errorf("mnt.mount status is %q: %v", status, err)
		}

		plog.Info("Got NFS mount.")
		return nil
	}

	if err = util.Retry(10, 3*time.Second, checkmount); err != nil {
		return err
	}

	_, err = m2.SSH(fmt.Sprintf("stat /mnt/%s", path.Base(string(tmp))))
	if err != nil {
		return fmt.Errorf("file %q does not exist", tmp)
	}

	return nil
}
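nfsserverconf and mounttmpl are defined alongside this test. Given the two fmt.Sprintf arguments (server IP and NFS version) and the mount unit shown in Example #7, mounttmpl plausibly looks like the following; the real template may differ:

// mounttmpl sketch, hypothetical, filled in with the server IP and an
// nfsvers mount option.
const mounttmpl = `[Unit]
Description=NFS Client
After=network-online.target
Requires=network-online.target

[Mount]
What=%s:/tmp
Where=/mnt
Type=nfs
Options=defaults,noexec,nfsvers=%d
`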