Example #1
func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	os, err := newOpenStack(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
	}

	tags := map[string]string{
		"test": "value",
	}
	vol, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, &tags)
	if err != nil {
		t.Fatalf("Cannot create a new Cinder volume: %v", err)
	}

	err = os.DeleteVolume(vol)
	if err != nil {
		t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err)
	}
}
Example #2
// Generates an apiserver-like log line with an average length of about 100 characters
func generateLogLine(id int) string {
	method := httpMethods[rand.Intn(len(httpMethods))]
	namespace := namespaces[rand.Intn(len(namespaces))]

	podName := rand.String(rand.IntnRange(3, 5))
	url := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s", namespace, podName)
	status := rand.IntnRange(200, 600)

	return fmt.Sprintf("%d %s %s %d", id, method, url, status)
}
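generateLogLine references package-level fixtures that the snippet does not show. A minimal sketch of what they might look like (the names come from the code above; the values are assumed, not taken from the source):
// Assumed fixtures for generateLogLine; the actual slices live elsewhere
// in the test file and may differ.
var (
	httpMethods = []string{"GET", "POST", "PUT", "PATCH", "DELETE"}
	namespaces  = []string{"default", "kube-system", "kube-public"}
)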
Example #3
func TestInstances(t *testing.T) {
	testVM, _, cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}
	NodeName := types.NodeName(testVM)

	pc, err := newPCCloud(cfg)
	if err != nil {
		t.Fatalf("Failed to create new Photon client: %s", err)
	}

	i, ok := pc.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	externalId, err := i.ExternalID(NodeName)
	if err != nil {
		t.Fatalf("Instances.ExternalID(%s) failed: %s", testVM, err)
	}
	t.Logf("Found ExternalID(%s) = %s\n", testVM, externalId)

	nonExistingVM := types.NodeName(rand.String(15))
	externalId, err = i.ExternalID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
	}

	instanceId, err := i.InstanceID(NodeName)
	if err != nil {
		t.Fatalf("Instances.InstanceID(%s) failed: %s", testVM, err)
	}
	t.Logf("Found InstanceID(%s) = %s\n", testVM, instanceId)

	instanceId, err = i.InstanceID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
	}

	addrs, err := i.NodeAddresses(NodeName)
	if err != nil {
		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", testVM, err)
	}
	t.Logf("Found NodeAddresses(%s) = %s\n", testVM, addrs)
}
Example #4
func (s *rsyncDaemonStrategy) startRemoteDaemon() error {
	port, err := s.getFreePort()
	if err != nil {
		return err
	}
	cmdOut := &bytes.Buffer{}
	cmdErr := &bytes.Buffer{}
	cmdIn := bytes.NewBufferString(fmt.Sprintf(startDaemonScript, krand.String(32), krand.String(32), port))
	err = s.RemoteExecutor.Execute([]string{"sh"}, cmdIn, cmdOut, cmdErr)
	if err != nil {
		glog.V(1).Infof("Error starting rsync daemon: %v. Out: %s, Err: %s\n", err, cmdOut.String(), cmdErr.String())
		// Determine whether rsync is present in the container
		if checkRsync(s.RemoteExecutor) != nil {
			return strategySetupError("rsync not available in container")
		}
		return err
	}
	s.daemonPort = port
	s.daemonPIDFile = strings.TrimSpace(cmdOut.String())
	return nil
}
Example #5
func (simpleNameGenerator) GenerateName(base string) string {
	if len(base) > maxGeneratedNameLength {
		base = base[:maxGeneratedNameLength]
	}
	gname := getNameOfMovedPod(base)
	fmt.Printf("$$$$$$$$$$$$$$$$ gname is %s $$$$$$$$$$$$$$$$$$$$$\n", gname)
	if gname == "" {
		gname = utilrand.String(randomLength)
	}
	fmt.Printf("$$$$$$$$$$$$$$$$ gname is %s $$$$$$$$$$$$$$$$$$$$$\n", gname)

	return fmt.Sprintf("%s%s", base, gname)
}
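This variant comes from a debugging fork: getNameOfMovedPod is defined elsewhere, and the $$$ lines are leftover trace output. To make the snippet readable in isolation, a hypothetical stub with the assumed signature:
// Hypothetical stub: the real getNameOfMovedPod lives elsewhere in the fork
// and presumably returns a previously assigned suffix for the base name, or
// "" when there is none.
func getNameOfMovedPod(base string) string {
	return ""
}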
Example #6
func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	vs, err := newVSphere(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	i, ok := vs.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	srvs, err := i.List("*")
	if err != nil {
		t.Fatalf("Instances.List() failed: %s", err)
	}
	if len(srvs) == 0 {
		t.Fatalf("Instances.List() returned zero servers")
	}

	volumeOptions := &VolumeOptions{
		CapacityKB: 1 * 1024 * 1024,
		Tags:       nil,
		Name:       "kubernetes-test-volume-" + rand.String(10),
		DiskFormat: "thin"}

	volPath, err := vs.CreateVolume(volumeOptions)
	if err != nil {
		t.Fatalf("Cannot create a new VMDK volume: %v", err)
	}

	_, _, err = vs.AttachDisk(volPath, "")
	if err != nil {
		t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
	}

	err = vs.DetachDisk(volPath, "")
	if err != nil {
		t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, srvs[0], err)
	}

	// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
	// err = vs.DeleteVolume(volPath)
	// if err != nil {
	// 	t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
	// }
}
Example #7
func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	os, err := newOpenStack(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
	}

	tags := map[string]string{
		"test": "value",
	}
	vol, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
	if err != nil {
		t.Fatalf("Cannot create a new Cinder volume: %v", err)
	}
	t.Logf("Volume (%s) created\n", vol)

	WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)

	diskId, err := os.AttachDisk(os.localInstanceID, vol)
	if err != nil {
		t.Fatalf("Cannot AttachDisk Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskId)

	WaitForVolumeStatus(t, os, vol, volumeInUseStatus, volumeCreateTimeoutSeconds)

	devicePath := os.GetDevicePath(diskId)
	if !strings.HasPrefix(devicePath, "/dev/disk/by-id/") {
		t.Fatalf("GetDevicePath returned and unexpected path for Cinder volume %s, returned %s", vol, devicePath)
	}
	t.Logf("Volume (%s) found at path: %s\n", vol, devicePath)

	err = os.DetachDisk(os.localInstanceID, vol)
	if err != nil {
		t.Fatalf("Cannot DetachDisk Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) detached\n", vol)

	WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)

	err = os.DeleteVolume(vol)
	if err != nil {
		t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) deleted\n", vol)

}
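WaitForVolumeStatus is a helper from the same test file. A plausible shape for it, polling once a second until the volume reports the wanted status or the timeout expires; the getVolume accessor and the Status field are assumed names, not confirmed by the snippet:
// Sketch of the polling helper used above; os.getVolume and vol.Status
// are assumptions.
func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status string, timeoutSeconds int) {
	start := time.Now()
	for {
		time.Sleep(1 * time.Second)
		if time.Since(start) > time.Duration(timeoutSeconds)*time.Second {
			t.Fatalf("Volume %s did not reach status %s within %d seconds", volumeName, status, timeoutSeconds)
		}
		vol, err := os.getVolume(volumeName)
		if err != nil {
			t.Fatalf("Cannot get Cinder volume %s: %v", volumeName, err)
		}
		if vol.Status == status {
			t.Logf("Volume (%s) reached status %s after %v\n", volumeName, status, time.Since(start))
			return
		}
	}
}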
Example #8
func (s *rsyncDaemonStrategy) startRemoteDaemon() error {
	port, err := s.getFreePort()
	if err != nil {
		return err
	}
	cmdOut := &bytes.Buffer{}
	cmdErr := &bytes.Buffer{}
	pidFile := krand.String(32)
	configFile := krand.String(32)
	cmdIn := bytes.NewBufferString(fmt.Sprintf(startDaemonScript, configFile, pidFile, port))
	daemonErr := make(chan error, 1)
	go func() {
		err = s.RemoteExecutor.Execute([]string{"sh"}, cmdIn, cmdOut, cmdErr)
		if err != nil {
			daemonErr <- fmt.Errorf("error starting rsync daemon: %v\nOut: %s\nErr: %s\n", err, cmdOut.String(), cmdErr.String())
		}
	}()
	// Wait until a pid file is present or an error has occurred
	checkScript := bytes.NewBufferString(fmt.Sprintf(checkDaemonScript, pidFile))
	startTime := time.Now()
	for {
		if time.Since(startTime) > RsyncDaemonStartTimeOut {
			return fmt.Errorf("Timed out waiting for rsync daemon to start")
		}
		checkScript.Reset()
		err = s.RemoteExecutor.Execute([]string{"sh"}, checkScript, ioutil.Discard, ioutil.Discard)
		if err == nil {
			break
		}
		if len(daemonErr) > 0 {
			return <-daemonErr
		}
		time.Sleep(100 * time.Millisecond)
	}
	s.daemonPort = port
	s.daemonPIDFile = pidFile
	return nil
}
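The core pattern in this version is worth isolating: run the blocking daemon start in a goroutine that reports into a buffered error channel, then poll for readiness against a deadline. A self-contained sketch of that pattern (names here are illustrative, not from the source):
// waitReady runs a blocking start function in the background and polls a
// readiness check until it passes, the deadline expires, or the start
// function fails. The channel is buffered so the goroutine never leaks by
// blocking on send.
func waitReady(start func() error, ready func() bool, timeout time.Duration) error {
	errCh := make(chan error, 1)
	go func() {
		if err := start(); err != nil {
			errCh <- err
		}
	}()
	deadline := time.Now().Add(timeout)
	for !ready() {
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v waiting for readiness", timeout)
		}
		select {
		case err := <-errCh:
			return err
		default:
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil
}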
Example #9
// create count pods with the given phase for the given job
func newPodList(count int, status api.PodPhase, job *extensions.Job) []api.Pod {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("pod-%v", rand.String(10)),
				Labels:    job.Spec.Selector.MatchLabels,
				Namespace: job.Namespace,
			},
			Status: api.PodStatus{Phase: status},
		}
		pods = append(pods, newPod)
	}
	return pods
}
Example #10
func (puller *serializedImagePuller) pullImages() {
	id := rand.String(8)
	log.Printf("[Debug] loop(%s) new pull some images\n", id)
	for pullRequest := range puller.pullRequests {
		log.Println("---------------------------------")
		puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
		log.Printf("[Debug] loop(%s) pull image %s [pulling]\n", id, pullRequest.container.Image)
		err := puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
		log.Printf("[Debug] loop(%s) pull image %s. get result %v [return]\n", id, pullRequest.container.Image, err)
		pullRequest.returnChan <- err
		log.Printf("[Debug] loop(%s) pull image %s. get result %v, and send result to channel success. [ok]\n", id, pullRequest.container.Image, err)
		log.Println("---------------------------------")
	}
	log.Printf("[Debug] loop(%s) finish pull some images\n", id)
}
Example #11
// create count pods with the given phase for the given job
func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
	pods := []v1.Pod{}
	for i := int32(0); i < count; i++ {
		newPod := v1.Pod{
			ObjectMeta: v1.ObjectMeta{
				Name:      fmt.Sprintf("pod-%v", rand.String(10)),
				Labels:    job.Spec.Selector.MatchLabels,
				Namespace: job.Namespace,
			},
			Status: v1.PodStatus{Phase: status},
		}
		pods = append(pods, newPod)
	}
	return pods
}
Example #12
func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	os, err := newOpenStack(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
	}

	tags := map[string]string{
		"test": "value",
	}
	vol, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, &tags)
	if err != nil {
		t.Fatalf("Cannot create a new Cinder volume: %v", err)
	}
	t.Logf("Volume (%s) created\n", vol)

	WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)

	diskId, err := os.AttachDisk(vol)
	if err != nil {
		t.Fatalf("Cannot AttachDisk Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskId)

	WaitForVolumeStatus(t, os, vol, volumeInUseStatus, volumeCreateTimeoutSeconds)

	err = os.DetachDisk(vol)
	if err != nil {
		t.Fatalf("Cannot DetachDisk Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) detached\n", vol)

	WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)

	err = os.DeleteVolume(vol)
	if err != nil {
		t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err)
	}
	t.Logf("Volume (%s) deleted\n", vol)

}
Example #13
func TestVolumes(t *testing.T) {
	testVM, testFlavor, cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	pc, err := newPCCloud(cfg)
	if err != nil {
		t.Fatalf("Failed to create new Photon client: %s", err)
	}

	NodeName := types.NodeName(testVM)

	volumeOptions := &VolumeOptions{
		CapacityGB: 2,
		Tags:       nil,
		Name:       "kubernetes-test-volume-" + rand.String(10),
		Flavor:     testFlavor}

	pdID, err := pc.CreateDisk(volumeOptions)
	if err != nil {
		t.Fatalf("Cannot create a Photon persistent disk: %v", err)
	}

	err = pc.AttachDisk(pdID, NodeName)
	if err != nil {
		t.Fatalf("Cannot attach persistent disk(%s) to VM(%s): %v", pdID, testVM, err)
	}

	_, err = pc.DiskIsAttached(pdID, NodeName)
	if err != nil {
		t.Fatalf("Cannot check whether persistent disk(%s) is attached to VM(%s): %v", pdID, testVM, err)
	}

	err = pc.DetachDisk(pdID, NodeName)
	if err != nil {
		t.Fatalf("Cannot detach persisten disk(%s) from VM(%s): %v", pdID, testVM, err)
	}

	err = pc.DeleteDisk(pdID)
	if err != nil {
		t.Fatalf("Cannot delete persisten disk(%s): %v", pdID, err)
	}
}
Example #14
func newControllerPlug(options configapi.MasterConfig, client *etcdclient.Client) (plug.Plug, func()) {
	switch {
	case options.ControllerLeaseTTL > 0:
		// TODO: replace with future API for leasing from Kube
		id := fmt.Sprintf("master-%s", kutilrand.String(8))
		leaser := leaderlease.NewEtcd(
			client,
			path.Join(options.EtcdStorageConfig.OpenShiftStoragePrefix, "leases/controllers"),
			id,
			uint64(options.ControllerLeaseTTL),
		)
		leased := plug.NewLeased(leaser)
		return leased, func() {
			glog.V(2).Infof("Attempting to acquire controller lease as %s, renewing every %d seconds", id, options.ControllerLeaseTTL)
			go leased.Run()
		}
	default:
		return plug.New(!options.PauseControllers), func() {}
	}
}
Example #15
func (simpleNameGenerator) GenerateName(base string) string {
	if len(base) > maxGeneratedNameLength {
		base = base[:maxGeneratedNameLength]
	}
	return fmt.Sprintf("%s%s", base, utilrand.String(randomLength))
}
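For context, the length bounds this generator relies on were defined in upstream Kubernetes roughly as follows (values from that era; treat them as illustrative rather than authoritative):
const (
	// maxNameLength is the maximum length a generated name may have.
	maxNameLength = 63
	// randomLength is the number of random suffix characters appended.
	randomLength = 5
	// maxGeneratedNameLength leaves room for the random suffix.
	maxGeneratedNameLength = maxNameLength - randomLength
)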
Example #16
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			table := fmt.Sprintf("table_%s", rand.String(10))

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.GetPodName(), 60*time.Second)
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.GetPodName(), 60*time.Second)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].GetPodName(), 60*time.Second)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
Example #17
func TestInstances(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	vs, err := newVSphere(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	i, ok := vs.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	srvs, err := vs.list("*")
	if err != nil {
		t.Fatalf("list() failed: %s", err)
	}

	if len(srvs) == 0 {
		t.Fatalf("list() returned zero servers")
	}
	t.Logf("Found servers (%d): %s\n", len(srvs), srvs)

	externalId, err := i.ExternalID(srvs[0])
	if err != nil {
		t.Fatalf("Instances.ExternalID(%s) failed: %s", srvs[0], err)
	}
	t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId)

	nonExistingVM := types.NodeName(rand.String(15))
	externalId, err = i.ExternalID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
	}

	instanceId, err := i.InstanceID(srvs[0])
	if err != nil {
		t.Fatalf("Instances.InstanceID(%s) failed: %s", srvs[0], err)
	}
	t.Logf("Found InstanceID(%s) = %s\n", srvs[0], instanceId)

	instanceId, err = i.InstanceID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
	}

	addrs, err := i.NodeAddresses(srvs[0])
	if err != nil {
		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
	}
	t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
}