Example No. 1
	Ω(err).Should(MatchError(ErrWriter))
	Ω(n).Should(Equal(writes), "ErrorWriter should write %d bytes", writes)
}

func validErrorWriteSuccess(w io.Writer, dataLen int) {
	Ω(w.Write(make([]byte, dataLen))).Should(Equal(dataLen), "ErrorWriter write should succeed")
}

var _ = bdd.Describe("Writers", func() {
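	// Every spec runs with the reset package enabled (BeforeEach) and disabled again afterwards (AfterEach).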

	bdd.BeforeEach(func() {
		reset.Enable()
	})

	bdd.AfterEach(func() {
		reset.Disable()
	})

	bdd.It("ErrorWriter", func() {
		w := ErrorWriter(0)
		validErrorWrite(w, 10, 0)

		w = ErrorWriter(5)
		validErrorWriteSuccess(w, 3)
		validErrorWrite(w, 10, 2)
		validErrorWrite(w, 10, 0)

		w = ErrorWriter(5)
		validErrorWriteSuccess(w, 5)
		validErrorWrite(w, 10, 0)
	})
Example No. 2
		err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "system:image-pruner", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("of schema 1", func() {
		g.JustBeforeEach(func() {
			if *originalAcceptSchema2 {
				g.By("ensure the registry does not accept schema 2")
				err := ensureRegistryAcceptsSchema2(oc, false)
				o.Expect(err).NotTo(o.HaveOccurred())
			}
		})

		g.AfterEach(func() {
			if *originalAcceptSchema2 {
				err := ensureRegistryAcceptsSchema2(oc, true)
				o.Expect(err).NotTo(o.HaveOccurred())
			}
		})

		g.It("should prune old image", func() { testPruneImages(oc, 1) })
	})

	g.Describe("of schema 2", func() {
		g.JustBeforeEach(func() {
			if !*originalAcceptSchema2 {
				g.By("ensure the registry accepts schema 2")
				err := ensureRegistryAcceptsSchema2(oc, true)
				o.Expect(err).NotTo(o.HaveOccurred())
			}
		})
Example No. 3
		deploymentFixture               = exutil.FixturePath("testdata", "test-deployment-test.yaml")
		simpleDeploymentFixture         = exutil.FixturePath("testdata", "deployment-simple.yaml")
		customDeploymentFixture         = exutil.FixturePath("testdata", "custom-deployment.yaml")
		generationFixture               = exutil.FixturePath("testdata", "generation-test.yaml")
		pausedDeploymentFixture         = exutil.FixturePath("testdata", "paused-deployment.yaml")
		failedHookFixture               = exutil.FixturePath("testdata", "failing-pre-hook.yaml")
		brokenDeploymentFixture         = exutil.FixturePath("testdata", "test-deployment-broken.yaml")
		historyLimitedDeploymentFixture = exutil.FixturePath("testdata", "deployment-history-limit.yaml")
		minReadySecondsFixture          = exutil.FixturePath("testdata", "deployment-min-ready-seconds.yaml")
		multipleICTFixture              = exutil.FixturePath("testdata", "deployment-example.yaml")
		tagImagesFixture                = exutil.FixturePath("testdata", "tag-images-deployment.yaml")
	)

	g.Describe("when run iteratively", func() {
		g.AfterEach(func() {
			failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed)
		})

		g.It("should only deploy the last deployment [Conformance]", func() {
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
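				// Roughly 20% of the time, sleep for a random sub-second interval to vary the timing between iterations.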
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {

				case n < 0.4:
					// trigger a new deployment
Example No. 4
	"github.com/onsi/gomega"
	"github.com/vmware/photon-controller-go-sdk/photon/internal/mocks"
)

var _ = ginkgo.Describe("Info", func() {
	var (
		server *mocks.Server
		client *Client
	)

	ginkgo.BeforeEach(func() {
		server, client = testSetup()
	})

	ginkgo.AfterEach(func() {
		server.Close()
	})

	ginkgo.Describe("Get", func() {
		ginkgo.It("Get deployment info successfully", func() {
			baseVersion := "1.1.0"
			fullVersion := "1.1.0-bcea65f"
			gitCommitHash := "bcea65f"
			networkType := "SOFTWARE_DEFINED"
			server.SetResponseJson(200,
				Info{
					BaseVersion:   baseVersion,
					FullVersion:   fullVersion,
					GitCommitHash: gitCommitHash,
					NetworkType:   networkType,
				})
Example No. 5
	var oc = exutil.NewCLI("force-pull-s2i", exutil.KubeConfigPath())

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("\n FORCE PULL TEST:  Force pull and s2i builder", func() {
		// corrupt the s2i builder image
		g.BeforeEach(func() {
			exutil.CorruptImage(s2iDockBldr, custBldr, "s21")
		})

		g.AfterEach(func() {
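			// Restore the image state captured in resetData (the builder image was corrupted in BeforeEach).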
			exutil.ResetImage(resetData)
		})

		g.JustBeforeEach(func() {
			g.By("waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Context("\n FORCE PULL TEST:  when s2i force pull is false and the image is bad", func() {

			g.It("\n FORCE PULL TEST s2i false", func() {
				fpFalseS2I := exutil.FixturePath("fixtures", "forcepull-false-s2i.json")
				g.By(fmt.Sprintf("\n%s FORCE PULL TEST s2i false:  calling create on %s", time.Now().Format(time.RFC850), fpFalseS2I))
				exutil.StartBuild(fpFalseS2I, buildPrefix, oc)
Example No. 6
var _ = bdd.Describe("mongotest", func() {
	var (
		testDb *TestDb
	)

	bdd.BeforeEach(func() {
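		// Turn on reset hooks and create a fresh test database handle for each spec.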
		reset.Enable()
		testDb = New("/unittest")
	})

	bdd.AfterEach(func() {
		reset.Disable()

		session, err := mgo.Dial("127.0.0.1")
		Ω(err).Should(Succeed())
		defer session.Close()

		// ensure the test database was deleted during reset
		dbs, err := session.DatabaseNames()
		Ω(err).Should(Succeed())
		Ω(dbs).ShouldNot(ContainElement(mongo.DefaultTestDBName))
	})

	bdd.It("Fields", func() {
		Ω(testDb.Session).ShouldNot(BeNil())
		Ω(testDb.Database).ShouldNot(BeNil())
		Ω(testDb.Name).Should(Equal(mongo.DefaultTestDBName))
		Ω(testDb.Url()).Should(Equal(mongo.DbURL()))
	})

	bdd.It("Insert", func() {
		recs := []*Rec{
Example No. 7
		customDeploymentFixture = exutil.FixturePath("testdata", "custom-deployment.yaml")
		generationFixture       = exutil.FixturePath("testdata", "test-deployment.yaml")
		pausedDeploymentFixture = exutil.FixturePath("testdata", "paused-deployment.yaml")
		failedHookFixture       = exutil.FixturePath("testdata", "failing-pre-hook.yaml")
		brokenDeploymentFixture = exutil.FixturePath("testdata", "test-deployment-broken.yaml")
	)

	g.Describe("when run iteratively", func() {
		g.AfterEach(func() {
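			// Only dump diagnostics when the spec failed: log the DC, its replication controllers, and the deployer pods.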
			if !g.CurrentGinkgoTestDescription().Failed {
				return
			}

			if dc, rcs, pods, err := deploymentInfo(oc, "deployment-simple"); err == nil {
				e2e.Logf("DC: %#v", dc)
				e2e.Logf("  RCs: %#v", rcs)
				p, _ := deploymentPods(pods)
				for k, v := range p {
					for _, pod := range v {
						e2e.Logf("  Deployer: %s %#v", k, pod)
					}
				}
			}
		})

		g.It("should only deploy the last deployment [Conformance]", func() {
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
Example No. 8
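	// Clean up after the spec: delete the quota, every image referenced by the image streams, the streams themselves, and the shared project.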
	g.AfterEach(func() {
		g.By(fmt.Sprintf("Deleting quota %s", quotaName))
		oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Delete(quotaName)

		g.By("Deleting images")
		iss, err := oc.AdminREST().ImageStreams(oc.Namespace()).List(kapi.ListOptions{})
		o.Expect(err).NotTo(o.HaveOccurred())
		for _, is := range iss.Items {
			for _, history := range is.Status.Tags {
				for i := range history.Items {
					oc.AdminREST().Images().Delete(history.Items[i].Image)
				}
			}
			for _, tagRef := range is.Spec.Tags {
				switch tagRef.From.Kind {
				case "ImageStreamImage":
					nameParts := strings.Split(tagRef.From.Name, "@")
					if len(nameParts) != 2 {
						continue
					}
					imageName := nameParts[1]
					oc.AdminREST().Images().Delete(imageName)
				}
			}
			err := oc.AdminREST().ImageStreams(is.Namespace).Delete(is.Name)
			o.Expect(err).NotTo(o.HaveOccurred())
		}

		g.By("Deleting shared project")
		oc.AdminREST().Projects().Delete(oc.Namespace() + "-shared")
	})
Example No. 9
var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("jenkins-plugin", exutil.KubeConfigPath())
	var j *JenkinsRef
	var dcLogFollow *exec.Cmd
	var dcLogStdOut, dcLogStdErr *bytes.Buffer

	g.AfterEach(func() {
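		// Dump the Jenkins deployment logs, delete the Jenkins project, and flush the captured log-follow output.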
		ginkgolog("Jenkins DC description follows. If there were issues, check to see if there were any restarts in the jenkins pod.")
		exutil.DumpDeploymentLogs("jenkins", oc)

		// Destroy the Jenkins namespace
		oc.Run("delete").Args("project", j.namespace).Execute()
		if dcLogFollow != nil && dcLogStdOut != nil && dcLogStdErr != nil {
			ginkgolog("Waiting for Jenkins DC log follow to terminate")
			dcLogFollow.Process.Wait()
			ginkgolog("Jenkins server logs from test:\nstdout>\n%s\n\nstderr>\n%s\n\n", string(dcLogStdOut.Bytes()), string(dcLogStdErr.Bytes()))
			dcLogFollow = nil
		} else {
			ginkgolog("Logs were not captured!\n%v\n%v\n%v\n", dcLogFollow, dcLogStdOut, dcLogStdErr)
		}
	})

	g.BeforeEach(func() {
		testNamespace := oc.Namespace()

		jenkinsNamespace := oc.Namespace() + "-jenkins"
		g.By("Starting a Jenkins instance in namespace: " + jenkinsNamespace)

		oc.Run("new-project").Args(jenkinsNamespace).Execute()
Example No. 10
		g.BeforeEach(func() {
			var err error
			jsonTempDir, err = ioutil.TempDir(exutil.TestContext.OutputDir, "jenkins-kubernetes-")
			o.Expect(err).NotTo(o.HaveOccurred())

			// We need to prepare the templates first in order to use binary builds:
			// 1. remove BuildConfig triggers to not start build immediately after instantiating template,
			// 2. remove contextDir so that we can send just that directory as a binary, not whole repo.
			jenkinsMasterTemplate = patchTemplate(filepath.Join(jenkinsExampleDir, "jenkins-master-template.json"), jsonTempDir)
			jenkinsSlaveBuilderTemplate = patchTemplate(filepath.Join(jenkinsExampleDir, "jenkins-slave-template.json"), jsonTempDir)
		})

		g.AfterEach(func() {
			if len(jsonTempDir) > 0 {
				os.RemoveAll(jsonTempDir)
			}
		})

		g.It("by creating slave from existing builder and adding it to Jenkins master", func() {

			g.By("create the jenkins slave builder template")
			err := oc.Run("create").Args("-f", jenkinsSlaveBuilderTemplate).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("create the jenkins master template")
			err = oc.Run("create").Args("-f", jenkinsMasterTemplate).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("instantiate the slave template")
			err = oc.Run("new-app").Args("--template", "jenkins-slave-builder").Execute()
Example No. 11
			log += msg + "\n"
		}
		assertLog = func(expected string) {
			Ω(log).Should(Equal(expected))
			log = ""
		}
	)

	bdd.BeforeEach(func() {
		log = ""
	})

	bdd.AfterEach(func() {
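		// Clear any state registered with the package and leave it disabled for the next spec.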
		ClearInternal()

		if Enabled() {
			Disable()
		}
	})

	bdd.It("One", func() {
		Register(func() {
			appendLog("onReset")
		}, func() {
			appendLog("onRecover")
		})
		assertLog("onRecover\n")

		Enable()
		assertLog("")
Example No. 12
)

var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replication (with petset)", func() {
	defer g.GinkgoRecover()

	const templatePath = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/examples/petset/mongodb-petset-persistent.yaml"

	oc := exutil.NewCLI("mongodb-petset-replica", exutil.KubeConfigPath()).Verbose()

	g.Describe("creating from a template", func() {
		g.AfterEach(func() {
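			// Collect the logs of each replica set member pod; skip pods whose logs cannot be retrieved.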
			for i := 0; i < 3; i++ {
				pod := fmt.Sprintf("mongodb-replicaset-%d", i)
				podLogs, err := oc.Run("logs").Args(pod, "--timestamps").Output()
				if err != nil {
					ginkgolog("error retrieving pod logs for %s: %v", pod, err)
					continue
				}
				ginkgolog("pod logs for %s:\n%s", podLogs, err)
			}
		})
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By("creating persistent volumes")
			_, err := exutil.SetupHostPathVolumes(
				oc.AdminKubeClient().Core().PersistentVolumes(),
				oc.Namespace(),
				"256Mi",
				3,
			)
Example No. 13
	var projectId = "project1"

	ginkgo.BeforeEach(func() {
		server, client = testSetup()
		networkSpec = &VirtualSubnetCreateSpec{
			Name:                 randomString(10, "go-sdk-virtual-network-"),
			Description:          "a test virtual network",
			RoutingType:          "ROUTED",
			Size:                 256,
			ReservedStaticIpSize: 20,
		}
	})

	ginkgo.AfterEach(func() {
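		// Delete any virtual subnets created under the test project before shutting down the mock server.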
		cleanVirtualSubnets(client, projectId)
		server.Close()
	})

	ginkgo.Describe("CreateDeleteVirtualSubnet", func() {
		ginkgo.It("Virtual subnet create and delete succeeds", func() {
			mockTask := createMockTask("CREATE_VIRTUAL_NETWORK", "COMPLETED")
			server.SetResponseJson(200, mockTask)

			task, err := client.VirtualSubnets.Create(projectId, networkSpec)
			task, err = client.Tasks.Wait(task.ID)
			ginkgo.GinkgoT().Log(err)

			gomega.Expect(err).Should(gomega.BeNil())
			gomega.Expect(task).ShouldNot(gomega.BeNil())
			gomega.Expect(task.Operation).Should(gomega.Equal("CREATE_VIRTUAL_NETWORK"))
			gomega.Expect(task.State).Should(gomega.Equal("COMPLETED"))
Example No. 14
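		// Write the service names to a temporary idling file; AfterEach removes it again.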
		targetFile, err := ioutil.TempFile(exutil.TestContext.OutputDir, "idling-services-")
		o.Expect(err).ToNot(o.HaveOccurred())
		defer targetFile.Close()
		idlingFile = targetFile.Name()
		_, err = targetFile.Write([]byte(strings.Join(serviceNames, "\n")))
		o.Expect(err).ToNot(o.HaveOccurred())

		g.By("Waiting for the endpoints to exist")
		serviceName := resources["service"][0]
		g.By("Waiting for endpoints to be up")
		err = waitForEndpointsAvailable(oc, serviceName)
		o.Expect(err).ToNot(o.HaveOccurred())
	})

	g.AfterEach(func() {
		g.By("Cleaning up the idling file")
		os.Remove(idlingFile)
	})

	g.Describe("idling", func() {
		g.Context("with a single service and DeploymentConfig [Conformance]", func() {
			g.BeforeEach(func() {
				framework.BeforeEach()
				fixture = echoServerFixture
			})

			g.It("should idle the service and DeploymentConfig properly", func() {
				checkSingleIdle(oc, idlingFile, resources, "deploymentconfig", "DeploymentConfig")
			})
		})

		g.Context("with a single service and ReplicationController", func() {