func TestAutomaticCreationOfPullSecrets(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName

	_, clusterAdminConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token
	saToken, err := waitForServiceAccountToken(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Errorf("token was not created")
	}

	// Get the matching dockercfg secret
	saPullSecret, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(saPullSecret) == 0 {
		t.Errorf("pull secret was not created")
	}
}
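// waitForServiceAccountToken and waitForServiceAccountPullSecret are referenced
// above but are not part of this listing. Below is a minimal sketch of the
// token helper, assuming the old kube client API used throughout these tests
// (ServiceAccounts(ns).Get, Secrets(ns).Get) and the standard service-account
// secret type and "token" data key; the real helper may differ.
func waitForServiceAccountToken(client *kclient.Client, ns, name string, tries int, interval time.Duration) (string, error) {
	for i := 0; i < tries; i++ {
		// Look up the service account and walk its referenced secrets.
		sa, err := client.ServiceAccounts(ns).Get(name)
		if err == nil {
			for _, ref := range sa.Secrets {
				secret, err := client.Secrets(ns).Get(ref.Name)
				if err != nil {
					continue
				}
				if secret.Type == api.SecretTypeServiceAccountToken {
					return string(secret.Data[api.ServiceAccountTokenKey]), nil
				}
			}
		}
		time.Sleep(interval)
	}
	// Give up quietly; callers check for an empty token.
	return "", nil
}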
// TestPushSecretName exercises one of the complex Build scenarios: you first
// build a Docker image using the Docker build strategy, which is later
// consumed by the Custom build strategy to verify that the 'PushSecretName'
// (Docker credentials) was successfully transported to the builder. The
// content of the Secret file is verified at the end.
func TestPushSecretName(t *testing.T) {
	namespace := testutil.RandomNamespace("secret")
	client, err := testutil.GetClusterAdminClient(testutil.KubeConfigPath())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	kclient, err := testutil.GetClusterAdminKubeClient(testutil.KubeConfigPath())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	stream := testutil.CreateSampleImageStream(namespace)
	if stream == nil {
		t.Fatal("Failed to create ImageStream")
	}
	defer testutil.DeleteSampleImageStream(stream, namespace)

	// Create Secret with dockercfg
	secret := testutil.GetSecretFixture("fixtures/test-secret.json")
	// TODO: Why do I need to set namespace here?
	secret.Namespace = namespace
	_, err = kclient.Secrets(namespace).Create(secret)
	if err != nil {
		t.Fatalf("Failed to create Secret: %v", err)
	}

	watcher, err := client.Builds(namespace).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer watcher.Stop()

	// First build the builder image (custom build builder)
	dockerBuild := testutil.GetBuildFixture("fixtures/test-secret-build.json")
	newDockerBuild, err := client.Builds(namespace).Create(dockerBuild)
	if err != nil {
		t.Fatalf("Unable to create Build %s: %v", dockerBuild.Name, err)
	}
	waitForComplete(newDockerBuild, watcher, t)

	// Now build the application image using custom build (run the previous image)
	// Custom build will copy the dockercfg file into the application image.
	customBuild := testutil.GetBuildFixture("fixtures/test-custom-build.json")
	imageName := fmt.Sprintf("%s/%s/%s", os.Getenv("REGISTRY_ADDR"), namespace, stream.Name)
	customBuild.Parameters.Strategy.CustomStrategy.Image = imageName
	newCustomBuild, err := client.Builds(namespace).Create(customBuild)
	if err != nil {
		t.Fatalf("Unable to create Build %s: %v", dockerBuild.Name, err)
	}
	waitForComplete(newCustomBuild, watcher, t)

	// Verify that the dockercfg file is there
	if err := testutil.VerifyImage(stream, "application", namespace, validatePushSecret); err != nil {
		t.Fatalf("Image verification failed: %v", err)
	}
}
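// waitForComplete is referenced above but not included in this listing. A
// hedged sketch: it drains the build watch until the named build reports
// BuildStatusComplete, failing the test if the build ends in a failed or
// errored state. The watch package import and the absence of a timeout are
// assumptions; the real helper may differ.
func waitForComplete(build *buildapi.Build, w watch.Interface, t *testing.T) {
	for event := range w.ResultChan() {
		current, ok := event.Object.(*buildapi.Build)
		if !ok || current.Name != build.Name {
			continue
		}
		switch current.Status {
		case buildapi.BuildStatusComplete:
			return
		case buildapi.BuildStatusFailed, buildapi.BuildStatusError:
			t.Fatalf("Build %s did not complete: %s", current.Name, current.Status)
		}
	}
}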
func TestOAuthDisabled(t *testing.T) {
	// Build master config
	masterOptions, err := testutil.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Disable OAuth
	masterOptions.OAuthConfig = nil

	// Start server
	clusterAdminKubeConfig, err := testutil.StartConfiguredMaster(masterOptions)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure cert auth still works
	namespaces, err := client.Namespaces().List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	if len(namespaces.Items) == 0 {
		t.Errorf("Expected namespaces, got none")
	}

	// Use the server and CA info
	anonConfig := kclient.Config{}
	anonConfig.Host = clientConfig.Host
	anonConfig.CAFile = clientConfig.CAFile
	anonConfig.CAData = clientConfig.CAData

	// Make sure we can't authenticate using OAuth
	if _, err := tokencmd.RequestToken(&anonConfig, nil, "username", "password"); err == nil {
		t.Error("Expected error, got none")
	}

}
func setup(t *testing.T) *client.Client {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	return clusterAdminClient
}
func setupBuildControllerTest(additionalBuildControllers, additionalBuildPodControllers, additionalImageChangeControllers int, t *testing.T) (*client.Client, *kclient.Client) {
	master, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	checkErr(t, err)

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	checkErr(t, err)

	if err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	openshiftConfig, err := origin.BuildMasterConfig(*master)
	checkErr(t, err)

	// Get the build controller clients, since those rely on service account tokens
	// We don't want to proceed with the rest of the test until those are available
	openshiftConfig.BuildControllerClients()

	for i := 0; i < additionalBuildControllers; i++ {
		openshiftConfig.RunBuildController()
	}
	for i := 0; i < additionalBuildPodControllers; i++ {
		openshiftConfig.RunBuildPodController()
	}
	for i := 0; i < additionalImageChangeControllers; i++ {
		openshiftConfig.RunBuildImageChangeTriggerController()
	}
	return clusterAdminClient, clusterAdminKubeClient
}
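// checkErr is used throughout these tests; a minimal sketch, assuming it does
// nothing more than abort the test on the first error.
func checkErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}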
func TestServiceAccountAuthorization(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName
	saUsername := serviceaccount.MakeUsername(saNamespace, saName)

	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminOSClient, err := testutil.GetClusterAdminClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token and build a client
	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Fatalf("token was not created")
	}
	cluster1SAClientConfig := kclient.Config{
		Host:        cluster1AdminConfig.Host,
		Prefix:      cluster1AdminConfig.Prefix,
		BearerToken: saToken,
		TLSClientConfig: kclient.TLSClientConfig{
			CAFile: cluster1AdminConfig.CAFile,
			CAData: cluster1AdminConfig.CAData,
		},
	}
	cluster1SAKubeClient, err := kclient.New(&cluster1SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	failNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(failNS); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster1
	addRoleOptions := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster1AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access
	// This tests authentication using the etcd-based token getter
	passNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(passNS); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Create a kubeconfig from the serviceaccount config
	cluster1SAKubeConfigFile, err := ioutil.TempFile(testutil.GetBaseDir(), "cluster1-service-account.kubeconfig")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(cluster1SAKubeConfigFile.Name())
	if err := writeClientConfigToKubeConfig(cluster1SAClientConfig, cluster1SAKubeConfigFile.Name()); err != nil {
		t.Fatalf("error creating kubeconfig: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testutil.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the service account credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = cluster1SAKubeConfigFile.Name()
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testutil.TestOptions{DeleteAllEtcdKeys: false}
	cluster2AdminConfigFile, err := testutil.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminOSClient, err := testutil.GetClusterAdminClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Build a client to use the same service account token against cluster2
	cluster2SAClientConfig := cluster1SAClientConfig
	cluster2SAClientConfig.Host = cluster2AdminConfig.Host
	cluster2SAKubeClient, err := kclient.New(&cluster2SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	// A forbidden error makes sure the token was recognized, and policy denied us
	// This exercises the client-based token getter
	// It also makes sure we don't loop back through the cluster2 kube proxy which would cause an auth loop
	failNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(failNS2); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster2
	addRoleOptions2 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster2AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions2.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access to cluster2
	passNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(passNS2); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the ns actually got created in cluster1
	if _, err := cluster1SAKubeClient.Namespaces().Get(passNS2.Name); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
func TestDNS(t *testing.T) {
	masterConfig, clientFile, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	localIP := net.ParseIP("127.0.0.1")
	var masterIP net.IP
	// verify service DNS entry is visible
	stop := make(chan struct{})
	util.Until(func() {
		m1 := &dns.Msg{
			MsgHdr:   dns.MsgHdr{Id: dns.Id(), RecursionDesired: false},
			Question: []dns.Question{{"kubernetes.default.svc.cluster.local.", dns.TypeA, dns.ClassINET}},
		}
		in, err := dns.Exchange(m1, masterConfig.DNSConfig.BindAddress)
		if err != nil {
			t.Logf("unexpected error: %v", err)
			return
		}
		if len(in.Answer) != 1 {
			t.Logf("unexpected answer: %#v", in)
			return
		}
		if a, ok := in.Answer[0].(*dns.A); ok {
			if a.A == nil {
				t.Fatalf("expected an A record with an IP: %#v", a)
			}
			masterIP = a.A
		} else {
			t.Fatalf("expected an A record: %#v", in)
		}
		t.Log(in)
		close(stop)
	}, 50*time.Millisecond, stop)

	client, err := testutil.GetClusterAdminKubeClient(clientFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	for {
		if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
			ObjectMeta: kapi.ObjectMeta{
				Name: "headless",
			},
			Spec: kapi.ServiceSpec{
				PortalIP: kapi.PortalIPNone,
				Ports:    []kapi.ServicePort{{Port: 443}},
			},
		}); err != nil {
			if errors.IsForbidden(err) {
				t.Logf("forbidden, sleeping: %v", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			t.Fatalf("unexpected error: %v", err)
		}
		if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
			ObjectMeta: kapi.ObjectMeta{
				Name: "headless",
			},
			Subsets: []kapi.EndpointSubset{{
				Addresses: []kapi.EndpointAddress{{IP: "172.0.0.1"}},
				Ports: []kapi.EndpointPort{
					{Port: 2345},
				},
			}},
		}); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		break
	}
	headlessIP := net.ParseIP("172.0.0.1")

	if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
		ObjectMeta: kapi.ObjectMeta{
			Name: "headless2",
		},
		Spec: kapi.ServiceSpec{
			PortalIP: kapi.PortalIPNone,
			Ports:    []kapi.ServicePort{{Port: 443}},
		},
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "headless2",
		},
		Subsets: []kapi.EndpointSubset{{
			Addresses: []kapi.EndpointAddress{{IP: "172.0.0.2"}},
			Ports: []kapi.EndpointPort{
				{Port: 2345, Name: "other"},
				{Port: 2346, Name: "http"},
			},
		}},
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	headless2IP := net.ParseIP("172.0.0.2")

	tests := []struct {
		dnsQuestionName   string
		recursionExpected bool
		retry             bool
		expect            []*net.IP
		srv               []*dns.SRV
	}{
		{ // wildcard resolution of a service works
			dnsQuestionName: "foo.kubernetes.default.svc.cluster.local.",
			expect:          []*net.IP{&masterIP},
		},
		{ // resolving endpoints of a service works
			dnsQuestionName: "_endpoints.kubernetes.default.svc.cluster.local.",
			expect:          []*net.IP{&localIP},
		},
		{ // openshift override works
			dnsQuestionName: "openshift.default.svc.cluster.local.",
			expect:          []*net.IP{&masterIP},
		},
		{ // headless service
			dnsQuestionName: "headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // specific port of a headless service
			dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // SRV record for that service
			dnsQuestionName: "headless.default.svc.cluster.local.",
			srv: []*dns.SRV{
				{
					Target: "unknown-port-2345.e1.headless.",
					Port:   2345,
				},
			},
		},
		{ // the SRV record resolves to the IP
			dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // headless 2 service
			dnsQuestionName: "headless2.default.svc.cluster.local.",
			expect:          []*net.IP{&headless2IP},
		},
		{ // SRV records for that service
			dnsQuestionName: "headless2.default.svc.cluster.local.",
			srv: []*dns.SRV{
				{
					Target: "http.e1.headless2.",
					Port:   2346,
				},
				{
					Target: "other.e1.headless2.",
					Port:   2345,
				},
			},
		},
		{ // the SRV record resolves to the IP
			dnsQuestionName: "other.e1.headless2.default.svc.cluster.local.",
			expect:          []*net.IP{&headless2IP},
		},
		{
			dnsQuestionName:   "www.google.com.",
			recursionExpected: false,
		},
	}
	for i, tc := range tests {
		qType := dns.TypeA
		if tc.srv != nil {
			qType = dns.TypeSRV
		}
		m1 := &dns.Msg{
			MsgHdr:   dns.MsgHdr{Id: dns.Id(), RecursionDesired: false},
			Question: []dns.Question{{tc.dnsQuestionName, qType, dns.ClassINET}},
		}
		ch := make(chan struct{})
		count := 0
		util.Until(func() {
			count++
			if count > 100 {
				t.Errorf("%d: failed after max iterations", i)
				close(ch)
				return
			}
			in, err := dns.Exchange(m1, masterConfig.DNSConfig.BindAddress)
			if err != nil {
				return
			}
			switch {
			case tc.srv != nil:
				if len(in.Answer) != len(tc.srv) {
					t.Logf("%d: incorrect number of answers: %#v", i, in)
					return
				}
			case tc.recursionExpected:
				if len(in.Answer) == 0 {
					t.Errorf("%d: expected forward resolution: %#v", i, in)
				}
				close(ch)
				return
			default:
				if len(in.Answer) != len(tc.expect) {
					t.Logf("%d: did not resolve or unexpected forward resolution: %#v", i, in)
					return
				}
			}
			for _, answer := range in.Answer {
				switch a := answer.(type) {
				case *dns.A:
					matches := false
					if a.A != nil {
						for _, expect := range tc.expect {
							if a.A.String() == expect.String() {
								matches = true
								break
							}
						}
					}
					if !matches {
						t.Errorf("A record does not match any expected answer: %v", a.A)
					}
				case *dns.SRV:
					matches := false
					for _, expect := range tc.srv {
						if expect.Port == a.Port && expect.Target == a.Target {
							matches = true
							break
						}
					}
					if !matches {
						t.Errorf("SRV record does not match any expected answer: %#v", a)
					}
				default:
					t.Errorf("expected an A or SRV record: %#v", in)
				}
			}
			t.Log(in)
			close(ch)
		}, 50*time.Millisecond, ch)
	}
}
func TestWebhookGitHubPushWithImage(t *testing.T) {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	err = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)

	if err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// create image stream
	imageStream := &imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{Name: "image-stream"},
		Spec: imageapi.ImageStreamSpec{
			DockerImageRepository: "registry:3000/integration/imageStream",
			Tags: map[string]imageapi.TagReference{
				"validTag": {
					From: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: "registry:3000/integration/imageStream:success",
					},
				},
			},
		},
	}
	if _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	ism := &imageapi.ImageStreamMapping{
		ObjectMeta: kapi.ObjectMeta{Name: "image-stream"},
		Tag:        "validTag",
		Image: imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: "myimage",
			},
			DockerImageReference: "registry:3000/integration/imageStream:success",
		},
	}
	if err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// create buildconfig
	buildConfig := mockBuildConfigImageParms("originalImage", "imageStream", "validTag")

	if _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to builds: %v", err)
	}
	defer watch.Stop()

	for _, s := range []string{
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret101/github",
	} {

		// trigger build event sending push notification
		postFile(clusterAdminClient.RESTClient.Client, "push", "pushevent.json", clusterAdminClientConfig.Host+s, http.StatusOK, t)

		event := <-watch.ResultChan()
		actual := event.Object.(*buildapi.Build)

		// FIXME: Build creation is fast, and in some situations we miss the
		// BuildStatusNew here. Note that this is not a bug; in the future we should
		// move this to a goroutine that captures all events.
		if actual.Status != buildapi.BuildStatusNew && actual.Status != buildapi.BuildStatusPending {
			t.Errorf("Expected %s or %s, got %s", buildapi.BuildStatusNew, buildapi.BuildStatusPending, actual.Status)
		}

		if actual.Parameters.Strategy.DockerStrategy.From.Name != "originalImage" {
			t.Errorf("Expected %s, got %s", "originalImage", actual.Parameters.Strategy.DockerStrategy.From.Name)
		}
	}
}
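// postFile is referenced above but not part of this listing. A minimal sketch,
// assuming it replays a recorded GitHub payload against the webhook URL and
// checks the response code; the fixture location and the exact headers the
// real helper sets are assumptions here.
func postFile(client *http.Client, event, filename, url string, expStatusCode int, t *testing.T) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatalf("failed to read payload file %s: %v", filename, err)
	}
	req, err := http.NewRequest("POST", url, bytes.NewReader(data))
	if err != nil {
		t.Fatalf("error creating POST request: %v", err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-Github-Event", event)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("failed posting webhook: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != expStatusCode {
		t.Errorf("unexpected response status: %d, expected %d", resp.StatusCode, expStatusCode)
	}
}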