Code Example #1
File: tokens_controller.go Project: cjnygard/origin
// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	for i := 1; i <= NumServiceAccountRemoveReferenceRetries; i++ {
		if _, err := e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name); err != nil {
			if apierrors.IsConflict(err) && i < NumServiceAccountRemoveReferenceRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}
			glog.Error(err)
			break
		}
		break
	}
}
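The bounded retry loop above recurs throughout these examples: re-attempt the write a fixed number of times with a short jittered sleep, but only while the failure is a resource-version conflict; any other error is logged and the loop exits. A minimal sketch of that pattern as a reusable helper, assuming the same apierrors, time, and wait imports as the example above (retryOnConflict, maxRetries, and op are illustrative names, not part of the original code):

// retryOnConflict invokes op up to maxRetries times, sleeping briefly
// between attempts, but only while op keeps failing with a conflict.
// Success or any non-conflict error ends the loop immediately.
func retryOnConflict(maxRetries int, op func() error) error {
	var err error
	for i := 1; i <= maxRetries; i++ {
		err = op()
		if err == nil || !apierrors.IsConflict(err) {
			return err
		}
		if i < maxRetries {
			time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
		}
	}
	return err
}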
Code Example #2
func TestUpdateImageStreamOK(t *testing.T) {
	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.Data["/imagestreams/default/bar"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{
					ObjectMeta: kapi.ObjectMeta{Name: "bar", Namespace: "default"},
				}),
				ModifiedIndex: 2,
			},
		},
	}
	storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{})

	ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{})
	obj, created, err := storage.Update(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar", ResourceVersion: "1"}})
	if !errors.IsConflict(err) {
		t.Fatalf("unexpected non-error: %v", err)
	}
	obj, created, err = storage.Update(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar", ResourceVersion: "2"}})
	if err != nil || created {
		t.Fatalf("Unexpected non-nil error: %#v", err)
	}
	stream, ok := obj.(*api.ImageStream)
	if !ok {
		t.Errorf("Expected image stream, got %#v", obj)
	}
	if stream.Name != "bar" {
		t.Errorf("Unexpected stream returned: %#v", stream)
	}
}
Code Example #3
// secretDeleted reacts to a Secret being deleted by checking whether it is a dockercfg secret for a service account; if so,
// it removes the reference from the service account and deletes the token secret created to back the dockercfg secret
func (e *DockercfgDeletedController) secretDeleted(obj interface{}) {
	dockercfgSecret, ok := obj.(*api.Secret)
	if !ok {
		return
	}
	if _, exists := dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]; !exists {
		return
	}

	for i := 1; i <= NumServiceAccountUpdateRetries; i++ {
		if err := e.removeDockercfgSecretReference(dockercfgSecret); err != nil {
			if kapierrors.IsConflict(err) && i < NumServiceAccountUpdateRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}

			glog.Error(err)
			break
		}

		break
	}

	// remove the reference token secret
	if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]); (err != nil) && !kapierrors.IsNotFound(err) {
		util.HandleError(err)
	}
}
Code Example #4
File: rest_test.go Project: cjnygard/origin
func TestUpdateWithMismatchedResourceVersion(t *testing.T) {
	// Starting conditions
	associatedUser1, associatedIdentity1User1 := makeAssociated()
	unassociatedUser2 := makeUser()
	// Finishing conditions
	_, unassociatedIdentity1 := disassociate(associatedUser1, associatedIdentity1User1)

	expectedActions := []test.Action{
		// Existing mapping lookup
		{"GetIdentity", associatedIdentity1User1.Name},
		{"GetUser", associatedUser1.Name},
	}

	mapping := &api.UserIdentityMapping{
		ObjectMeta: kapi.ObjectMeta{ResourceVersion: "123"},
		Identity:   kapi.ObjectReference{Name: unassociatedIdentity1.Name},
		User:       kapi.ObjectReference{Name: unassociatedUser2.Name},
	}

	actions, _, _, rest := setupRegistries(associatedIdentity1User1, associatedUser1, unassociatedUser2)
	_, _, err := rest.Update(kapi.NewContext(), mapping)

	if err == nil {
		t.Errorf("Expected error")
	}
	if !kerrs.IsConflict(err) {
		t.Errorf("Unexpected error: %v", err)
	}
	verifyActions(expectedActions, *actions, t)
}
Code Example #5
func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Services(service.Namespace).Update(service)
		if err == nil {
			return nil
		}
		// If the object no longer exists, we don't want to recreate it. Just bail
		// out so that we can process the delete, which we should soon be receiving
		// if we haven't already.
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service that no longer exists: %v", err)
			return nil
		}
		// TODO: Try to resolve the conflict if the change was unrelated to load
		// balancers and public IPs. For now, just rely on the fact that we'll
		// also process the update that caused the resource version to change.
		if errors.IsConflict(err) {
			glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
			return nil
		}
		glog.Warningf("Failed to persist updated PublicIPs to service %s after creating its external load balancer: %v",
			service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}
Code Example #6
File: tokens_controller.go Project: cjnygard/origin
// createSecret creates a secret of type ServiceAccountToken for the given ServiceAccount
func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) error {
	// We don't want to update the cache's copy of the service account
	// so add the secret to a freshly retrieved copy of the service account
	serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
	liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name)
	if err != nil {
		return err
	}
	if liveServiceAccount.ResourceVersion != serviceAccount.ResourceVersion {
		// our view of the service account is not up to date
		// we'll get notified of an update event later and get to try again
		// this only prevents interactions between successive runs of this controller's event handlers, but that is useful
		glog.V(2).Infof("View of ServiceAccount %s/%s is not up to date, skipping token creation", serviceAccount.Namespace, serviceAccount.Name)
		return nil
	}

	// Build the secret
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)),
			Namespace: serviceAccount.Namespace,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccount.Name,
				api.ServiceAccountUIDKey:  string(serviceAccount.UID),
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{},
	}

	// Generate the token
	token, err := e.token.GenerateToken(*serviceAccount, *secret)
	if err != nil {
		return err
	}
	secret.Data[api.ServiceAccountTokenKey] = []byte(token)

	// Save the secret
	if _, err := e.client.Secrets(serviceAccount.Namespace).Create(secret); err != nil {
		return err
	}

	liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, api.ObjectReference{Name: secret.Name})

	_, err = serviceAccounts.Update(liveServiceAccount)
	if err != nil {
		// we weren't able to use the token, try to clean it up.
		glog.V(2).Infof("Deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
		if err := e.client.Secrets(secret.Namespace).Delete(secret.Name); err != nil {
			glog.Error(err) // if we fail, just log it
		}
	}
	if apierrors.IsConflict(err) {
		// nothing to do. We got a conflict, which means the service account was updated. We simply return; we'll get an update notification later
		return nil
	}

	return err
}
Code Example #7
func (t *Tester) TestUpdateFailsOnVersion(older runtime.Object) {
	_, _, err := t.storage.(rest.Updater).Update(api.NewDefaultContext(), older)
	if err == nil {
		t.Errorf("Expected an error, but we didn't get one")
	} else if !errors.IsConflict(err) {
		t.Errorf("Expected Conflict error, got '%v'", err)
	}
}
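When a test needs a conflict without driving real storage, it can fabricate one directly. A small sketch, assuming this era's errors.NewConflict(kind, name, err) constructor (later Kubernetes releases changed the signature to take a GroupResource):

func TestIsConflictMatchesNewConflict(t *testing.T) {
	// build a synthetic conflict StatusError and confirm the predicate recognizes it
	err := errors.NewConflict("Pod", "foo", fmt.Errorf("resource version mismatch"))
	if !errors.IsConflict(err) {
		t.Errorf("expected a conflict error, got: %v", err)
	}
}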
Code Example #8
File: tokens_controller.go Project: Ima8/kubernetes
// createSecret creates a secret of type ServiceAccountToken for the given ServiceAccount
func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) error {
	// Build the secret
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)),
			Namespace: serviceAccount.Namespace,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccount.Name,
				api.ServiceAccountUIDKey:  string(serviceAccount.UID),
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{},
	}

	// Generate the token
	token, err := e.token.GenerateToken(*serviceAccount, *secret)
	if err != nil {
		return err
	}
	secret.Data[api.ServiceAccountTokenKey] = []byte(token)
	if len(e.rootCA) > 0 {
		secret.Data[api.ServiceAccountRootCAKey] = e.rootCA
	}

	// Save the secret
	if _, err := e.client.Secrets(serviceAccount.Namespace).Create(secret); err != nil {
		return err
	}

	// We don't want to update the cache's copy of the service account
	// so add the secret to a freshly retrieved copy of the service account
	serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
	serviceAccount, err = serviceAccounts.Get(serviceAccount.Name)
	if err != nil {
		return err
	}
	serviceAccount.Secrets = append(serviceAccount.Secrets, api.ObjectReference{Name: secret.Name})

	_, err = serviceAccounts.Update(serviceAccount)
	if err != nil {
		// we weren't able to use the token, try to clean it up.
		glog.V(2).Infof("Deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
		if err := e.client.Secrets(secret.Namespace).Delete(secret.Name); err != nil {
			glog.Error(err) // if we fail, just log it
		}
	}
	if apierrors.IsConflict(err) {
		// nothing to do. We got a conflict, which means the service account was updated. We simply return; we'll get an update notification later
		return nil
	}

	return err
}
Code Example #9
File: edit.go Project: brandon-adams/origin
func (r *editResults) AddError(err error, info *resource.Info) string {
	switch {
	case errors.IsInvalid(err):
		r.edit = append(r.edit, info)
		reason := editReason{
			head: fmt.Sprintf("%s %s was not valid", info.Mapping.Kind, info.Name),
		}
		if err, ok := err.(kclient.APIStatus); ok {
			if details := err.Status().Details; details != nil {
				for _, cause := range details.Causes {
					reason.other = append(reason.other, cause.Message)
				}
			}
		}
		r.header.reasons = append(r.header.reasons, reason)
		return fmt.Sprintf("Error: the %s %s is invalid", info.Mapping.Kind, info.Name)
	case errors.IsNotFound(err):
		r.notfound++
		return fmt.Sprintf("Error: the %s %s has been deleted on the server", info.Mapping.Kind, info.Name)

	case errors.IsConflict(err):
		if r.delta != nil {
			v1 := info.ResourceVersion
			if perr := applyPatch(r.delta, info, r.version); perr != nil {
				// the error was related to the patching process
				if nerr, ok := perr.(patchError); ok {
					r.conflict++
					if jsonmerge.IsPreconditionFailed(nerr.error) {
						return fmt.Sprintf("Error: the API version of the provided object cannot be changed")
					}
					// the patch is in conflict, report to user and exit
					if jsonmerge.IsConflicting(nerr.error) {
						// TODO: read message
						return fmt.Sprintf("Error: a conflicting change was made to the %s %s on the server", info.Mapping.Kind, info.Name)
					}
					glog.V(4).Infof("Attempted to patch the resource, but failed: %v", perr)
					return fmt.Sprintf("Error: %v", err)
				}
				// try processing this server error and unset delta so we don't recurse
				r.delta = nil
				return r.AddError(err, info)
			}
			return fmt.Sprintf("Applied your changes to %s from version %s onto %s", info.Name, v1, info.ResourceVersion)
		}
		// no delta was available
		r.conflict++
		return fmt.Sprintf("Error: %v", err)
	default:
		r.retryable++
		return fmt.Sprintf("Error: the %s %s could not be updated: %v", info.Mapping.Kind, info.Name, err)
	}
}
Code Example #10
File: controller.go Project: ppitonak/origin
// done marks the stream as being processed due to an error or failure condition
func (c *ImportController) done(stream *api.ImageStream, reason string, retry int) error {
	if len(reason) == 0 {
		reason = util.Now().UTC().Format(time.RFC3339)
	}
	if stream.Annotations == nil {
		stream.Annotations = make(map[string]string)
	}
	stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = reason
	if _, err := c.streams.ImageStreams(stream.Namespace).Update(stream); err != nil && !errors.IsNotFound(err) {
		if errors.IsConflict(err) && retry > 0 {
			if stream, err := c.streams.ImageStreams(stream.Namespace).Get(stream.Name); err == nil {
				return c.done(stream, reason, retry-1)
			}
		}
		return err
	}
	return nil
}
Code Example #11
File: service.go Project: netbaby/kubernetes
// updateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 2
// times in the face of timeouts and conflicts.
func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
	var service *api.Service
	var err error
	for i := 0; i < 3; i++ {
		service, err = c.Services(namespace).Get(serviceName)
		if err != nil {
			return service, err
		}

		update(service)

		service, err = c.Services(namespace).Update(service)

		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			return service, err
		}
	}
	return service, err
}
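A call site only supplies the mutation; the get/update/retry mechanics stay inside updateService. A hypothetical usage sketch (the namespace, service name, and annotation key are illustrative):

// add an annotation; updateService re-gets and retries on conflict or server timeout
svc, err := updateService(c, api.NamespaceDefault, "frontend", func(s *api.Service) {
	if s.Annotations == nil {
		s.Annotations = map[string]string{}
	}
	s.Annotations["example.com/owner"] = "team-a"
})
if err != nil {
	glog.Errorf("failed to update service: %v", err)
} else {
	glog.Infof("updated service %s (resourceVersion %s)", svc.Name, svc.ResourceVersion)
}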
Code Example #12
File: etcd_test.go Project: Ima8/kubernetes
func TestEtcdCreateWithConflict(t *testing.T) {
	registry, bindingRegistry, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	fakeClient.TestIndex = true
	key, _ := registry.KeyFunc(ctx, "foo")
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNotFound,
	}

	_, err := registry.Create(ctx, validNewPod())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Suddenly, a wild scheduler appears:
	binding := api.Binding{
		ObjectMeta: api.ObjectMeta{
			Namespace:   api.NamespaceDefault,
			Name:        "foo",
			Annotations: map[string]string{"label1": "value1"},
		},
		Target: api.ObjectReference{Name: "machine"},
	}
	_, err = bindingRegistry.Create(ctx, &binding)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = bindingRegistry.Create(ctx, &binding)
	if err == nil || !errors.IsConflict(err) {
		t.Fatalf("expected resource conflict error, not: %v", err)
	}
}
Code Example #13
File: cancelbuild.go Project: nstrug/origin
// RunCancelBuild contains all the necessary functionality for the OpenShift cli cancel-build command
func RunCancelBuild(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if len(args) == 0 || len(args[0]) == 0 {
		return cmdutil.UsageError(cmd, "You must specify the name of a build to cancel.")
	}

	buildName := args[0]
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	buildClient := client.Builds(namespace)
	build, err := buildClient.Get(buildName)
	if err != nil {
		return err
	}

	if !isBuildCancellable(build) {
		return nil
	}

	// Print build logs before cancelling build.
	if cmdutil.GetFlagBool(cmd, "dump-logs") {
		opts := buildapi.BuildLogOptions{
			NoWait: true,
			Follow: false,
		}
		response, err := client.BuildLogs(namespace).Get(buildName, opts).Do().Raw()
		if err != nil {
			glog.Errorf("Could not fetch build logs for %s: %v", buildName, err)
		} else {
			glog.Infof("Build logs for %s:\n%v", buildName, string(response))
		}
	}

	// Mark build to be cancelled.
	for {
		build.Status.Cancelled = true
		if _, err = buildClient.Update(build); err != nil && errors.IsConflict(err) {
			build, err = buildClient.Get(buildName)
			if err != nil {
				return err
			}
			continue
		}
		if err != nil {
			return err
		}
		break
	}
	glog.V(2).Infof("Build %s was cancelled.", buildName)

	// Create a new build with the same configuration.
	if cmdutil.GetFlagBool(cmd, "restart") {
		request := &buildapi.BuildRequest{
			ObjectMeta: kapi.ObjectMeta{Name: build.Name},
		}
		newBuild, err := client.Builds(namespace).Clone(request)
		if err != nil {
			return err
		}
		glog.V(2).Infof("Restarted build %s.", buildName)
		fmt.Fprintf(out, "%s\n", newBuild.Name)
	} else {
		fmt.Fprintf(out, "%s\n", build.Name)
	}
	return nil
}
Code Example #14
func runAtomicPutTest(c *client.Client) {
	var svc api.Service
	err := c.Post().Resource("services").Body(
		&api.Service{
			TypeMeta: api.TypeMeta{
				APIVersion: latest.Version,
			},
			ObjectMeta: api.ObjectMeta{
				Name: "atomicservice",
				Labels: map[string]string{
					"name": "atomicService",
				},
			},
			Spec: api.ServiceSpec{
				Port: 12345,
				// This is here because validation requires it.
				Selector: map[string]string{
					"foo": "bar",
				},
			},
		},
	).Do().Into(&svc)
	if err != nil {
		glog.Fatalf("Failed creating atomicService: %v", err)
	}
	glog.Info("Created atomicService")
	testLabels := labels.Set{
		"foo": "bar",
	}
	for i := 0; i < 5; i++ {
		// a: z, b: y, etc...
		testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)})
	}
	var wg sync.WaitGroup
	wg.Add(len(testLabels))
	for label, value := range testLabels {
		go func(l, v string) {
			for {
				glog.Infof("Starting to update (%s, %s)", l, v)
				var tmpSvc api.Service
				err := c.Get().
					Resource("services").
					Name(svc.Name).
					Do().
					Into(&tmpSvc)
				if err != nil {
					glog.Errorf("Error getting atomicService: %v", err)
					continue
				}
				if tmpSvc.Spec.Selector == nil {
					tmpSvc.Spec.Selector = map[string]string{l: v}
				} else {
					tmpSvc.Spec.Selector[l] = v
				}
				glog.Infof("Posting update (%s, %s)", l, v)
				err = c.Put().Resource("services").Name(svc.Name).Body(&tmpSvc).Do().Error()
				if err != nil {
					if errors.IsConflict(err) {
						glog.Infof("Conflict: (%s, %s)", l, v)
						// This is what we expect.
						continue
					}
					glog.Errorf("Unexpected error putting atomicService: %v", err)
					continue
				}
				break
			}
			glog.Infof("Done update (%s, %s)", l, v)
			wg.Done()
		}(label, value)
	}
	wg.Wait()
	if err := c.Get().Resource("services").Name(svc.Name).Do().Into(&svc); err != nil {
		glog.Fatalf("Failed getting atomicService after writers are complete: %v", err)
	}
	if !reflect.DeepEqual(testLabels, labels.Set(svc.Spec.Selector)) {
		glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Spec.Selector)
	}
	glog.Info("Atomic PUTs work.")
}
Code Example #15
File: controller.go Project: nstrug/origin
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	hasChangeTrigger := false
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnConfigChange {
			hasChangeTrigger = true
			break
		}
	}

	if !hasChangeTrigger {
		glog.V(4).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.LatestVersion == 0 {
		_, _, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			c.recorder.Eventf(config, "failedCreate", "Couldn't create initial deployment: %v", err)
			return nil
		}
		glog.V(4).Infof("Created initial Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(2).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	// Detect template diffs, and return early if there aren't any changes.
	if kapi.Semantic.DeepEqual(config.Template.ControllerTemplate.Template, deployedConfig.Template.ControllerTemplate.Template) {
		glog.V(2).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	// There was a template diff, so generate a new config version.
	fromVersion, toVersion, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	return nil
}
Code Example #16
File: etcd_test.go Project: cjnygard/origin
func TestEtcdUpdate(t *testing.T) {
	podA := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec:       api.PodSpec{Host: "machine"},
	}
	podB := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault, ResourceVersion: "1"},
		Spec:       api.PodSpec{Host: "machine2"},
	}

	nodeWithPodA := tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value:         runtime.EncodeOrDie(testapi.Codec(), podA),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
		E: nil,
	}

	newerNodeWithPodA := tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value:         runtime.EncodeOrDie(testapi.Codec(), podA),
				ModifiedIndex: 2,
				CreatedIndex:  1,
			},
		},
		E: nil,
	}

	nodeWithPodB := tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value:         runtime.EncodeOrDie(testapi.Codec(), podB),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
		E: nil,
	}

	emptyNode := tools.EtcdResponseWithError{
		R: &etcd.Response{},
		E: tools.EtcdErrorNotFound,
	}

	table := map[string]struct {
		existing    tools.EtcdResponseWithError
		expect      tools.EtcdResponseWithError
		toUpdate    runtime.Object
		allowCreate bool
		objOK       func(obj runtime.Object) bool
		errOK       func(error) bool
	}{
		"normal": {
			existing: nodeWithPodA,
			expect:   nodeWithPodB,
			toUpdate: podB,
			errOK:    func(err error) bool { return err == nil },
		},
		"notExisting": {
			existing: emptyNode,
			expect:   emptyNode,
			toUpdate: podA,
			errOK:    func(err error) bool { return errors.IsNotFound(err) },
		},
		"createIfNotFound": {
			existing:    emptyNode,
			toUpdate:    podA,
			allowCreate: true,
			objOK:       hasCreated(t, podA),
			errOK:       func(err error) bool { return err == nil },
		},
		"outOfDate": {
			existing: newerNodeWithPodA,
			expect:   newerNodeWithPodA,
			toUpdate: podB,
			errOK:    func(err error) bool { return errors.IsConflict(err) },
		},
	}

	for name, item := range table {
		fakeClient, registry := NewTestGenericEtcdRegistry(t)
		registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = item.allowCreate
		path := etcdtest.AddPrefix("pods/foo")
		fakeClient.Data[path] = item.existing
		obj, _, err := registry.Update(api.NewDefaultContext(), item.toUpdate)
		if !item.errOK(err) {
			t.Errorf("%v: unexpected error: %v", name, err)
		}

		actual := fakeClient.Data[path]
		if item.objOK != nil {
			if !item.objOK(obj) {
				t.Errorf("%v: unexpected returned: %#v", name, obj)
			}
			actualObj, err := api.Scheme.Decode([]byte(actual.R.Node.Value))
			if err != nil {
				t.Errorf("unable to decode stored value for %#v", actual)
				continue
			}
			if !item.objOK(actualObj) {
				t.Errorf("%v: unexpected response: %#v", name, actual)
			}
		} else {
			if e, a := item.expect, actual; !api.Semantic.DeepDerivative(e, a) {
				t.Errorf("%v:\n%s", name, util.ObjectDiff(e, a))
			}
		}
	}
}
Code Example #17
File: pods.go Project: VinGorilla/kubernetes
		expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
			By("updating the pod")
			value = strconv.Itoa(time.Now().Nanosecond())
			if pod == nil { // on retries we need to re-get
				pod, err = podClient.Get(name)
				if err != nil {
					return false, fmt.Errorf("failed to get pod: %v", err)
				}
			}
			pod.Labels["time"] = value
			pod, err = podClient.Update(pod)
			if err == nil {
				Logf("Successfully updated pod")
				return true, nil
			}
			if errors.IsConflict(err) {
				Logf("Conflicting update to pod, re-get and re-update: %v", err)
				pod = nil // re-get it when we retry
				return false, nil
			}
			return false, fmt.Errorf("failed to update pod: %v", err)
		}))

		expectNoError(waitForPodRunning(c, pod.Name))

		By("verifying the updated pod is in kubernetes")
		pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
		Expect(len(pods.Items)).To(Equal(1))
		Logf("Pod update OK")
	})
Code Example #18
// createDockercfgSecretIfNeeded makes sure a generated dockercfg secret exists and is referenced by the ServiceAccount, both as an image pull secret and as a mountable secret
func (e *DockercfgController) createDockercfgSecretIfNeeded(serviceAccount *api.ServiceAccount) error {

	mountableDockercfgSecrets, imageDockercfgPullSecrets := getGeneratedDockercfgSecretNames(serviceAccount)

	// check which generated dockercfg secret references already exist on the service account
	foundPullSecret := len(imageDockercfgPullSecrets) > 0
	foundMountableSecret := len(mountableDockercfgSecrets) > 0

	switch {
	// if we already have a docker pull secret, simply return
	case foundPullSecret && foundMountableSecret:
		return nil

	case foundPullSecret && !foundMountableSecret, !foundPullSecret && foundMountableSecret:
		dockercfgSecretName := ""
		switch {
		case foundPullSecret:
			dockercfgSecretName = imageDockercfgPullSecrets.List()[0]
		case foundMountableSecret:
			dockercfgSecretName = mountableDockercfgSecrets.List()[0]
		}

		err := e.createDockerPullSecretReference(serviceAccount, dockercfgSecretName)
		if kapierrors.IsConflict(err) {
			// nothing to do. Our choice was stale or we got a conflict; either way the service account was updated. We simply return; we'll get an update notification later
			return nil
		}

		return err

	}

	// if we get here, then we need to create a new dockercfg secret
	// before we do expensive things, make sure our view of the service account is up to date
	if liveServiceAccount, err := e.client.ServiceAccounts(serviceAccount.Namespace).Get(serviceAccount.Name); err != nil {
		return err
	} else if liveServiceAccount.ResourceVersion != serviceAccount.ResourceVersion {
		// our view of the service account is not up to date
		// we'll get notified of an update event later and get to try again
		// this only prevents interactions between successive runs of this controller's event handlers, but that is useful
		glog.V(2).Infof("View of ServiceAccount %s/%s is not up to date, skipping dockercfg creation", serviceAccount.Namespace, serviceAccount.Name)
		return nil
	}

	dockercfgSecret, err := e.createDockerPullSecret(serviceAccount)
	if err != nil {
		return err
	}

	err = e.createDockerPullSecretReference(serviceAccount, dockercfgSecret.Name)
	if kapierrors.IsConflict(err) {
		// nothing to do. Our choice was stale or we got a conflict; either way the service account was updated. We simply return; we'll get an update notification later.
		// We do need to clean up our dockercfgSecret; token secrets are cleaned up by the controller handling service account dockercfg secret deletes.
		glog.V(2).Infof("Deleting secret %s/%s (err=%v)", dockercfgSecret.Namespace, dockercfgSecret.Name, err)
		if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name); (err != nil) && !kapierrors.IsNotFound(err) {
			util.HandleError(err)
		}
		return nil
	}

	return err
}
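Examples #6 and #18 share a second defense besides conflict retries: before doing expensive work, fetch a live copy of the object and compare ResourceVersions, skipping the work when the cached copy is stale. That guard, distilled into a sketch (isStale and its client parameter are illustrative names, not taken from the code above):

// isStale reports whether the cached ServiceAccount has been superseded on
// the server; callers skip their work and rely on the next watch event
// rather than acting on stale data.
func isStale(client kclient.Interface, cached *api.ServiceAccount) (bool, error) {
	live, err := client.ServiceAccounts(cached.Namespace).Get(cached.Name)
	if err != nil {
		return false, err
	}
	return live.ResourceVersion != cached.ResourceVersion, nil
}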
Code Example #19
File: integration.go Project: Bazooki/kubernetes
func runAtomicPutTest(c *client.Client) {
	svcBody := api.Service{
		TypeMeta: api.TypeMeta{
			APIVersion: c.APIVersion(),
		},
		ObjectMeta: api.ObjectMeta{
			Name: "atomicservice",
			Labels: map[string]string{
				"name": "atomicService",
			},
		},
		Spec: api.ServiceSpec{
			// This is here because validation requires it.
			Selector: map[string]string{
				"foo": "bar",
			},
			Ports: []api.ServicePort{{
				Port:     12345,
				Protocol: "TCP",
			}},
			SessionAffinity: "None",
		},
	}
	services := c.Services(api.NamespaceDefault)
	svc, err := services.Create(&svcBody)
	if err != nil {
		glog.Fatalf("Failed creating atomicService: %v", err)
	}
	glog.Info("Created atomicService")
	testLabels := labels.Set{
		"foo": "bar",
	}
	for i := 0; i < 5; i++ {
		// a: z, b: y, etc...
		testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)})
	}
	var wg sync.WaitGroup
	wg.Add(len(testLabels))
	for label, value := range testLabels {
		go func(l, v string) {
			for {
				glog.Infof("Starting to update (%s, %s)", l, v)
				tmpSvc, err := services.Get(svc.Name)
				if err != nil {
					glog.Errorf("Error getting atomicService: %v", err)
					continue
				}
				if tmpSvc.Spec.Selector == nil {
					tmpSvc.Spec.Selector = map[string]string{l: v}
				} else {
					tmpSvc.Spec.Selector[l] = v
				}
				glog.Infof("Posting update (%s, %s)", l, v)
				tmpSvc, err = services.Update(tmpSvc)
				if err != nil {
					if apierrors.IsConflict(err) {
						glog.Infof("Conflict: (%s, %s)", l, v)
						// This is what we expect.
						continue
					}
					glog.Errorf("Unexpected error putting atomicService: %v", err)
					continue
				}
				break
			}
			glog.Infof("Done update (%s, %s)", l, v)
			wg.Done()
		}(label, value)
	}
	wg.Wait()
	svc, err = services.Get(svc.Name)
	if err != nil {
		glog.Fatalf("Failed getting atomicService after writers are complete: %v", err)
	}
	if !reflect.DeepEqual(testLabels, labels.Set(svc.Spec.Selector)) {
		glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Spec.Selector)
	}
	glog.Info("Atomic PUTs work.")
}
Code Example #20
// HandleImageRepo processes the next ImageStream event.
func (c *ImageChangeController) HandleImageRepo(repo *imageapi.ImageStream) error {
	glog.V(4).Infof("Build image change controller detected ImageStream change %s", repo.Status.DockerImageRepository)

	// Loop through all build configurations and record if there was an error
	// instead of breaking the loop. The error will be returned in the end, so the
	// retry controller can retry. Any BuildConfigs that were processed successfully
	// should have had their LastTriggeredImageID updated, so the retry should result
	// in a no-op for them.
	hasError := false

	// TODO: this is inefficient
	for _, bc := range c.BuildConfigStore.List() {
		config := bc.(*buildapi.BuildConfig)

		var (
			from           *kapi.ObjectReference
			shouldBuild    = false
			triggeredImage = ""
		)
		// For every ImageChange trigger find the latest tagged image from the image repository and
		// invoke a build using that image id. A new build is triggered only if the latest tagged image id or pull spec
		// differs from the last triggered build recorded on the build config for that trigger
		for _, trigger := range config.Spec.Triggers {
			if trigger.Type != buildapi.ImageChangeBuildTriggerType {
				continue
			}
			if trigger.ImageChange.From != nil {
				from = trigger.ImageChange.From
			} else {
				from = buildutil.GetImageStreamForStrategy(config.Spec.Strategy)
			}

			if from == nil || from.Kind != "ImageStreamTag" {
				continue
			}
			fromStreamName, tag, ok := imageapi.SplitImageStreamTag(from.Name)
			if !ok {
				glog.Errorf("Invalid image stream tag: %s in build config %s/%s", from.Name, config.Name, config.Namespace)
				continue
			}

			fromNamespace := from.Namespace
			if len(fromNamespace) == 0 {
				fromNamespace = config.Namespace
			}

			// only trigger a build if this image repo matches the name and namespace of the ref in the build trigger
			// also do not trigger if the imagerepo does not have a valid DockerImageRepository value for us to pull
			// the image from
			if len(repo.Status.DockerImageRepository) == 0 || fromStreamName != repo.Name || fromNamespace != repo.Namespace {
				continue
			}

			// This split is safe because ImageStreamTag names always have the form
			// name:tag.
			latest := imageapi.LatestTaggedImage(repo, tag)
			if latest == nil {
				glog.V(4).Infof("unable to find tagged image: no image recorded for %s/%s:%s", repo.Namespace, repo.Name, tag)
				continue
			}
			glog.V(4).Infof("Found ImageStream %s/%s with tag %s", repo.Namespace, repo.Name, tag)

			// the newly resolved image ID must differ from the last triggered ID for this trigger to fire a build
			last := trigger.ImageChange.LastTriggeredImageID
			next := latest.DockerImageReference

			if len(last) == 0 || (len(next) > 0 && next != last) {
				triggeredImage = next
				shouldBuild = true
				// it doesn't really make sense to have multiple image change triggers any more,
				// so just exit the loop now
				break
			}
		}

		if shouldBuild {
			glog.V(4).Infof("Running build for BuildConfig %s/%s", config.Namespace, config.Name)
			// instantiate new build
			request := &buildapi.BuildRequest{
				ObjectMeta: kapi.ObjectMeta{
					Name:      config.Name,
					Namespace: config.Namespace,
				},
				TriggeredByImage: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: triggeredImage,
				},
				From: from,
			}
			if _, err := c.BuildConfigInstantiator.Instantiate(config.Namespace, request); err != nil {
				if kerrors.IsConflict(err) {
					util.HandleError(fmt.Errorf("unable to instantiate Build for BuildConfig %s/%s due to a conflicting update: %v", config.Namespace, config.Name, err))
				} else {
					util.HandleError(fmt.Errorf("error instantiating Build from BuildConfig %s/%s: %v", config.Namespace, config.Name, err))
				}
				hasError = true
				continue
			}
		}
	}
	if hasError {
		return fmt.Errorf("an error occurred processing 1 or more build configurations; the image change trigger for image stream %s will be retried", repo.Status.DockerImageRepository)
	}
	return nil
}
Code Example #21
File: controller.go Project: ppitonak/origin
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	hasChangeTrigger := false
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnConfigChange {
			hasChangeTrigger = true
			break
		}
	}

	if !hasChangeTrigger {
		glog.V(4).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.LatestVersion == 0 {
		_, _, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			// TODO: This needs to be handled by setting some state within the API so
			// users know why the trigger isn't doing anything.
			// https://github.com/openshift/origin/issues/3526
			return nil
		}
		glog.V(4).Infof("Created initial Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(2).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	newSpec, oldSpec := config.Template.ControllerTemplate.Template.Spec, deployedConfig.Template.ControllerTemplate.Template.Spec
	if kapi.Semantic.DeepEqual(oldSpec, newSpec) {
		glog.V(2).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	fromVersion, toVersion, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	return nil
}
Code Example #22
// Next processes a changed namespace and tries to allocate a UID range for it. If it is
// successful, an MCS label corresponding to the relative position of the range is also
// set.
func (c *Allocation) Next(ns *kapi.Namespace) error {
	tx := &tx{}
	defer tx.Rollback()

	if _, ok := ns.Annotations[security.UIDRangeAnnotation]; ok {
		return nil
	}

	if ns.Annotations == nil {
		ns.Annotations = make(map[string]string)
	}

	// do uid allocation
	block, err := c.uid.AllocateNext()
	if err != nil {
		return err
	}
	tx.Add(func() error { return c.uid.Release(block) })
	ns.Annotations[security.UIDRangeAnnotation] = block.String()
	if _, ok := ns.Annotations[security.MCSAnnotation]; !ok {
		if label := c.mcs(block); label != nil {
			ns.Annotations[security.MCSAnnotation] = label.String()
		}
	}

	// TODO: could use a client.GuaranteedUpdate/Merge function
	for i := 0; i < retryCount; i++ {
		_, err := c.client.Update(ns)
		if err == nil {
			// commit and exit
			tx.Commit()
			return nil
		}

		if errors.IsNotFound(err) {
			return nil
		}
		if !errors.IsConflict(err) {
			return err
		}
		newNs, err := c.client.Get(ns.Name)
		if errors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			return err
		}
		if changedAndSetAnnotations(ns, newNs) {
			return nil
		}

		// try again
		if newNs.Annotations == nil {
			newNs.Annotations = make(map[string]string)
		}
		newNs.Annotations[security.UIDRangeAnnotation] = ns.Annotations[security.UIDRangeAnnotation]
		newNs.Annotations[security.MCSAnnotation] = ns.Annotations[security.MCSAnnotation]
		ns = newNs
	}

	return fmt.Errorf("unable to allocate security info on %q after %d retries", ns.Name, retryCount)
}
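The tx value above acts as a small undo log: the allocation registers a release function, the deferred Rollback runs it if the namespace update never sticks, and Commit clears it once the update succeeds. A minimal sketch of what such a helper could look like; this is an assumption for illustration, not the project's actual implementation:

type tx struct {
	undo []func() error
}

// Add registers a compensating action to run on Rollback.
func (t *tx) Add(fn func() error) {
	t.undo = append(t.undo, fn)
}

// Commit discards the pending actions: the allocated resources are kept.
func (t *tx) Commit() {
	t.undo = nil
}

// Rollback runs pending actions in reverse order. It is a no-op after
// Commit, which makes the deferred call in Next safe on the success path.
func (t *tx) Rollback() {
	for i := len(t.undo) - 1; i >= 0; i-- {
		if err := t.undo[i](); err != nil {
			glog.Errorf("rollback action failed: %v", err)
		}
	}
	t.undo = nil
}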