Example #1
// Because openshift-sdn uses an overlay and doesn't need GCE Routes, we need to
// clear the NetworkUnavailable condition that kubelet adds to initial node
// status when using GCE.
// TODO: make upstream kubelet more flexible with overlays and GCE so this
// condition doesn't get added for network plugins that don't want it, and then
// we can remove this function.
func (master *OsdnMaster) clearInitialNodeNetworkUnavailableCondition(node *kapi.Node) {
	knode := node
	cleared := false
	resultErr := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		var err error

		if knode != node {
			knode, err = master.kClient.Nodes().Get(node.ObjectMeta.Name)
			if err != nil {
				return err
			}
		}

		// Let caller modify knode's status, then push to api server.
		_, condition := kapi.GetNodeCondition(&knode.Status, kapi.NodeNetworkUnavailable)
		if condition != nil && condition.Status != kapi.ConditionFalse && condition.Reason == "NoRouteCreated" {
			condition.Status = kapi.ConditionFalse
			condition.Reason = "RouteCreated"
			condition.Message = "openshift-sdn cleared kubelet-set NoRouteCreated"
			condition.LastTransitionTime = kapiunversioned.Now()
			knode, err = master.kClient.Nodes().UpdateStatus(knode)
			if err == nil {
				cleared = true
			}
		}
		return err
	})
	if resultErr != nil {
		utilruntime.HandleError(fmt.Errorf("Status update failed for local node: %v", resultErr))
	} else if cleared {
		log.Infof("Cleared node NetworkUnavailable/NoRouteCreated condition for %s", node.ObjectMeta.Name)
	}
}
Example #2
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case we failed on Get during retry loop
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Refresh rc to the latest resource version for the next retry. Since the update
		// failed, rc can't be trusted, so use oldRc.Name for the Get.
		if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: Value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil, the returned controller cannot be trusted; if it is nil, the
	// returned controller contains the applied update.
	return rc, err
}
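A minimal usage sketch (not part of the original sources): assuming c is a client.Interface and rc was fetched earlier, the applyUpdate closure mutates only the fields that should change and the helper absorbs the conflict retries. Package and import clauses are omitted to match the surrounding snippets.
func scaleRcExample(c client.Interface, rc *api.ReplicationController) error {
	// Hypothetical caller: bump the replica count; conflicting writes are retried internally.
	scaled, err := updateRcWithRetries(c, rc.Namespace, rc, func(update *api.ReplicationController) {
		update.Spec.Replicas = 3
	})
	if err != nil {
		return fmt.Errorf("failed to scale rc %q: %v", rc.Name, err)
	}
	glog.V(4).Infof("rc %q now requests %d replicas", scaled.Name, scaled.Spec.Replicas)
	return nil
}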
Example #3
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = config.Spec.Replicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}

		// Only update if necessary.
		var copied *kapi.ReplicationController
		if newReplicaCount != oldReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerScaleFailed",
					"Failed to scale replication controller %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			c.recorder.Eventf(config, kapi.EventTypeNormal, "ReplicationControllerScaled", "Scaled replication controller %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(updatedDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerCleanupFailed", "Couldn't clean up replication controllers: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
Example #4
File: test.go Project: o8o/origin
func addE2EServiceAccountsToSCC(c *kclient.Client, namespaces *kapi.NamespaceList, sccName string) {
	err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		scc, err := c.SecurityContextConstraints().Get(sccName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				return nil
			}
			return err
		}

		groups := []string{}
		for _, name := range scc.Groups {
			if !strings.Contains(name, "e2e-") {
				groups = append(groups, name)
			}
		}
		for _, ns := range namespaces.Items {
			if strings.HasPrefix(ns.Name, "e2e-") {
				groups = append(groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
			}
		}
		scc.Groups = groups
		if _, err := c.SecurityContextConstraints().Update(scc); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		FatalErr(err)
	}
}
Example #5
func (e *TokensController) syncSecret() {
	key, quit := e.syncSecretQueue.Get()
	if quit {
		return
	}
	defer e.syncSecretQueue.Done(key)

	// Track whether or not we should retry this sync
	retry := false
	defer func() {
		e.retryOrForget(e.syncSecretQueue, key, retry)
	}()

	secretInfo, err := parseSecretQueueKey(key)
	if err != nil {
		glog.Error(err)
		return
	}

	secret, err := e.getSecret(secretInfo.namespace, secretInfo.name, secretInfo.uid, false)
	switch {
	case err != nil:
		glog.Error(err)
		retry = true
	case secret == nil:
		// If the service account exists
		if sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, false); saErr == nil && sa != nil {
			// secret no longer exists, so delete references to this secret from the service account
			if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
				return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
			}); err != nil {
				glog.Error(err)
			}
		}
	default:
		// Ensure service account exists
		sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, true)
		switch {
		case saErr != nil:
			glog.Error(saErr)
			retry = true
		case sa == nil:
			// Delete token
			glog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name)
			if retriable, err := e.deleteToken(secretInfo.namespace, secretInfo.name, secretInfo.uid); err != nil {
				glog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
				retry = retriable
			}
		default:
			// Update token if needed
			if retriable, err := e.generateTokenIfNeeded(sa, secret); err != nil {
				glog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
				retry = retriable
			}
		}
	}
}
Example #6
func TestTriggers_manual(t *testing.T) {
	testutil.DeleteAllEtcdKeys()
	openshift := NewTestDeployOpenshift(t)
	defer openshift.Close()

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = testutil.Namespace()
	config.Triggers = []deployapi.DeploymentTriggerPolicy{
		{
			Type: deployapi.DeploymentTriggerManual,
		},
	}

	dc, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	watch, err := openshift.KubeClient.ReplicationControllers(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), dc.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer watch.Stop()

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		config, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Generate(config.Name)
		if err != nil {
			return err
		}
		if config.LatestVersion != 1 {
			t.Fatalf("Generated deployment should have version 1: %#v", config)
		}
		t.Logf("config(1): %#v", config)
		updatedConfig, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Update(config)
		if err != nil {
			return err
		}
		t.Logf("config(2): %#v", updatedConfig)
		return nil
	})
	if retryErr != nil {
		t.Fatal(retryErr)
	}
	event := <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := 1, deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}
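The first argument to RetryOnConflict is an ordinary wait.Backoff: most call sites in this collection pass the package defaults (kclient.DefaultRetry or kclient.DefaultBackoff), while the test above builds one inline. A hedged sketch of a custom backoff; the field values are purely illustrative.
func retryWithCustomBackoff(update func() error) error {
	backoff := wait.Backoff{
		Steps:    10,                    // give up after 10 conflicting attempts
		Duration: 10 * time.Millisecond, // initial delay between attempts
		Factor:   2.0,                   // double the delay after each attempt
		Jitter:   0.1,                   // add up to 10% random jitter per delay
	}
	return kclient.RetryOnConflict(backoff, update)
}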
Example #7
// ensureOpenShiftInfraNamespace is called as part of global policy initialization to ensure infra namespace exists
func (c *MasterConfig) ensureOpenShiftInfraNamespace() {
	ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace

	// Ensure namespace exists
	namespace, err := c.KubeClient().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: ns}})
	if kapierror.IsAlreadyExists(err) {
		// Get the persisted namespace
		namespace, err = c.KubeClient().Namespaces().Get(ns)
		if err != nil {
			glog.Errorf("Error getting namespace %s: %v", ns, err)
			return
		}
	} else if err != nil {
		glog.Errorf("Error creating namespace %s: %v", ns, err)
		return
	}

	roleAccessor := policy.NewClusterRoleBindingAccessor(c.ServiceAccountRoleBindingClient())
	for _, saName := range bootstrappolicy.InfraSAs.GetServiceAccounts() {
		_, err := c.KubeClient().ServiceAccounts(ns).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: saName}})
		if err != nil && !kapierror.IsAlreadyExists(err) {
			glog.Errorf("Error creating service account %s/%s: %v", ns, saName, err)
		}

		role, _ := bootstrappolicy.InfraSAs.RoleFor(saName)

		reconcileRole := &policy.ReconcileClusterRolesOptions{
			RolesToReconcile: []string{role.Name},
			Confirmed:        true,
			Union:            true,
			Out:              ioutil.Discard,
			RoleClient:       c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(),
		}
		if err := reconcileRole.RunReconcileClusterRoles(nil, nil); err != nil {
			glog.Errorf("Could not reconcile %v: %v\n", role.Name, err)
		}

		addRole := &policy.RoleModificationOptions{
			RoleName:            role.Name,
			RoleBindingAccessor: roleAccessor,
			Subjects:            []kapi.ObjectReference{{Namespace: ns, Name: saName, Kind: "ServiceAccount"}},
		}
		if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error { return addRole.AddRole() }); err != nil {
			glog.Errorf("Could not add %v service accounts to the %v cluster role: %v\n", saName, role.Name, err)
		} else {
			glog.V(2).Infof("Added %v service accounts to the %v cluster role\n", saName, role.Name)
		}
	}

	c.ensureNamespaceServiceAccountRoleBindings(namespace)
}
Example #8
// retryOnConflictError retries the specified fn if there was a conflict error
func retryOnConflictError(kubeClient client.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
	result = namespace
	err = client.RetryOnConflict(wait.Backoff{Steps: maxRetriesOnConflict}, func() error {
		if result == nil {
			if result, err = kubeClient.Namespaces().Get(namespace.Name); err != nil {
				return err
			}
		}
		if result, err = fn(kubeClient, result); err != nil {
			result = nil
		}
		return err
	})
	return
}
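A minimal usage sketch, assuming the updateNamespaceFunc signature implied by the call above, func(client.Interface, *api.Namespace) (*api.Namespace, error); the annotation key is hypothetical.
func markNamespaceFinalizedExample(kubeClient client.Interface, ns *api.Namespace) (*api.Namespace, error) {
	// Hypothetical caller: stamp an annotation, retrying the read-modify-update on conflicts.
	return retryOnConflictError(kubeClient, ns, func(c client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
		if namespace.Annotations == nil {
			namespace.Annotations = map[string]string{}
		}
		namespace.Annotations["example.io/finalized"] = "true"
		return c.Namespaces().Update(namespace)
	})
}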
Example #9
// UpdateConfigWithRetries will try to update a deployment config and ignore any update conflicts.
func UpdateConfigWithRetries(dn DeploymentConfigsNamespacer, namespace, name string, applyUpdate updateConfigFunc) (*deployapi.DeploymentConfig, error) {
	var config *deployapi.DeploymentConfig

	resultErr := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		var err error
		config, err = dn.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(config)
		config, err = dn.DeploymentConfigs(namespace).Update(config)
		return err
	})
	return config, resultErr
}
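A minimal usage sketch, assuming dn satisfies DeploymentConfigsNamespacer and that updateConfigFunc simply mutates the config in place, as the call above implies; the pause example is hypothetical.
func pauseConfigExample(dn DeploymentConfigsNamespacer, namespace, name string) error {
	// Hypothetical caller: flip the paused flag; each conflict retry re-Gets the config first.
	_, err := UpdateConfigWithRetries(dn, namespace, name, func(dc *deployapi.DeploymentConfig) {
		dc.Spec.Paused = true
	})
	return err
}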
Example #10
func ensureOAuthClient(client oauthapi.OAuthClient, clientRegistry clientregistry.Registry, preserveExistingRedirects bool) error {
	ctx := kapi.NewContext()
	_, err := clientRegistry.CreateClient(ctx, &client)
	if err == nil || !kerrs.IsAlreadyExists(err) {
		return err
	}

	return unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
		existing, err := clientRegistry.GetClient(ctx, client.Name)
		if err != nil {
			return err
		}

		// Ensure the correct challenge setting
		existing.RespondWithChallenges = client.RespondWithChallenges
		// Preserve an existing client secret
		if len(existing.Secret) == 0 {
			existing.Secret = client.Secret
		}

		// Preserve redirects for clients other than the CLI client
		// The CLI client doesn't care about the redirect URL, just the token or error fragment
		if preserveExistingRedirects {
			// Add in any redirects from the existing one
			// This preserves any additional customized redirects in the default clients
			redirects := sets.NewString(client.RedirectURIs...)
			for _, redirect := range existing.RedirectURIs {
				if !redirects.Has(redirect) {
					client.RedirectURIs = append(client.RedirectURIs, redirect)
					redirects.Insert(redirect)
				}
			}
		}
		existing.RedirectURIs = client.RedirectURIs

		// If the GrantMethod is present, keep it for compatibility
		// If it is empty, assign the requested strategy.
		if len(existing.GrantMethod) == 0 {
			existing.GrantMethod = client.GrantMethod
		}

		_, err = clientRegistry.UpdateClient(ctx, existing)
		return err
	})
}
Example #11
func (m *VirtualStorage) createRoleBinding(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.RoleBinding, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	roleBinding := obj.(*authorizationapi.RoleBinding)

	if !allowEscalation {
		if err := m.confirmNoEscalation(ctx, roleBinding); err != nil {
			return nil, err
		}
	}

	// Retry if we hit a conflict on the underlying PolicyBinding object
	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation)
		if err != nil {
			return err
		}

		_, exists := policyBinding.RoleBindings[roleBinding.Name]
		if exists {
			return kapierrors.NewAlreadyExists(authorizationapi.Resource("rolebinding"), roleBinding.Name)
		}

		roleBinding.ResourceVersion = policyBinding.ResourceVersion
		policyBinding.RoleBindings[roleBinding.Name] = roleBinding
		policyBinding.LastModified = unversioned.Now()

		return m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding)
	}); err != nil {
		return nil, err
	}

	return roleBinding, nil
}
Example #12
func addRoleToE2EServiceAccounts(c *client.Client, namespaces []kapi.Namespace, roleName string) {
	err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		for _, ns := range namespaces {
			if strings.HasPrefix(ns.Name, "e2e-") && ns.Status.Phase != kapi.NamespaceTerminating {
				sa := fmt.Sprintf("system:serviceaccount:%s:default", ns.Name)
				addRole := &policy.RoleModificationOptions{
					RoleNamespace:       "",
					RoleName:            roleName,
					RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(ns.Name, c),
					Users:               []string{sa},
				}
				if err := addRole.AddRole(); err != nil {
					e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		FatalErr(err)
	}
}
Example #13
func (m *VirtualStorage) createRole(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.Role, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)
	if !allowEscalation {
		if err := rulevalidation.ConfirmNoEscalation(ctx, m.Resource, role.Name, m.RuleResolver, m.CachedRuleResolver, authorizationinterfaces.NewLocalRoleAdapter(role)); err != nil {
			return nil, err
		}
	}

	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policy, err := m.EnsurePolicy(ctx)
		if err != nil {
			return err
		}
		if _, exists := policy.Roles[role.Name]; exists {
			return kapierrors.NewAlreadyExists(m.Resource, role.Name)
		}

		role.ResourceVersion = policy.ResourceVersion
		policy.Roles[role.Name] = role
		policy.LastModified = unversioned.Now()

		return m.PolicyStorage.UpdatePolicy(ctx, policy)
	}); err != nil {
		return nil, err
	}

	return role, nil
}
Example #14
func retryBuildStatusUpdate(build *api.Build, client client.BuildInterface, sourceRev *api.SourceRevision) error {
	return kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		// before updating, make sure we are using the latest version of the build
		latestBuild, err := client.Get(build.Name)
		if err != nil {
			// usually this means we failed to get resources due to missing
			// privileges
			return err
		}
		if sourceRev != nil {
			latestBuild.Spec.Revision = sourceRev
			latestBuild.ResourceVersion = ""
		}

		latestBuild.Status.Reason = build.Status.Reason
		latestBuild.Status.Message = build.Status.Message

		if _, err := client.UpdateDetails(latestBuild); err != nil {
			return err
		}
		return nil
	})
}
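A minimal usage sketch, assuming the build has already been mutated locally and buildClient is a client.BuildInterface for its namespace; the reason and message values are illustrative.
func recordBuildFailureExample(buildClient client.BuildInterface, build *api.Build) error {
	// Hypothetical caller: persist a failure reason and message, retrying on update conflicts.
	build.Status.Reason = "FetchSourceFailed"
	build.Status.Message = "failed to fetch the source from the remote repository"
	return retryBuildStatusUpdate(build, buildClient, nil)
}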
Example #15
// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
		return e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name)
	}); err != nil {
		util.HandleError(err)
	}
}
Example #16
func (c *ImportController) importImageStream(staleImageStream *api.ImageStream) {
	// if we're already in the workingset, that means that some thread is already trying to do an import for this.
	// This does NOT mean that we shouldn't attempt to do this work, only that we shouldn't attempt to do it now.
	if !c.addToWorkingSet(staleImageStream) {
		// If there isn't any other work in the queue, wait for a while so that we don't hot loop.
		// Then requeue to the end of the channel.  That allows other work to continue without delay
		if len(c.work) == 0 {
			time.Sleep(100 * time.Millisecond)
		}
		glog.V(5).Infof("requeuing %s to the worklist", workingSetKey(staleImageStream))
		c.work <- staleImageStream

		return
	}
	defer c.removeFromWorkingSet(staleImageStream)

	err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		liveImageStream, err := c.streams.ImageStreams(staleImageStream.Namespace).Get(staleImageStream.Name)
		// no work to do here
		if kapierrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			return err
		}
		if !needsImport(liveImageStream) {
			return nil
		}

		// if we're notified, do work and then start waiting again.
		return c.Next(liveImageStream)
	})

	if err != nil {
		util.HandleError(err)
	}
}
Example #17
func (m *VirtualStorage) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) {
	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		owningPolicyBinding, err := m.getPolicyBindingOwningRoleBinding(ctx, name)
		if kapierrors.IsNotFound(err) {
			return kapierrors.NewNotFound(authorizationapi.Resource("rolebinding"), name)
		}
		if err != nil {
			return err
		}

		if _, exists := owningPolicyBinding.RoleBindings[name]; !exists {
			return kapierrors.NewNotFound(authorizationapi.Resource("rolebinding"), name)
		}

		delete(owningPolicyBinding.RoleBindings, name)
		owningPolicyBinding.LastModified = unversioned.Now()

		return m.BindingRegistry.UpdatePolicyBinding(ctx, owningPolicyBinding)
	}); err != nil {
		return nil, err
	}

	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
}
Example #18
// Delete(ctx api.Context, name string) (runtime.Object, error)
func (m *VirtualStorage) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) {
	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policy, err := m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
		if kapierrors.IsNotFound(err) {
			return kapierrors.NewNotFound(m.Resource, name)
		}
		if err != nil {
			return err
		}

		if _, exists := policy.Roles[name]; !exists {
			return kapierrors.NewNotFound(m.Resource, name)
		}

		delete(policy.Roles, name)
		policy.LastModified = unversioned.Now()

		return m.PolicyStorage.UpdatePolicy(ctx, policy)
	}); err != nil {
		return nil, err
	}

	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
}
Example #19
// updatePodWithRetries retries updating the given pod on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updatePodWithRetries(c client.Interface, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) {
	// Deep copy the pod in case we failed on Get during retry loop
	obj, err := api.Scheme.Copy(pod)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
	}
	oldPod := obj.(*api.Pod)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(pod)
		if pod, e = c.Pods(namespace).Update(pod); e == nil {
			return
		}
		updateErr := e
		if pod, e = c.Pods(namespace).Get(oldPod.Name); e != nil {
			pod = oldPod
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil, the returned pod cannot be trusted; if it is nil, the
	// returned pod contains the applied update.
	return pod, err
}
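A minimal usage sketch mirroring the rc helper above; c is a client.Interface and the label key is hypothetical.
func labelPodExample(c client.Interface, pod *api.Pod) (*api.Pod, error) {
	// Hypothetical caller: add a label and let the helper re-Get and retry on conflicts.
	return updatePodWithRetries(c, pod.Namespace, pod, func(update *api.Pod) {
		if update.Labels == nil {
			update.Labels = map[string]string{}
		}
		update.Labels["example.io/touched"] = "true"
	})
}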
Example #20
// ensureNamespaceServiceAccountRoleBindings initializes roles for service accounts in the namespace
func (c *MasterConfig) ensureNamespaceServiceAccountRoleBindings(namespace *kapi.Namespace) {
	const ServiceAccountRolesInitializedAnnotation = "openshift.io/sa.initialized-roles"

	// Short-circuit if we're already initialized
	if namespace.Annotations[ServiceAccountRolesInitializedAnnotation] == "true" {
		return
	}

	hasErrors := false
	for _, binding := range bootstrappolicy.GetBootstrapServiceAccountProjectRoleBindings(namespace.Name) {
		addRole := &policy.RoleModificationOptions{
			RoleName:            binding.RoleRef.Name,
			RoleNamespace:       binding.RoleRef.Namespace,
			RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(namespace.Name, c.ServiceAccountRoleBindingClient()),
			Subjects:            binding.Subjects,
		}
		if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error { return addRole.AddRole() }); err != nil {
			glog.Errorf("Could not add service accounts to the %v role in the %q namespace: %v\n", binding.RoleRef.Name, namespace.Name, err)
			hasErrors = true
		}
	}

	// If we had errors, don't register initialization so we can try again
	if hasErrors {
		return
	}

	if namespace.Annotations == nil {
		namespace.Annotations = map[string]string{}
	}
	namespace.Annotations[ServiceAccountRolesInitializedAnnotation] = "true"
	// Log any error other than a conflict (the update will be retried and recorded again on next startup in that case)
	if _, err := c.KubeClient().Namespaces().Update(namespace); err != nil && !kapierror.IsConflict(err) {
		glog.Errorf("Error recording adding service account roles to %q namespace: %v", namespace.Name, err)
	}
}
Example #21
// Handle implements the loop that processes deployment configs. Since this controller started
// using caches, the provided config MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// There's nothing to reconcile until the version is nonzero.
	if config.Status.LatestVersion == 0 {
		return c.updateStatus(config, []kapi.ReplicationController{})
	}

	// Find all deployments owned by the deployment config.
	selector := deployutil.ConfigSelector(config.Name)
	existingDeployments, err := c.rcStore.ReplicationControllers(config.Namespace).List(selector)
	if err != nil {
		return err
	}

	// In case the deployment config has been marked for deletion, merely update its status with
	// the latest available information. Some deletions may take some time to complete, so there
	// is value in doing this.
	if config.DeletionTimestamp != nil {
		return c.updateStatus(config, existingDeployments)
	}

	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	// If the latest deployment doesn't exist yet, cancel any running
	// deployments to allow them to be superseded by the new config version.
	awaitingCancellations := false
	if !latestIsDeployed {
		for i := range existingDeployments {
			deployment := existingDeployments[i]
			// Skip deployments with an outcome.
			if deployutil.IsTerminatedDeployment(&deployment) {
				continue
			}
			// Cancel running deployments.
			awaitingCancellations = true
			if !deployutil.IsDeploymentCancelled(&deployment) {

				// Retry faster on conflicts
				var updatedDeployment *kapi.ReplicationController
				if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
					rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
					if kapierrors.IsNotFound(err) {
						return nil
					}
					if err != nil {
						return err
					}
					copied, err := deployutil.DeploymentDeepCopy(rc)
					if err != nil {
						return err
					}
					copied.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
					copied.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
					updatedDeployment, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
					return err
				}); err != nil {
					c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superceded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)
				} else {
					if updatedDeployment != nil {
						// replace the current deployment with the updated copy so that a future update has a chance at working
						existingDeployments[i] = *updatedDeployment
						c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superceded by version %d", deployment.Name, config.Status.LatestVersion)
					}
				}
			}
		}
	}
	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		return fmt.Errorf("found previous inflight deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config))
	}
	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestIsDeployed {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !deployutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config, existingDeployments)
		}

		return c.reconcileDeployments(existingDeployments, config)
	}
	// If the config is paused we shouldn't create new deployments for it.
	if config.Spec.Paused {
		// in order for revision history limit cleanup to work for paused
		// deployments, we need to trigger it here
		if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
			c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
		}

		return c.updateStatus(config, existingDeployments)
	}
	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := deployutil.MakeDeployment(config, c.codec)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.rn.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// If the deployment was already created, just move on. The cache could be
		// stale, or another process could have already handled this update.
		if kapierrors.IsAlreadyExists(err) {
			return c.updateStatus(config, existingDeployments)
		}
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCreated", "Created new deployment %q for version %d", created.Name, config.Status.LatestVersion)

	// As we've just created a new deployment, we need to make sure to clean
	// up old deployments if we have reached our deployment history quota
	existingDeployments = append(existingDeployments, *created)
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, existingDeployments)
}
Example #22
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
//
// Previously, scaling behavior was that the config replica count was used
// only for initial deployments and the active deployment had to be scaled up
// directly. To continue supporting that old behavior we must detect when the
// deployment has been directly manipulated, and if so, preserve the directly
// updated value and sync the config with the deployment.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	if !latestIsDeployed {
		// We shouldn't be reconciling if the latest deployment hasn't been
		// created; this is enforced on the calling side, but double checking
		// can't hurt.
		return c.updateStatus(config, existingDeployments)
	}
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)
	// Compute the replica count for the active deployment (even if the active
	// deployment doesn't exist). The active replica count is the value that
	// should be assigned to the config, to allow the replica propagation to
	// flow downward from the config.
	//
	// By default we'll assume the config replicas should be used to update the
	// active deployment except in special cases (like first sync or externally
	// updated deployments.)
	activeReplicas := config.Spec.Replicas
	source := "the deploymentConfig itself (no change)"

	activeDeploymentExists := activeDeployment != nil
	activeDeploymentIsLatest := activeDeploymentExists && activeDeployment.Name == latestDeployment.Name
	latestDesiredReplicas, latestHasDesiredReplicas := deployutil.DeploymentDesiredReplicas(latestDeployment)

	switch {
	case activeDeploymentExists && activeDeploymentIsLatest:
		// The active/latest deployment follows the config unless this is its first
		// sync or if an external change to the deployment replicas is detected.
		lastActiveReplicas, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if !hasLastActiveReplicas || lastActiveReplicas != activeDeployment.Spec.Replicas {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the latest/active deployment %q which was scaled directly or has not previously been synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case activeDeploymentExists && !activeDeploymentIsLatest:
		// The active/non-latest deployment follows the config if it was
		// previously synced; if this is the first sync, infer what the config
		// value should be based on either the latest desired or whatever the
		// deployment is currently scaled to.
		_, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if hasLastActiveReplicas {
			break
		}
		if latestHasDesiredReplicas {
			activeReplicas = latestDesiredReplicas
			source = fmt.Sprintf("the desired replicas of latest deployment %q which has not been previously synced", deployutil.LabelForDeployment(latestDeployment))
		} else if activeDeployment.Spec.Replicas > 0 {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the active deployment %q which has not been previously synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case !activeDeploymentExists && latestHasDesiredReplicas:
		// If there's no active deployment, use the latest desired, if available.
		activeReplicas = latestDesiredReplicas
		source = fmt.Sprintf("the desired replicas of latest deployment %q with no active deployment", deployutil.LabelForDeployment(latestDeployment))
	}

	// Bring the config in sync with the deployment. Once we know the config
	// accurately represents the desired replica count of the active deployment,
	// we can safely reconcile deployments.
	//
	// If the deployment config is test, never update the deployment config based
	// on deployments, since test behavior overrides user scaling.
	switch {
	case config.Spec.Replicas == activeReplicas:
	case config.Spec.Test:
		glog.V(4).Infof("Detected changed replicas for test deploymentConfig %q, ignoring that change", deployutil.LabelForDeploymentConfig(config))
	default:
		copied, err := deployutil.DeploymentConfigDeepCopy(config)
		if err != nil {
			return err
		}
		oldReplicas := copied.Spec.Replicas
		copied.Spec.Replicas = activeReplicas
		config, err = c.dn.DeploymentConfigs(copied.Namespace).Update(copied)
		if err != nil {
			return err
		}
		glog.V(4).Infof("Synced deploymentConfig %q replicas from %d to %d based on %s", deployutil.LabelForDeploymentConfig(config), oldReplicas, activeReplicas, source)
	}

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = activeReplicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}
		lastReplicas, hasLastReplicas := deployutil.DeploymentReplicas(&deployment)
		// Only update if necessary.
		var copied *kapi.ReplicationController
		if !hasLastReplicas || newReplicaCount != oldReplicaCount || lastReplicas != newReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied.Annotations[deployapi.DeploymentReplicasAnnotation] = strconv.Itoa(int(newReplicaCount))
				_, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentScaleFailed",
					"Failed to scale deployment %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			// Only report scaling events if we changed the replica count.
			if oldReplicaCount != newReplicaCount {
				c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentScaled",
					"Scaled deployment %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			} else {
				glog.V(4).Infof("Updated deployment %q replica annotation to match current replica count %d", deployutil.LabelForDeployment(copied), newReplicaCount)
			}
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
Example #23
// RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
}
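Even behind a one-liner like this, the closure handed to RetryOnConflict follows the same read-modify-write shape used throughout these examples: fetch the latest copy, mutate it, push the update, and return the error so a Conflict triggers another attempt. A condensed, hypothetical sketch of that shape; the service client and annotation key are placeholders.
func touchServiceExample(c client.Interface, namespace, name string) error {
	return client.RetryOnConflict(client.DefaultBackoff, func() error {
		// Always start from the freshest copy so a retry can win the conflict.
		svc, err := c.Services(namespace).Get(name)
		if err != nil {
			return err
		}
		if svc.Annotations == nil {
			svc.Annotations = map[string]string{}
		}
		svc.Annotations["example.io/touched"] = "true"
		// A Conflict returned here makes RetryOnConflict run the closure again.
		_, err = c.Services(namespace).Update(svc)
		return err
	})
}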
Example #24
func TestImageStreamAdmitStatusUpdate(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	kClient, client := setupImageStreamAdmissionTest(t)
	images := []*imageapi.Image{}

	for _, name := range []string{imagetest.BaseImageWith1LayerDigest, imagetest.BaseImageWith2LayersDigest} {
		imageReference := fmt.Sprintf("openshift/test@%s", name)
		image := &imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
			},
			DockerImageReference: imageReference,
		}
		images = append(images, image)

		_, err := client.Images().Create(image)
		if err != nil {
			t.Fatal(err)
		}
	}

	limit := kapi.ResourceList{
		imageapi.ResourceImageStreamTags:   resource.MustParse("0"),
		imageapi.ResourceImageStreamImages: resource.MustParse("0"),
	}
	lrClient := kClient.LimitRanges(testutil.Namespace())
	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)

	t.Logf("trying to create a new image stream with a tag exceeding limit %v", limit)
	_, err := client.ImageStreams(testutil.Namespace()).Create(&imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{
			Name: "is",
		},
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("adding new tag to image stream status exceeding limit %v", limit)
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Status.Tags["tag1"] = imageapi.TagEventList{
			Items: []imageapi.TagEvent{
				{
					DockerImageReference: images[0].DockerImageReference,
					Image:                images[0].Name,
				},
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).UpdateStatus(is)
		return err
	})
	if err == nil {
		t.Fatalf("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}
	if !strings.Contains(err.Error(), string(imageapi.ResourceImageStreamImages)) {
		t.Errorf("expected resource %q in error string: %v", imageapi.ResourceImageStreamImages, err)
	}

	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamImages, "1")

	t.Logf("adding new tag to image stream status below limit %v", limit)
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Status.Tags["tag1"] = imageapi.TagEventList{
			Items: []imageapi.TagEvent{
				{
					DockerImageReference: images[0].DockerImageReference,
					Image:                images[0].Name,
				},
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).UpdateStatus(is)
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("adding new tag to image stream status exceeding limit %v", limit)
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Status.Tags["tag2"] = imageapi.TagEventList{
			Items: []imageapi.TagEvent{
				{
					DockerImageReference: images[1].DockerImageReference,
					Image:                images[1].Name,
				},
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).UpdateStatus(is)
		return err
	})
	if err == nil {
		t.Fatalf("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}
	if !strings.Contains(err.Error(), string(imageapi.ResourceImageStreamImages)) {
		t.Errorf("expected resource %q in error string: %v", imageapi.ResourceImageStreamImages, err)
	}

	t.Logf("re-tagging the image under different tag")
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Status.Tags["1again"] = imageapi.TagEventList{
			Items: []imageapi.TagEvent{
				{
					DockerImageReference: images[0].DockerImageReference,
					Image:                images[0].Name,
				},
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).UpdateStatus(is)
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
Example #25
func TestImageStreamAdmitSpecUpdate(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	kClient, client := setupImageStreamAdmissionTest(t)

	for i, name := range []string{imagetest.BaseImageWith1LayerDigest, imagetest.BaseImageWith2LayersDigest} {
		imageReference := fmt.Sprintf("openshift/test@%s", name)
		image := &imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
			},
			DockerImageReference: imageReference,
		}
		tag := fmt.Sprintf("tag%d", i+1)

		err := client.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
			ObjectMeta: kapi.ObjectMeta{
				Name: "src",
			},
			Tag:   tag,
			Image: *image,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	limit := kapi.ResourceList{
		imageapi.ResourceImageStreamTags:   resource.MustParse("0"),
		imageapi.ResourceImageStreamImages: resource.MustParse("0"),
	}
	lrClient := kClient.LimitRanges(testutil.Namespace())
	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)

	t.Logf("trying to create a new image stream with a tag exceeding limit %v", limit)
	_, err := client.ImageStreams(testutil.Namespace()).Create(&imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{
			Name: "is",
		},
		Spec: imageapi.ImageStreamSpec{
			Tags: map[string]imageapi.TagReference{
				"tag1": {
					Name: "tag1",
					From: &kapi.ObjectReference{
						Kind: "ImageStreamTag",
						Name: "src:tag1",
					},
				},
			},
		},
	})
	if err == nil {
		t.Fatal("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}
	for _, res := range []kapi.ResourceName{imageapi.ResourceImageStreamTags, imageapi.ResourceImageStreamImages} {
		if !strings.Contains(err.Error(), string(res)) {
			t.Errorf("expected resource %q in error string: %v", res, err)
		}
	}

	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamTags, "1")
	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamImages, "1")

	t.Logf("trying to create a new image stream with a tag below limit %v", limit)
	// we may hit a cache with the old limit; retry in that case
	err = retryOnQuotaExceeded(t, 1, func() error {
		_, err = client.ImageStreams(testutil.Namespace()).Create(&imageapi.ImageStream{
			ObjectMeta: kapi.ObjectMeta{
				Name: "is",
			},
			Spec: imageapi.ImageStreamSpec{
				Tags: map[string]imageapi.TagReference{
					"tag1": {
						Name: "tag1",
						From: &kapi.ObjectReference{
							Kind: "ImageStreamTag",
							Name: "src:tag1",
						},
					},
				},
			},
		})
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("adding new tag to image stream spec exceeding limit %v", limit)
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Spec.Tags["tag2"] = imageapi.TagReference{
			Name: "tag2",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "src:tag2",
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).Update(is)
		return err
	})
	if err == nil {
		t.Fatalf("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}
	for _, res := range []kapi.ResourceName{imageapi.ResourceImageStreamTags, imageapi.ResourceImageStreamImages} {
		if !strings.Contains(err.Error(), string(res)) {
			t.Errorf("expected resource %q in error string: %v", res, err)
		}
	}

	t.Logf("re-tagging the image under different tag")
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		is, err := client.ImageStreams(testutil.Namespace()).Get("is")
		if err != nil {
			return err
		}
		is.Spec.Tags["1again"] = imageapi.TagReference{
			Name: "1again",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "src:tag1",
			},
		}
		_, err = client.ImageStreams(testutil.Namespace()).Update(is)
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
Example #26
func TestTriggers_manual(t *testing.T) {
	const namespace = "test-triggers-manual"

	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	checkErr(t, err)
	osClient, kubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	checkErr(t, err)

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = namespace
	config.Spec.Triggers = []deployapi.DeploymentTriggerPolicy{
		{
			Type: deployapi.DeploymentTriggerManual,
		},
	}

	dc, err := osClient.DeploymentConfigs(namespace).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	rcWatch, err := kubeClient.ReplicationControllers(namespace).Watch(labels.Everything(), fields.Everything(), dc.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer rcWatch.Stop()

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		config, err := osClient.DeploymentConfigs(namespace).Generate(config.Name)
		if err != nil {
			return err
		}
		if config.Status.LatestVersion != 1 {
			t.Fatalf("Generated deployment should have version 1: %#v", config)
		}
		t.Logf("config(1): %#v", config)
		updatedConfig, err := osClient.DeploymentConfigs(namespace).Update(config)
		if err != nil {
			return err
		}
		t.Logf("config(2): %#v", updatedConfig)
		return nil
	})
	if retryErr != nil {
		t.Fatal(retryErr)
	}
	event := <-rcWatch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := 1, deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}
Example #27
func TestTriggers_configChange(t *testing.T) {
	const namespace = "test-triggers-configchange"

	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	checkErr(t, err)
	osClient, kubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	checkErr(t, err)

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = namespace
	config.Spec.Triggers[0] = deploytest.OkConfigChangeTrigger()

	rcWatch, err := kubeClient.ReplicationControllers(namespace).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments %v", err)
	}
	defer rcWatch.Stop()

	// submit the initial deployment config
	if _, err := osClient.DeploymentConfigs(namespace).Create(config); err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v", err)
	}

	// verify the initial deployment exists
	event := <-rcWatch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}

	assertEnvVarEquals("ENV1", "VAL1", deployment, t)

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		// submit a new config with an updated environment variable
		config, err := osClient.DeploymentConfigs(namespace).Generate(config.Name)
		if err != nil {
			return err
		}

		config.Spec.Template.Spec.Containers[0].Env[0].Value = "UPDATED"

		// before we update the config, we need to update the state of the existing deployment
		// this is required to be done manually since the deployment and deployer pod controllers are not run in this test
		// get this live or conflicts will never end up resolved
		liveDeployment, err := kubeClient.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
		if err != nil {
			return err
		}
		liveDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)
		// update the deployment
		if _, err := kubeClient.ReplicationControllers(namespace).Update(liveDeployment); err != nil {
			return err
		}

		event = <-rcWatch.ResultChan()
		if e, a := watchapi.Modified, event.Type; e != a {
			t.Fatalf("expected watch event type %s, got %s", e, a)
		}

		if _, err := osClient.DeploymentConfigs(namespace).Update(config); err != nil {
			return err
		}
		return nil
	})
	if retryErr != nil {
		t.Fatal(retryErr)
	}

	var newDeployment *kapi.ReplicationController
	for {
		event = <-rcWatch.ResultChan()
		if event.Type != watchapi.Added {
			// Discard modifications which could be applied to the original RC, etc.
			continue
		}
		newDeployment = event.Object.(*kapi.ReplicationController)
		break
	}

	assertEnvVarEquals("ENV1", "UPDATED", newDeployment, t)

	if newDeployment.Name == deployment.Name {
		t.Fatalf("expected new deployment; old=%s, new=%s", deployment.Name, newDeployment.Name)
	}
}
Example #28
// Stop deletes the build configuration and all of the associated builds.
func (reaper *BuildConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	_, err := reaper.oc.BuildConfigs(namespace).Get(name)

	if err != nil {
		return err
	}

	var bcPotentialBuilds []buildapi.Build

	// Collect builds related to the config.
	builds, err := reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelector(name)})
	if err != nil {
		return err
	}

	bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...)

	// Collect deprecated builds related to the config.
	// TODO: Delete this block after BuildConfigLabelDeprecated is removed.
	builds, err = reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelectorDeprecated(name)})
	if err != nil {
		return err
	}

	bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...)

	// A map of builds associated with this build configuration
	bcBuilds := make(map[ktypes.UID]buildapi.Build)

	// Because of name length limits in the BuildConfigSelector, annotations are used to ensure
	// reliable selection of associated builds.
	for _, build := range bcPotentialBuilds {
		if build.Annotations != nil {
			if bcName, ok := build.Annotations[buildapi.BuildConfigAnnotation]; ok {
				// The annotation, if present, has the full build config name.
				if bcName != name {
					// If the name does not match exactly, the build is not truly associated with the build configuration
					continue
				}
			}
		}
		// Note that if there is no annotation, this is a deprecated build spec
		// and we choose to include it in the deletion having matched only the BuildConfigSelectorDeprecated

		// Use a map to union the lists returned by the contemporary & deprecated build queries
		// (there will be overlap between the lists, and we only want to try to delete each build once)
		bcBuilds[build.UID] = build
	}

	// If there are builds associated with this build configuration, pause it before attempting the deletion
	if len(bcBuilds) > 0 {

		// Add paused annotation to the build config pending the deletion
		err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {

			bc, err := reaper.oc.BuildConfigs(namespace).Get(name)
			if err != nil {
				return err
			}

			// Ignore if the annotation already exists
			if strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" {
				return nil
			}

			// Set the annotation and update
			if err := util.AddObjectAnnotations(bc, map[string]string{buildapi.BuildConfigPausedAnnotation: "true"}); err != nil {
				return err
			}
			_, err = reaper.oc.BuildConfigs(namespace).Update(bc)
			return err
		})

		if err != nil {
			return err
		}

	}

	// Warn the user if the BuildConfig won't get deleted after this point.
	bcDeleted := false
	defer func() {
		if !bcDeleted {
			glog.Warningf("BuildConfig %s/%s will not be deleted because not all associated builds could be deleted. You can try re-running the command or removing them manually", namespace, name)
		}
	}()

	// For the benefit of test cases, sort the UIDs so that the deletion order is deterministic
	buildUIDs := make([]string, 0, len(bcBuilds))
	for buildUID := range bcBuilds {
		buildUIDs = append(buildUIDs, string(buildUID))
	}
	sort.Strings(buildUIDs)

	errList := []error{}
	for _, buildUID := range buildUIDs {
		build := bcBuilds[ktypes.UID(buildUID)]
		if err := reaper.oc.Builds(namespace).Delete(build.Name); err != nil {
			glog.Warningf("Cannot delete Build %s/%s: %v", build.Namespace, build.Name, err)
			if !kerrors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}

	// Aggregate all errors
	if len(errList) > 0 {
		return kutilerrors.NewAggregate(errList)
	}

	if err := reaper.oc.BuildConfigs(namespace).Delete(name); err != nil {
		return err
	}

	bcDeleted = true
	return nil
}
Example #29
// Stop deletes the build configuration and all of the associated builds.
func (reaper *BuildConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	noBcFound := false
	noBuildFound := true

	// Add deletion pending annotation to the build config
	err := unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
		bc, err := reaper.oc.BuildConfigs(namespace).Get(name)
		if kerrors.IsNotFound(err) {
			noBcFound = true
			return nil
		}
		if err != nil {
			return err
		}

		// Ignore if the annotation already exists
		if strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" {
			return nil
		}

		// Set the annotation and update
		if err := util.AddObjectAnnotations(bc, map[string]string{buildapi.BuildConfigPausedAnnotation: "true"}); err != nil {
			return err
		}
		_, err = reaper.oc.BuildConfigs(namespace).Update(bc)
		return err
	})
	if err != nil {
		return err
	}

	// Warn the user if the BuildConfig won't get deleted after this point.
	bcDeleted := false
	defer func() {
		if !bcDeleted {
			glog.Warningf("BuildConfig %s/%s will not be deleted because not all associated builds could be deleted. You can try re-running the command or removing them manually", namespace, name)
		}
	}()

	// Collect builds related to the config.
	builds, err := reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelector(name)})
	if err != nil {
		return err
	}
	errList := []error{}
	for _, build := range builds.Items {
		noBuildFound = false
		if err := reaper.oc.Builds(namespace).Delete(build.Name); err != nil {
			glog.Warningf("Cannot delete Build %s/%s: %v", build.Namespace, build.Name, err)
			if !kerrors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}

	// Collect deprecated builds related to the config.
	// TODO: Delete this block after BuildConfigLabelDeprecated is removed.
	builds, err = reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelectorDeprecated(name)})
	if err != nil {
		return err
	}
	for _, build := range builds.Items {
		noBuildFound = false
		if err := reaper.oc.Builds(namespace).Delete(build.Name); err != nil {
			glog.Warningf("Cannot delete Build %s/%s: %v", build.Namespace, build.Name, err)
			if !kerrors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}

	// Aggregate all errors
	if len(errList) > 0 {
		return kutilerrors.NewAggregate(errList)
	}

	// Finally we can delete the BuildConfig
	if !noBcFound {
		if err := reaper.oc.BuildConfigs(namespace).Delete(name); err != nil {
			return err
		}
	}
	bcDeleted = true

	if noBcFound && noBuildFound {
		return kerrors.NewNotFound("BuildConfig", name)
	}

	return nil
}
Example #30
// Put creates or updates the named manifest.
func (r *repository) Put(ctx context.Context, manifest *manifest.SignedManifest) error {
	// Resolve the payload in the manifest.
	payload, err := manifest.Payload()
	if err != nil {
		return err
	}

	// Calculate digest
	dgst, err := digest.FromBytes(payload)
	if err != nil {
		return err
	}

	// Upload to openshift
	ism := imageapi.ImageStreamMapping{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: r.namespace,
			Name:      r.name,
		},
		Tag: manifest.Tag,
		Image: imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: dgst.String(),
				Annotations: map[string]string{
					imageapi.ManagedByOpenShiftAnnotation: "true",
				},
			},
			DockerImageReference: fmt.Sprintf("%s/%s/%s@%s", r.registryAddr, r.namespace, r.name, dgst.String()),
			DockerImageManifest:  string(payload),
		},
	}

	if err := r.registryClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil {
		// if the error was that the image stream wasn't found, try to auto provision it
		statusErr, ok := err.(*kerrors.StatusError)
		if !ok {
			log.Errorf("Error creating ImageStreamMapping: %s", err)
			return err
		}

		status := statusErr.ErrStatus
		if status.Code != http.StatusNotFound || status.Details.Kind != "imageStream" || status.Details.Name != r.name {
			log.Errorf("Error creating ImageStreamMapping: %s", err)
			return err
		}

		stream := imageapi.ImageStream{
			ObjectMeta: kapi.ObjectMeta{
				Name: r.name,
			},
		}

		client, ok := UserClientFrom(ctx)
		if !ok {
			log.Errorf("Error creating user client to auto provision image stream: Origin user client unavailable")
			return statusErr
		}

		if _, err := client.ImageStreams(r.namespace).Create(&stream); err != nil {
			log.Errorf("Error auto provisioning image stream: %s", err)
			return statusErr
		}

		// try to create the ISM again
		if err := unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
			return r.registryClient.ImageStreamMappings(r.namespace).Create(&ism)
		}); err != nil {
			log.Errorf("Error creating image stream mapping: %s", err)
			return err
		}
	}

	// Grab each json signature and store them.
	signatures, err := manifest.Signatures()
	if err != nil {
		return err
	}

	for _, signature := range signatures {
		if err := r.Signatures().Put(dgst, signature); err != nil {
			log.Errorf("Error storing signature: %s", err)
			return err
		}
	}

	return nil
}