Code Example #1
File: controller.go Project: thrasher-redhat/origin
func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   1.25,
		Steps:    6,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		itx := ipcmd.NewTransaction(kexec.New(), device)
		routes, err := itx.GetRoutes()
		if err != nil {
			return false, fmt.Errorf("could not get routes: %v", err)
		}
		for _, route := range routes {
			if strings.Contains(route, localSubnetCIDR) {
				itx.DeleteRoute(localSubnetCIDR)
				err = itx.EndTransaction()
				if err != nil {
					return false, fmt.Errorf("could not delete route: %v", err)
				}
				return true, nil
			}
		}
		return false, nil
	})

	if err != nil {
		glog.Errorf("Error removing %s route from dev %s: %v; if the route appears later it will not be deleted.", localSubnetCIDR, device, err)
	}
}
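
Every example on this page follows the same pattern: fill in a wait.Backoff (Duration, Factor, optional Jitter, Steps) and hand wait.ExponentialBackoff a condition func returning (done bool, err error); the helper stops as soon as the condition reports done or returns a non-nil error, and yields wait.ErrWaitTimeout once the steps are exhausted. The following is a minimal, self-contained sketch of that pattern, not taken from any project above; the ping helper is a made-up placeholder, and the import path assumes a recent Kubernetes tree (older trees vendor the same package as pkg/util/wait).

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait" // older trees: k8s.io/kubernetes/pkg/util/wait
)

// ping stands in for whatever flaky operation is being retried; it is
// illustrative only and not part of any example on this page.
func ping(attempt int) error {
	if attempt < 3 {
		return errors.New("not ready yet")
	}
	return nil
}

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial delay between attempts
		Factor:   2,                      // each subsequent delay is doubled
		Steps:    5,                      // at most 5 attempts overall
	}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		if ping(attempt) != nil {
			return false, nil // not done yet: back off and try again
		}
		return true, nil // done: stop retrying
	})
	switch {
	case err == wait.ErrWaitTimeout:
		fmt.Println("condition never succeeded within the configured steps")
	case err != nil:
		fmt.Println("condition failed permanently:", err)
	default:
		fmt.Println("succeeded after", attempt, "attempts")
	}
}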
Code Example #2
func (p *ProjectOptions) UpdatePodNetwork(nsName string, action sdnapi.PodNetworkAction, args string) error {
	// Get corresponding NetNamespace for given namespace
	netns, err := p.Oclient.NetNamespaces().Get(nsName)
	if err != nil {
		return err
	}

	// Apply pod network change intent
	sdnapi.SetChangePodNetworkAnnotation(netns, action, args)

	// Update NetNamespace object
	_, err = p.Oclient.NetNamespaces().Update(netns)
	if err != nil {
		return err
	}

	// Validate SDN controller applied or rejected the intent
	backoff := wait.Backoff{
		Steps:    15,
		Duration: 500 * time.Millisecond,
		Factor:   1.1,
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		updatedNetNs, err := p.Oclient.NetNamespaces().Get(netns.NetName)
		if err != nil {
			return false, err
		}

		if _, _, err = sdnapi.GetChangePodNetworkAnnotation(updatedNetNs); err == sdnapi.ErrorPodNetworkAnnotationNotFound {
			return true, nil
		}
		// Pod network change not applied yet
		return false, nil
	})
}
Code Example #3
File: cli.go Project: abhgupta/origin
// SetupProject creates a new project and assigns a random user to it.
// All resources will then be created within this project, and the Kubernetes
// E2E suite will destroy the project after the test case finishes.
func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string]string) (*kapi.Namespace, error) {
	newNamespace := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("extended-test-%s-", name))
	c.SetNamespace(newNamespace).ChangeUser(fmt.Sprintf("%s-user", c.Namespace()))
	e2e.Logf("The user is now %q", c.Username())

	e2e.Logf("Creating project %q", c.Namespace())
	_, err := c.REST().ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()},
	})
	if err != nil {
		e2e.Logf("Failed to create a project and namespace %q: %v", c.Namespace(), err)
		return nil, err
	}
	if err := wait.ExponentialBackoff(kclient.DefaultBackoff, func() (bool, error) {
		if _, err := c.KubeREST().Pods(c.Namespace()).List(kapi.ListOptions{}); err != nil {
			if apierrs.IsForbidden(err) {
				e2e.Logf("Waiting for user to have access to the namespace")
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	return &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()}}, err
}
Code Example #4
File: oidc.go Project: Cloven/minikube
func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	var res *http.Response
	var err error
	firstTime := true
	wait.ExponentialBackoff(backoff, func() (bool, error) {
		if !firstTime {
			// Not the first attempt: refresh the JWT before retrying.
			var jwt jose.JWT
			jwt, err = r.refresher.Refresh()
			if err != nil {
				// Refresh failed; stop retrying and surface the captured err.
				return true, nil
			}
			r.wrapped.SetJWT(jwt)
		} else {
			firstTime = false
		}

		res, err = r.wrapped.RoundTrip(req)
		if err != nil {
			// Transport error; stop retrying and surface the captured err.
			return true, nil
		}
		if res.StatusCode == http.StatusUnauthorized {
			// Token likely expired; back off and retry after refreshing.
			return false, nil
		}
		return true, nil
	})
	return res, err
}
Code Example #5
File: controller.go Project: thrasher-redhat/origin
func (plugin *OsdnNode) getLocalSubnet() (string, error) {
	var subnet *osapi.HostSubnet
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    8,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		subnet, err = plugin.osClient.HostSubnets().Get(plugin.hostName)
		if err == nil {
			return true, nil
		} else if kapierrors.IsNotFound(err) {
			glog.Warningf("Could not find an allocated subnet for node: %s, Waiting...", plugin.hostName)
			return false, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return "", fmt.Errorf("Failed to get subnet for this host: %s, error: %v", plugin.hostName, err)
	}

	if err = plugin.networkInfo.validateNodeIP(subnet.HostIP); err != nil {
		return "", fmt.Errorf("Failed to validate own HostSubnet: %v", err)
	}

	return subnet.Subnet, nil
}
Code Example #6
File: setup.go Project: LalatenduMohanty/origin
func (d *NetworkDiagnostic) waitForNetworkPod(nsName, prefix string, validPhases []kapi.PodPhase) error {
	backoff := wait.Backoff{
		Steps:    30,
		Duration: 500 * time.Millisecond,
		Factor:   1.1,
	}

	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		podList, err := d.getPodList(nsName, prefix)
		if err != nil {
			return false, err
		}

		for _, pod := range podList.Items {
			foundValidPhase := false
			for _, phase := range validPhases {
				if pod.Status.Phase == phase {
					foundValidPhase = true
					break
				}
			}
			if !foundValidPhase {
				return false, nil
			}
		}
		return true, nil
	})
}
Code Example #7
// waitForInactiveReplicaSets will wait until all passed replica sets are inactive and have been noticed
// by the replica set controller.
func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.ReplicaSet) error {
	for i := range oldRSs {
		rs := oldRSs[i]
		desiredGeneration := rs.Generation
		observedGeneration := rs.Status.ObservedGeneration
		specReplicas := rs.Spec.Replicas
		statusReplicas := rs.Status.Replicas

		if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
			replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
			if err != nil {
				return false, err
			}

			specReplicas = replicaSet.Spec.Replicas
			statusReplicas = replicaSet.Status.Replicas
			observedGeneration = replicaSet.Status.ObservedGeneration

			// TODO: We also need to wait for terminating replicas to actually terminate.
			// See https://github.com/kubernetes/kubernetes/issues/32567
			return observedGeneration >= desiredGeneration && replicaSet.Spec.Replicas == 0 && replicaSet.Status.Replicas == 0, nil
		}); err != nil {
			if err == wait.ErrWaitTimeout {
				err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
					rs.Name, observedGeneration >= desiredGeneration, specReplicas, statusReplicas)
			}
			return err
		}
	}
	return nil
}
Code Example #8
File: sdn_test.go Project: rootfs/origin
func updateNetNamespace(osClient *osclient.Client, netns *sdnapi.NetNamespace, action sdnapi.PodNetworkAction, args string) (*sdnapi.NetNamespace, error) {
	sdnapi.SetChangePodNetworkAnnotation(netns, action, args)
	_, err := osClient.NetNamespaces().Update(netns)
	if err != nil {
		return nil, err
	}

	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    5,
	}
	name := netns.Name
	err = utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		netns, err = osClient.NetNamespaces().Get(name)
		if err != nil {
			return false, err
		}

		if _, _, err := sdnapi.GetChangePodNetworkAnnotation(netns); err == sdnapi.ErrorPodNetworkAnnotationNotFound {
			return true, nil
		} else {
			return false, nil
		}
	})
	if err != nil {
		return nil, err
	}
	return netns, nil
}
Code Example #9
File: sdn_test.go Project: rootfs/origin
func createProject(osClient *osclient.Client, clientConfig *restclient.Config, name string) (*sdnapi.NetNamespace, error) {
	_, err := testserver.CreateNewProject(osClient, *clientConfig, name, name)
	if err != nil {
		return nil, fmt.Errorf("error creating project %q: %v", name, err)
	}

	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    5,
	}
	var netns *sdnapi.NetNamespace
	err = utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		netns, err = osClient.NetNamespaces().Get(name)
		if kapierrors.IsNotFound(err) {
			return false, nil
		} else if err != nil {
			return false, err
		}
		return true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("could not get NetNamepsace %q: %v", name, err)
	}
	return netns, nil
}
Code Example #10
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}
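
A hypothetical call site for the wrapper above might look like the sketch below (same file as the wrapper, so the time and wait imports are already present); lookupThing is an illustrative placeholder, not a function from the source. With Factor 3 and Steps 6, the delays between attempts grow roughly as initialDuration, 3x, 9x, and so on.

// Hypothetical usage of retryWithExponentialBackOff. lookupThing stands in
// for a flaky operation that eventually succeeds.
func waitForThing(lookupThing func() error) error {
	return retryWithExponentialBackOff(100*time.Millisecond, func() (bool, error) {
		if lookupThing() != nil {
			return false, nil // transient failure: retry after 100ms, 300ms, 900ms, ...
		}
		return true, nil // success: stop retrying
	})
}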
Code Example #11
File: node.go Project: php-coder/origin
// Detect whether we are upgrading from a pre-CNI openshift and clean up
// interfaces and iptables rules that are no longer required
func (node *OsdnNode) dockerPreCNICleanup() error {
	exec := kexec.New()
	itx := ipcmd.NewTransaction(exec, "lbr0")
	itx.SetLink("down")
	if err := itx.EndTransaction(); err != nil {
		// no cleanup required
		return nil
	}

	node.clearLbr0IptablesRule = true

	// Restart docker to kill old pods and make it use docker0 again.
	// "systemctl restart" will bail out (unnecessarily) in the
	// OpenShift-in-a-container case, so we work around that by sending
	// the messages by hand.
	if _, err := osexec.Command("dbus-send", "--system", "--print-reply", "--reply-timeout=2000", "--type=method_call", "--dest=org.freedesktop.systemd1", "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager.Reload").CombinedOutput(); err != nil {
		log.Error(err)
	}
	if _, err := osexec.Command("dbus-send", "--system", "--print-reply", "--reply-timeout=2000", "--type=method_call", "--dest=org.freedesktop.systemd1", "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager.RestartUnit", "string:'docker.service' string:'replace'").CombinedOutput(); err != nil {
		log.Error(err)
	}

	// Delete pre-CNI interfaces
	for _, intf := range []string{"lbr0", "vovsbr", "vlinuxbr"} {
		itx := ipcmd.NewTransaction(exec, intf)
		itx.DeleteLink()
		itx.IgnoreError()
		itx.EndTransaction()
	}

	// Wait until docker has restarted, since kubelet will exit if docker isn't running
	dockerClient, err := docker.NewClientFromEnv()
	if err != nil {
		return fmt.Errorf("failed to get docker client: %v", err)
	}
	err = kwait.ExponentialBackoff(
		kwait.Backoff{
			Duration: 100 * time.Millisecond,
			Factor:   1.2,
			Steps:    6,
		},
		func() (bool, error) {
			if err := dockerClient.Ping(); err != nil {
				// wait longer
				return false, nil
			}
			return true, nil
		})
	if err != nil {
		return fmt.Errorf("failed to connect to docker after SDN cleanup restart: %v", err)
	}

	log.Infof("Cleaned up left-over openshift-sdn docker bridge and interfaces")

	return nil
}
Code Example #12
File: recreate.go Project: CodeJuan/kubernetes
// waitForInactiveReplicaSets will wait until all passed replica sets are inactive and have been noticed
// by the replica set controller.
func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.ReplicaSet) error {
	for i := range oldRSs {
		rs := oldRSs[i]

		condition := rsutil.ReplicaSetIsInactive(dc.client.Extensions(), rs)
		if err := wait.ExponentialBackoff(unversionedclient.DefaultRetry, condition); err != nil {
			return err
		}
	}
	return nil
}
Code Example #13
File: setup.go Project: LalatenduMohanty/origin
func (d *NetworkDiagnostic) makeNamespaceGlobal(nsName string) error {
	backoff := wait.Backoff{
		Steps:    30,
		Duration: 500 * time.Millisecond,
		Factor:   1.1,
	}
	var netns *sdnapi.NetNamespace
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		netns, err = d.OSClient.NetNamespaces().Get(nsName)
		if kerrs.IsNotFound(err) {
			// NetNamespace not created yet
			return false, nil
		} else if err != nil {
			return false, err
		}
		return true, nil
	})
	if err != nil {
		return err
	}

	sdnapi.SetChangePodNetworkAnnotation(netns, sdnapi.GlobalPodNetwork, "")

	if _, err = d.OSClient.NetNamespaces().Update(netns); err != nil {
		return err
	}

	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		updatedNetNs, err := d.OSClient.NetNamespaces().Get(netns.NetName)
		if err != nil {
			return false, err
		}

		if _, _, err = sdnapi.GetChangePodNetworkAnnotation(updatedNetNs); err == sdnapi.ErrorPodNetworkAnnotationNotFound {
			return true, nil
		}
		// Pod network change not applied yet
		return false, nil
	})
}
Code Example #14
File: vnids_node.go Project: juanluisvaladas/origin
// Nodes asynchronously watch both NetNamespaces and services.
// NetNamespaces populate the VNID map, and service/pod setup depends on that map.
// If VNID map propagation from master to node is slow and service/pod setup
// tries to look up a VNID before it has arrived, the lookup may fail.
// Use this method to alleviate that problem: it retries the VNID lookup
// before giving up.
func (vmap *nodeVNIDMap) WaitAndGetVNID(name string) (uint32, error) {
	var id uint32
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   1.5,
		Steps:    5,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		id, err = vmap.GetVNID(name)
		return err == nil, nil
	})
	if err == nil {
		return id, nil
	} else {
		return 0, fmt.Errorf("Failed to find netid for namespace: %s in vnid map", name)
	}
}
Code Example #15
File: util.go Project: Cloven/minikube
// RetryOnConflict executes the provided function repeatedly, retrying if the server returns a conflicting
// write. Callers should preserve previous executions if they wish to retry changes. It performs an
// exponential backoff.
//
//     var pod *api.Pod
//     err := RetryOnConflict(DefaultBackoff, func() (err error) {
//       pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
//       return
//     })
//     if err != nil {
//       // may be conflict if max retries were hit
//       return err
//     }
//     ...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
	var lastConflictErr error
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		err := fn()
		switch {
		case err == nil:
			return true, nil
		case errors.IsConflict(err):
			lastConflictErr = err
			return false, nil
		default:
			return false, err
		}
	})
	if err == wait.ErrWaitTimeout {
		err = lastConflictErr
	}
	return err
}
Code Example #16
File: framework_test.go Project: ncdc/kubernetes
// waitTest waits until all tests, controllers, and other goroutines do their
// job and the list of current volumes/claims equals the list of expected
// volumes/claims (with ~10 second timeout).
func (r *volumeReactor) waitTest(test controllerTest) error {
	// start with 10 ms, multiply by 2 each step, 10 steps = 10.23 seconds
	backoff := wait.Backoff{
		Duration: 10 * time.Millisecond,
		Jitter:   0,
		Factor:   2,
		Steps:    10,
	}
	err := wait.ExponentialBackoff(backoff, func() (done bool, err error) {
		// Finish all operations that are in progress
		r.ctrl.runningOperations.Wait()

		// Return 'true' if the reactor reached the expected state
		err1 := r.checkClaims(test.expectedClaims)
		err2 := r.checkVolumes(test.expectedVolumes)
		if err1 == nil && err2 == nil {
			return true, nil
		}
		return false, nil
	})
	return err
}
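
The "10.23 seconds" figure in the comment above is the geometric sum of ten delays that start at 10 ms and double each step; a standalone sketch of that arithmetic (not from the source) is:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Sum ten delays starting at 10 ms and doubling each step:
	// 10 + 20 + ... + 5120 ms = 10 * (2^10 - 1) ms = 10.23 s.
	total := time.Duration(0)
	d := 10 * time.Millisecond
	for i := 0; i < 10; i++ {
		total += d
		d *= 2
	}
	fmt.Println(total) // prints 10.23s
}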
Code Example #17
File: controller.go Project: xgwang-zte/origin
func persistService(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
	backoff := wait.Backoff{
		Steps:    clientRetryCount,
		Duration: clientRetryInterval,
		Factor:   clientRetryFactor,
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		if targetStatus {
			_, err = client.Services(service.Namespace).UpdateStatus(service)
		} else {
			_, err = client.Services(service.Namespace).Update(service)
		}
		switch {
		case err == nil:
			return true, nil
		case kerrors.IsNotFound(err):
			// If the service no longer exists, we don't want to recreate
			// it. Just bail out so that we can process the delete, which
			// we should soon be receiving if we haven't already.
			glog.V(5).Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return true, nil
		case kerrors.IsConflict(err):
			// TODO: Try to resolve the conflict if the change was
			// unrelated to load balancer status. For now, just rely on
			// the fact that we'll also process the update that caused the
			// resource version to change.
			glog.V(5).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return true, nil
		default:
			err = fmt.Errorf("Failed to persist updated LoadBalancerStatus to service '%s/%s': %v",
				service.Namespace, service.Name, err)
			return false, err
		}
	})
}
Code Example #18
File: delegated.go Project: juanluisvaladas/origin
func (r *REST) waitForRoleBinding(namespace, name string) {
	// we have a rolebinding; now we check the cache we have to see if it has been updated with this rolebinding.
	// if you share a cache with our authorizer (you should), then this will let you know when the authorizer is ready.
	// it doesn't matter if this fails.  When the call returns, return.  If we have access, great.  If not, oh well.
	backoff := kclient.DefaultBackoff
	backoff.Steps = 6 // this effectively waits for 6-ish seconds
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		policyBindingList, _ := r.policyBindings.PolicyBindings(namespace).List(kapi.ListOptions{})
		for _, policyBinding := range policyBindingList.Items {
			for roleBindingName := range policyBinding.RoleBindings {
				if roleBindingName == name {
					return true, nil
				}
			}
		}

		return false, nil
	})

	if err != nil {
		glog.V(4).Infof("authorization cache failed to update for %v %v: %v", namespace, name, err)
	}
}
Code Example #19
File: webhook.go Project: CodeJuan/kubernetes
// WithExponentialBackoff will retry webhookFn 5 times with exponentially
// increasing backoff when a 429 or a 5xx response code is returned.
func (g *GenericWebhook) WithExponentialBackoff(webhookFn func() restclient.Result) restclient.Result {
	backoff := wait.Backoff{
		Duration: g.initialBackoff,
		Factor:   1.5,
		Jitter:   0.2,
		Steps:    5,
	}
	var result restclient.Result
	wait.ExponentialBackoff(backoff, func() (bool, error) {
		result = webhookFn()
		// Return from Request.Do() errors immediately.
		if err := result.Error(); err != nil {
			return false, err
		}
		// Retry 429s, and 5xxs.
		var statusCode int
		if result.StatusCode(&statusCode); statusCode == 429 || statusCode >= 500 {
			return false, nil
		}
		return true, nil
	})
	return result
}
Code Example #20
File: webhook.go Project: ncdc/kubernetes
// WithExponentialBackoff will retry webhookFn() up to 5 times with exponentially increasing backoff when
// it returns an error for which apierrors.SuggestsClientDelay() or apierrors.IsInternalError() returns true.
func WithExponentialBackoff(initialBackoff time.Duration, webhookFn func() error) error {
	backoff := wait.Backoff{
		Duration: initialBackoff,
		Factor:   1.5,
		Jitter:   0.2,
		Steps:    5,
	}

	var err error
	wait.ExponentialBackoff(backoff, func() (bool, error) {
		err = webhookFn()
		if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
			return false, nil
		}
		if apierrors.IsInternalError(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return true, nil
	})
	return err
}
Code Example #21
File: rest.go Project: RomainVabre/origin
// Create registers a new image (if it doesn't exist) and updates the
// specified ImageStream's tags. If attempts to update the ImageStream fail
// with a resource conflict, the update will be retried if the newer
// ImageStream has no tag diffs from the previous state. If tag diffs are
// detected, the conflict error is returned.
func (s *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	mapping := obj.(*api.ImageStreamMapping)

	stream, err := s.findStreamForMapping(ctx, mapping)
	if err != nil {
		return nil, err
	}

	image := mapping.Image
	tag := mapping.Tag
	if len(tag) == 0 {
		tag = api.DefaultImageTag
	}

	if err := s.imageRegistry.CreateImage(ctx, &image); err != nil && !errors.IsAlreadyExists(err) {
		return nil, err
	}

	next := api.TagEvent{
		Created:              unversioned.Now(),
		DockerImageReference: image.DockerImageReference,
		Image:                image.Name,
	}

	err = wait.ExponentialBackoff(wait.Backoff{Steps: maxRetriesOnConflict}, func() (bool, error) {
		lastEvent := api.LatestTaggedImage(stream, tag)

		next.Generation = stream.Generation

		if !api.AddTagEventToImageStream(stream, tag, next) {
			// nothing actually changed
			return true, nil
		}
		api.UpdateTrackingTags(stream, tag, next)
		_, err := s.imageStreamRegistry.UpdateImageStreamStatus(ctx, stream)
		if err == nil {
			return true, nil
		}
		if !errors.IsConflict(err) {
			return false, err
		}
		// If the update conflicts, get the latest stream and check for tag
		// updates. If the latest tag hasn't changed, retry.
		latestStream, findLatestErr := s.findStreamForMapping(ctx, mapping)
		if findLatestErr != nil {
			return false, findLatestErr
		}

		// no previous tag
		if lastEvent == nil {
			// The tag hasn't changed, so try again with the updated stream.
			stream = latestStream
			return false, nil
		}

		// check for tag change
		newerEvent := api.LatestTaggedImage(latestStream, tag)
		// generation and creation time differences are ignored
		lastEvent.Generation = newerEvent.Generation
		lastEvent.Created = newerEvent.Created
		if kapi.Semantic.DeepEqual(lastEvent, newerEvent) {
			// The tag hasn't changed, so try again with the updated stream.
			stream = latestStream
			return false, nil
		}

		// The tag changed, so return the conflict error back to the client.
		return false, err
	})
	if err != nil {
		return nil, err
	}
	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
}
Code Example #22
func TestImageStreamTagsAdmission(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	kClient, client := setupImageStreamAdmissionTest(t)

	for i, name := range []string{imagetest.BaseImageWith1LayerDigest, imagetest.BaseImageWith2LayersDigest, imagetest.MiscImageDigest} {
		imageReference := fmt.Sprintf("openshift/test@%s", name)
		image := &imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
			},
			DockerImageReference: imageReference,
		}
		tag := fmt.Sprintf("tag%d", i+1)

		err := client.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
			ObjectMeta: kapi.ObjectMeta{
				Name: "src",
			},
			Tag:   tag,
			Image: *image,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	limit := kapi.ResourceList{imageapi.ResourceImageStreamTags: resource.MustParse("0")}
	lrClient := kClient.LimitRanges(testutil.Namespace())
	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)

	t.Logf("trying to create ImageStreamTag referencing isimage exceeding quota %v", limit)
	ist := &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag1",
		},
		Tag: &imageapi.TagReference{
			Name: "1",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamImage",
				Name: "src@" + imagetest.BaseImageWith1LayerDigest,
			},
		},
	}
	_, err := client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}

	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamTags, "1")

	t.Logf("trying to create ImageStreamTag referencing isimage below quota %v", limit)
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag1",
		},
		Tag: &imageapi.TagReference{
			Name: "1",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamImage",
				Name: "src@" + imagetest.BaseImageWith1LayerDigest,
			},
		},
	}
	// we may hit cache with old limit, let's retry in such a case
	err = wait.ExponentialBackoff(quotaExceededBackoff, func() (bool, error) {
		_, err := client.ImageStreamTags(testutil.Namespace()).Update(ist)
		if err != nil && !quotautil.IsErrorQuotaExceeded(err) {
			return false, err
		}
		return err == nil, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("trying to create ImageStreamTag exceeding quota %v", limit)
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag2",
		},
		Tag: &imageapi.TagReference{
			Name: "2",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamImage",
				Name: "src@" + imagetest.BaseImageWith2LayersDigest,
			},
		},
	}
	_, err = client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}

	t.Log("trying to create ImageStreamTag referencing isimage already referenced")
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag1again",
		},
		Tag: &imageapi.TagReference{
			Name: "tag1again",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamImage",
				Name: "src@" + imagetest.BaseImageWith1LayerDigest,
			},
		},
	}
	_, err = client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Log("trying to create ImageStreamTag in a new image stream")
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "new:misc",
		},
		Tag: &imageapi.TagReference{
			Name: "misc",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamImage",
				Name: "src@" + imagetest.MiscImageDigest,
			},
		},
	}
	_, err = client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamTags, "2")

	t.Logf("trying to create ImageStreamTag referencing istag below quota %v", limit)
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag2",
		},
		Tag: &imageapi.TagReference{
			Name: "2",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "src:tag2",
			},
		},
	}
	// we may hit cache with old limit, let's retry in such a case
	err = wait.ExponentialBackoff(quotaExceededBackoff, func() (bool, error) {
		_, err := client.ImageStreamTags(testutil.Namespace()).Update(ist)
		if err != nil && !quotautil.IsErrorQuotaExceeded(err) {
			return false, err
		}
		return err == nil, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("trying to create ImageStreamTag referencing istag exceeding quota %v", limit)
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag3",
		},
		Tag: &imageapi.TagReference{
			Name: "3",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "src:tag3",
			},
		},
	}
	_, err = client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err == nil {
		t.Fatal("creating image stream tag should have failed")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Fatalf("expected quota exceeded error, not: %v", err)
	}

	t.Log("trying to create ImageStreamTag referencing istag already referenced")
	ist = &imageapi.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dest:tag2again",
		},
		Tag: &imageapi.TagReference{
			Name: "tag2again",
			From: &kapi.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "src:tag2",
			},
		},
	}
	_, err = client.ImageStreamTags(testutil.Namespace()).Update(ist)
	if err != nil {
		t.Fatal(err)
	}
}
Code Example #23
func TestImageStreamAdmitSpecUpdate(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	kClient, client := setupImageStreamAdmissionTest(t)

	for i, name := range []string{imagetest.BaseImageWith1LayerDigest, imagetest.BaseImageWith2LayersDigest} {
		imageReference := fmt.Sprintf("openshift/test@%s", name)
		image := &imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
			},
			DockerImageReference: imageReference,
		}
		tag := fmt.Sprintf("tag%d", i+1)

		err := client.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
			ObjectMeta: kapi.ObjectMeta{
				Name: "src",
			},
			Tag:   tag,
			Image: *image,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	limit := kapi.ResourceList{
		imageapi.ResourceImageStreamTags:   resource.MustParse("0"),
		imageapi.ResourceImageStreamImages: resource.MustParse("0"),
	}
	lrClient := kClient.LimitRanges(testutil.Namespace())
	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)

	t.Logf("trying to create a new image stream with a tag exceeding limit %v", limit)
	_, err := client.ImageStreams(testutil.Namespace()).Create(
		newImageStreamWithSpecTags("is", map[string]kapi.ObjectReference{
			"tag1": {Kind: "ImageStreamTag", Name: "src:tag1"},
		}))

	if err == nil {
		t.Fatal("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %#+v", err)
	}
	for _, res := range []kapi.ResourceName{imageapi.ResourceImageStreamTags, imageapi.ResourceImageStreamImages} {
		if !strings.Contains(err.Error(), string(res)) {
			t.Errorf("expected resource %q in error string: %v", res, err)
		}
	}

	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamTags, "1")
	limit = bumpLimit(t, lrClient, limitRangeName, imageapi.ResourceImageStreamImages, "1")

	t.Logf("trying to create a new image stream with a tag below limit %v", limit)
	// we may hit cache with old limit, let's retry in such a case
	err = wait.ExponentialBackoff(quotaExceededBackoff, func() (bool, error) {
		_, err = client.ImageStreams(testutil.Namespace()).Create(
			newImageStreamWithSpecTags("is", map[string]kapi.ObjectReference{"tag1": {
				Kind: "ImageStreamTag",
				Name: "src:tag1",
			}}))
		if err != nil && !quotautil.IsErrorQuotaExceeded(err) {
			return false, err
		}
		return err == nil, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	t.Logf("adding new tag to image stream spec exceeding limit %v", limit)
	is, err := client.ImageStreams(testutil.Namespace()).Get("is")
	if err != nil {
		t.Fatal(err)
	}
	is.Spec.Tags["tag2"] = imageapi.TagReference{
		Name: "tag2",
		From: &kapi.ObjectReference{
			Kind: "ImageStreamTag",
			Name: "src:tag2",
		},
	}
	_, err = client.ImageStreams(testutil.Namespace()).Update(is)
	if err == nil {
		t.Fatalf("unexpected non-error")
	}
	if !quotautil.IsErrorQuotaExceeded(err) {
		t.Errorf("expected quota exceeded error, got instead: %v", err)
	}
	for _, res := range []kapi.ResourceName{imageapi.ResourceImageStreamTags, imageapi.ResourceImageStreamImages} {
		if !strings.Contains(err.Error(), string(res)) {
			t.Errorf("expected resource %q in error string: %v", res, err)
		}
	}

	t.Logf("re-tagging the image under different tag")
	is, err = client.ImageStreams(testutil.Namespace()).Get("is")
	if err != nil {
		t.Fatal(err)
	}
	is.Spec.Tags["1again"] = imageapi.TagReference{
		Name: "1again",
		From: &kapi.ObjectReference{
			Kind: "ImageStreamTag",
			Name: "src:tag1",
		},
	}
	_, err = client.ImageStreams(testutil.Namespace()).Update(is)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}