Example No. 1
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement below returns on a case other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle them ourselves
		defer util.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})

		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()

	select {
	case result = <-ch:
		if status, ok := result.(*unversioned.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
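
The buffering comment above is the key detail in this pattern: with capacity 1, the worker goroutine can always complete its send and exit even when the select has already returned on the timeout (or panic) case, so an abandoned request does not leak a blocked goroutine. Below is a minimal, self-contained sketch of the same select-with-timeout shape, with hypothetical names and a plain error in place of the api.Status and errors.NewTimeoutError handling:

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout runs work in a goroutine and waits at most timeout for a result.
// The channels are buffered (capacity 1) so the goroutine can finish its send and
// exit even after the select below has already taken the timeout branch.
func runWithTimeout(timeout time.Duration, work func() (string, error)) (string, error) {
	resCh := make(chan string, 1)
	errCh := make(chan error, 1)
	go func() {
		if res, err := work(); err != nil {
			errCh <- err
		} else {
			resCh <- res
		}
	}()

	select {
	case res := <-resCh:
		return res, nil
	case err := <-errCh:
		return "", err
	case <-time.After(timeout):
		return "", errors.New("request did not complete within allowed duration")
	}
}

func main() {
	slow := func() (string, error) { time.Sleep(2 * time.Second); return "done", nil }
	_, err := runWithTimeout(100*time.Millisecond, slow)
	fmt.Println(err) // request did not complete within allowed duration
}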
Example No. 2
// Get returns a streamer resource with the contents of the build log
func (r *REST) Get(ctx kapi.Context, name string, opts runtime.Object) (runtime.Object, error) {
	buildLogOpts, ok := opts.(*api.BuildLogOptions)
	if !ok {
		return nil, errors.NewBadRequest("did not get an expected options.")
	}
	obj, err := r.Getter.Get(ctx, name)
	if err != nil {
		return nil, err
	}
	build := obj.(*api.Build)
	switch build.Status.Phase {
	// Build has not launched, wait until it runs
	case api.BuildPhaseNew, api.BuildPhasePending:
		if buildLogOpts.NoWait {
			glog.V(4).Infof("Build %s/%s is in %s state. No logs to retrieve yet.", build.Namespace, name, build.Status.Phase)
			// return empty content if not waiting for build
			return &genericrest.LocationStreamer{}, nil
		}
		glog.V(4).Infof("Build %s/%s is in %s state, waiting for Build to start", build.Namespace, name, build.Status.Phase)
		latest, ok, err := registry.WaitForRunningBuild(r.Watcher, ctx, build, r.Timeout)
		if err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", name, err))
		}
		switch latest.Status.Phase {
		case api.BuildPhaseError:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", name, buildutil.NoBuildLogsMessage))
		case api.BuildPhaseCancelled:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", name, buildutil.NoBuildLogsMessage))
		}
		if !ok {
			return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, r.Timeout), 1)
		}

	// The build was cancelled
	case api.BuildPhaseCancelled:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled. %s", name, buildutil.NoBuildLogsMessage))

	// An error occurred launching the build, return an error
	case api.BuildPhaseError:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s is in an error state. %s", name, buildutil.NoBuildLogsMessage))
	}
	// The container should be the default build container, so we leave the Container field blank
	buildPodName := buildutil.GetBuildPodName(build)
	logOpts := &kapi.PodLogOptions{
		Follow: buildLogOpts.Follow,
	}
	location, transport, err := pod.LogLocation(r.PodGetter, r.ConnectionInfo, ctx, buildPodName, logOpts)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NewNotFound("pod", buildPodName)
		}
		return nil, errors.NewBadRequest(err.Error())
	}
	return &genericrest.LocationStreamer{
		Location:    location,
		Transport:   transport,
		ContentType: "text/plain",
		Flush:       buildLogOpts.Follow,
	}, nil
}
Example No. 3
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement below returns on a case other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	go func() {
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()

	select {
	case result = <-ch:
		if status, ok := result.(*api.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
Example No. 4
// waitUntilFreshAndBlock waits until cache is at least as fresh as given <resourceVersion>.
// NOTE: This function acquires the lock and doesn't release it.
// You HAVE TO explicitly call w.RUnlock() after this function.
func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *util.Trace) error {
	startTime := w.clock.Now()
	go func() {
		// Wake us up when the time limit has expired.  The docs
		// promise that time.After (well, NewTimer, which it calls)
		// will wait *at least* the duration given. Since this goroutine
		// starts sometime after we record the start time, and
		// it will wake up the loop below sometime after the broadcast,
		// we don't need to worry about waking it up before the time
		// has expired accidentally.
		<-w.clock.After(blockTimeout)
		w.cond.Broadcast()
	}()

	w.RLock()
	if trace != nil {
		trace.Step("watchCache locked acquired")
	}
	for w.resourceVersion < resourceVersion {
		if w.clock.Since(startTime) >= blockTimeout {
			// Timeout with retry after 1 second.
			return errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %v, current: %v", resourceVersion, w.resourceVersion), 1)
		}
		w.cond.Wait()
	}
	if trace != nil {
		trace.Step("watchCache fresh enough")
	}
	return nil
}
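
Example No. 4 pairs a sync.Cond wait loop with a helper goroutine that broadcasts once the block timeout elapses, guaranteeing the loop wakes up and notices the deadline even if no writer ever bumps the resource version. Here is a standalone sketch of that wake-up mechanism, with hypothetical names, a plain Mutex, and the real clock instead of the injected w.clock (and, unlike the original, releasing the lock before returning):

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitAtMost blocks until ready() reports true or timeout elapses.
// A helper goroutine broadcasts on the condition after the timeout, so the
// waiting loop is guaranteed to wake up and re-check the deadline.
func waitAtMost(cond *sync.Cond, ready func() bool, timeout time.Duration) error {
	start := time.Now()
	go func() {
		<-time.After(timeout)
		cond.Broadcast()
	}()

	cond.L.Lock()
	defer cond.L.Unlock()
	for !ready() {
		if time.Since(start) >= timeout {
			return fmt.Errorf("timed out after %s", timeout)
		}
		cond.Wait()
	}
	return nil
}

func main() {
	mu := &sync.Mutex{}
	cond := sync.NewCond(mu)
	version := 0

	// Simulate the cache catching up to the requested resource version after 50ms.
	go func() {
		time.Sleep(50 * time.Millisecond)
		mu.Lock()
		version = 5
		mu.Unlock()
		cond.Broadcast()
	}()

	err := waitAtMost(cond, func() bool { return version >= 5 }, time.Second)
	fmt.Println("wait result:", err)
}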
Example No. 5
func (r *REST) waitForBuild(ctx kapi.Context, build *api.Build) error {
	fieldSelector := fields.Set{"metadata.name": build.Name}.AsSelector()
	w, err := r.BuildRegistry.WatchBuilds(ctx, labels.Everything(), fieldSelector, build.ResourceVersion)
	if err != nil {
		return err
	}
	defer w.Stop()
	done := make(chan struct{})
	errchan := make(chan error)
	go func(ch <-chan watch.Event) {
		for event := range ch {
			obj, ok := event.Object.(*api.Build)
			if !ok {
				errchan <- fmt.Errorf("event object is not a Build: %#v", event.Object)
				break
			}
			switch obj.Status.Phase {
			case api.BuildPhaseCancelled:
				errchan <- fmt.Errorf("build %s/%s was cancelled. %s", build.Namespace, build.Name, buildutil.NoBuildLogsMessage)
				break
			case api.BuildPhaseError:
				errchan <- fmt.Errorf("build %s/%s is in an error state. %s", build.Namespace, build.Name, buildutil.NoBuildLogsMessage)
				break
			case api.BuildPhaseRunning, api.BuildPhaseComplete, api.BuildPhaseFailed:
				done <- struct{}{}
				break
			}
		}
	}(w.ResultChan())
	select {
	case err := <-errchan:
		return err
	case <-done:
		return nil
	case <-time.After(r.Timeout):
		return errors.NewTimeoutError(fmt.Sprintf("timed out waiting for Build %s/%s", build.Namespace, build.Name), 1)
	}
}
Example No. 6
File: rest.go Project: rrati/origin
func (h *binaryInstantiateHandler) handle(r io.Reader) (runtime.Object, error) {
	h.options.Name = h.name
	if err := rest.BeforeCreate(BinaryStrategy, h.ctx, h.options); err != nil {
		glog.Infof("failed to validate binary: %#v", h.options)
		return nil, err
	}

	request := &buildapi.BuildRequest{}
	request.Name = h.name
	if len(h.options.Commit) > 0 {
		request.Revision = &buildapi.SourceRevision{
			Git: &buildapi.GitSourceRevision{
				Committer: buildapi.SourceControlUser{
					Name:  h.options.CommitterName,
					Email: h.options.CommitterEmail,
				},
				Author: buildapi.SourceControlUser{
					Name:  h.options.AuthorName,
					Email: h.options.AuthorEmail,
				},
				Message: h.options.Message,
				Commit:  h.options.Commit,
			},
		}
	}
	request.Binary = &buildapi.BinaryBuildSource{
		AsFile: h.options.AsFile,
	}
	build, err := h.r.Generator.Instantiate(h.ctx, request)
	if err != nil {
		glog.Infof("failed to instantiate: %#v", request)
		return nil, err
	}

	latest, ok, err := registry.WaitForRunningBuild(h.r.Watcher, h.ctx, build, h.r.Timeout)
	if err != nil {
		switch latest.Status.Phase {
		case buildapi.BuildPhaseError:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage))
		case buildapi.BuildPhaseCancelled:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage))
		}
		return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", build.Name, err))
	}
	if !ok {
		return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, h.r.Timeout), 0)
	}
	if latest.Status.Phase != buildapi.BuildPhaseRunning {
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s is no longer running, cannot upload file: %s", build.Name, build.Status.Phase))
	}

	// The container should be the default build container, so we leave the Container field blank
	buildPodName := buildutil.GetBuildPodName(build)
	opts := &kapi.PodAttachOptions{
		Stdin: true,
	}
	location, transport, err := pod.AttachLocation(h.r.PodGetter, h.r.ConnectionInfo, h.ctx, buildPodName, opts)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName)
		}
		return nil, errors.NewBadRequest(err.Error())
	}
	rawTransport, ok := transport.(*http.Transport)
	if !ok {
		return nil, errors.NewInternalError(fmt.Errorf("unable to connect to node, unrecognized type: %v", reflect.TypeOf(transport)))
	}
	upgrader := spdy.NewRoundTripper(rawTransport.TLSClientConfig)
	exec, err := remotecommand.NewStreamExecutor(upgrader, nil, "POST", location)
	if err != nil {
		return nil, errors.NewInternalError(fmt.Errorf("unable to connect to server: %v", err))
	}
	if err := exec.Stream(r, nil, nil, false); err != nil {
		return nil, errors.NewInternalError(err)
	}
	return latest, nil
}
Example No. 7
// Get returns a streamer resource with the contents of the deployment log
func (r *REST) Get(ctx kapi.Context, name string, opts runtime.Object) (runtime.Object, error) {
	// Ensure we have a namespace in the context
	namespace, ok := kapi.NamespaceFrom(ctx)
	if !ok {
		return nil, errors.NewBadRequest("namespace parameter required.")
	}

	// Validate DeploymentLogOptions
	deployLogOpts, ok := opts.(*deployapi.DeploymentLogOptions)
	if !ok {
		return nil, errors.NewBadRequest("did not get an expected options.")
	}
	if errs := validation.ValidateDeploymentLogOptions(deployLogOpts); len(errs) > 0 {
		return nil, errors.NewInvalid("deploymentLogOptions", "", errs)
	}

	// Fetch deploymentConfig and check latest version; if 0, there are no deployments
	// for this config
	config, err := r.ConfigGetter.DeploymentConfigs(namespace).Get(name)
	if err != nil {
		return nil, errors.NewNotFound("deploymentConfig", name)
	}
	desiredVersion := config.Status.LatestVersion
	if desiredVersion == 0 {
		return nil, errors.NewBadRequest(fmt.Sprintf("no deployment exists for deploymentConfig %q", config.Name))
	}

	// Support retrieving logs for older deployments
	switch {
	case deployLogOpts.Version == nil:
		// Latest or previous
		if deployLogOpts.Previous {
			desiredVersion--
			if desiredVersion < 1 {
				return nil, errors.NewBadRequest(fmt.Sprintf("no previous deployment exists for deploymentConfig %q", config.Name))
			}
		}
	case *deployLogOpts.Version <= 0 || int(*deployLogOpts.Version) > config.Status.LatestVersion:
		// Invalid version
		return nil, errors.NewBadRequest(fmt.Sprintf("invalid version for deploymentConfig %q: %d", config.Name, *deployLogOpts.Version))
	default:
		desiredVersion = int(*deployLogOpts.Version)
	}

	// Get desired deployment
	targetName := deployutil.DeploymentNameForConfigVersion(config.Name, desiredVersion)
	target, err := r.DeploymentGetter.ReplicationControllers(namespace).Get(targetName)
	if err != nil {
		return nil, err
	}

	// Check for deployment status; if it is new or pending, we will wait for it. If it is complete,
	// the deployment completed successfully and the deployer pod will be deleted so we will return a
	// success message. If it is running or failed, retrieve the log from the deployer pod.
	status := deployutil.DeploymentStatusFor(target)
	switch status {
	case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending:
		if deployLogOpts.NoWait {
			glog.V(4).Infof("Deployment %s is in %s state. No logs to retrieve yet.", deployutil.LabelForDeployment(target), status)
			return &genericrest.LocationStreamer{}, nil
		}
		glog.V(4).Infof("Deployment %s is in %s state, waiting for it to start...", deployutil.LabelForDeployment(target), status)

		latest, ok, err := registry.WaitForRunningDeployment(r.DeploymentGetter, target, r.Timeout)
		if err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for deployment %s to run: %v", deployutil.LabelForDeployment(target), err))
		}
		if !ok {
			return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for deployment %s to start after %s", deployutil.LabelForDeployment(target), r.Timeout), 1)
		}
		if deployutil.DeploymentStatusFor(latest) == deployapi.DeploymentStatusComplete {
			// Deployer pod has been deleted, no logs to retrieve
			glog.V(4).Infof("Deployment %s was successful so the deployer pod is deleted. No logs to retrieve.", deployutil.LabelForDeployment(target))
			return &genericrest.LocationStreamer{}, nil
		}
	case deployapi.DeploymentStatusComplete:
		// Deployer pod has been deleted, no logs to retrieve
		glog.V(4).Infof("Deployment %s was successful so the deployer pod is deleted. No logs to retrieve.", deployutil.LabelForDeployment(target))
		return &genericrest.LocationStreamer{}, nil
	}

	// Set up the URL of the deployer pod
	deployPodName := deployutil.DeployerPodNameForDeployment(target.Name)
	logOpts := deployapi.DeploymentToPodLogOptions(deployLogOpts)
	location, transport, err := pod.LogLocation(r.PodGetter, r.ConnectionInfo, ctx, deployPodName, logOpts)
	if err != nil {
		return nil, errors.NewBadRequest(err.Error())
	}

	return &genericrest.LocationStreamer{
		Location:        location,
		Transport:       transport,
		ContentType:     "text/plain",
		Flush:           deployLogOpts.Follow,
		ResponseChecker: genericrest.NewGenericHttpResponseChecker("Pod", deployPodName),
	}, nil
}
Example No. 8
// Create attempts to create a new eviction.  That is, it tries to evict a pod.
func (r *EvictionREST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	eviction := obj.(*policy.Eviction)

	obj, err := r.store.Get(ctx, eviction.Name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	pod := obj.(*api.Pod)
	var rtStatus *metav1.Status
	var pdbName string
	err = retry.RetryOnConflict(EvictionsRetry, func() error {
		pdbs, err := r.getPodDisruptionBudgets(ctx, pod)
		if err != nil {
			return err
		}

		if len(pdbs) > 1 {
			rtStatus = &metav1.Status{
				Status:  metav1.StatusFailure,
				Message: "This pod has more than one PodDisruptionBudget, which the eviction subresource does not support.",
				Code:    500,
			}
			return nil
		} else if len(pdbs) == 1 {
			pdb := pdbs[0]
			pdbName = pdb.Name
			// Try to verify-and-decrement

			// If it was false already, or if it becomes false during the course of our retries,
			// raise an error marked as a 429.
			ok, err := r.checkAndDecrement(pod.Namespace, pod.Name, pdb)
			if err != nil {
				return err
			}

			if !ok {
				rtStatus = &metav1.Status{
					Status: metav1.StatusFailure,
					// TODO(mml): Include some more details about why the eviction is disallowed.
					// Ideally any such text is generated by the DisruptionController (offline).
					Message: "Cannot evict pod as it would violate the pod's disruption budget.",
					Code:    429,
					// TODO(mml): Add a Retry-After header.  Once there are time-based
					// budgets, we can sometimes compute a sensible suggested value.  But
					// even without that, we can give a suggestion (10 minutes?) that
					// prevents well-behaved clients from hammering us.
				}
			}
		}
		return nil
	})
	if err == wait.ErrWaitTimeout {
		err = errors.NewTimeoutError(fmt.Sprintf("couldn't update PodDisruptionBudget %q due to conflicts", pdbName), 10)
	}
	if err != nil {
		return nil, err
	}

	if rtStatus != nil {
		return rtStatus, nil
	}

	// At this point there was either no PDB or we succeeded in decrementing it

	// Try the delete
	_, err = r.store.Delete(ctx, eviction.Name, eviction.DeleteOptions)
	if err != nil {
		return nil, err
	}

	// Success!
	return &metav1.Status{Status: metav1.StatusSuccess}, nil
}
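
Example No. 8 reaches the same error by a different route: RetryOnConflict keeps re-running the update closure while it fails with a conflict, and when the retries are exhausted the handler maps the wait.ErrWaitTimeout it gets back into errors.NewTimeoutError with a suggested retry of 10 seconds. A hand-rolled sketch of that translation, with hypothetical names and a plain counted loop standing in for client-go's retry/backoff machinery:

package main

import (
	"errors"
	"fmt"
)

// errConflict stands in for an optimistic-concurrency conflict (HTTP 409).
var errConflict = errors.New("conflict")

// errRetriesExhausted plays the role of wait.ErrWaitTimeout above: the retry
// budget ran out while every attempt still failed with a conflict.
var errRetriesExhausted = errors.New("retries exhausted")

// retryOnConflict retries fn up to attempts times, but only while it fails
// with errConflict; success or any other error is returned immediately.
func retryOnConflict(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil || !errors.Is(err, errConflict) {
			return err
		}
	}
	return errRetriesExhausted
}

func main() {
	err := retryOnConflict(3, func() error { return errConflict })
	if errors.Is(err, errRetriesExhausted) {
		// Mirror the eviction handler: surface the exhausted retries as a
		// "try again later" condition rather than a hard failure.
		err = fmt.Errorf("couldn't update PodDisruptionBudget %q due to conflicts, retry after 10s", "my-pdb")
	}
	fmt.Println(err)
}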