Example #1
func (c *BtrfsCleaningCake) removeQgroup(log lager.Logger, layerPath string) error {
	log = log.Session("remove-qgroup")
	log.Info("start")

	runner := &logging.Runner{c.Runner, log}

	qgroupInfo, err := c.run(runner, exec.Command("btrfs", "qgroup", "show", "-f", layerPath))
	if err != nil {
		return err
	}

	qgroupInfoLines := strings.Split(qgroupInfo, "\n")
	if len(qgroupInfoLines) != 4 {
		return fmt.Errorf("unexpected qgroup show output: %s", qgroupInfo)
	}

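	// qgroup show prints a header, a separator, the data row, and a trailing blank line;
	// the qgroupid is the first column of the data row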
	qgroupid := strings.SplitN(qgroupInfoLines[2], " ", 2)[0]
	_, err = c.run(runner, exec.Command("btrfs", "qgroup", "destroy", qgroupid, c.BtrfsMountPoint))
	if err != nil {
		log.Error("failed", err)
		return err
	}

	log.Info("destroyed", lager.Data{"qgroupid": qgroupid})
	return nil
}
Example #2
func (db *serviceClient) CellEvents(logger lager.Logger) <-chan models.CellEvent {
	logger = logger.Session("cell-events")

	disappearanceWatcher, disappeared := locket.NewDisappearanceWatcher(logger, db.consulClient, CellSchemaRoot(), db.clock)
	process := ifrit.Invoke(disappearanceWatcher)

	events := make(chan models.CellEvent)
	go func() {
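		// forward cell-disappearance notifications until the channel closes, then stop the watcher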
		for {
			keys, ok := <-disappeared
			if !ok {
				process.Signal(os.Interrupt)
				return
			}

			cellIDs := make([]string, len(keys))
			for i, key := range keys {
				cellIDs[i] = path.Base(key)
			}
			logger.Info("cell-disappeared", lager.Data{"cell_ids": cellIDs})
			events <- models.NewCellDisappearedEvent(cellIDs)
		}
	}()

	return events
}
Example #3
// Exec a process in a bundle using 'runc exec'
func (r *RunRunc) Exec(log lager.Logger, bundlePath, id string, spec garden.ProcessSpec, io garden.ProcessIO) (garden.Process, error) {
	log = log.Session("exec", lager.Data{"id": id, "path": spec.Path})

	log.Info("started")
	defer log.Info("finished")

	tmpFile, err := ioutil.TempFile("", "guardianprocess")
	if err != nil {
		log.Error("tempfile-failed", err)
		return nil, err
	}

	if err := r.writeProcessJSON(bundlePath, spec, tmpFile); err != nil {
		log.Error("encode-failed", err)
		return nil, fmt.Errorf("writeProcessJSON for container %s: %s", id, err)
	}

	cmd := r.runc.ExecCommand(id, tmpFile.Name())

	process, err := r.tracker.Run(r.pidGenerator.Generate(), cmd, io, spec.TTY)
	if err != nil {
		log.Error("run-failed", err)
		return nil, err
	}

	return process, nil
}
Example #4
func (b *Bulker) sync(logger lager.Logger) {
	logger = logger.Session("sync")

	logger.Info("starting")
	defer logger.Info("finished")

	startTime := b.clock.Now()

	ops, batchError := b.generator.BatchOperations(logger)

	endTime := b.clock.Now()

	sendError := repBulkSyncDuration.Send(endTime.Sub(startTime))
	if sendError != nil {
		logger.Error("failed-to-send-rep-bulk-sync-duration-metric", sendError)
	}

	if batchError != nil {
		logger.Error("failed-to-generate-operations", batchError)
		return
	}

	for _, operation := range ops {
		b.queue.Push(operation)
	}
}
Example #5
func (db *ETCDDB) FailActualLRP(logger lager.Logger, key *models.ActualLRPKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "error_message": errorMessage})
	logger.Info("starting")
	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, err
	}
	beforeActualLRP := *lrp

	if lrp.State != models.ActualLRPStateUnclaimed {
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	lrp.ModificationTag.Increment()
	lrp.PlacementError = errorMessage
	lrp.Since = db.clock.Now().UnixNano()

	lrpData, serialErr := db.serializeModel(logger, lrp)
	if serialErr != nil {
		return nil, nil, serialErr
	}

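	// CompareAndSwap with prevIndex is an optimistic update: it fails if the LRP changed since it was read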
	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
}
Example #6
func newUaaClient(logger lager.Logger, clock clock.Clock, c *config.Config) uaa_client.Client {
	if c.RoutingApi.AuthDisabled {
		logger.Info("using-noop-token-fetcher")
		return uaa_client.NewNoOpUaaClient()
	}

	if c.OAuth.Port == -1 {
		logger.Fatal("tls-not-enabled", errors.New("GoRouter requires TLS enabled to get OAuth token"), lager.Data{"token-endpoint": c.OAuth.TokenEndpoint, "port": c.OAuth.Port})
	}

	tokenURL := fmt.Sprintf("https://%s:%d", c.OAuth.TokenEndpoint, c.OAuth.Port)

	cfg := &uaa_config.Config{
		UaaEndpoint:           tokenURL,
		SkipVerification:      c.OAuth.SkipSSLValidation,
		ClientName:            c.OAuth.ClientName,
		ClientSecret:          c.OAuth.ClientSecret,
		CACerts:               c.OAuth.CACerts,
		MaxNumberOfRetries:    c.TokenFetcherMaxRetries,
		RetryInterval:         c.TokenFetcherRetryInterval,
		ExpirationBufferInSec: c.TokenFetcherExpirationBufferTimeInSeconds,
	}

	uaaClient, err := uaa_client.NewClient(logger, cfg, clock)
	if err != nil {
		logger.Fatal("initialize-token-fetcher-error", err)
	}
	return uaaClient
}
Example #7
func (db *ETCDDB) batchDeleteNodes(keys []string, logger lager.Logger) {
	if len(keys) == 0 {
		return
	}

	works := []func(){}

	for _, key := range keys {
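		// rebind the loop variable so each closure captures its own key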
		key := key
		works = append(works, func() {
			logger.Info("deleting", lager.Data{"key": key})
			_, err := db.client.Delete(key, true)
			if err != nil {
				logger.Error("failed-to-delete", err, lager.Data{
					"key": key,
				})
			}
		})
	}

	throttler, err := workpool.NewThrottler(db.convergenceWorkersSize, works)
	if err != nil {
		logger.Error("failed-to-create-throttler", err)
		return
	}

	throttler.Work()
}
Example #8
// Acquire uses the given subnet and IP selectors to request a subnet and container IP address combination
// from the pool.
func (p *pool) Acquire(sn SubnetSelector, i IPSelector, logger lager.Logger) (network *linux_backend.Network, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()

	logger = logger.Session("acquire")

	network = &linux_backend.Network{}

	allocatedSubnets := subnets(p.allocated)
	logger.Info("subnet-selecting", lager.Data{"allocated-subnets": subnetsStr(allocatedSubnets)})
	if network.Subnet, err = sn.SelectSubnet(p.dynamicRange, allocatedSubnets); err != nil {
		logger.Error("subnet-selecting-failed", err)
		return nil, err
	}
	logger.Info("subnet-selected", lager.Data{"subnet": network.Subnet.String()})

	ips := p.allocated[network.Subnet.String()]
	logger.Info("ip-selecting", lager.Data{"allocated-ips": ipsStr(ips)})
	allocatedIPs := append(ips, NetworkIP(network.Subnet), GatewayIP(network.Subnet), BroadcastIP(network.Subnet))
	if network.IP, err = i.SelectIP(network.Subnet, allocatedIPs); err != nil {
		logger.Error("ip-selecting-failed", err)
		return nil, err
	}
	logger.Info("ip-selected", lager.Data{"ip": network.IP.String()})

	p.allocated[network.Subnet.String()] = append(ips, network.IP)
	logger.Info("new-allocated", lager.Data{"allocated-ips": ipsStr(p.allocated[network.Subnet.String()])})

	return network, nil
}
Example #9
func logInfoOrError(logger lager.Logger, msg string, err error) {
	if err == executor.ErrContainerNotFound {
		logger.Info(msg, lager.Data{"error": err.Error()})
	} else {
		logger.Error(msg, err)
	}
}
Example #10
func (worker *gardenWorker) FindContainerForIdentifier(logger lager.Logger, id Identifier) (Container, bool, error) {
	containerInfo, found, err := worker.provider.FindContainerForIdentifier(id)
	if err != nil {
		return nil, false, err
	}

	if !found {
		return nil, found, nil
	}

	container, found, err := worker.LookupContainer(logger, containerInfo.Handle)
	if err != nil {
		return nil, false, err
	}

	if !found {
		logger.Info("reaping-container-not-found-on-worker", lager.Data{
			"container-handle": containerInfo.Handle,
			"worker-name":      containerInfo.WorkerName,
		})

		err := worker.provider.ReapContainer(containerInfo.Handle)
		if err != nil {
			return nil, false, err
		}

		return nil, false, nil
	}

	return container, found, nil
}
Example #11
func (worker *gardenWorker) LookupContainer(logger lager.Logger, handle string) (Container, bool, error) {
	gardenContainer, err := worker.gardenClient.Lookup(handle)
	if err != nil {
		if _, ok := err.(garden.ContainerNotFoundError); ok {
			logger.Info("container-not-found")
			return nil, false, nil
		}

		logger.Error("failed-to-lookup-on-garden", err)
		return nil, false, err
	}

	container, err := newGardenWorkerContainer(
		logger,
		gardenContainer,
		worker.gardenClient,
		worker.baggageclaimClient,
		worker.db,
		worker.clock,
		worker.volumeFactory,
	)
	if err != nil {
		logger.Error("failed-to-construct-container", err)
		return nil, false, err
	}

	return container, true, nil
}
Example #12
func (h *TaskAuctionHandler) Create(w http.ResponseWriter, r *http.Request, logger lager.Logger) {
	logger = h.logSession(logger).Session("create")

	payload, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Error("failed-to-read-request-body", err)
		writeInternalErrorJSONResponse(w, err)
		return
	}

	tasks := []*models.Task{}
	err = json.Unmarshal(payload, &tasks)
	if err != nil {
		logger.Error("malformed-json", err)
		writeInvalidJSONResponse(w, err)
		return
	}

	validTasks := make([]*models.Task, 0, len(tasks))
	taskGuids := make([]string, 0, len(tasks))
	for _, t := range tasks {
		if err := t.Validate(); err == nil {
			validTasks = append(validTasks, t)
			taskGuids = append(taskGuids, t.TaskGuid)
		} else {
			logger.Error("task-validate-failed", err, lager.Data{"task": t})
		}
	}

	h.runner.ScheduleTasksForAuctions(validTasks)

	logger.Info("submitted", lager.Data{"tasks": taskGuids})
	writeStatusAcceptedResponse(w)
}
Example #13
func newVolume(logger lager.Logger, bcVol baggageclaim.Volume, clock clock.Clock, db VolumeFactoryDB) Volume {
	vol := &volume{
		Volume: bcVol,
		db:     db,

		heartbeating: new(sync.WaitGroup),
		release:      make(chan *time.Duration, 1),
	}

	ttl, err := vol.db.GetVolumeTTL(vol.Handle())
	if err != nil {
		logger.Info("failed-to-lookup-ttl", lager.Data{"error": err.Error()})
		ttl, _, err = bcVol.Expiration()

		if err != nil {
			logger.Error("failed-to-lookup-expiration-of-volume", err)
			return nil
		}
	}

	vol.heartbeat(logger.Session("initial-heartbeat"), ttl)

	vol.heartbeating.Add(1)
	go vol.heartbeatContinuously(
		logger.Session("continuos-heartbeat"),
		clock.NewTicker(volumeKeepalive),
		ttl,
	)

	return vol
}
Example #14
func (c *BtrfsCleaningCake) removeSubvols(log lager.Logger, layerPath string) error {
	runner := &logging.Runner{c.Runner, log}

	listSubvolumesOutput, err := c.run(runner, exec.Command("btrfs", "subvolume", "list", c.BtrfsMountPoint))
	if err != nil {
		return err
	}

	subvols := finalColumns(strings.Split(listSubvolumesOutput, "\n"))
	sort.Sort(deepestFirst(subvols))

	for _, subvolume := range subvols {
		subvolumeAbsPath := filepath.Join(c.BtrfsMountPoint, subvolume)

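		// delete only subvolumes nested beneath layerPath, never the layer itself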
		if strings.HasPrefix(subvolumeAbsPath, layerPath) && subvolumeAbsPath != layerPath {
			log.Info("removing-subvol", lager.Data{"layerPath": layerPath, "subvolumeAbsPath": subvolumeAbsPath})

			c.RemoveAll(subvolumeAbsPath)

			if _, err := c.run(runner, exec.Command("btrfs", "subvolume", "delete", subvolumeAbsPath)); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #15
func (delegate *delegate) saveImplicitOutput(logger lager.Logger, plan atc.GetPlan, info exec.VersionInfo) {
	if plan.Pipeline == "" {
		return
	}

	metadata := make([]db.MetadataField, len(info.Metadata))
	for i, md := range info.Metadata {
		metadata[i] = db.MetadataField{
			Name:  md.Name,
			Value: md.Value,
		}
	}

	_, err := delegate.db.SaveBuildOutput(delegate.buildID, db.VersionedResource{
		PipelineName: plan.Pipeline,
		Resource:     plan.Resource,
		Type:         plan.Type,
		Version:      db.Version(info.Version),
		Metadata:     metadata,
	}, false)
	if err != nil {
		logger.Error("failed-to-save", err)
		return
	}

	logger.Info("saved", lager.Data{"resource": plan.Resource})
}
Example #16
func (d *containerDelegate) FetchContainerResultFile(logger lager.Logger, guid string, filename string) (string, error) {
	logger.Info("fetching-container-result")
	stream, err := d.client.GetFiles(logger, guid, filename)
	if err != nil {
		logInfoOrError(logger, "failed-fetching-container-result-stream-from-executor", err)
		return "", err
	}

	defer stream.Close()

	tarReader := tar.NewReader(stream)

	_, err = tarReader.Next()
	if err != nil {
		return "", err
	}

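	// read one byte more than the limit so an oversized result can be detected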
	buf := make([]byte, MAX_RESULT_SIZE+1)
	n, err := tarReader.Read(buf)
	if n > MAX_RESULT_SIZE {
		logInfoOrError(logger, "failed-fetching-container-result-too-large", err)
		return "", ErrResultFileTooLarge
	}
	if err != nil && err != io.EOF {
		logInfoOrError(logger, "failed-reading-container-result", err)
		return "", err
	}

	logger.Info("succeeded-fetching-container-result")
	return string(buf[:n]), nil
}
Example #17
func (fetcher *CCFetcher) doRequest(
	logger lager.Logger,
	httpClient *http.Client,
	req *http.Request,
	value interface{},
) error {
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(fetcher.Username, fetcher.Password)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}

	defer resp.Body.Close()

	logger.Info("fetching-desired-complete", lager.Data{
		"StatusCode": resp.StatusCode,
	})

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("invalid response code %d", resp.StatusCode)
	}

	err = json.NewDecoder(resp.Body).Decode(value)
	if err != nil {
		logger.Error("decode-body", err)
		return err
	}

	return nil
}
Example #18
func (db *ETCDDB) FailActualLRP(logger lager.Logger, request *models.FailActualLRPRequest) *models.Error {
	key := request.ActualLrpKey
	errorMessage := request.ErrorMessage

	logger.Info("failing")
	lrp, prevIndex, bbsErr := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if bbsErr != nil {
		logger.Error("failed-to-get-actual-lrp", bbsErr)
		return bbsErr
	}

	if lrp.State != models.ActualLRPStateUnclaimed {
		return models.ErrActualLRPCannotBeFailed
	}

	lrp.ModificationTag.Increment()
	lrp.PlacementError = errorMessage
	lrp.Since = db.clock.Now().UnixNano()

	lrpRawJSON, err := json.Marshal(lrp)
	if err != nil {
		return models.ErrSerializeJSON
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), string(lrpRawJSON), 0, "", prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return models.ErrActualLRPCannotBeFailed
	}

	logger.Info("succeeded")
	return nil
}
Example #19
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
Example #20
func (p *taskProcessor) completeTask(logger lager.Logger, container executor.Container) {
	var result string
	var err error
	if !container.RunResult.Failed {
		result, err = p.containerDelegate.FetchContainerResultFile(logger, container.Guid, container.Tags[rep.ResultFileTag])
		if err != nil {
			p.failTask(logger, container.Guid, TaskCompletionReasonFailedToFetchResult)
			return
		}
	}

	logger.Info("completing-task")
	err = p.bbsClient.CompleteTask(container.Guid, p.cellID, container.RunResult.Failed, container.RunResult.FailureReason, result)
	if err != nil {
		logger.Error("failed-completing-task", err)

		bbsErr := models.ConvertError(err)
		if bbsErr.Type == models.Error_InvalidStateTransition {
			p.failTask(logger, container.Guid, TaskCompletionReasonInvalidTransition)
		}
		return
	}

	logger.Info("succeeded-completing-task")
}
Example #21
func (h *LRPAuctionHandler) Create(w http.ResponseWriter, r *http.Request, logger lager.Logger) {
	logger = h.logSession(logger).Session("create")

	payload, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Error("failed-to-read-request-body", err)
		writeInternalErrorJSONResponse(w, err)
		return
	}

	starts := []models.LRPStartRequest{}
	err = json.Unmarshal(payload, &starts)
	if err != nil {
		logger.Error("malformed-json", err)
		writeInvalidJSONResponse(w, err)
		return
	}

	validStarts := make([]models.LRPStartRequest, 0, len(starts))
	lrpGuids := make(map[string][]uint)
	for _, start := range starts {
		if err := start.Validate(); err == nil {
			validStarts = append(validStarts, start)
			indices := lrpGuids[start.DesiredLRP.ProcessGuid]
			indices = append(indices, start.Indices...)
			lrpGuids[start.DesiredLRP.ProcessGuid] = indices
		} else {
			logger.Error("start-validate-failed", err, lager.Data{"lrp-start": start})
		}
	}

	h.runner.ScheduleLRPsForAuctions(validStarts)
	logger.Info("submitted", lager.Data{"lrps": lrpGuids})
	writeStatusAcceptedResponse(w)
}
Example #22
// RemoveDesiredLRP deletes the DesiredLRPSchedulingInfo and the DesiredLRPRunInfo
// from the database. We delete DesiredLRPSchedulingInfo first because the system
// uses it to determine whether the LRP is present. In the event that only the
// RunInfo fails to delete, the orphaned DesiredLRPRunInfo will be garbage
// collected later by convergence.
func (db *ETCDDB) RemoveDesiredLRP(logger lager.Logger, processGuid string) error {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	_, schedulingInfoErr := db.client.Delete(DesiredLRPSchedulingInfoSchemaPath(processGuid), true)
	schedulingInfoErr = ErrorFromEtcdError(logger, schedulingInfoErr)
	if schedulingInfoErr != nil && schedulingInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-scheduling-info", schedulingInfoErr)
		return schedulingInfoErr
	}

	_, runInfoErr := db.client.Delete(DesiredLRPRunInfoSchemaPath(processGuid), true)
	runInfoErr = ErrorFromEtcdError(logger, runInfoErr)
	if runInfoErr != nil && runInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-run-info", runInfoErr)
		return runInfoErr
	}

	if schedulingInfoErr == models.ErrResourceNotFound && runInfoErr == models.ErrResourceNotFound {
		// If neither component of the desired LRP exists, don't bother trying to delete running instances
		return models.ErrResourceNotFound
	}

	return nil
}
Example #23
func (s *CgroupStarter) mountCgroup(log lager.Logger, cgroupPath, cgroupType string) error {
	log = log.Session("setup-cgroup", lager.Data{
		"path": cgroupPath,
		"type": cgroupType,
	})

	log.Info("started")
	defer log.Info("finished")

	if !s.isMountPoint(cgroupPath) {
		if err := os.MkdirAll(cgroupPath, 0755); err != nil {
			log.Error("mkdir-failed", err)
			return err
		}

		cmd := exec.Command("mount", "-n", "-t", "cgroup", "-o", cgroupType, "cgroup", cgroupPath)
		cmd.Stderr = logging.Writer(log.Session("mount-cgroup-cmd"))
		if err := s.CommandRunner.Run(cmd); err != nil {
			log.Error("mount-cgroup-failed", err)
			return err
		}
	}

	return nil
}
Example #24
func NewInstanceBackuper(
	backupConfig BackupConfig,
	logger lager.Logger,
	injectors ...BackupInjector,
) (*instanceBackuper, error) {
	backuper := &instanceBackuper{
		redisConfigFinderProvider:          instance.NewRedisConfigFinder,
		redisBackuperProvider:              redisbackup.NewRedisBackuper,
		redisClientProvider:                redis.Connect,
		sharedInstanceIDLocatorProvider:    id.SharedInstanceIDLocator,
		dedicatedInstanceIDLocatorProvider: id.DedicatedInstanceIDLocator,
		timeProvider:                       time.Now,
		logger:                             logger,
		backupConfig:                       backupConfig,
	}

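	// each injector is a functional option that can override one of the default providers above (e.g. in tests)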
	for _, injector := range injectors {
		injector(backuper)
	}

	logger.Info("init-instance-backuper", lager.Data{"event": "starting"})

	if err := backuper.init(); err != nil {
		logger.Error("init-instance-backuper", err, lager.Data{"event": "failed"})
		return nil, err
	}

	logger.Info("init-instance-backuper", lager.Data{"event": "done"})

	return backuper, nil
}
Example #25
func (v *volume) heartbeatContinuously(logger lager.Logger, pacemaker clock.Ticker, initialTTL time.Duration) {
	defer v.heartbeating.Done()
	defer pacemaker.Stop()

	logger.Debug("start")
	defer logger.Debug("done")

	ttlToSet := initialTTL

	for {
		select {
		case <-pacemaker.C():
			ttl, found, err := v.db.GetVolumeTTL(v.Handle())
			if err != nil {
				logger.Error("failed-to-lookup-ttl", err)
			} else {
				if !found {
					logger.Info("volume-expired-from-database")
					return
				}

				ttlToSet = ttl
			}

			v.heartbeat(logger.Session("tick"), ttlToSet)

		case finalTTL := <-v.release:
			if finalTTL != nil {
				v.heartbeat(logger.Session("final"), *finalTTL)
			}

			return
		}
	}
}
Example #26
func (cc *ccClient) StagingComplete(stagingGuid string, completionCallback string, payload []byte, logger lager.Logger) error {
	logger = logger.Session("cc-client")
	logger.Info("delivering-staging-response", lager.Data{"payload": string(payload)})

	request, err := http.NewRequest("POST", cc.stagingCompleteURI(stagingGuid, completionCallback), bytes.NewReader(payload))
	if err != nil {
		return err
	}

	request.SetBasicAuth(cc.username, cc.password)
	request.Header.Set("content-type", "application/json")

	response, err := cc.httpClient.Do(request)
	if err != nil {
		logger.Error("deliver-staging-response-failed", err)
		return err
	}

	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		return &BadResponseError{response.StatusCode}
	}

	logger.Info("delivered-staging-response")
	return nil
}
Example #27
// Create creates a bundle in the depot and starts its init process
func (c *Containerizer) Create(log lager.Logger, spec gardener.DesiredContainerSpec) error {
	log = log.Session("containerizer-create", lager.Data{"handle": spec.Handle})

	log.Info("started")
	defer log.Info("finished")

	if err := c.depot.Create(log, spec.Handle, c.bundler.Bundle(spec)); err != nil {
		log.Error("create-failed", err)
		return err
	}

	path, err := c.depot.Lookup(log, spec.Handle)
	if err != nil {
		log.Error("lookup-failed", err)
		return err
	}

	stdoutR, stdoutW := io.Pipe()
	_, err = c.runner.Start(log, path, spec.Handle, garden.ProcessIO{
		Stdout: io.MultiWriter(logging.Writer(log), stdoutW),
		Stderr: logging.Writer(log),
	})
	if err != nil {
		log.Error("start", err)
		return err
	}

	if err := c.startChecker.Check(log, stdoutR); err != nil {
		log.Error("check", err)
		return err
	}

	return nil
}
Example #28
func setWindowSize(logger lager.Logger, pseudoTty *os.File, columns, rows uint32) error {
	logger.Info("new-size", lager.Data{"columns": columns, "rows": rows})
	return term.SetWinsize(pseudoTty.Fd(), &term.Winsize{
		Width:  uint16(columns),
		Height: uint16(rows),
	})
}
Example #29
func (tracker *tracker) Init(logger lager.Logger, metadata Metadata, session Session, typ ResourceType, tags atc.Tags) (Resource, error) {
	logger = logger.Session("init")

	logger.Debug("start")
	defer logger.Debug("done")

	container, found, err := tracker.workerClient.FindContainerForIdentifier(logger, session.ID)
	if err != nil {
		logger.Error("failed-to-look-for-existing-container", err)
		return nil, err
	}

	if found {
		logger.Debug("found-existing-container", lager.Data{"container": container.Handle()})
		return NewResource(container), nil
	}

	logger.Debug("creating-container")

	container, err = tracker.workerClient.CreateContainer(logger, session.ID, worker.ResourceTypeContainerSpec{
		Type:      string(typ),
		Ephemeral: session.Ephemeral,
		Tags:      tags,
		Env:       metadata.Env(),
	})
	if err != nil {
		return nil, err
	}

	logger.Info("created", lager.Data{"container": container.Handle()})

	return NewResource(container), nil
}
Example #30
func (b *Broker) createUser(username string, settings rabbithole.UserSettings, logger lager.Logger) error {
	logger = logger.Session("create-user")

	userInfo, err := b.client.GetUser(username)
	if err != nil {
		if !strings.Contains(err.Error(), NotFoundIdentifier) {
			logger.Error("get-user-failed", err)
			return err
		}
	}

	// a non-nil userInfo means GetUser found an existing user with this name
	if userInfo != nil {
		err := fmt.Errorf("user already exists")
		logger.Error("found-existing-user", err)
		return err
	}

	resp, err := b.client.PutUser(username, settings)
	if err != nil {
		logger.Error("put-user-failed", err)
		return err
	}
	logger.Info("put-user-succeeded")

	if err := validateResponse(resp); err != nil {
		logger.Error("put-user-failed", err)
		return err
	}

	logger.Info("create-user-succeeded")
	return nil
}