Code example #1
File: lrp_convergence.go Project: emc-xchallenge/bbs
func (db *ETCDDB) resolveActualsWithMissingCells(logger lager.Logger, desired *models.DesiredLRP, actual *models.ActualLRP, starts *startRequests) func() {
	return func() {
		logger = logger.Session("start-missing-actual", lager.Data{
			"process-guid": actual.ProcessGuid,
			"index":        actual.Index,
		})

		logger.Debug("removing-actual-lrp")
		removeErr := db.RemoveActualLRP(logger, actual.ActualLRPKey.ProcessGuid, actual.ActualLRPKey.Index)
		if removeErr != nil {
			logger.Error("failed-removing-actual-lrp", removeErr)
			return
		}
		logger.Debug("succeeded-removing-actual-lrp")

		if actual.Index >= desired.Instances {
			return
		}

		logger.Debug("creating-actual-lrp")
		err := db.createActualLRP(logger, desired, actual.Index)
		if err != nil {
			logger.Error("failed-creating-actual-lrp", err)
			return
		}
		logger.Debug("succeeded-creating-actual-lrp")

		starts.Add(logger, &actual.ActualLRPKey)
	}
}
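
All of these excerpts lean on lager's Session chaining: each Session call returns a child logger whose messages carry the accumulated session names as a prefix and the given lager.Data fields as structured payload. Below is a minimal, self-contained sketch of that mechanism; the component name, sink, and data values are illustrative, not taken from the bbs code above.

package main

import (
	"os"

	"github.com/pivotal-golang/lager"
)

func main() {
	// Root logger plus a sink; without a registered sink, log lines go nowhere.
	logger := lager.NewLogger("example")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	// Child logger: the message below is emitted as
	// "example.start-missing-actual.removing-actual-lrp" with the data fields attached.
	session := logger.Session("start-missing-actual", lager.Data{
		"process-guid": "some-guid",
		"index":        0,
	})
	session.Debug("removing-actual-lrp")
}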
Code example #2
File: scheduler.go Project: pcfdev-forks/atc
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, versions *algorithm.VersionsDB, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) Waiter {
	logger = logger.Session("try-next-pending")

	wg := new(sync.WaitGroup)

	wg.Add(1)
	go func() {
		defer wg.Done()

		build, found, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		if err != nil {
			logger.Error("failed-to-get-next-pending-build", err)
			return
		}

		if !found {
			return
		}

		jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
		if err != nil {
			logger.Error("failed-to-get-job-service", err)
			return
		}

		s.ScheduleAndResumePendingBuild(logger, versions, build, job, resources, resourceTypes, jobService)
	}()

	return wg
}
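
TryNextPendingBuild does its work on a goroutine and hands the *sync.WaitGroup back as a Waiter, so callers can block until scheduling finishes without depending on the concrete type. A sketch of the likely interface, assuming Waiter is the one-method interface that *sync.WaitGroup already satisfies; the real atc declaration may differ.

package scheduler

import "sync"

// Waiter lets callers of TryNextPendingBuild block until the scheduling
// goroutine is done. This definition is inferred from the return statement
// above, not copied from atc.
type Waiter interface {
	Wait()
}

// Compile-time check that *sync.WaitGroup satisfies Waiter.
var _ Waiter = new(sync.WaitGroup)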
Code example #3
File: runrunc.go Project: digideskio/guardian
// Exec a process in a bundle using 'runc exec'
func (r *RunRunc) Exec(log lager.Logger, bundlePath, id string, spec garden.ProcessSpec, io garden.ProcessIO) (garden.Process, error) {
	log = log.Session("exec", lager.Data{"id": id, "path": spec.Path})

	log.Info("started")
	defer log.Info("finished")

	tmpFile, err := ioutil.TempFile("", "guardianprocess")
	if err != nil {
		log.Error("tempfile-failed", err)
		return nil, err
	}

	if err := r.writeProcessJSON(bundlePath, spec, tmpFile); err != nil {
		log.Error("encode-failed", err)
		return nil, fmt.Errorf("writeProcessJSON for container %s: %s", id, err)
	}

	cmd := r.runc.ExecCommand(id, tmpFile.Name())

	process, err := r.tracker.Run(r.pidGenerator.Generate(), cmd, io, spec.TTY)
	if err != nil {
		log.Error("run-failed", err)
		return nil, err
	}

	return process, nil
}
Code example #4
File: containerizer.go Project: digideskio/guardian
// Create creates a bundle in the depot and starts its init process
func (c *Containerizer) Create(log lager.Logger, spec gardener.DesiredContainerSpec) error {
	log = log.Session("containerizer-create", lager.Data{"handle": spec.Handle})

	log.Info("started")
	defer log.Info("finished")

	if err := c.depot.Create(log, spec.Handle, c.bundler.Bundle(spec)); err != nil {
		log.Error("create-failed", err)
		return err
	}

	path, err := c.depot.Lookup(log, spec.Handle)
	if err != nil {
		log.Error("lookup-failed", err)
		return err
	}

	stdoutR, stdoutW := io.Pipe()
	_, err = c.runner.Start(log, path, spec.Handle, garden.ProcessIO{
		Stdout: io.MultiWriter(logging.Writer(log), stdoutW),
		Stderr: logging.Writer(log),
	})
	if err != nil {
		log.Error("start", err)
		return err
	}

	if err := c.startChecker.Check(log, stdoutR); err != nil {
		log.Error("check", err)
		return err
	}

	return nil
}
Code example #5
File: exec_engine.go Project: xoebus/checkin
func (build *execBuild) Resume(logger lager.Logger) {
	stepFactory := build.buildStepFactory(logger, build.metadata.Plan)
	source := stepFactory.Using(&exec.NoopStep{}, exec.NewSourceRepository())

	defer source.Release()

	process := ifrit.Background(source)

	exited := process.Wait()

	aborted := false
	var succeeded exec.Success

	for {
		select {
		case err := <-exited:
			if aborted {
				succeeded = false
			} else if !source.Result(&succeeded) {
				logger.Error("step-had-no-result", errors.New("step failed to provide us with a result"))
				succeeded = false
			}

			build.delegate.Finish(logger.Session("finish"), err, succeeded, aborted)
			return

		case sig := <-build.signals:
			process.Signal(sig)

			if sig == os.Kill {
				aborted = true
			}
		}
	}
}
Code example #6
File: service_client.go Project: timani/bbs
func (db *serviceClient) CellEvents(logger lager.Logger) <-chan models.CellEvent {
	logger = logger.Session("cell-events")

	disappearanceWatcher, disappeared := locket.NewDisappearanceWatcher(logger, db.consulClient, CellSchemaRoot(), db.clock)
	process := ifrit.Invoke(disappearanceWatcher)

	events := make(chan models.CellEvent)
	go func() {
		// disappeared closes when the watcher exits; once the loop ends,
		// signal the watcher process so it shuts down cleanly.
		defer process.Signal(os.Interrupt)

		for keys := range disappeared {
			cellIDs := make([]string, len(keys))
			for i, key := range keys {
				cellIDs[i] = path.Base(key)
			}
			logger.Info("cell-disappeared", lager.Data{"cell_ids": cellIDs})
			events <- models.NewCellDisappearedEvent(cellIDs)
		}
	}()

	return events
}
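
Note that the goroutine above only ever sends on events and never closes the channel, so a consumer cannot rely on range terminating; it needs its own shutdown signal. A hedged sketch of such a consumer follows; drainCellEvents and the stop channel are hypothetical, and the import paths assume the upstream cloudfoundry-incubator tree.

package main

import (
	"github.com/cloudfoundry-incubator/bbs/models"
	"github.com/pivotal-golang/lager"
)

// drainCellEvents consumes CellEvents until the caller signals stop. The
// events channel is never closed by the producer, so stop is the only way out.
func drainCellEvents(logger lager.Logger, events <-chan models.CellEvent, stop <-chan struct{}) {
	for {
		select {
		case event := <-events:
			logger.Info("handling-cell-event", lager.Data{"event": event})
		case <-stop:
			return
		}
	}
}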
Code example #7
File: docker_backend.go Project: cfibmers/stager
func getDockerRegistryServices(consulCluster string, backendLogger lager.Logger) ([]consulServiceInfo, error) {
	logger := backendLogger.Session("docker-registry-consul-services")

	response, err := http.Get(consulCluster + "/v1/catalog/service/docker-registry")
	if err != nil {
		return nil, err
	}

	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}

	var ips []consulServiceInfo
	err = json.Unmarshal(body, &ips)
	if err != nil {
		return nil, err
	}

	if len(ips) == 0 {
		return nil, ErrMissingDockerRegistry
	}

	logger.Debug("docker-registry-consul-services", lager.Data{"ips": ips})

	return ips, nil
}
Code example #8
File: scheduler.go Project: utako/atc
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) Waiter {
	logger = logger.Session("try-next-pending")

	wg := new(sync.WaitGroup)

	wg.Add(1)
	go func() {
		defer wg.Done()

		build, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		if err != nil {
			if err == db.ErrNoBuild {
				return
			}

			logger.Error("failed-to-get-next-pending-build", err)

			return
		}

		s.scheduleAndResumePendingBuild(logger, build, job, resources)
	}()

	return wg
}
Code example #9
File: lrp_convergence.go Project: timani/bbs
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
Code example #10
File: api.go Project: x6j8x/rds-broker
func lastOperation(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]

		logger := logger.Session(lastOperationLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		lastOperationResponse, err := serviceBroker.LastOperation(instanceID)
		if err != nil {
			switch err {
			case ErrInstanceDoesNotExist:
				logger.Error(instanceMissingErrorKey, err)
				respond(w, http.StatusGone, EmptyResponse{})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		respond(w, http.StatusOK, lastOperationResponse)
	}
}
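
The broker handlers in this and the next two examples all funnel their output through a respond helper that the excerpts don't show. A plausible sketch, assuming respond writes the status code and JSON-encodes the body; the package name, the helper's shape, and the 422 constant (net/http of that era defined no StatusUnprocessableEntity) are assumptions.

package brokerapi

import (
	"encoding/json"
	"net/http"
)

// statusUnprocessableEntity mirrors the constant the handlers reference.
const statusUnprocessableEntity = 422

// respond sets the content type, writes the status line, and JSON-encodes
// the response body. Sketch only; the real helper may differ.
func respond(w http.ResponseWriter, status int, response interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)

	// The status line is already on the wire, so an encode error can at
	// best be logged; it cannot change the response.
	_ = json.NewEncoder(w).Encode(response)
}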
Code example #11
File: api.go Project: x6j8x/rds-broker
func provision(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		acceptsIncomplete := false
		if req.URL.Query().Get("accepts_incomplete") == "true" {
			acceptsIncomplete = true
		}

		logger := logger.Session(provisionLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		var details ProvisionDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			logger.Error(invalidProvisionDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		logger = logger.WithData(lager.Data{
			provisionDetailsLogKey: details,
		})

		provisioningResponse, asynch, err := serviceBroker.Provision(instanceID, details, acceptsIncomplete)
		if err != nil {
			switch err {
			case ErrInstanceAlreadyExists:
				logger.Error(instanceAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, EmptyResponse{})
			case ErrInstanceLimitMet:
				logger.Error(instanceLimitReachedErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrAsyncRequired:
				logger.Error(instanceAsyncRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "AsyncRequired",
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		if asynch {
			respond(w, http.StatusAccepted, provisioningResponse)
			return
		}

		respond(w, http.StatusCreated, provisioningResponse)
	}
}
Code example #12
File: api.go Project: x6j8x/rds-broker
func bind(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		bindingID := vars["binding_id"]

		logger := logger.Session(bindLogKey, lager.Data{
			instanceIDLogKey: instanceID,
			bindingIDLogKey:  bindingID,
		})

		var details BindDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			logger.Error(invalidBindDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		logger = logger.WithData(lager.Data{
			bindDetailsLogKey: details,
		})

		bindingResponse, err := serviceBroker.Bind(instanceID, bindingID, details)
		if err != nil {
			switch err {
			case ErrInstanceDoesNotExist:
				logger.Error(instanceMissingErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrBindingAlreadyExists:
				logger.Error(bindingAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, ErrorResponse{
					Description: err.Error(),
				})
			case ErrAppGUIDRequired:
				logger.Error(bindingAppGUIDRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "RequiresApp",
					Description: err.Error(),
				})
			case ErrInstanceNotBindable:
				logger.Error(instanceNotBindableErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		respond(w, http.StatusCreated, bindingResponse)
	}
}
Code example #13
File: metrics.go Project: ACPK/atc
func (event SchedulingJobDuration) Emit(logger lager.Logger) {
	state := "ok"

	if event.Duration > time.Second {
		state = "warning"
	}

	if event.Duration > 5*time.Second {
		state = "critical"
	}

	emit(
		logger.Session("job-scheduling-duration", lager.Data{
			"pipeline": event.PipelineName,
			"job":      event.JobName,
			"duration": event.Duration.String(),
		}),
		goryman.Event{
			Service: "scheduling: job duration (ms)",
			Metric:  ms(event.Duration),
			State:   state,
			Attributes: map[string]string{
				"pipeline": event.PipelineName,
				"job":      event.JobName,
			},
		},
	)
}
Code example #14
File: metrics.go Project: ACPK/atc
func (event HTTPReponseTime) Emit(logger lager.Logger) {
	state := "ok"

	if event.Duration > 100*time.Millisecond {
		state = "warning"
	}

	if event.Duration > 1*time.Second {
		state = "critical"
	}

	emit(
		logger.Session("http-response-time", lager.Data{
			"route":    event.Route,
			"path":     event.Path,
			"duration": event.Duration.String(),
		}),
		goryman.Event{
			Service: "http response time",
			Metric:  ms(event.Duration),
			State:   state,
			Attributes: map[string]string{
				"route": event.Route,
				"path":  event.Path,
			},
		},
	)
}
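
Both emitters convert event.Duration with an ms helper before attaching it as the metric value; the helper is not part of these excerpts. A one-function sketch, assuming it reports fractional milliseconds to match the "(ms)" unit advertised in example #13's service name.

package metrics

import "time"

// ms converts a duration to fractional milliseconds. Sketch only; atc's
// real helper may round or truncate differently.
func ms(duration time.Duration) float64 {
	return float64(duration.Nanoseconds()) / 1e6
}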
Code example #15
File: volume.go Project: xoebus/checkin
func (v *volume) heartbeatContinuously(logger lager.Logger, pacemaker clock.Ticker, initialTTL time.Duration) {
	defer v.heartbeating.Done()
	defer pacemaker.Stop()

	logger.Debug("start")
	defer logger.Debug("done")

	ttlToSet := initialTTL

	for {
		select {
		case <-pacemaker.C():
			ttl, found, err := v.db.GetVolumeTTL(v.Handle())
			if err != nil {
				logger.Error("failed-to-lookup-ttl", err)
			} else {
				if !found {
					logger.Info("volume-expired-from-database")
					return
				}

				ttlToSet = ttl
			}

			v.heartbeat(logger.Session("tick"), ttlToSet)

		case finalTTL := <-v.release:
			if finalTTL != nil {
				v.heartbeat(logger.Session("final"), *finalTTL)
			}

			return
		}
	}
}
Code example #16
File: lrp_convergence.go Project: timani/bbs
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		actualKey := actualLRP.ActualLRPKey

		logger = logger.Session("restart-crash", lager.Data{
			"process_guid": actualKey.ProcessGuid,
			"index":        actualKey.Index,
		})

		if actualLRP.State != models.ActualLRPStateCrashed {
			logger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		logger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		_, err := db.unclaimActualLRP(logger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey)
		if err != nil {
			logger.Error("failed-unclaiming-crash", err)
			return
		}
		logger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(logger, &actualKey)
	}
}
Code example #17
File: volume.go Project: xoebus/checkin
func newVolume(logger lager.Logger, bcVol baggageclaim.Volume, clock clock.Clock, db VolumeFactoryDB) (Volume, bool, error) {
	vol := &volume{
		Volume: bcVol,
		db:     db,

		heartbeating: new(sync.WaitGroup),
		release:      make(chan *time.Duration, 1),
	}

	ttl, found, err := vol.db.GetVolumeTTL(vol.Handle())
	if err != nil {
		logger.Error("failed-to-lookup-expiration-of-volume", err)
		return nil, false, err
	}

	if !found {
		return nil, false, nil
	}

	vol.heartbeat(logger.Session("initial-heartbeat"), ttl)

	vol.heartbeating.Add(1)
	go vol.heartbeatContinuously(
		logger.Session("continuous-heartbeat"),
		clock.NewTicker(volumeKeepalive),
		ttl,
	)

	return vol, true, nil
}
Code example #18
File: uploader.go Project: snowsnail/executor
func New(timeout time.Duration, skipSSLVerification bool, logger lager.Logger) Uploader {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   10 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: skipSSLVerification,
			MinVersion:         tls.VersionTLS10,
		},
	}

	httpClient := &http.Client{
		Transport: transport,
		Timeout:   timeout,
	}

	return &URLUploader{
		httpClient: httpClient,
		transport:  transport,
		logger:     logger.Session("URLUploader"),
	}
}
Code example #19
func New(natsClient diegonats.NATSClient, workPool *workpool.WorkPool, logger lager.Logger) NATSEmitter {
	return &natsEmitter{
		natsClient: natsClient,
		workPool:   workPool,
		logger:     logger.Session("nats-emitter"),
	}
}
Code example #20
File: configure.go Project: nagyistoce/garden-linux
func (c *NetworkConfigurer) configureHostIntf(log lager.Logger, intf *net.Interface, bridge *net.Interface, mtu int) error {
	log = log.Session("host-interface", lager.Data{
		"bridge-interface": bridge,
		"host-interface":   intf,
	})

	log.Debug("set-mtu")
	if err := c.Link.SetMTU(intf, mtu); err != nil {
		log.Error("set-mtu", err)
		return &MTUError{err, intf, mtu}
	}

	log.Debug("add-to-bridge")
	if err := c.Bridge.Add(bridge, intf); err != nil {
		log.Error("add-to-bridge", err)
		return &AddToBridgeError{err, bridge, intf}
	}

	log.Debug("bring-link-up")
	if err := c.Link.SetUp(intf); err != nil {
		log.Error("bring-link-up", err)
		return &LinkUpError{err, intf, "host"}
	}

	return nil
}
Code example #21
File: actual_lrp_handlers.go Project: rowhit/lattice
func NewActualLRPHandler(bbs bbs.Client, legacyBBS Bbs.ReceptorBBS, logger lager.Logger) *ActualLRPHandler {
	return &ActualLRPHandler{
		bbs:       bbs,
		legacyBBS: legacyBBS,
		logger:    logger.Session("actual-lrp-handler"),
	}
}
Code example #22
File: poller.go Project: emc-xchallenge/cc-uploader
func NewPoller(logger lager.Logger, httpClient *http.Client, pollInterval time.Duration) Poller {
	return &poller{
		client:       httpClient,
		pollInterval: pollInterval,
		logger:       logger.Session("poller"),
	}
}
Code example #23
File: proxy.go Project: benjaminharnett/diego-ssh
func ProxyRequests(logger lager.Logger, channelType string, reqs <-chan *ssh.Request, channel ssh.Channel) {
	logger = logger.Session("proxy-requests", lager.Data{
		"channel-type": channelType,
	})

	logger.Info("started")
	defer logger.Info("completed")
	defer channel.Close()

	for req := range reqs {
		logger.Info("request", lager.Data{
			"type":      req.Type,
			"wantReply": req.WantReply,
			"payload":   req.Payload,
		})
		success, err := channel.SendRequest(req.Type, req.WantReply, req.Payload)
		if err != nil {
			logger.Error("send-request-failed", err)
			continue
		}

		if req.WantReply {
			req.Reply(success, nil)
		}
	}
}
Code example #24
File: bulker.go Project: jiangytcn/rep
func (b *Bulker) sync(logger lager.Logger) {
	logger = logger.Session("sync")

	logger.Info("starting")
	defer logger.Info("finished")

	startTime := b.clock.Now()

	ops, batchError := b.generator.BatchOperations(logger)

	endTime := b.clock.Now()

	sendError := repBulkSyncDuration.Send(endTime.Sub(startTime))
	if sendError != nil {
		logger.Error("failed-to-send-rep-bulk-sync-duration-metric", sendError)
	}

	if batchError != nil {
		logger.Error("failed-to-generate-operations", batchError)
		return
	}

	for _, operation := range ops {
		b.queue.Push(operation)
	}
}
Code example #25
File: cell_handlers.go Project: timani/bbs
func NewCellHandler(logger lager.Logger, serviceClient bbs.ServiceClient, exitChan chan<- struct{}) *CellHandler {
	return &CellHandler{
		logger:        logger.Session("cell-handler"),
		serviceClient: serviceClient,
		exitChan:      exitChan,
	}
}
Code example #26
File: starter.go Project: digideskio/guardian
func (s *CgroupStarter) mountCgroup(log lager.Logger, cgroupPath, cgroupType string) error {
	log = log.Session("setup-cgroup", lager.Data{
		"path": cgroupPath,
		"type": cgroupType,
	})

	log.Info("started")
	defer log.Info("finished")

	if !s.isMountPoint(cgroupPath) {
		if err := os.MkdirAll(cgroupPath, 0755); err != nil {
			log.Error("mkdir-failed", err)
			return err
		}

		cmd := exec.Command("mount", "-n", "-t", "cgroup", "-o", cgroupType, "cgroup", cgroupPath)
		cmd.Stderr = logging.Writer(log.Session("mount-cgroup-cmd"))
		if err := s.CommandRunner.Run(cmd); err != nil {
			log.Error("mount-cgroup-failed", err)
			return err
		}
	}

	return nil
}
Code example #27
File: scheduler.go Project: pcfdev-forks/atc
func (s *Scheduler) TriggerImmediately(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) (db.Build, Waiter, error) {
	logger = logger.Session("trigger-immediately")

	build, err := s.PipelineDB.CreateJobBuild(job.Name)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return db.Build{}, nil, err
	}

	jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
	if err != nil {
		return db.Build{}, nil, err
	}

	wg := new(sync.WaitGroup)
	wg.Add(1)

	// do not block request on scanning input versions
	go func() {
		defer wg.Done()
		s.ScheduleAndResumePendingBuild(logger, nil, build, job, resources, resourceTypes, jobService)
	}()

	return build, wg, nil
}
Code example #28
File: actual_lrp_db.go Project: timani/bbs
func (db *ETCDDB) createActualLRP(logger lager.Logger, desiredLRP *models.DesiredLRP, index int32) error {
	logger = logger.Session("create-actual-lrp")
	var err error
	if index >= desiredLRP.Instances {
		err = models.NewError(models.Error_InvalidRecord, "Index too large")
		logger.Error("actual-lrp-index-too-large", err, lager.Data{"actual_index": index, "desired_instances": desiredLRP.Instances})
		return err
	}

	guid, err := uuid.NewV4()
	if err != nil {
		return err
	}

	actualLRP := &models.ActualLRP{
		ActualLRPKey: models.NewActualLRPKey(
			desiredLRP.ProcessGuid,
			index,
			desiredLRP.Domain,
		),
		State: models.ActualLRPStateUnclaimed,
		Since: db.clock.Now().UnixNano(),
		ModificationTag: models.ModificationTag{
			Epoch: guid.String(),
			Index: 0,
		},
	}

	err = db.createRawActualLRP(logger, actualLRP)
	if err != nil {
		return err
	}

	return nil
}
Code example #29
File: tracker.go Project: ACPK/atc
func (tracker *tracker) Init(logger lager.Logger, metadata Metadata, session Session, typ ResourceType, tags atc.Tags) (Resource, error) {
	logger = logger.Session("init")

	logger.Debug("start")
	defer logger.Debug("done")

	container, found, err := tracker.workerClient.FindContainerForIdentifier(logger, session.ID)
	if err != nil {
		logger.Error("failed-to-look-for-existing-container", err)
		return nil, err
	}

	if found {
		logger.Debug("found-existing-container", lager.Data{"container": container.Handle()})
		return NewResource(container), nil
	}

	logger.Debug("creating-container")

	container, err = tracker.workerClient.CreateContainer(logger, session.ID, worker.ResourceTypeContainerSpec{
		Type:      string(typ),
		Ephemeral: session.Ephemeral,
		Tags:      tags,
		Env:       metadata.Env(),
	})
	if err != nil {
		return nil, err
	}

	logger.Info("created", lager.Data{"container": container.Handle()})

	return NewResource(container), nil
}
Code example #30
func (delegate *delegate) Finish(logger lager.Logger, err error, succeeded exec.Success, aborted bool) {
	if aborted {
		delegate.saveStatus(logger, atc.StatusAborted)

		logger.Info("aborted")
	} else if err != nil {
		delegate.saveStatus(logger, atc.StatusErrored)

		logger.Info("errored", lager.Data{"error": err.Error()})
	} else if bool(succeeded) {
		delegate.saveStatus(logger, atc.StatusSucceeded)

		implicits := logger.Session("implicit-outputs")

		for _, o := range delegate.implicitOutputs {
			delegate.saveImplicitOutput(implicits.Session(o.plan.Name), o.plan, o.info)
		}

		logger.Info("succeeded")
	} else {
		delegate.saveStatus(logger, atc.StatusFailed)

		logger.Info("failed")
	}
}