func (v *volume) heartbeatContinuously(logger lager.Logger, pacemaker clock.Ticker, initialTTL time.Duration) {
	defer v.heartbeating.Done()
	defer pacemaker.Stop()

	logger.Debug("start")
	defer logger.Debug("done")

	ttlToSet := initialTTL

	for {
		select {
		case <-pacemaker.C():
			ttl, found, err := v.db.GetVolumeTTL(v.Handle())
			if err != nil {
				logger.Error("failed-to-lookup-ttl", err)
			} else {
				if !found {
					logger.Info("volume-expired-from-database")
					return
				}

				ttlToSet = ttl
			}

			v.heartbeat(logger.Session("tick"), ttlToSet)

		case finalTTL := <-v.release:
			if finalTTL != nil {
				v.heartbeat(logger.Session("final"), *finalTTL)
			}

			return
		}
	}
}
func (c *checker) initialDestroy(logger lager.Logger, containers []garden.Container) error {
	logger = logger.Session("initial-destroy")
	logger.Debug("starting", lager.Data{"numContainers": len(containers)})
	defer logger.Debug("finished")

	for i := range containers {
		err := retryOnFail(c.retryInterval, func(attempt uint) (destroyErr error) {
			handle := containers[i].Handle()

			destroyErr = c.gardenClient.Destroy(handle)
			if destroyErr != nil {
				if destroyErr.Error() == server.ErrConcurrentDestroy.Error() {
					// Log but don't fail if container is already being destroyed
					logger.Debug("already-being-destroyed", lager.Data{"handle": handle})
					return nil
				}

				logger.Error("failed", destroyErr, lager.Data{"handle": handle, "attempt": attempt})
				return destroyErr
			}

			logger.Debug("succeeded", lager.Data{"handle": handle, "attempt": attempt})
			return nil
		})
		if err != nil {
			return err
		}
	}

	logger.Debug("succeeded")
	return nil
}
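// retryOnFail is called above but not defined in this listing. The sketch below
// is a hypothetical implementation, assuming a fixed attempt budget and a sleep
// between attempts; the name, signature, and retry count are inferred from the
// call site in initialDestroy, not taken from the actual helper.
func retryOnFail(retryInterval time.Duration, callback func(attempt uint) error) error {
	const maxAttempts = 3 // assumed attempt budget for illustration

	var err error
	for attempt := uint(1); attempt <= maxAttempts; attempt++ {
		err = callback(attempt)
		if err == nil {
			return nil
		}

		// Wait before the next attempt.
		time.Sleep(retryInterval)
	}

	return err
}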
func emit(logger lager.Logger, event goryman.Event) {
	logger.Debug("emit")

	if riemannClient == nil {
		return
	}

	event.Host = eventHost
	event.Time = time.Now().Unix()
	event.Tags = append(event.Tags, eventTags...)

	mergedAttributes := map[string]string{}
	for k, v := range eventAttributes {
		mergedAttributes[k] = v
	}

	if event.Attributes != nil {
		for k, v := range event.Attributes {
			mergedAttributes[k] = v
		}
	}

	event.Attributes = mergedAttributes

	select {
	case emissions <- eventEmission{logger: logger, event: event}:
	default:
		logger.Error("queue-full", nil)
	}
}
func (cc *ccClient) AppCrashed(guid string, appCrashed cc_messages.AppCrashedRequest, logger lager.Logger) error {
	logger = logger.Session("cc-client")
	logger.Debug("delivering-app-crashed-response", lager.Data{"app_crashed": appCrashed})

	payload, err := json.Marshal(appCrashed)
	if err != nil {
		return err
	}

	request, err := http.NewRequest("POST", fmt.Sprintf(cc.ccURI, guid), bytes.NewReader(payload))
	if err != nil {
		return err
	}

	request.SetBasicAuth(cc.username, cc.password)
	request.Header.Set("content-type", "application/json")

	response, err := cc.httpClient.Do(request)
	if err != nil {
		logger.Error("deliver-app-crashed-response-failed", err)
		return err
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		return &BadResponseError{response.StatusCode}
	}

	logger.Debug("delivered-app-crashed-response")
	return nil
}
func (db *ETCDDB) SetEncryptionKeyLabel(logger lager.Logger, keyLabel string) error {
	logger.Debug("set-encryption-key-label", lager.Data{"encryption-key-label": keyLabel})
	defer logger.Debug("set-encryption-key-label-finished")

	_, err := db.client.Set(EncryptionKeyLabelKey, []byte(keyLabel), NO_TTL)
	return err
}
func (a *AllocationStore) Initialize(logger lager.Logger, req *executor.RunRequest) error {
	a.lock.Lock()
	defer a.lock.Unlock()

	container, err := a.lookup(req.Guid)
	if err != nil {
		logger.Error("failed-initializing-container", err)
		return err
	}
	logger.Debug("initializing-container", lager.Data{"guid": req.Guid})

	if container.State != executor.StateReserved {
		logger.Error(
			"failed-initializing-container",
			executor.ErrInvalidTransition,
			lager.Data{
				"current_state":  container.State,
				"expected_state": executor.StateReserved,
			},
		)
		return executor.ErrInvalidTransition
	}

	container.State = executor.StateInitializing
	container.RunInfo = req.RunInfo
	container.Tags.Add(req.Tags)
	a.allocated[container.Guid] = container

	return nil
}
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		actualKey := actualLRP.ActualLRPKey

		logger = logger.Session("restart-crash", lager.Data{
			"process_guid": actualKey.ProcessGuid,
			"index":        actualKey.Index,
		})

		if actualLRP.State != models.ActualLRPStateCrashed {
			logger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		logger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		_, err := db.unclaimActualLRP(logger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey)
		if err != nil {
			logger.Error("failed-unclaiming-crash", err)
			return
		}
		logger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(logger, &actualKey)
	}
}
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
func (db *ETCDDB) Tasks(logger lager.Logger, taskFilter db.TaskFilter) (*models.Tasks, *models.Error) {
	root, bbsErr := db.fetchRecursiveRaw(logger, TaskSchemaRoot)
	if bbsErr.Equal(models.ErrResourceNotFound) {
		return &models.Tasks{}, nil
	}
	if bbsErr != nil {
		return nil, bbsErr
	}
	if root.Nodes.Len() == 0 {
		return &models.Tasks{}, nil
	}

	tasks := models.Tasks{}

	for _, node := range root.Nodes {
		node := node

		var task models.Task
		deserializeErr := models.FromJSON([]byte(node.Value), &task)
		if deserializeErr != nil {
			logger.Error("failed-parsing-task", deserializeErr, lager.Data{"key": node.Key})
			return nil, models.ErrUnknownError
		}

		if taskFilter == nil || taskFilter(&task) {
			tasks.Tasks = append(tasks.Tasks, &task)
		}
	}

	logger.Debug("succeeded-performing-deserialization", lager.Data{"num-tasks": len(tasks.GetTasks())})

	return &tasks, nil
}
func (u *updater) applyCachedEvents(logger lager.Logger) {
	logger.Debug("applying-cached-events")
	defer logger.Debug("applied-cached-events")

	for _, e := range u.cachedEvents {
		u.handleEvent(e)
	}
}
func (db *SQLDB) Domains(logger lager.Logger) ([]string, error) {
	logger = logger.Session("domains-sqldb")
	logger.Debug("starting")
	defer logger.Debug("complete")

	expireTime := db.clock.Now().Round(time.Second).UnixNano()

	rows, err := db.all(logger, db.db, domainsTable,
		domainColumns, NoLockRow,
		"expire_time > ?", expireTime,
	)
	if err != nil {
		logger.Error("failed-query", err)
		return nil, db.convertSQLError(err)
	}
	defer rows.Close()

	var domain string
	var results []string
	for rows.Next() {
		err = rows.Scan(&domain)
		if err != nil {
			logger.Error("failed-scan-row", err)
			return nil, db.convertSQLError(err)
		}
		results = append(results, domain)
	}

	// Report the iteration error itself rather than the (possibly nil) last scan error.
	if err := rows.Err(); err != nil {
		logger.Error("failed-fetching-row", err)
		return nil, db.convertSQLError(err)
	}

	return results, nil
}
func (p *LinuxResourcePool) setupRootfs(spec garden.ContainerSpec, id string, resources *linux_backend.Resources, pLog lager.Logger) (string, process.Env, error) {
	rootFSURL, err := url.Parse(spec.RootFSPath)
	if err != nil {
		pLog.Error("parse-rootfs-path-failed", err, lager.Data{
			"RootFSPath": spec.RootFSPath,
		})
		return "", nil, err
	}

	rootFSSpec := rootfs_provider.Spec{
		RootFS:     rootFSURL,
		Namespaced: resources.RootUID != 0,
		QuotaSize:  int64(spec.Limits.Disk.ByteHard),
		QuotaScope: rootfs_provider.QuotaScope(spec.Limits.Disk.Scope),
	}

	pLog.Debug("provide-rootfs-starting")
	rootFSPath, rootFSEnvVars, err := p.rootFSProvider.Create(pLog, id, rootFSSpec)
	if err != nil {
		pLog.Error("provide-rootfs-failed", err)
		return "", nil, err
	}
	pLog.Debug("provide-rootfs-ended")

	rootFSProcessEnv, err := process.NewEnv(rootFSEnvVars)
	if err != nil {
		pLog.Error("rootfs-env-malformed", err)
		return "", nil, err
	}

	return rootFSPath, rootFSProcessEnv, nil
}
func (h *DesireAppHandler) createDesiredApp(
	logger lager.Logger,
	desireAppMessage cc_messages.DesireAppRequestFromCC,
) error {
	var builder recipebuilder.RecipeBuilder = h.recipeBuilders["buildpack"]
	if desireAppMessage.DockerImageUrl != "" {
		builder = h.recipeBuilders["docker"]
	}

	desiredLRP, err := builder.Build(&desireAppMessage)
	if err != nil {
		logger.Error("failed-to-build-recipe", err)
		return err
	}

	logger.Debug("creating-desired-lrp", lager.Data{"routes": sanitizeRoutes(desiredLRP.Routes)})
	err = h.bbsClient.DesireLRP(desiredLRP)
	if err != nil {
		logger.Error("failed-to-create-lrp", err)
		return err
	}
	logger.Debug("created-desired-lrp")

	return nil
}
func (db *ETCDDB) UnclaimActualLRP(logger lager.Logger, key *models.ActualLRPKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	actualLRP, modifiedIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		return nil, nil, bbsErr
	}
	beforeActualLRP := *actualLRP

	if actualLRP.State == models.ActualLRPStateUnclaimed {
		logger.Debug("already-unclaimed")
		return nil, nil, models.ErrActualLRPCannotBeUnclaimed
	}

	actualLRP.State = models.ActualLRPStateUnclaimed
	actualLRP.ActualLRPKey = *key
	actualLRP.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	actualLRP.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	actualLRP.Since = db.clock.Now().UnixNano()
	actualLRP.ModificationTag.Increment()

	data, err := db.serializeModel(logger, actualLRP)
	if err != nil {
		return nil, nil, err
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), data, 0, modifiedIndex)
	if err != nil {
		logger.Error("failed-compare-and-swap", err)
		return nil, nil, ErrorFromEtcdError(logger, err)
	}

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: actualLRP}, nil
}
func (watcher *Watcher) emitMessages(logger lager.Logger, messagesToEmit routing_table.MessagesToEmit) {
	if watcher.emitter != nil {
		logger.Debug("emitting-messages", lager.Data{"messages": messagesToEmit})
		watcher.emitter.Emit(messagesToEmit)
		routesRegistered.Add(messagesToEmit.RouteRegistrationCount())
		routesUnregistered.Add(messagesToEmit.RouteUnregistrationCount())
	}
}
func (db *ETCDDB) fetchRecursiveRaw(logger lager.Logger, key string) (*etcd.Node, error) {
	logger.Debug("fetching-recursive-from-etcd")
	response, err := db.client.Get(key, false, true)
	if err != nil {
		return nil, ErrorFromEtcdError(logger, err)
	}
	logger.Debug("succeeded-fetching-recursive-from-etcd", lager.Data{"num_nodes": response.Node.Nodes.Len()})
	return response.Node, nil
}
func (u *updater) toRoutingTableEntry(logger lager.Logger, routeMapping db.TcpRouteMapping) (models.RoutingKey, models.BackendServerInfo) {
	logger.Debug("converting-tcp-route-mapping", lager.Data{"tcp-route": routeMapping})

	routingKey := models.RoutingKey{Port: routeMapping.TcpRoute.ExternalPort}
	backendServerInfo := models.BackendServerInfo{
		Address: routeMapping.HostIP,
		Port:    routeMapping.HostPort,
	}
	return routingKey, backendServerInfo
}
func (v *volume) heartbeat(logger lager.Logger, ttl time.Duration) {
	logger.Debug("start")
	defer logger.Debug("done")

	err := v.SetTTL(ttl)
	if err != nil {
		logger.Error("failed-to-heartbeat-to-volume", err)
	}
}
func (db *ETCDDB) fetchRaw(logger lager.Logger, key string) (*etcd.Node, error) {
	logger.Debug("fetching-from-etcd")
	response, err := db.client.Get(key, false, false)
	if err != nil {
		return nil, ErrorFromEtcdError(logger, err)
	}
	logger.Debug("succeeded-fetching-from-etcd")
	return response.Node, nil
}
func (s *GardenServer) streamProcess(logger lager.Logger, conn net.Conn, process garden.Process, stdinPipe *io.PipeWriter, connCloseCh chan struct{}) {
	statusCh := make(chan int, 1)
	errCh := make(chan error, 1)

	go func() {
		status, err := process.Wait()
		if err != nil {
			logger.Error("wait-failed", err, lager.Data{
				"id": process.ID(),
			})
			errCh <- err
		} else {
			logger.Info("exited", lager.Data{
				"status": status,
				"id":     process.ID(),
			})
			statusCh <- status
		}
	}()

	for {
		select {
		case status := <-statusCh:
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID:  process.ID(),
				ExitStatus: &status,
			})
			stdinPipe.Close()
			return

		case err := <-errCh:
			e := err.Error()
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID: process.ID(),
				Error:     &e,
			})
			stdinPipe.Close()
			return

		case <-s.stopping:
			logger.Debug("detaching", lager.Data{
				"id": process.ID(),
			})
			return

		case <-connCloseCh:
			return
		}
	}
}
func setRequestXVcapRequestId(request *http.Request, logger lager.Logger) {
	uuid, err := common.GenerateUUID()
	if err == nil {
		request.Header.Set(router_http.VcapRequestIdHeader, uuid)
		if logger != nil {
			logger.Debug("vcap-request-id-header-set", lager.Data{router_http.VcapRequestIdHeader: uuid})
		}
	}
}
func (scanner *resourceScanner) ScanFromVersion(logger lager.Logger, resourceName string, fromVersion atc.Version) error {
	// if fromVersion is nil then force a check without specifying a version
	// otherwise specify fromVersion to underlying call to resource.Check()
	leaseLogger := logger.Session("lease", lager.Data{
		"resource": resourceName,
	})

	savedResource, found, err := scanner.db.GetResource(resourceName)
	if err != nil {
		return err
	}

	if !found {
		logger.Debug("resource-not-found")
		return db.ResourceNotFoundError{Name: resourceName}
	}

	resourceConfig, resourceTypes, err := scanner.getResourceConfig(logger, resourceName)
	if err != nil {
		return err
	}

	interval, err := scanner.checkInterval(resourceConfig)
	if err != nil {
		setErr := scanner.db.SetResourceCheckError(savedResource, err)
		if setErr != nil {
			logger.Error("failed-to-set-check-error", err)
		}
		return err
	}

	for {
		lease, leased, err := scanner.db.LeaseResourceChecking(logger, resourceName, interval, true)
		if err != nil {
			leaseLogger.Error("failed-to-get-lease", err, lager.Data{
				"resource": resourceName,
			})
			return err
		}

		if !leased {
			leaseLogger.Debug("did-not-get-lease")
			scanner.clock.Sleep(time.Second)
			continue
		}

		defer lease.Break()
		break
	}

	return scanner.scan(logger, resourceConfig, resourceTypes, savedResource, fromVersion)
}
func (db *SQLDB) DesiredLRPByProcessGuid(logger lager.Logger, processGuid string) (*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Debug("starting")
	defer logger.Debug("complete")

	row := db.one(logger, db.db, desiredLRPsTable,
		desiredLRPColumns, NoLockRow,
		"process_guid = ?", processGuid,
	)
	return db.fetchDesiredLRP(logger, row)
}
func (db *ETCDDB) CrashActualLRP(logger lager.Logger, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, bool, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "actual_lrp_instance_key": instanceKey})
	logger.Info("starting")

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, false, err
	}
	beforeActualLRP := *lrp

	latestChangeTime := time.Duration(db.clock.Now().UnixNano() - lrp.Since)

	var newCrashCount int32
	if latestChangeTime > models.CrashResetTimeout && lrp.State == models.ActualLRPStateRunning {
		newCrashCount = 1
	} else {
		newCrashCount = lrp.CrashCount + 1
	}

	logger.Debug("retrieved-lrp")
	if !lrp.AllowsTransitionTo(key, instanceKey, models.ActualLRPStateCrashed) {
		logger.Error("failed-to-transition-to-crashed", nil, lager.Data{"from_state": lrp.State, "same_instance_key": lrp.ActualLRPInstanceKey.Equal(instanceKey)})
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	lrp.State = models.ActualLRPStateCrashed
	lrp.Since = db.clock.Now().UnixNano()
	lrp.CrashCount = newCrashCount
	lrp.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	lrp.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	lrp.ModificationTag.Increment()
	lrp.CrashReason = errorMessage

	var immediateRestart bool
	if lrp.ShouldRestartImmediately(models.NewDefaultRestartCalculator()) {
		lrp.State = models.ActualLRPStateUnclaimed
		immediateRestart = true
	}

	lrpData, serializeErr := db.serializeModel(logger, lrp)
	if serializeErr != nil {
		return nil, nil, false, serializeErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, immediateRestart, nil
}
func (db *ETCDDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) (*models.DesiredLRPs, *models.Error) {
	root, bbsErr := db.fetchRecursiveRaw(logger, DesiredLRPSchemaRoot)
	if bbsErr.Equal(models.ErrResourceNotFound) {
		return &models.DesiredLRPs{}, nil
	}
	if bbsErr != nil {
		return nil, bbsErr
	}
	if root.Nodes.Len() == 0 {
		return &models.DesiredLRPs{}, nil
	}

	desiredLRPs := models.DesiredLRPs{}

	lrpsLock := sync.Mutex{}
	var workErr atomic.Value
	works := []func(){}

	for _, node := range root.Nodes {
		node := node

		works = append(works, func() {
			var lrp models.DesiredLRP
			deserializeErr := models.FromJSON([]byte(node.Value), &lrp)
			if deserializeErr != nil {
				logger.Error("failed-parsing-desired-lrp", deserializeErr)
				workErr.Store(fmt.Errorf("cannot parse lrp JSON for key %s: %s", node.Key, deserializeErr.Error()))
				return
			}

			if filter.Domain == "" || lrp.GetDomain() == filter.Domain {
				lrpsLock.Lock()
				desiredLRPs.DesiredLrps = append(desiredLRPs.DesiredLrps, &lrp)
				lrpsLock.Unlock()
			}
		})
	}

	throttler, err := workpool.NewThrottler(maxDesiredLRPGetterWorkPoolSize, works)
	if err != nil {
		logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxDesiredLRPGetterWorkPoolSize, "num-works": len(works)})
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}

	logger.Debug("performing-deserialization-work")
	throttler.Work()
	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-desired-lrps": len(desiredLRPs.GetDesiredLrps())})

	return &desiredLRPs, nil
}
func (db *ETCDDB) EncryptionKeyLabel(logger lager.Logger) (string, error) {
	logger.Debug("get-encryption-key-label")
	defer logger.Debug("get-encryption-key-label-finished")

	node, err := db.fetchRaw(logger, EncryptionKeyLabelKey)
	if err != nil {
		return "", err
	}

	return node.Value, nil
}
func (c *NetworkConfigurer) configureVethPair(log lager.Logger, hostName, containerName string) (*net.Interface, *net.Interface, error) {
	log = log.Session("veth")
	log.Debug("create")

	if host, container, err := c.Veth.Create(hostName, containerName); err != nil {
		log.Error("create", err)
		return nil, nil, &VethPairCreationError{err, hostName, containerName}
	} else {
		return host, container, err
	}
}
func (db *ETCDDB) unclaimActualLRPWithIndex(
	logger lager.Logger,
	lrp *models.ActualLRP,
	storeIndex uint64,
	actualLRPKey *models.ActualLRPKey,
	actualLRPInstanceKey *models.ActualLRPInstanceKey,
) (change stateChange, err error) {
	logger = logger.Session("unclaim-actual-lrp-with-index")
	defer func() {
		logger.Debug("complete", lager.Data{"stateChange": change, "error": err})
	}()

	if !lrp.ActualLRPKey.Equal(actualLRPKey) {
		logger.Error("failed-actual-lrp-key-differs", models.ErrActualLRPCannotBeUnclaimed)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	if lrp.State == models.ActualLRPStateUnclaimed {
		logger.Info("already-unclaimed")
		return stateDidNotChange, nil
	}

	if !lrp.ActualLRPInstanceKey.Equal(actualLRPInstanceKey) {
		logger.Error("failed-actual-lrp-instance-key-differs", models.ErrActualLRPCannotBeUnclaimed)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	lrp.Since = db.clock.Now().UnixNano()
	lrp.State = models.ActualLRPStateUnclaimed
	lrp.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	lrp.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	lrp.ModificationTag.Increment()

	err = lrp.Validate()
	if err != nil {
		logger.Error("failed-to-validate-unclaimed-lrp", err)
		return stateDidNotChange, models.NewError(models.Error_InvalidRecord, err.Error())
	}

	lrpData, serialErr := db.serializeModel(logger, lrp)
	if serialErr != nil {
		logger.Error("failed-to-marshal-unclaimed-lrp", serialErr)
		return stateDidNotChange, serialErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(actualLRPKey.ProcessGuid, actualLRPKey.Index), lrpData, 0, storeIndex)
	if err != nil {
		logger.Error("failed-to-compare-and-swap", err)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	logger.Debug("changed-to-unclaimed")
	return stateDidChange, nil
}
func (a *AllocationStore) RegistryPruner(logger lager.Logger, expirationTime time.Duration) ifrit.Runner {
	logger = logger.Session("allocation-store-pruner")

	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		ticker := a.clock.NewTicker(expirationTime / 2)
		defer ticker.Stop()

		close(ready)

		for {
			select {
			case <-signals:
				logger.Info("exiting-pruning-loop")
				return nil

			case <-ticker.C():
				logger.Debug("checking-for-expired-containers")
				expiredAllocations := []string{}

				a.lock.Lock()

				for guid, container := range a.allocated {
					if container.State != executor.StateReserved {
						// only prune reserved containers
						continue
					}

					lifespan := a.clock.Now().Sub(time.Unix(0, container.AllocatedAt))
					if lifespan >= expirationTime {
						logger.Info("reserved-container-expired", lager.Data{"guid": guid, "lifespan": lifespan})
						expiredAllocations = append(expiredAllocations, guid)
					}
				}

				if len(expiredAllocations) > 0 {
					logger.Info("reaping-expired-allocations", lager.Data{"num-reaped": len(expiredAllocations)})
				} else {
					logger.Info("no-expired-allocations-found")
				}

				for _, guid := range expiredAllocations {
					logger.Info("deleting-expired-container", lager.Data{"guid": guid})
					delete(a.allocated, guid)
				}

				a.lock.Unlock()
			}
		}
	})
}
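// A minimal usage sketch for the pruner above: run the returned ifrit.Runner
// with ifrit.Invoke, then signal and wait for it on shutdown. The store and
// logger values and the one-minute expiration are illustrative assumptions,
// not taken from the original code.
func runRegistryPrunerExample(logger lager.Logger, store *AllocationStore) {
	pruner := ifrit.Invoke(store.RegistryPruner(logger, time.Minute))

	// ... do other work while the pruner reaps expired reservations in the background ...

	// Ask the pruner to exit and wait for it to finish.
	pruner.Signal(os.Interrupt)
	if err := <-pruner.Wait(); err != nil {
		logger.Error("pruner-exited-with-error", err)
	}
}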
func (db *ETCDDB) SetVersion(logger lager.Logger, version *models.Version) error {
	logger.Debug("set-version", lager.Data{"version": version})
	defer logger.Debug("set-version-finished")

	value, err := json.Marshal(version)
	if err != nil {
		return err
	}

	_, err = db.client.Set(VersionKey, value, NO_TTL)
	return err
}