func NewMetrics(logger lager.Logger, backingStoresPath, depotPath string) Metrics {
	return &metrics{
		backingStoresPath: backingStoresPath,
		depotPath:         depotPath,
		logger:            logger.Session("metrics"),
	}
}
func (db *SQLDB) DeleteTask(logger lager.Logger, taskGuid string) error {
	logger = logger.Session("delete-task", lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	return db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		task, err := db.fetchTaskForUpdate(logger, taskGuid, tx)
		if err != nil {
			logger.Error("failed-locking-task", err)
			return err
		}

		if task.State != models.Task_Resolving {
			err = models.NewTaskTransitionError(task.State, models.Task_Resolving)
			logger.Error("invalid-state-transition", err)
			return err
		}

		_, err = db.delete(logger, tx, tasksTable, "guid = ?", taskGuid)
		if err != nil {
			logger.Error("failed-deleting-task", err)
			return db.convertSQLError(err)
		}
		return nil
	})
}
func (h *TaskHandler) commonTaskByGuid(logger lager.Logger, w http.ResponseWriter, req *http.Request, version format.Version) {
	var err error
	logger = logger.Session("task-by-guid", lager.Data{"revision": 0})

	request := &models.TaskByGuidRequest{}
	response := &models.TaskResponse{}
	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		logger.Error("failed-parsing-request", err)
		response.Error = models.ConvertError(err)
		return
	}

	response.Task, err = h.controller.TaskByGuid(logger, request.TaskGuid)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	if response.Task.TaskDefinition != nil {
		response.Task = response.Task.VersionDownTo(version)
	}
}
func (r *remoteClient) Unmount(logger lager.Logger, driverId string, volumeId string) error {
	logger = logger.Session("unmount")
	logger.Info("start")
	defer logger.Info("end")

	unmountRequest := volman.UnmountRequest{DriverId: driverId, VolumeId: volumeId}
	payload, err := json.Marshal(unmountRequest)
	if err != nil {
		return r.clientError(logger, err, fmt.Sprintf("Error marshalling JSON request %#v", unmountRequest))
	}

	request, err := r.reqGen.CreateRequest(volman.UnmountRoute, nil, bytes.NewReader(payload))
	if err != nil {
		return r.clientError(logger, err, fmt.Sprintf("Error creating request to %s", volman.UnmountRoute))
	}

	response, err := r.HttpClient.Do(request)
	if err != nil {
		return r.clientError(logger, err, fmt.Sprintf("Error unmounting volume %s", volumeId))
	}

	if response.StatusCode == 500 {
		var remoteError volman.Error
		if err := unmarshallJSON(logger, response.Body, &remoteError); err != nil {
			return r.clientError(logger, err, fmt.Sprintf("Error parsing 500 response from %s", volman.UnmountRoute))
		}
		return remoteError
	}

	return nil
}
func (db *SQLDB) CompleteTask(logger lager.Logger, taskGuid, cellID string, failed bool, failureReason, taskResult string) (*models.Task, error) {
	logger = logger.Session("complete-task", lager.Data{"task_guid": taskGuid, "cell_id": cellID})
	logger.Info("starting")
	defer logger.Info("complete")

	var task *models.Task

	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		var err error
		task, err = db.fetchTaskForUpdate(logger, taskGuid, tx)
		if err != nil {
			logger.Error("failed-locking-task", err)
			return err
		}

		if task.CellId != cellID && task.State == models.Task_Running {
			err = models.NewRunningOnDifferentCellError(cellID, task.CellId)
			logger.Error("failed-task-already-running-on-different-cell", err)
			return err
		}

		if err = task.ValidateTransitionTo(models.Task_Completed); err != nil {
			logger.Error("failed-to-transition-task-to-completed", err)
			return err
		}

		return db.completeTask(logger, task, failed, failureReason, taskResult, tx)
	})

	return task, err
}
func (db *SQLDB) failExpiredPendingTasks(logger lager.Logger, expirePendingTaskDuration time.Duration) int64 {
	logger = logger.Session("fail-expired-pending-tasks")

	now := db.clock.Now()

	result, err := db.update(logger, db.db, tasksTable,
		SQLAttributes{
			"failed":             true,
			"failure_reason":     "not started within time limit",
			"result":             "",
			"state":              models.Task_Completed,
			"first_completed_at": now.UnixNano(),
			"updated_at":         now.UnixNano(),
		},
		"state = ? AND created_at < ?", models.Task_Pending, now.Add(-expirePendingTaskDuration).UnixNano())
	if err != nil {
		logger.Error("failed-query", err)
		return 0
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		logger.Error("failed-rows-affected", err)
		return 0
	}

	return rowsAffected
}
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		actualKey := actualLRP.ActualLRPKey

		logger = logger.Session("restart-crash", lager.Data{
			"process_guid": actualKey.ProcessGuid,
			"index":        actualKey.Index,
		})

		if actualLRP.State != models.ActualLRPStateCrashed {
			logger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		logger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		_, err := db.unclaimActualLRP(logger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey)
		if err != nil {
			logger.Error("failed-unclaiming-crash", err)
			return
		}
		logger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(logger, &actualKey)
	}
}
func (h *ActualLRPLifecycleHandler) StartActualLRP(logger lager.Logger, w http.ResponseWriter, req *http.Request) {
	var err error
	logger = logger.Session("start-actual-lrp")

	request := &models.StartActualLRPRequest{}
	response := &models.ActualLRPLifecycleResponse{}
	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	before, after, err := h.db.StartActualLRP(logger, request.ActualLrpKey, request.ActualLrpInstanceKey, request.ActualLrpNetInfo)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	if before == nil {
		go h.actualHub.Emit(models.NewActualLRPCreatedEvent(after))
	} else if !before.Equal(after) {
		go h.actualHub.Emit(models.NewActualLRPChangedEvent(before, after))
	}
}
// Destroy deletes the container and the bundle directory
func (c *Containerizer) Destroy(log lager.Logger, handle string) error {
	log = log.Session("destroy", lager.Data{"handle": handle})
	log.Info("started")
	defer log.Info("finished")

	state, err := c.runtime.State(log, handle)
	if err != nil {
		log.Info("state-failed-skipping-delete", lager.Data{"error": err.Error()})
		return nil
	}

	log.Info("state", lager.Data{
		"state": state,
	})

	if state.Status == runrunc.CreatedStatus || state.Status == runrunc.StoppedStatus {
		if err := c.runtime.Delete(log, handle); err != nil {
			log.Error("delete-failed", err)
			return err
		}
	}

	return nil
}
func (h *ActualLRPLifecycleHandler) RemoveActualLRP(logger lager.Logger, w http.ResponseWriter, req *http.Request) {
	var err error
	logger = logger.Session("remove-actual-lrp")

	request := &models.RemoveActualLRPRequest{}
	response := &models.ActualLRPLifecycleResponse{}
	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	beforeActualLRPGroup, err := h.db.ActualLRPGroupByProcessGuidAndIndex(logger, request.ProcessGuid, request.Index)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	err = h.db.RemoveActualLRP(logger, request.ProcessGuid, request.Index, request.ActualLrpInstanceKey)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	go h.actualHub.Emit(models.NewActualLRPRemovedEvent(beforeActualLRPGroup))
}
func (h *ActualLRPLifecycleHandler) ClaimActualLRP(logger lager.Logger, w http.ResponseWriter, req *http.Request) {
	var err error
	logger = logger.Session("claim-actual-lrp")

	request := &models.ClaimActualLRPRequest{}
	response := &models.ActualLRPLifecycleResponse{}
	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	before, after, err := h.db.ClaimActualLRP(logger, request.ProcessGuid, request.Index, request.ActualLrpInstanceKey)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	if !after.Equal(before) {
		go h.actualHub.Emit(models.NewActualLRPChangedEvent(before, after))
	}
}
func (p *ExternalImageManager) Metrics(log lager.Logger, _, rootfs string) (garden.ContainerDiskStat, error) {
	log = log.Session("image-plugin-metrics", lager.Data{"rootfs": rootfs})
	log.Debug("start")
	defer log.Debug("end")

	imagePath := filepath.Dir(rootfs)
	cmd := exec.Command(p.binPath, "stats", imagePath)
	cmd.Stderr = lagregator.NewRelogger(log)
	outBuffer := bytes.NewBuffer([]byte{})
	cmd.Stdout = outBuffer

	if err := p.commandRunner.Run(cmd); err != nil {
		// outBuffer holds the plugin's stdout; stderr is relogged above.
		logData := lager.Data{"action": "stats", "stdout": outBuffer.String()}
		log.Error("external-image-manager-result", err, logData)
		return garden.ContainerDiskStat{}, fmt.Errorf("external image manager metrics failed: %s (%s)", outBuffer.String(), err)
	}

	var metrics map[string]map[string]uint64
	if err := json.NewDecoder(outBuffer).Decode(&metrics); err != nil {
		return garden.ContainerDiskStat{}, fmt.Errorf("parsing metrics: %s", err)
	}

	return garden.ContainerDiskStat{
		TotalBytesUsed:     metrics["disk_usage"]["total_bytes_used"],
		ExclusiveBytesUsed: metrics["disk_usage"]["exclusive_bytes_used"],
	}, nil
}
func (a *AUFSDiffSizer) DiffSize(logger lager.Logger, containerRootFSPath string) (uint64, error) {
	_, err := os.Stat(containerRootFSPath)
	if os.IsNotExist(err) {
		return 0, fmt.Errorf("get usage: %s", err)
	}

	log := logger.Session("diff-size", lager.Data{"path": containerRootFSPath})
	log.Debug("start")

	command := fmt.Sprintf("df -B 1 %s | tail -n1 | awk -v N=3 '{print $N}'", a.AUFSDiffPathFinder.GetDiffLayerPath(containerRootFSPath))
	outbytes, err := exec.Command("sh", "-c", command).CombinedOutput()
	if err != nil {
		log.Error("df-failed", err)
		return 0, fmt.Errorf("get usage: df: %s, %s", err, string(outbytes))
	}

	var bytesUsed uint64
	if _, err := fmt.Sscanf(string(outbytes), "%d", &bytesUsed); err != nil {
		log.Error("scanf-failed", err, lager.Data{"out": string(outbytes)})
		return 0, nil
	}

	log.Debug("finished", lager.Data{"bytes": bytesUsed})
	return bytesUsed, nil
}
func (s *CgroupStarter) mountCgroup(logger lager.Logger, cgroupPath, subsystems string) error {
	logger = logger.Session("mount-cgroup", lager.Data{
		"path":       cgroupPath,
		"subsystems": subsystems,
	})
	logger.Info("started")

	if !s.isMountPoint(cgroupPath) {
		if err := os.MkdirAll(cgroupPath, 0755); err != nil {
			return fmt.Errorf("mkdir '%s': %s", cgroupPath, err)
		}

		cmd := exec.Command("mount", "-n", "-t", "cgroup", "-o", subsystems, "cgroup", cgroupPath)
		cmd.Stderr = logging.Writer(logger.Session("mount-cgroup-cmd"))
		if err := s.CommandRunner.Run(cmd); err != nil {
			return fmt.Errorf("mounting subsystems '%s' in '%s': %s", subsystems, cgroupPath, err)
		}
	} else {
		logger.Info("subsystems-already-mounted")
	}

	logger.Info("finished")
	return nil
}
func (e *IncreaseRunInfoColumnSize) Up(logger lager.Logger) error {
	logger = logger.Session("increase-run-info-column")
	logger.Info("starting")
	defer logger.Info("completed")

	return alterTables(logger, e.rawSQLDB, e.dbFlavor)
}
// Create creates a bundle in the depot and starts its init process
func (c *Containerizer) Create(log lager.Logger, spec gardener.DesiredContainerSpec) error {
	log = log.Session("containerizer-create", lager.Data{"handle": spec.Handle})
	log.Info("start")
	defer log.Info("finished")

	if err := c.depot.Create(log, spec.Handle, c.bundler.Generate(spec)); err != nil {
		log.Error("depot-create-failed", err)
		return err
	}

	path, err := c.depot.Lookup(log, spec.Handle)
	if err != nil {
		log.Error("lookup-failed", err)
		return err
	}

	if err = c.runtime.Create(log, path, spec.Handle, garden.ProcessIO{}); err != nil {
		log.Error("runtime-create-failed", err)
		return err
	}

	go func() {
		if err := c.runtime.WatchEvents(log, spec.Handle, c.events); err != nil {
			log.Error("watch-failed", err)
		}
	}()

	return nil
}
func (client *localClient) Unmount(logger lager.Logger, driverId string, volumeName string) error {
	logger = logger.Session("unmount")
	logger.Info("start")
	defer logger.Info("end")
	logger.Debug("unmounting-volume", lager.Data{"volumeName": volumeName})

	unmountStart := client.clock.Now()
	defer func() {
		sendUnmountDurationMetrics(logger, time.Since(unmountStart), driverId)
	}()

	driver, found := client.driverRegistry.Driver(driverId)
	if !found {
		err := errors.New("Driver '" + driverId + "' not found in list of known drivers")
		logger.Error("unmount-driver-lookup-error", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	env := driverhttp.NewHttpDriverEnv(logger, context.TODO())

	if response := driver.Unmount(env, voldriver.UnmountRequest{Name: volumeName}); response.Err != "" {
		err := errors.New(response.Err)
		logger.Error("unmount-failed", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	return nil
}
func (stopper *CgroupStopper) StopAll(log lager.Logger, cgroupName string, exceptions []int, kill bool) error {
	log = log.Session("stop-all", lager.Data{
		"name": cgroupName,
	})
	log.Debug("start")
	defer log.Debug("finished")

	devicesSubsystemPath, err := stopper.cgroupPathResolver.Resolve(cgroupName, "devices")
	if err != nil {
		return err
	}

	if !kill {
		stopper.retrier.Run(func() error {
			return stopper.killAllRemaining(syscall.SIGTERM, devicesSubsystemPath, exceptions)
		})
	}

	stopper.retrier.Run(func() error {
		return stopper.killAllRemaining(syscall.SIGKILL, devicesSubsystemPath, exceptions)
	})

	return nil // we killed, so everything must die
}
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
// Adds orphaned Actual LRPs (ones with no corresponding Desired LRP) to the
// list of keys to retire.
func (c *convergence) orphanedActualLRPs(logger lager.Logger) {
	logger = logger.Session("orphaned-actual-lrps")

	rows, err := c.selectOrphanedActualLRPs(logger, c.db)
	if err != nil {
		logger.Error("failed-query", err)
		return
	}

	for rows.Next() {
		actualLRPKey := &models.ActualLRPKey{}

		err := rows.Scan(
			&actualLRPKey.ProcessGuid,
			&actualLRPKey.Index,
			&actualLRPKey.Domain,
		)
		if err != nil {
			logger.Error("failed-scanning", err)
			continue
		}

		c.addKeyToRetire(logger, actualLRPKey)
	}

	if rows.Err() != nil {
		logger.Error("failed-getting-next-row", rows.Err())
	}
}
func NewPoller(logger lager.Logger, httpClient *http.Client, pollInterval time.Duration) Poller {
	return &poller{
		client:       httpClient,
		pollInterval: pollInterval,
		logger:       logger.Session("poller"),
	}
}
// Unclaim Actual LRPs that have missing cells (not in the cell set passed to
// convergence) and add them to the list of start requests.
func (c *convergence) actualLRPsWithMissingCells(logger lager.Logger, cellSet models.CellSet) {
	logger = logger.Session("actual-lrps-with-missing-cells")

	keysWithMissingCells := make([]*models.ActualLRPKeyWithSchedulingInfo, 0)

	rows, err := c.selectLRPsWithMissingCells(logger, c.db, cellSet)
	if err != nil {
		logger.Error("failed-query", err)
		return
	}

	for rows.Next() {
		var index int32
		schedulingInfo, err := c.fetchDesiredLRPSchedulingInfoAndMore(logger, rows, &index)
		if err == nil {
			keysWithMissingCells = append(keysWithMissingCells, &models.ActualLRPKeyWithSchedulingInfo{
				Key: &models.ActualLRPKey{
					ProcessGuid: schedulingInfo.ProcessGuid,
					Domain:      schedulingInfo.Domain,
					Index:       index,
				},
				SchedulingInfo: schedulingInfo,
			})
		}
	}

	if rows.Err() != nil {
		logger.Error("failed-getting-next-row", rows.Err())
	}

	c.keysWithMissingCells = keysWithMissingCells
}
func (db *SQLDB) DesireTask(logger lager.Logger, taskDef *models.TaskDefinition, taskGuid, domain string) error {
	logger = logger.Session("desire-task", lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	taskDefData, err := db.serializeModel(logger, taskDef)
	if err != nil {
		logger.Error("failed-serializing-task-definition", err)
		return err
	}

	return db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		now := db.clock.Now().UnixNano()
		_, err = db.insert(logger, tx, tasksTable,
			SQLAttributes{
				"guid":               taskGuid,
				"domain":             domain,
				"created_at":         now,
				"updated_at":         now,
				"first_completed_at": 0,
				"state":              models.Task_Pending,
				"task_definition":    taskDefData,
			},
		)
		if err != nil {
			logger.Error("failed-inserting-task", err)
			return db.convertSQLError(err)
		}
		return nil
	})
}
func (db *ETCDDB) createActualLRP(logger lager.Logger, desiredLRP *models.DesiredLRP, index int32) error {
	logger = logger.Session("create-actual-lrp")
	var err error
	if index >= desiredLRP.Instances {
		err = models.NewError(models.Error_InvalidRecord, "Index too large")
		logger.Error("actual-lrp-index-too-large", err, lager.Data{"actual_index": index, "desired_instances": desiredLRP.Instances})
		return err
	}

	guid, err := uuid.NewV4()
	if err != nil {
		return err
	}

	actualLRP := &models.ActualLRP{
		ActualLRPKey: models.NewActualLRPKey(
			desiredLRP.ProcessGuid,
			index,
			desiredLRP.Domain,
		),
		State: models.ActualLRPStateUnclaimed,
		Since: db.clock.Now().UnixNano(),
		ModificationTag: models.ModificationTag{
			Epoch: guid.String(),
			Index: 0,
		},
	}

	err = db.createRawActualLRP(logger, actualLRP)
	if err != nil {
		return err
	}

	return nil
}
func (db *SQLDB) FailTask(logger lager.Logger, taskGuid, failureReason string) (*models.Task, error) {
	logger = logger.Session("fail-task", lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	var task *models.Task

	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		var err error
		task, err = db.fetchTaskForUpdate(logger, taskGuid, tx)
		if err != nil {
			logger.Error("failed-locking-task", err)
			return err
		}

		if err = task.ValidateTransitionTo(models.Task_Completed); err != nil {
			if task.State != models.Task_Pending {
				logger.Error("failed-to-transition-task-to-completed", err)
				return err
			}
		}

		return db.completeTask(logger, task, true, failureReason, "", tx)
	})

	return task, err
}
func newUnmountHandler(logger lager.Logger, client volman.Manager) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		logger := logger.Session("unmount")
		logger.Info("start")
		defer logger.Info("end")

		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			respondWithError(logger, "Error reading unmount request body", err, w)
			return
		}

		var unmountRequest volman.UnmountRequest
		if err = json.Unmarshal(body, &unmountRequest); err != nil {
			respondWithError(logger, fmt.Sprintf("Error reading unmount request body: %#v", body), err, w)
			return
		}

		err = client.Unmount(logger, unmountRequest.DriverId, unmountRequest.VolumeId)
		if err != nil {
			respondWithError(logger, fmt.Sprintf("Error unmounting volume %s with driver %s", unmountRequest.VolumeId, unmountRequest.DriverId), err, w)
			return
		}

		cf_http_handlers.WriteJSONResponse(w, http.StatusOK, struct{}{})
	}
}
func (h *TaskHandler) commonTasks(logger lager.Logger, w http.ResponseWriter, req *http.Request, version format.Version) {
	var err error
	logger = logger.Session("tasks", lager.Data{"revision": 0})

	request := &models.TasksRequest{}
	response := &models.TasksResponse{}
	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		logger.Error("failed-parsing-request", err)
		response.Error = models.ConvertError(err)
		return
	}

	filter := models.TaskFilter{Domain: request.Domain, CellID: request.CellId}
	response.Tasks, err = h.controller.Tasks(logger, filter.Domain, filter.CellID)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	for i := range response.Tasks {
		task := response.Tasks[i]
		if task.TaskDefinition == nil {
			continue
		}
		response.Tasks[i] = task.VersionDownTo(version)
	}
}
func (r *RootfsWriter) writeFile(log lager.Logger, filePath string, contents []byte, rootfs string, uid, gid int) error {
	log = log.Session("rootfs-write-file", lager.Data{
		"rootfs":  rootfs,
		"path":    filePath,
		"rootUid": uid,
		"rootGid": gid,
	})
	log.Info("write")

	dir := filepath.Dir(filePath)
	if _, err := os.Lstat(dir); os.IsNotExist(err) {
		os.Mkdir(dir, 0755)
	}

	file, err := os.Create(filePath)
	if err != nil {
		log.Error("creating-file", err)
		return err
	}
	defer file.Close()

	if _, err := file.Write(contents); err != nil {
		log.Error("writing-file", err)
		return err
	}

	if err := os.Chown(filePath, uid, gid); err != nil {
		log.Error("chowning-file", err)
		return err
	}

	log.Info("written")
	return nil
}
func HandleCompletedTask(logger lager.Logger, httpClient *http.Client, taskDB db.TaskDB, task *models.Task) {
	logger = logger.Session("handle-completed-task", lager.Data{"task_guid": task.TaskGuid})

	if task.CompletionCallbackUrl != "" {
		modelErr := taskDB.ResolvingTask(logger, task.TaskGuid)
		if modelErr != nil {
			logger.Error("marking-task-as-resolving-failed", modelErr)
			return
		}

		logger = logger.WithData(lager.Data{"callback_url": task.CompletionCallbackUrl})

		json, err := json.Marshal(&models.TaskCallbackResponse{
			TaskGuid:      task.TaskGuid,
			Failed:        task.Failed,
			FailureReason: task.FailureReason,
			Result:        task.Result,
			Annotation:    task.Annotation,
			CreatedAt:     task.CreatedAt,
		})
		if err != nil {
			logger.Error("marshalling-task-failed", err)
			return
		}

		var statusCode int

		for i := 0; i < MAX_CB_RETRIES; i++ {
			request, err := http.NewRequest("POST", task.CompletionCallbackUrl, bytes.NewReader(json))
			if err != nil {
				logger.Error("building-request-failed", err)
				return
			}
			request.Header.Set("Content-Type", "application/json")

			response, err := httpClient.Do(request)
			if err != nil {
				matched, _ := regexp.MatchString("Client.Timeout|use of closed network connection", err.Error())
				if matched {
					continue
				}
				logger.Error("doing-request-failed", err)
				return
			}

			statusCode = response.StatusCode
			// Close the body on every attempt; deferring inside the loop would hold
			// each response open until the function returns.
			response.Body.Close()

			if shouldResolve(statusCode) {
				modelErr := taskDB.DeleteTask(logger, task.TaskGuid)
				if modelErr != nil {
					logger.Error("delete-task-failed", modelErr)
				}
				return
			}
		}

		logger.Info("callback-failed", lager.Data{"status_code": statusCode})
	}
}
func (db *serviceClient) CellEvents(logger lager.Logger) <-chan models.CellEvent {
	logger = logger.Session("cell-events")

	disappearanceWatcher, disappeared := locket.NewDisappearanceWatcher(logger, db.consulClient, CellSchemaRoot(), db.clock)
	process := ifrit.Invoke(disappearanceWatcher)

	events := make(chan models.CellEvent)
	go func() {
		for {
			select {
			case keys, ok := <-disappeared:
				if !ok {
					process.Signal(os.Interrupt)
					return
				}

				cellIDs := make([]string, len(keys))
				for i, key := range keys {
					cellIDs[i] = path.Base(key)
				}
				logger.Info("cell-disappeared", lager.Data{"cell_ids": cellIDs})
				events <- models.NewCellDisappearedEvent(cellIDs)
			}
		}
	}()

	return events
}