func (p *externalBinaryNetworker) exec(log lager.Logger, action, handle string, inputData interface{}, outputData interface{}) error {
	stdinBytes, err := json.Marshal(inputData)
	if err != nil {
		return err
	}

	args := append(p.extraArg, "--action", action, "--handle", handle)
	cmd := exec.Command(p.path, args...)

	stdout := &bytes.Buffer{}
	cmd.Stdout = stdout

	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr

	cmd.Stdin = bytes.NewReader(stdinBytes)

	err = p.commandRunner.Run(cmd)
	logData := lager.Data{"action": action, "stdin": string(stdinBytes), "stderr": stderr.String(), "stdout": stdout.String()}
	if err != nil {
		log.Error("external-networker-result", err, logData)
		return fmt.Errorf("external networker %s: %s", action, err)
	}

	if outputData != nil {
		err = json.Unmarshal(stdout.Bytes(), outputData)
		if err != nil {
			log.Error("external-networker-result", err, logData)
			return fmt.Errorf("unmarshaling result from external networker: %s", err)
		}
	}

	log.Debug("external-networker-result", logData)
	return nil
}
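// A minimal, self-contained sketch of the invocation pattern exec uses above: JSON in on
// stdin, JSON out on stdout. The plugin path ("/bin/cat", which simply echoes stdin) and
// the payload shape are illustrative assumptions, not the real external networker contract.
func runJSONPluginSketch(input, output interface{}) error {
	stdinBytes, err := json.Marshal(input)
	if err != nil {
		return err
	}

	cmd := exec.Command("/bin/cat") // hypothetical plugin binary
	cmd.Stdin = bytes.NewReader(stdinBytes)

	stdout := &bytes.Buffer{}
	cmd.Stdout = stdout
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("external plugin: %s", err)
	}

	if output != nil {
		return json.Unmarshal(stdout.Bytes(), output)
	}
	return nil
}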
func (p *ExternalImageManager) Metrics(log lager.Logger, _, rootfs string) (garden.ContainerDiskStat, error) {
	log = log.Session("image-plugin-metrics", lager.Data{"rootfs": rootfs})
	log.Debug("start")
	defer log.Debug("end")

	imagePath := filepath.Dir(rootfs)
	cmd := exec.Command(p.binPath, "stats", imagePath)
	cmd.Stderr = lagregator.NewRelogger(log)
	outBuffer := bytes.NewBuffer([]byte{})
	cmd.Stdout = outBuffer

	if err := p.commandRunner.Run(cmd); err != nil {
		logData := lager.Data{"action": "stats", "stdout": outBuffer.String()} // outBuffer captures stdout; stderr is relogged above
		log.Error("external-image-manager-result", err, logData)
		return garden.ContainerDiskStat{}, fmt.Errorf("external image manager metrics failed: %s (%s)", outBuffer.String(), err)
	}

	var metrics map[string]map[string]uint64
	if err := json.NewDecoder(outBuffer).Decode(&metrics); err != nil {
		return garden.ContainerDiskStat{}, fmt.Errorf("parsing metrics: %s", err)
	}

	return garden.ContainerDiskStat{
		TotalBytesUsed:     metrics["disk_usage"]["total_bytes_used"],
		ExclusiveBytesUsed: metrics["disk_usage"]["exclusive_bytes_used"],
	}, nil
}
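// A small sketch of the stats payload Metrics decodes above: a two-level JSON map keyed
// by "disk_usage". The literal values are assumed example output, not captured plugin output.
func decodeDiskStatsSketch() (uint64, uint64, error) {
	payload := `{"disk_usage":{"total_bytes_used":4096,"exclusive_bytes_used":1024}}`

	var metrics map[string]map[string]uint64
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&metrics); err != nil {
		return 0, 0, fmt.Errorf("parsing metrics: %s", err)
	}
	return metrics["disk_usage"]["total_bytes_used"], metrics["disk_usage"]["exclusive_bytes_used"], nil
}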
func (db *SQLDB) ActualLRPGroupsByProcessGuid(logger lager.Logger, processGuid string) ([]*models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Debug("starting")
	defer logger.Debug("complete")

	return db.getActualLRPS(logger, "process_guid = ?", processGuid)
}
func (e *ETCDToSQL) migrateDomains(logger lager.Logger) error {
	logger = logger.Session("migrating-domains")
	logger.Debug("starting")
	defer logger.Debug("finished")

	response, err := e.storeClient.Get(etcd.DomainSchemaRoot, false, true)
	if err != nil {
		logger.Error("failed-fetching-domains", err)
	}

	if response != nil {
		for _, node := range response.Node.Nodes {
			domain := path.Base(node.Key)
			expireTime := e.clock.Now().UnixNano() + int64(time.Second)*node.TTL

			_, err := e.rawSQLDB.Exec(sqldb.RebindForFlavor(`
				INSERT INTO domains (domain, expire_time)
				VALUES (?, ?)
			`, e.dbFlavor), domain, expireTime)
			if err != nil {
				logger.Error("failed-inserting-domain", err)
				continue
			}
		}
	}

	return nil
}
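// A worked example of the expire-time arithmetic in migrateDomains: a domain node with a
// TTL of ttlSeconds fetched at nowNanos expires at nowNanos + ttlSeconds in nanoseconds.
// The helper name and inputs are illustrative assumptions.
func domainExpireTimeSketch(nowNanos int64, ttlSeconds int64) int64 {
	return nowNanos + int64(time.Second)*ttlSeconds
}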
func (stopper *CgroupStopper) StopAll(log lager.Logger, cgroupName string, exceptions []int, kill bool) error {
	log = log.Session("stop-all", lager.Data{
		"name": cgroupName,
	})
	log.Debug("start")
	defer log.Debug("finished")

	devicesSubsystemPath, err := stopper.cgroupPathResolver.Resolve(cgroupName, "devices")
	if err != nil {
		return err
	}

	if !kill {
		stopper.retrier.Run(func() error {
			return stopper.killAllRemaining(syscall.SIGTERM, devicesSubsystemPath, exceptions)
		})
	}

	stopper.retrier.Run(func() error {
		return stopper.killAllRemaining(syscall.SIGKILL, devicesSubsystemPath, exceptions)
	})

	return nil // we killed, so everything must die
}
func (db *ETCDDB) UnclaimActualLRP(logger lager.Logger, key *models.ActualLRPKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	actualLRP, modifiedIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		return nil, nil, bbsErr
	}
	beforeActualLRP := *actualLRP

	if actualLRP.State == models.ActualLRPStateUnclaimed {
		logger.Debug("already-unclaimed")
		return nil, nil, models.ErrActualLRPCannotBeUnclaimed
	}

	actualLRP.State = models.ActualLRPStateUnclaimed
	actualLRP.ActualLRPKey = *key
	actualLRP.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	actualLRP.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	actualLRP.Since = db.clock.Now().UnixNano()
	actualLRP.ModificationTag.Increment()

	data, err := db.serializeModel(logger, actualLRP)
	if err != nil {
		return nil, nil, err
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), data, 0, modifiedIndex)
	if err != nil {
		logger.Error("failed-compare-and-swap", err)
		return nil, nil, ErrorFromEtcdError(logger, err)
	}

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: actualLRP}, nil
}
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
func (client *localClient) Unmount(logger lager.Logger, driverId string, volumeName string) error {
	logger = logger.Session("unmount")
	logger.Info("start")
	defer logger.Info("end")
	logger.Debug("unmounting-volume", lager.Data{"volumeName": volumeName})

	unmountStart := client.clock.Now()
	defer func() {
		sendUnmountDurationMetrics(logger, time.Since(unmountStart), driverId)
	}()

	driver, found := client.driverRegistry.Driver(driverId)
	if !found {
		err := errors.New("Driver '" + driverId + "' not found in list of known drivers")
		logger.Error("mount-driver-lookup-error", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	env := driverhttp.NewHttpDriverEnv(logger, context.TODO())

	if response := driver.Unmount(env, voldriver.UnmountRequest{Name: volumeName}); response.Err != "" {
		err := errors.New(response.Err)
		logger.Error("unmount-failed", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	return nil
}
func (db *ETCDDB) DesireTask(logger lager.Logger, taskDef *models.TaskDefinition, taskGuid, domain string) error {
	logger = logger.WithData(lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("finished")

	now := db.clock.Now().UnixNano()
	task := &models.Task{
		TaskDefinition: taskDef,
		TaskGuid:       taskGuid,
		Domain:         domain,
		State:          models.Task_Pending,
		CreatedAt:      now,
		UpdatedAt:      now,
	}

	value, err := db.serializeModel(logger, task)
	if err != nil {
		return err
	}

	logger.Debug("persisting-task")
	_, err = db.client.Create(TaskSchemaPathByGuid(task.TaskGuid), value, NO_TTL)
	if err != nil {
		return ErrorFromEtcdError(logger, err)
	}
	logger.Debug("succeeded-persisting-task")

	return nil
}
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		actualKey := actualLRP.ActualLRPKey

		logger = logger.Session("restart-crash", lager.Data{
			"process_guid": actualKey.ProcessGuid,
			"index":        actualKey.Index,
		})

		if actualLRP.State != models.ActualLRPStateCrashed {
			logger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		logger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		_, err := db.unclaimActualLRP(logger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey)
		if err != nil {
			logger.Error("failed-unclaiming-crash", err)
			return
		}
		logger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(logger, &actualKey)
	}
}
func (p *ExternalImageManager) Create(log lager.Logger, handle string, spec rootfs_provider.Spec) (string, []string, error) {
	log = log.Session("image-plugin-create")
	log.Debug("start")
	defer log.Debug("end")

	args := []string{"create"}
	if spec.QuotaSize != 0 {
		if spec.QuotaScope == garden.DiskLimitScopeExclusive {
			args = append(args, "--exclude-image-from-quota")
		}
		args = append(args, "--disk-limit-size-bytes", strconv.FormatInt(spec.QuotaSize, 10))
	}

	if spec.Namespaced {
		for _, mapping := range p.mappings {
			args = append(args, "--uid-mapping", stringifyMapping(mapping))
			args = append(args, "--gid-mapping", stringifyMapping(mapping))
		}
	}

	if spec.RootFS == nil || spec.RootFS.String() == "" {
		args = append(args, p.defaultBaseImage.String())
	} else {
		args = append(args, strings.Replace(spec.RootFS.String(), "#", ":", 1))
	}
	args = append(args, handle)

	cmd := exec.Command(p.binPath, args...)
	cmd.Stderr = lagregator.NewRelogger(log)
	outBuffer := bytes.NewBuffer([]byte{})
	cmd.Stdout = outBuffer

	if spec.Namespaced {
		cmd.SysProcAttr = &syscall.SysProcAttr{
			Credential: &syscall.Credential{
				Uid: p.mappings[0].HostID,
				Gid: p.mappings[0].HostID,
			},
		}
	}

	if err := p.commandRunner.Run(cmd); err != nil {
		logData := lager.Data{"action": "create", "stdout": outBuffer.String()}
		log.Error("external-image-manager-result", err, logData)
		return "", nil, fmt.Errorf("external image manager create failed: %s (%s)", outBuffer.String(), err)
	}

	imagePath := strings.TrimSpace(outBuffer.String())

	envVars, err := p.readEnvVars(imagePath)
	if err != nil {
		return "", nil, err
	}

	rootFSPath := filepath.Join(imagePath, "rootfs")
	return rootFSPath, envVars, nil
}
func (db *ETCDDB) fetchRaw(logger lager.Logger, key string) (*etcd.Node, error) {
	logger.Debug("fetching-from-etcd")
	response, err := db.client.Get(key, false, false)
	if err != nil {
		return nil, ErrorFromEtcdError(logger, err)
	}
	logger.Debug("succeeded-fetching-from-etcd")
	return response.Node, nil
}
func SetVcapRequestIdHeader(request *http.Request, logger lager.Logger) {
	guid, err := uuid.GenerateUUID()
	if err == nil {
		request.Header.Set(VcapRequestIdHeader, guid)
		if logger != nil {
			logger.Debug("vcap-request-id-header-set", lager.Data{VcapRequestIdHeader: guid})
		}
	}
}
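// Hedged usage sketch: stamping an outgoing request with a VCAP request id before sending
// it. The target URL and the "outbound" session name are illustrative assumptions.
func tagOutgoingRequestSketch(logger lager.Logger) (*http.Request, error) {
	req, err := http.NewRequest("GET", "http://example.com/v1/ping", nil)
	if err != nil {
		return nil, err
	}
	SetVcapRequestIdHeader(req, logger.Session("outbound"))
	return req, nil
}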
func (s *GardenServer) streamProcess(logger lager.Logger, conn net.Conn, process garden.Process, stdinPipe *io.PipeWriter, connCloseCh chan struct{}) {
	statusCh := make(chan int, 1)
	errCh := make(chan error, 1)

	go func() {
		status, err := process.Wait()
		if err != nil {
			logger.Error("wait-failed", err, lager.Data{
				"id": process.ID(),
			})
			errCh <- err
		} else {
			logger.Info("exited", lager.Data{
				"status": status,
				"id":     process.ID(),
			})
			statusCh <- status
		}
	}()

	for {
		select {
		case status := <-statusCh:
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID:  process.ID(),
				ExitStatus: &status,
			})
			stdinPipe.Close()
			return

		case err := <-errCh:
			e := err.Error()
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID: process.ID(),
				Error:     &e,
			})
			stdinPipe.Close()
			return

		case <-s.stopping:
			logger.Debug("detaching", lager.Data{
				"id": process.ID(),
			})
			return

		case <-connCloseCh:
			return
		}
	}
}
func (db *ETCDDB) fetchRecursiveRaw(logger lager.Logger, key string) (*etcd.Node, error) {
	logger.Debug("fetching-recursive-from-etcd")
	response, err := db.client.Get(key, false, true)
	if err != nil {
		return nil, ErrorFromEtcdError(logger, err)
	}
	logger.Debug("succeeded-fetching-recursive-from-etcd", lager.Data{"num_nodes": response.Node.Nodes.Len()})
	return response.Node, nil
}
func (r *driverSyncer) getMatchingDriverSpecs(logger lager.Logger, path string, pattern string) ([]string, error) {
	logger.Debug("binaries", lager.Data{"path": path, "pattern": pattern})
	matchingDriverSpecs, err := filepath.Glob(path + "/*." + pattern)
	if err != nil { // untestable on linux, does glob work differently on windows???
		return nil, fmt.Errorf("Volman configured with an invalid driver path '%s', error occurred listing files (%s)", path, err.Error())
	}

	return matchingDriverSpecs, nil
}
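// Hedged example of the glob built above: with path "/var/vcap/data/voldrivers" and
// pattern "json", the matcher expands to "/var/vcap/data/voldrivers/*.json". The
// directory path is an illustrative assumption.
func listDriverSpecsSketch() ([]string, error) {
	return filepath.Glob("/var/vcap/data/voldrivers" + "/*." + "json")
}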
func forwardRuncLogsToLager(log lager.Logger, buff []byte) {
	parsedLogLine := struct{ Msg string }{}
	for _, logLine := range strings.Split(string(buff), "\n") {
		if err := logfmt.Unmarshal([]byte(logLine), &parsedLogLine); err == nil {
			log.Debug("runc", lager.Data{
				"message": parsedLogLine.Msg,
			})
		}
	}
}
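// Hedged usage sketch: relaying a captured runc log buffer (logfmt lines, one per line)
// through forwardRuncLogsToLager. The sample log text is an assumption for illustration.
func relayRuncLogsSketch(log lager.Logger) {
	buff := []byte(`time="2016-01-01T00:00:00Z" level=warning msg="signal: killed"
time="2016-01-01T00:00:01Z" level=error msg="container not running"
`)
	forwardRuncLogsToLager(log, buff)
}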
func (db *SQLDB) UnclaimActualLRP(logger lager.Logger, key *models.ActualLRPKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"key": key})

	var beforeActualLRP models.ActualLRP
	var actualLRP *models.ActualLRP
	processGuid := key.ProcessGuid
	index := key.Index

	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		var err error
		actualLRP, err = db.fetchActualLRPForUpdate(logger, processGuid, index, false, tx)
		if err != nil {
			logger.Error("failed-fetching-actual-lrp-for-share", err)
			return err
		}
		beforeActualLRP = *actualLRP

		if actualLRP.State == models.ActualLRPStateUnclaimed {
			logger.Debug("already-unclaimed")
			return models.ErrActualLRPCannotBeUnclaimed
		}

		logger.Info("starting")
		defer logger.Info("complete")

		now := db.clock.Now().UnixNano()
		actualLRP.ModificationTag.Increment()
		actualLRP.State = models.ActualLRPStateUnclaimed
		actualLRP.ActualLRPInstanceKey.CellId = ""
		actualLRP.ActualLRPInstanceKey.InstanceGuid = ""
		actualLRP.Since = now
		actualLRP.ActualLRPNetInfo = models.ActualLRPNetInfo{}

		_, err = db.update(logger, tx, actualLRPsTable,
			SQLAttributes{
				"state":                  actualLRP.State,
				"cell_id":                actualLRP.CellId,
				"instance_guid":          actualLRP.InstanceGuid,
				"modification_tag_index": actualLRP.ModificationTag.Index,
				"since":                  actualLRP.Since,
				"net_info":               []byte{},
			},
			"process_guid = ? AND instance_index = ? AND evacuating = ?",
			processGuid, index, false,
		)
		if err != nil {
			logger.Error("failed-to-unclaim-actual-lrp", err)
			return db.convertSQLError(err)
		}

		return nil
	})

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: actualLRP}, err
}
func (db *ETCDDB) CrashActualLRP(logger lager.Logger, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, bool, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "actual_lrp_instance_key": instanceKey})
	logger.Info("starting")

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, false, err
	}
	beforeActualLRP := *lrp

	latestChangeTime := time.Duration(db.clock.Now().UnixNano() - lrp.Since)

	var newCrashCount int32
	if latestChangeTime > models.CrashResetTimeout && lrp.State == models.ActualLRPStateRunning {
		newCrashCount = 1
	} else {
		newCrashCount = lrp.CrashCount + 1
	}

	logger.Debug("retrieved-lrp")
	if !lrp.AllowsTransitionTo(key, instanceKey, models.ActualLRPStateCrashed) {
		logger.Error("failed-to-transition-to-crashed", nil, lager.Data{"from_state": lrp.State, "same_instance_key": lrp.ActualLRPInstanceKey.Equal(instanceKey)})
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	lrp.State = models.ActualLRPStateCrashed
	lrp.Since = db.clock.Now().UnixNano()
	lrp.CrashCount = newCrashCount
	lrp.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	lrp.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	lrp.ModificationTag.Increment()
	lrp.CrashReason = errorMessage

	var immediateRestart bool
	if lrp.ShouldRestartImmediately(models.NewDefaultRestartCalculator()) {
		lrp.State = models.ActualLRPStateUnclaimed
		immediateRestart = true
	}

	lrpData, serializeErr := db.serializeModel(logger, lrp)
	if serializeErr != nil {
		return nil, nil, false, serializeErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, immediateRestart, nil
}
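// A small sketch of the crash-count rule CrashActualLRP applies above: if the LRP had
// been Running for longer than the reset timeout when it crashed, the count restarts at
// 1; otherwise it increments. The helper name and parameters are illustrative.
func nextCrashCountSketch(runningFor, resetTimeout time.Duration, wasRunning bool, currentCount int32) int32 {
	if runningFor > resetTimeout && wasRunning {
		return 1
	}
	return currentCount + 1
}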
func (c *Host) configureVethPair(log lager.Logger, hostName, containerName string) (*net.Interface, *net.Interface, error) {
	log = log.Session("veth")
	log.Debug("create")

	if host, container, err := c.Veth.Create(hostName, containerName); err != nil {
		log.Error("create", err)
		return nil, nil, &VethPairCreationError{err, hostName, containerName}
	} else {
		return host, container, err
	}
}
func (h *EvacuationHandler) EvacuateStoppedActualLRP(logger lager.Logger, w http.ResponseWriter, req *http.Request) {
	logger = logger.Session("evacuate-stopped-actual-lrp")

	request := &models.EvacuateStoppedActualLRPRequest{}
	response := &models.EvacuationResponse{}

	var bbsErr *models.Error

	defer func() { exitIfUnrecoverable(logger, h.exitChan, bbsErr) }()
	defer writeResponse(w, response)

	err := parseRequest(logger, req, request)
	if err != nil {
		logger.Error("failed-to-parse-request", err)
		bbsErr = models.ConvertError(err)
		response.Error = bbsErr
		return
	}

	guid := request.ActualLrpKey.ProcessGuid
	index := request.ActualLrpKey.Index

	group, err := h.actualLRPDB.ActualLRPGroupByProcessGuidAndIndex(logger, guid, index)
	if err != nil {
		logger.Error("failed-fetching-actual-lrp-group", err)
		bbsErr = models.ConvertError(err)
		response.Error = bbsErr
		return
	}

	err = h.db.RemoveEvacuatingActualLRP(logger, request.ActualLrpKey, request.ActualLrpInstanceKey)
	if err != nil {
		logger.Error("failed-removing-evacuating-actual-lrp", err)
		bbsErr = models.ConvertError(err)
	} else if group.Evacuating != nil {
		go h.actualHub.Emit(models.NewActualLRPRemovedEvent(&models.ActualLRPGroup{Evacuating: group.Evacuating}))
	}

	if group.Instance == nil || !group.Instance.ActualLRPInstanceKey.Equal(request.ActualLrpInstanceKey) {
		logger.Debug("cannot-remove-actual-lrp")
		response.Error = models.ErrActualLRPCannotBeRemoved
		return
	}

	err = h.actualLRPDB.RemoveActualLRP(logger, guid, index, request.ActualLrpInstanceKey)
	if err != nil {
		logger.Error("failed-to-remove-actual-lrp", err)
		bbsErr = models.ConvertError(err)
		response.Error = bbsErr
		return
	} else {
		go h.actualHub.Emit(models.NewActualLRPRemovedEvent(&models.ActualLRPGroup{Instance: group.Instance}))
	}
}
func (db *SQLDB) DesiredLRPByProcessGuid(logger lager.Logger, processGuid string) (*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Debug("starting")
	defer logger.Debug("complete")

	row := db.one(logger, db.db, desiredLRPsTable,
		desiredLRPColumns, NoLockRow,
		"process_guid = ?", processGuid,
	)
	return db.fetchDesiredLRP(logger, row)
}
func (d *DirectoryDepot) Lookup(log lager.Logger, handle string) (string, error) {
	log = log.Session("lookup", lager.Data{"handle": handle})
	log.Debug("started")
	defer log.Debug("finished")

	if _, err := os.Stat(d.toDir(handle)); err != nil {
		return "", ErrDoesNotExist
	}

	return d.toDir(handle), nil
}
func (db *ETCDDB) SetVersion(logger lager.Logger, version *models.Version) error {
	logger.Debug("set-version", lager.Data{"version": version})
	defer logger.Debug("set-version-finished")

	value, err := json.Marshal(version)
	if err != nil {
		return err
	}

	_, err = db.client.Set(VersionKey, value, NO_TTL)
	return err
}
func (db *ETCDDB) ActualLRPGroups(logger lager.Logger, filter models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) {
	node, err := db.fetchRecursiveRaw(logger, ActualLRPSchemaRoot)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		if bbsErr.Type == models.Error_ResourceNotFound {
			return []*models.ActualLRPGroup{}, nil
		}
		return nil, err
	}
	if len(node.Nodes) == 0 {
		return []*models.ActualLRPGroup{}, nil
	}

	groups := []*models.ActualLRPGroup{}

	var workErr atomic.Value
	groupChan := make(chan []*models.ActualLRPGroup, len(node.Nodes))
	wg := sync.WaitGroup{}

	logger.Debug("performing-deserialization-work")
	for _, node := range node.Nodes {
		node := node

		wg.Add(1)
		go func() {
			defer wg.Done()
			g, err := db.parseActualLRPGroups(logger, node, filter)
			if err != nil {
				workErr.Store(err)
				return
			}
			groupChan <- g
		}()
	}

	go func() {
		wg.Wait()
		close(groupChan)
	}()

	for g := range groupChan {
		groups = append(groups, g...)
	}

	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return []*models.ActualLRPGroup{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num_actual_lrp_groups": len(groups)})

	return groups, nil
}
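// A generic, self-contained sketch of the fan-out used by ActualLRPGroups above: one
// goroutine per input, results funnelled through a buffered channel, and the last worker
// error captured in an atomic.Value. strconv.Atoi stands in for per-node deserialization.
func fanOutParseSketch(inputs []string) ([]int, error) {
	results := []int{}

	var workErr atomic.Value
	resultChan := make(chan int, len(inputs))
	wg := sync.WaitGroup{}

	for _, in := range inputs {
		in := in
		wg.Add(1)
		go func() {
			defer wg.Done()
			n, err := strconv.Atoi(in) // stand-in for parsing one etcd node
			if err != nil {
				workErr.Store(err)
				return
			}
			resultChan <- n
		}()
	}

	go func() {
		wg.Wait()
		close(resultChan)
	}()

	for n := range resultChan {
		results = append(results, n)
	}

	if err, ok := workErr.Load().(error); ok {
		return nil, err
	}
	return results, nil
}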
func (db *ETCDDB) unclaimActualLRPWithIndex(
	logger lager.Logger,
	lrp *models.ActualLRP,
	storeIndex uint64,
	actualLRPKey *models.ActualLRPKey,
	actualLRPInstanceKey *models.ActualLRPInstanceKey,
) (change stateChange, err error) {
	logger = logger.Session("unclaim-actual-lrp-with-index")
	defer logger.Debug("complete", lager.Data{"state_change": change, "error": err})

	if !lrp.ActualLRPKey.Equal(actualLRPKey) {
		logger.Error("failed-actual-lrp-key-differs", models.ErrActualLRPCannotBeUnclaimed)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	if lrp.State == models.ActualLRPStateUnclaimed {
		logger.Info("already-unclaimed")
		return stateDidNotChange, nil
	}

	if !lrp.ActualLRPInstanceKey.Equal(actualLRPInstanceKey) {
		logger.Error("failed-actual-lrp-instance-key-differs", models.ErrActualLRPCannotBeUnclaimed)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	lrp.Since = db.clock.Now().UnixNano()
	lrp.State = models.ActualLRPStateUnclaimed
	lrp.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	lrp.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	lrp.ModificationTag.Increment()

	err = lrp.Validate()
	if err != nil {
		logger.Error("failed-to-validate-unclaimed-lrp", err)
		return stateDidNotChange, models.NewError(models.Error_InvalidRecord, err.Error())
	}

	lrpData, serialErr := db.serializeModel(logger, lrp)
	if serialErr != nil {
		logger.Error("failed-to-marshal-unclaimed-lrp", serialErr)
		return stateDidNotChange, serialErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(actualLRPKey.ProcessGuid, actualLRPKey.Index), lrpData, 0, storeIndex)
	if err != nil {
		logger.Error("failed-to-compare-and-swap", err)
		return stateDidNotChange, models.ErrActualLRPCannotBeUnclaimed
	}

	logger.Debug("changed-to-unclaimed")
	return stateDidChange, nil
}
func (db *SQLDB) SetVersion(logger lager.Logger, version *models.Version) error {
	logger = logger.Session("set-version", lager.Data{"version": version})
	logger.Debug("starting")
	defer logger.Debug("complete")

	versionJSON, err := json.Marshal(version)
	if err != nil {
		logger.Error("failed-marshalling-version", err)
		return err
	}

	return db.setConfigurationValue(logger, VersionID, string(versionJSON))
}
func (db *SQLDB) Tasks(logger lager.Logger, filter models.TaskFilter) ([]*models.Task, error) {
	logger = logger.Session("tasks", lager.Data{"filter": filter})
	logger.Debug("starting")
	defer logger.Debug("complete")

	wheres := []string{}
	values := []interface{}{}

	if filter.Domain != "" {
		wheres = append(wheres, "domain = ?")
		values = append(values, filter.Domain)
	}

	if filter.CellID != "" {
		wheres = append(wheres, "cell_id = ?")
		values = append(values, filter.CellID)
	}

	results := []*models.Task{}

	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		rows, err := db.all(logger, tx, tasksTable,
			taskColumns, NoLockRow,
			strings.Join(wheres, " AND "), values...,
		)
		if err != nil {
			logger.Error("failed-query", err)
			return db.convertSQLError(err)
		}
		defer rows.Close()

		for rows.Next() {
			task, err := db.fetchTask(logger, rows, db.db)
			if err != nil {
				logger.Error("failed-fetch", err)
				return err
			}
			results = append(results, task)
		}

		if rows.Err() != nil {
			logger.Error("failed-getting-next-row", rows.Err())
			return db.convertSQLError(rows.Err())
		}

		return nil
	})

	return results, err
}
func (db *SQLDB) domainSet(logger lager.Logger) (map[string]struct{}, error) {
	logger.Debug("listing-domains")
	domains, err := db.Domains(logger)
	if err != nil {
		logger.Error("failed-listing-domains", err)
		return nil, err
	}
	logger.Debug("succeeded-listing-domains")

	m := make(map[string]struct{}, len(domains))
	for _, domain := range domains {
		m[domain] = struct{}{}
	}
	return m, nil
}
func (e *ETCDToSQL) migrateActualLRPs(logger lager.Logger) error {
	logger = logger.Session("migrating-actual-lrps")
	logger.Debug("starting")
	defer logger.Debug("finished")

	response, err := e.storeClient.Get(etcd.ActualLRPSchemaRoot, false, true)
	if err != nil {
		logger.Error("failed-fetching-actual-lrps", err)
	}

	if response != nil {
		for _, parent := range response.Node.Nodes {
			for _, indices := range parent.Nodes {
				for _, node := range indices.Nodes {
					// we're going to explicitly ignore evacuating lrps for simplicity's sake
					if path.Base(node.Key) == "instance" {
						actualLRP := new(models.ActualLRP)
						err := e.serializer.Unmarshal(logger, []byte(node.Value), actualLRP)
						if err != nil {
							logger.Error("failed-to-deserialize-actual-lrp", err)
							continue
						}

						netInfoData, err := e.serializer.Marshal(logger, format.ENCRYPTED_PROTO, &actualLRP.ActualLRPNetInfo)
						if err != nil {
							logger.Error("failed-to-marshal-net-info", err)
						}

						_, err = e.rawSQLDB.Exec(sqldb.RebindForFlavor(`
							INSERT INTO actual_lrps
								(process_guid, instance_index, domain, instance_guid, cell_id,
								 net_info, crash_count, crash_reason, state, placement_error, since,
								 modification_tag_epoch, modification_tag_index)
							VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
						`, e.dbFlavor), actualLRP.ProcessGuid, actualLRP.Index, actualLRP.Domain,
							actualLRP.InstanceGuid, actualLRP.CellId, netInfoData, actualLRP.CrashCount,
							actualLRP.CrashReason, actualLRP.State, actualLRP.PlacementError, actualLRP.Since,
							actualLRP.ModificationTag.Epoch, actualLRP.ModificationTag.Index)
						if err != nil {
							logger.Error("failed-inserting-actual-lrp", err)
							continue
						}
					}
				}
			}
		}
	}

	return nil
}