func (db *ETCDDB) rawDesiredLRPByProcessGuid(logger lager.Logger, processGuid string) (*models.DesiredLRP, uint64, error) {
	var wg sync.WaitGroup

	var schedulingInfo *models.DesiredLRPSchedulingInfo
	var runInfo *models.DesiredLRPRunInfo
	var schedulingErr, runErr error
	var index uint64

	wg.Add(1)
	go func() {
		defer wg.Done()
		schedulingInfo, index, schedulingErr = db.rawDesiredLRPSchedulingInfo(logger, processGuid)
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		runInfo, runErr = db.rawDesiredLRPRunInfo(logger, processGuid)
	}()

	wg.Wait()

	if schedulingErr != nil {
		return nil, 0, schedulingErr
	}
	if runErr != nil {
		return nil, 0, runErr
	}

	desiredLRP := models.NewDesiredLRP(*schedulingInfo, *runInfo)
	return &desiredLRP, index, nil
}
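// rawDesiredLRPByProcessGuid above fans the two etcd reads out onto separate
// goroutines and joins them with a sync.WaitGroup before composing the
// DesiredLRP. A minimal sketch of the same scatter-gather shape in isolation
// (fetchA and fetchB are hypothetical stand-ins, not part of this package):
//
//	var wg sync.WaitGroup
//	var a, b string
//	var errA, errB error
//
//	wg.Add(2)
//	go func() { defer wg.Done(); a, errA = fetchA() }()
//	go func() { defer wg.Done(); b, errB = fetchB() }()
//	wg.Wait()
//
//	// Only read a, b, and the errors after Wait has returned; Wait is the
//	// happens-before edge that makes the plain assignments race-free.
//	if errA != nil { /* handle */ }
//	if errB != nil { /* handle */ }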
func (db *ETCDDB) desiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, guidSet, error) {
	root, err := db.fetchRecursiveRaw(logger, DesiredLRPComponentsSchemaRoot)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		if bbsErr.Type == models.Error_ResourceNotFound {
			return []*models.DesiredLRP{}, newGuidSet(), nil
		}
		return nil, newGuidSet(), err
	}
	if root.Nodes.Len() == 0 {
		return []*models.DesiredLRP{}, newGuidSet(), nil
	}

	var schedules map[string]*models.DesiredLRPSchedulingInfo
	var runs map[string]*models.DesiredLRPRunInfo
	var malformedInfos guidSet
	var malformedRunInfos guidSet

	var wg sync.WaitGroup
	for i := range root.Nodes {
		node := root.Nodes[i] // per-iteration copy so each goroutine captures its own node
		switch node.Key {
		case DesiredLRPSchedulingInfoSchemaRoot:
			wg.Add(1)
			go func() {
				defer wg.Done()
				schedules, malformedInfos = db.deserializeScheduleInfos(logger, node.Nodes, filter)
			}()
		case DesiredLRPRunInfoSchemaRoot:
			wg.Add(1)
			go func() {
				defer wg.Done()
				runs, malformedRunInfos = db.deserializeRunInfos(logger, node.Nodes, filter)
			}()
		default:
			logger.Error("unexpected-etcd-key", nil, lager.Data{"key": node.Key})
		}
	}

	wg.Wait()

	desiredLRPs := []*models.DesiredLRP{}
	for processGuid, schedule := range schedules {
		runInfo, ok := runs[processGuid]
		if !ok {
			// No matching run info (malformed or filtered out); skip rather
			// than dereference a nil map entry.
			continue
		}
		desired := models.NewDesiredLRP(*schedule, *runInfo)
		desiredLRPs = append(desiredLRPs, &desired)
	}

	malformedInfos.Merge(malformedRunInfos)

	return desiredLRPs, malformedInfos, nil
}
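// Note the per-iteration copy (node := root.Nodes[i]) taken before the
// goroutines in desiredLRPs above: each closure must capture its own node,
// not a shared loop variable. A sketch of the classic pitfall and the fix,
// assuming pre-Go 1.22 loop-variable semantics:
//
//	for _, n := range nodes {
//		go func() { use(n) }() // BUG: goroutines may all observe the final n
//	}
//
//	for _, n := range nodes {
//		n := n                 // shadow with a fresh copy per iteration
//		go func() { use(n) }() // each goroutine now sees its own n
//	}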
func (db *SQLDB) fetchDesiredLRP(logger lager.Logger, scanner RowScanner) (*models.DesiredLRP, error) {
	var runInfoData []byte
	schedulingInfo, err := db.fetchDesiredLRPSchedulingInfoAndMore(logger, scanner, &runInfoData)
	if err != nil {
		logger.Error("failed-fetching-scheduling-info", err)
		return nil, err
	}

	var runInfo models.DesiredLRPRunInfo
	err = db.deserializeModel(logger, runInfoData, &runInfo)
	if err != nil {
		// The row is corrupt; delete it so it cannot poison future fetches.
		_, err := db.delete(logger, db.db, desiredLRPsTable, "process_guid = ?", schedulingInfo.ProcessGuid)
		if err != nil {
			logger.Error("failed-deleting-invalid-row", err)
		}
		return nil, models.ErrDeserialize
	}

	desiredLRP := models.NewDesiredLRP(*schedulingInfo, runInfo)
	return &desiredLRP, nil
}
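// fetchDesiredLRP above is deliberately self-healing: when a row's run info
// no longer deserializes, the row is deleted rather than returned, so one
// corrupt record cannot fail every subsequent listing. A sketch of the
// pattern (deserialize and deleteRow are hypothetical helpers):
//
//	if err := deserialize(data, &v); err != nil {
//		if _, delErr := deleteRow(id); delErr != nil {
//			logger.Error("failed-deleting-invalid-row", delErr) // best effort
//		}
//		return nil, models.ErrDeserialize // surface the original failure
//	}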
func (db *ETCDDB) GatherAndPruneDesiredLRPs(logger lager.Logger, guids map[string]struct{}, lmc *LRPMetricCounter) (map[string]*models.DesiredLRP, error) {
	desiredLRPsRoot, modelErr := db.fetchRecursiveRaw(logger, DesiredLRPComponentsSchemaRoot)
	if modelErr == models.ErrResourceNotFound {
		logger.Info("desired-lrp-schema-root-not-found")
		return map[string]*models.DesiredLRP{}, nil
	}
	if modelErr != nil {
		return nil, modelErr
	}

	schedulingInfos := map[string]*models.DesiredLRPSchedulingInfo{}
	runInfos := map[string]*models.DesiredLRPRunInfo{}

	var malformedSchedulingInfos, malformedRunInfos []string
	var guidsLock, schedulingInfosLock, runInfosLock sync.Mutex

	// Queue one deserialization work func per node; the throttler below
	// bounds how many run concurrently.
	works := []func(){}
	logger.Debug("walking-desired-lrp-components-tree")

	for _, componentRoot := range desiredLRPsRoot.Nodes {
		switch componentRoot.Key {
		case DesiredLRPSchedulingInfoSchemaRoot:
			for _, node := range componentRoot.Nodes {
				node := node
				works = append(works, func() {
					var schedulingInfo models.DesiredLRPSchedulingInfo
					err := db.deserializeModel(logger, node, &schedulingInfo)
					if err != nil || schedulingInfo.Validate() != nil {
						logger.Error("failed-to-deserialize-scheduling-info", err)
						schedulingInfosLock.Lock()
						malformedSchedulingInfos = append(malformedSchedulingInfos, node.Key)
						schedulingInfosLock.Unlock()
					} else {
						schedulingInfosLock.Lock()
						schedulingInfos[schedulingInfo.ProcessGuid] = &schedulingInfo
						schedulingInfosLock.Unlock()

						atomic.AddInt32(&lmc.desiredLRPs, schedulingInfo.Instances)

						guidsLock.Lock()
						guids[schedulingInfo.ProcessGuid] = struct{}{}
						guidsLock.Unlock()
					}
				})
			}
		case DesiredLRPRunInfoSchemaRoot:
			for _, node := range componentRoot.Nodes {
				node := node
				works = append(works, func() {
					var runInfo models.DesiredLRPRunInfo
					err := db.deserializeModel(logger, node, &runInfo)
					if err != nil || runInfo.Validate() != nil {
						runInfosLock.Lock()
						malformedRunInfos = append(malformedRunInfos, node.Key)
						runInfosLock.Unlock()
					} else {
						runInfosLock.Lock()
						runInfos[runInfo.ProcessGuid] = &runInfo
						runInfosLock.Unlock()
					}
				})
			}
		default:
			err := fmt.Errorf("unrecognized node under desired LRPs root node: %s", componentRoot.Key)
			logger.Error("unrecognized-node", err)
			return nil, err
		}
	}

	throttler, err := workpool.NewThrottler(db.convergenceWorkersSize, works)
	if err != nil {
		logger.Error("failed-to-create-throttler", err)
		return nil, err
	}

	throttler.Work()

	db.batchDeleteNodes(malformedSchedulingInfos, logger)
	db.batchDeleteNodes(malformedRunInfos, logger)

	malformedSchedulingInfosMetric.Add(uint64(len(malformedSchedulingInfos)))
	malformedRunInfosMetric.Add(uint64(len(malformedRunInfos)))

	logger.Debug("done-walking-desired-lrp-tree")

	desireds := make(map[string]*models.DesiredLRP)
	var schedInfosToDelete []string
	for guid, schedulingInfo := range schedulingInfos {
		runInfo, ok := runInfos[guid]
		if !ok {
			err := fmt.Errorf("missing runInfo for GUID %s", guid)
			logger.Error("runInfo-not-found-error", err)
			schedInfosToDelete = append(schedInfosToDelete, DesiredLRPSchedulingInfoSchemaPath(guid))
		} else {
			desiredLRP := models.NewDesiredLRP(*schedulingInfo, *runInfo)
			desireds[guid] = &desiredLRP
		}
	}
	db.batchDeleteNodes(schedInfosToDelete, logger)

	// Check to see if we have orphaned RunInfos
	if len(runInfos) != len(schedulingInfos) {
		var runInfosToDelete []string
		for guid, runInfo := range runInfos {
			// If there is no corresponding SchedulingInfo and the RunInfo has
			// existed for longer than desiredLRPCreationTimeout, consider it
			// orphaned and delete it.
			_, ok := schedulingInfos[guid]
			if !ok && db.clock.Since(time.Unix(0, runInfo.CreatedAt)) > db.desiredLRPCreationTimeout {
				orphanedRunInfosMetric.Add(1)
				runInfosToDelete = append(runInfosToDelete, DesiredLRPRunInfoSchemaPath(guid))
			}
		}

		db.batchDeleteNodes(runInfosToDelete, logger)
	}

	return desireds, nil
}
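// GatherAndPruneDesiredLRPs above queues one work func per etcd node and runs
// the whole batch through a workpool.Throttler, so at most
// convergenceWorkersSize deserializations execute concurrently. A minimal
// usage sketch of that pattern (keys, process, and maxWorkers are
// hypothetical):
//
//	works := []func(){}
//	for _, key := range keys {
//		key := key // per-iteration copy, as in the loops above
//		works = append(works, func() { process(key) })
//	}
//	throttler, err := workpool.NewThrottler(maxWorkers, works)
//	if err != nil {
//		return err
//	}
//	throttler.Work() // blocks until every queued work func has run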
"key": "value", "another_key": "another_value" } } }` BeforeEach(func() { desiredLRP = models.DesiredLRP{} err := json.Unmarshal([]byte(jsonDesiredLRP), &desiredLRP) Expect(err).NotTo(HaveOccurred()) }) Describe("CreateComponents", func() { It("decomposes the desired lrp into it's component parts", func() { schedInfo, runInfo := desiredLRP.CreateComponents(time.Unix(123, 456)) newDesired := models.NewDesiredLRP(schedInfo, runInfo) Expect(newDesired).To(BeEquivalentTo(desiredLRP)) }) It("saves the created at time on the run info", func() { _, runInfo := desiredLRP.CreateComponents(time.Unix(123, 456)) Expect(runInfo.CreatedAt).To(BeEquivalentTo((time.Unix(123, 456).UnixNano()))) }) }) Describe("serialization", func() { It("successfully round trips through json and protobuf", func() { jsonSerialization, err := json.Marshal(desiredLRP) Expect(err).NotTo(HaveOccurred()) Expect(jsonSerialization).To(MatchJSON(jsonDesiredLRP))