func (c *client) DesiredLRPs(filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) {
	var desiredLRPs models.DesiredLRPs

	query := url.Values{}
	if filter.Domain != "" {
		query.Set("domain", filter.Domain)
	}

	err := c.doRequest(DesiredLRPsRoute, nil, query, nil, &desiredLRPs)
	return desiredLRPs.GetDesiredLrps(), err
}
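For context, here is a minimal sketch of how a caller might use this client method. The `DesiredLRPsFetcher` interface, the `printLRPsForDomain` helper, and the `ProcessGuid`/`Instances` getters are illustrative assumptions; only the `DesiredLRPs(filter)` signature comes from the code above, and the `models` import is assumed to be the same package the client uses.

// Hypothetical caller of the client method above; not part of the package.
type DesiredLRPsFetcher interface {
	DesiredLRPs(filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error)
}

// printLRPsForDomain fetches the DesiredLRPs for one domain and prints a summary.
// An empty domain would return every DesiredLRP, since the filter is optional.
func printLRPsForDomain(c DesiredLRPsFetcher, domain string) error {
	lrps, err := c.DesiredLRPs(models.DesiredLRPFilter{Domain: domain})
	if err != nil {
		return err
	}
	for _, lrp := range lrps {
		// Getter names assumed from the protobuf-generated model.
		fmt.Printf("%s: %d instances\n", lrp.GetProcessGuid(), lrp.GetInstances())
	}
	return nil
}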
func (db *ETCDDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) (*models.DesiredLRPs, *models.Error) {
	root, bbsErr := db.fetchRecursiveRaw(logger, DesiredLRPSchemaRoot)

	// A missing schema root simply means there are no DesiredLRPs yet.
	if bbsErr.Equal(models.ErrResourceNotFound) {
		return &models.DesiredLRPs{}, nil
	}
	if bbsErr != nil {
		return nil, bbsErr
	}
	if root.Nodes.Len() == 0 {
		return &models.DesiredLRPs{}, nil
	}

	desiredLRPs := models.DesiredLRPs{}
	lrpsLock := sync.Mutex{}
	var workErr atomic.Value
	works := []func(){}

	// Build one work item per etcd node; each deserializes a DesiredLRP and,
	// if it matches the domain filter, appends it under the mutex.
	for _, node := range root.Nodes {
		node := node // capture the range variable for the closure

		works = append(works, func() {
			var lrp models.DesiredLRP
			deserializeErr := models.FromJSON([]byte(node.Value), &lrp)
			if deserializeErr != nil {
				logger.Error("failed-parsing-desired-lrp", deserializeErr)
				workErr.Store(fmt.Errorf("cannot parse lrp JSON for key %s: %s", node.Key, deserializeErr.Error()))
				return
			}

			if filter.Domain == "" || lrp.GetDomain() == filter.Domain {
				lrpsLock.Lock()
				desiredLRPs.DesiredLrps = append(desiredLRPs.DesiredLrps, &lrp)
				lrpsLock.Unlock()
			}
		})
	}

	// Run the work items with bounded concurrency.
	throttler, err := workpool.NewThrottler(maxDesiredLRPGetterWorkPoolSize, works)
	if err != nil {
		logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxDesiredLRPGetterWorkPoolSize, "num-works": len(works)})
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}

	logger.Debug("performing-deserialization-work")
	throttler.Work()

	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-desired-lrps": len(desiredLRPs.GetDesiredLrps())})

	return &desiredLRPs, nil
}
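The ETCD implementation fans deserialization out across a bounded worker pool: results are appended to a shared slice under a mutex, and any worker's error is recorded in an `atomic.Value` (last write wins) and checked once all work is done. Below is a standalone sketch of that same pattern using only the standard library; a plain `sync.WaitGroup` stands in for the `workpool` throttler, and the payloads and failure condition are illustrative, not taken from the code above.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	payloads := []string{`{"a":1}`, `{"b":2}`, `not-json`}

	results := []string{}
	resultsLock := sync.Mutex{}
	var workErr atomic.Value // holds an error reported by any worker (last write wins)

	var wg sync.WaitGroup
	for _, p := range payloads {
		p := p // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			if p == `not-json` { // stand-in for a deserialization failure
				workErr.Store(fmt.Errorf("cannot parse payload %q", p))
				return
			}
			resultsLock.Lock()
			results = append(results, p)
			resultsLock.Unlock()
		}()
	}
	wg.Wait()

	// Check for failures only after every work item has finished,
	// mirroring the workErr.Load() check after throttler.Work() above.
	if err, ok := workErr.Load().(error); ok {
		fmt.Println("work failed:", err)
		return
	}
	fmt.Println("parsed:", results)
}

In the original, the throttler additionally caps how many of these work items run at once (`maxDesiredLRPGetterWorkPoolSize`), which keeps a large etcd node set from spawning an unbounded number of goroutines.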