// MinionClassifierKeys returns all classifier keys that a minion has
func (c *etcdMinionClient) MinionClassifierKeys(m uuid.UUID) ([]string, error) {
	// Classifier directory in etcd
	classifierDir := filepath.Join(minion.EtcdMinionSpace, m.String(), "classifier")
	opts := &etcdclient.GetOptions{
		Recursive: true,
	}

	resp, err := c.kapi.Get(context.Background(), classifierDir, opts)
	if err != nil {
		return nil, err
	}

	var classifierKeys []string
	for _, node := range resp.Node.Nodes {
		klassifier := new(classifier.Classifier)
		err := json.Unmarshal([]byte(node.Value), &klassifier)
		if err != nil {
			return nil, err
		}
		classifierKeys = append(classifierKeys, klassifier.Key)
	}

	return classifierKeys, nil
}

func (db *DB) StartTask(id uuid.UUID, effective time.Time) error {
	validtime := ValidateEffectiveUnix(effective)
	return db.Exec(
		`UPDATE tasks SET status = ?, started_at = ? WHERE uuid = ?`,
		RunningStatus, validtime, id.String(),
	)
}

func (db *DB) GetTarget(id uuid.UUID) (*Target, error) {
	r, err := db.Query(`
		SELECT t.uuid, t.name, t.summary, t.plugin, t.endpoint, t.agent
			FROM targets t
				LEFT JOIN jobs j ON j.target_uuid = t.uuid
			WHERE t.uuid = ?`, id.String())
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, nil
	}

	ann := &Target{}
	var this NullUUID

	if err = r.Scan(&this, &ann.Name, &ann.Summary, &ann.Plugin, &ann.Endpoint, &ann.Agent); err != nil {
		return nil, err
	}
	ann.UUID = this.UUID

	return ann, nil
}

func (db *DB) GetStore(id uuid.UUID) (*Store, error) {
	r, err := db.Query(`
		SELECT s.uuid, s.name, s.summary, s.plugin, s.endpoint
			FROM stores s
				LEFT JOIN jobs j ON j.store_uuid = s.uuid
			WHERE s.uuid = ?`, id.String())
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, nil
	}

	ann := &Store{}
	var this NullUUID

	if err = r.Scan(&this, &ann.Name, &ann.Summary, &ann.Plugin, &ann.Endpoint); err != nil {
		return nil, err
	}
	ann.UUID = this.UUID

	return ann, nil
}

func (db *InMemDatabase) GetByUuid(id uuid.UUID) (contrail.IObject, error) {
	uid := makeUID(id)
	obj, ok := db.objByIdMap[uid]
	if !ok {
		return nil, fmt.Errorf("%s: Not found", id.String())
	}
	return obj, nil
}

func newPod(h *Host, id uuid.UUID) *Pod {
	if id == nil {
		id = uuid.NewRandom()
	}
	return &Pod{
		Host: h,
		UUID: id,
		ui:   ui.NewUI("yellow", "pod", id.String()),
	}
}

// MinionName returns the name of a minion identified by its uuid
func (c *etcdMinionClient) MinionName(m uuid.UUID) (string, error) {
	nameKey := filepath.Join(minion.EtcdMinionSpace, m.String(), "name")
	resp, err := c.kapi.Get(context.Background(), nameKey, nil)
	if err != nil {
		return "", err
	}

	return resp.Node.Value, nil
}

func (db *DB) UpdateSchedule(id uuid.UUID, ts string) error {
	_, err := timespec.Parse(ts)
	if err != nil {
		return err
	}

	return db.Exec(
		`UPDATE schedules SET timespec = ? WHERE uuid = ?`,
		ts, id.String(),
	)
}

func NewImage(h *Host, id uuid.UUID) *Image {
	if id == nil {
		id = uuid.NewRandom()
	}
	return &Image{
		Host:     h,
		UUID:     id,
		Manifest: *schema.BlankImageManifest(),
		ui:       ui.NewUI("blue", "image", id.String()),
	}
}

func (db *DB) GetTask(id uuid.UUID) (*Task, error) {
	filter := TaskFilter{UUID: id.String()}
	r, err := db.GetAllTasks(&filter)
	if err != nil {
		return nil, err
	}
	if len(r) == 0 {
		return nil, nil
	}
	return r[0], nil
}

func (db *DB) CreateTaskArchive(id uuid.UUID, key string, effective time.Time) (uuid.UUID, error) {
	// fail on empty store_key, as '' seems to satisfy the NOT NULL constraint in postgres
	if key == "" {
		return nil, fmt.Errorf("cannot create an archive without a store_key")
	}

	// determine how long we need to keep this specific archive for
	r, err := db.Query(
		`SELECT r.expiry
			FROM retention r
				INNER JOIN jobs  j ON r.uuid = j.retention_uuid
				INNER JOIN tasks t ON j.uuid = t.job_uuid
			WHERE t.uuid = ?`,
		id.String(),
	)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, fmt.Errorf("failed to determine expiration for task %s", id)
	}

	var expiry int
	if err := r.Scan(&expiry); err != nil {
		return nil, err
	}
	r.Close()

	// insert an archive with all proper references, expiration, etc.
	archive_id := uuid.NewRandom()
	validtime := ValidateEffectiveUnix(effective)
	err = db.Exec(
		`INSERT INTO archives
			(uuid, target_uuid, store_uuid, store_key, taken_at, expires_at, notes, status, purge_reason)
			SELECT ?, t.uuid, s.uuid, ?, ?, ?, '', ?, ''
				FROM tasks
					INNER JOIN jobs    j ON j.uuid = tasks.job_uuid
					INNER JOIN targets t ON t.uuid = j.target_uuid
					INNER JOIN stores  s ON s.uuid = j.store_uuid
				WHERE tasks.uuid = ?`,
		archive_id.String(), key, validtime,
		effective.Add(time.Duration(expiry)*time.Second).Unix(),
		"valid", id.String(),
	)
	if err != nil {
		return nil, err
	}

	// and finally, associate task -> archive
	return archive_id, db.Exec(
		`UPDATE tasks SET archive_uuid = ? WHERE uuid = ?`,
		archive_id.String(), id.String(),
	)
}

// MinionLastseen retrieves the time a minion was last seen
func (c *etcdMinionClient) MinionLastseen(m uuid.UUID) (int64, error) {
	lastseenKey := filepath.Join(minion.EtcdMinionSpace, m.String(), "lastseen")
	resp, err := c.kapi.Get(context.Background(), lastseenKey, nil)
	if err != nil {
		return 0, err
	}

	lastseen, err := strconv.ParseInt(resp.Node.Value, 10, 64)

	return lastseen, err
}

// MinionClassifier retrieves a classifier with the given key
func (c *etcdMinionClient) MinionClassifier(m uuid.UUID, key string) (*classifier.Classifier, error) {
	// Classifier key in etcd
	classifierKey := filepath.Join(minion.EtcdMinionSpace, m.String(), "classifier", key)
	resp, err := c.kapi.Get(context.Background(), classifierKey, nil)
	if err != nil {
		return nil, err
	}

	klassifier := new(classifier.Classifier)
	err = json.Unmarshal([]byte(resp.Node.Value), &klassifier)

	return klassifier, err
}

// MinionTaskResult retrieves the result of a task for a given minion
func (c *etcdMinionClient) MinionTaskResult(m uuid.UUID, t uuid.UUID) (*task.Task, error) {
	// Task key in etcd
	taskKey := filepath.Join(minion.EtcdMinionSpace, m.String(), "log", t.String())

	// Get the task from etcd
	resp, err := c.kapi.Get(context.Background(), taskKey, nil)
	if err != nil {
		return nil, err
	}

	result := new(task.Task)
	err = json.Unmarshal([]byte(resp.Node.Value), &result)

	return result, err
}

func hasPermission(session *LoginSession, uuidBytes uuid.UUID) bool {
	var tags []string
	if session == nil {
		tags = defaulttags
	} else {
		tags = session.tags
	}

	uuidString := uuidBytes.String()
	for _, tag := range tags {
		if tagHasPermission(tag, uuidBytes, uuidString) {
			return true
		}
	}
	return false
}

func new_peer(uuid uuid.UUID) (peer *peer_t) {
	peer = &peer_t{
		uuid_bytes:  []byte(uuid),
		uuid_string: uuid.String(),
	}
	return
}

/* Always make sure a new ID is unique...
 * the probability of a random UUID collision is negligible (on the order of
 * 10^17 UUIDs are needed before there is even a 1% chance of a collision),
 * but we'll be overly cautious and check anyway. */
func getUniqueID() (uuid.UUID, error) {
	var id uuid.UUID
	for {
		id = uuid.NewRandom()
		exists, err := dbClient.UserExists(id.String())
		if err != nil {
			return nil, errors.Wrap(err, "dbClient.UserExists")
		}
		if exists {
			continue
		}
		break
	}
	return id, nil
}

func (db *DB) GetArchive(id uuid.UUID) (*Archive, error) {
	r, err := db.Query(`
		SELECT a.uuid, a.store_key,
		       a.taken_at, a.expires_at, a.notes,
		       t.uuid, t.plugin, t.endpoint,
		       s.uuid, s.plugin, s.endpoint,
		       a.status, a.purge_reason
			FROM archives a
				INNER JOIN targets t ON t.uuid = a.target_uuid
				INNER JOIN stores  s ON s.uuid = a.store_uuid
			WHERE a.uuid = ?`, id.String())
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, nil
	}

	ann := &Archive{}
	var takenAt, expiresAt *int64
	var this, target, store NullUUID

	if err = r.Scan(
		&this, &ann.StoreKey,
		&takenAt, &expiresAt, &ann.Notes,
		&target, &ann.TargetPlugin, &ann.TargetEndpoint,
		&store, &ann.StorePlugin, &ann.StoreEndpoint,
		&ann.Status, &ann.PurgeReason); err != nil {
		return nil, err
	}
	ann.UUID = this.UUID
	ann.TargetUUID = target.UUID
	ann.StoreUUID = store.UUID

	if takenAt != nil {
		ann.TakenAt = parseEpochTime(*takenAt)
	}
	if expiresAt != nil {
		ann.ExpiresAt = parseEpochTime(*expiresAt)
	}

	return ann, nil
}

// MinionSubmitTask submits a new task to a given minion uuid
func (c *etcdMinionClient) MinionSubmitTask(m uuid.UUID, t *task.Task) error {
	rootDir := filepath.Join(minion.EtcdMinionSpace, m.String())
	queueDir := filepath.Join(rootDir, "queue")

	// Check if minion exists first
	_, err := c.kapi.Get(context.Background(), rootDir, nil)
	if err != nil {
		return err
	}

	// Serialize task and submit it to the minion
	data, err := json.Marshal(t)
	if err != nil {
		return err
	}

	_, err = c.kapi.CreateInOrder(context.Background(), queueDir, string(data), nil)

	return err
}

func (db *DB) GetRetentionPolicy(id uuid.UUID) (*RetentionPolicy, error) {
	r, err := db.Query(`
		SELECT uuid, name, summary, expiry
			FROM retention
			WHERE uuid = ?`, id.String())
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, nil
	}

	ann := &RetentionPolicy{}
	var this NullUUID

	if err = r.Scan(&this, &ann.Name, &ann.Summary, &ann.Expires); err != nil {
		return nil, err
	}
	ann.UUID = this.UUID

	return ann, nil
}

func (db *DB) DeleteRetentionPolicy(id uuid.UUID) (bool, error) {
	r, err := db.Query(
		`SELECT COUNT(uuid) FROM jobs WHERE jobs.retention_uuid = ?`,
		id.String(),
	)
	if err != nil {
		return false, err
	}
	defer r.Close()

	// already deleted
	if !r.Next() {
		return true, nil
	}

	var numJobs int
	if err = r.Scan(&numJobs); err != nil {
		return false, err
	}
	if numJobs < 0 {
		return false, fmt.Errorf("Retention policy %s is in use by %d (negative) Jobs", id.String(), numJobs)
	}
	if numJobs > 0 {
		return false, nil
	}
	r.Close()

	return true, db.Exec(
		`DELETE FROM retention WHERE uuid = ?`,
		id.String(),
	)
}

func (db *DB) GetRestoreTaskDetails(archive, target uuid.UUID, storePlugin, storeEndpoint, storeKey, targetPlugin, targetEndpoint, agent *string) error {
	// retrieve store plugin / endpoint / key
	r, err := db.Query(`
		SELECT s.plugin, s.endpoint, a.store_key
			FROM stores s
				INNER JOIN archives a ON s.uuid = a.store_uuid
			WHERE a.uuid = ?`, archive.String())
	if err != nil {
		return err
	}
	defer r.Close()

	if !r.Next() {
		return fmt.Errorf("failed to determine task details for archive %s -> target %s", archive, target)
	}
	if err := r.Scan(storePlugin, storeEndpoint, storeKey); err != nil {
		return err
	}
	r.Close()

	// retrieve target plugin / endpoint
	r, err = db.Query(`
		SELECT t.plugin, t.endpoint, t.agent
			FROM targets t
			WHERE t.uuid = ?`, target.String())
	if err != nil {
		return err
	}
	defer r.Close()

	if !r.Next() {
		return fmt.Errorf("failed to determine task details for archive %s -> target %s", archive, target)
	}
	if err := r.Scan(targetPlugin, targetEndpoint, agent); err != nil {
		return err
	}
	r.Close()

	return nil
}

func (db *DB) GetSchedule(id uuid.UUID) (*Schedule, error) {
	r, err := db.Query(`
		SELECT uuid, name, summary, timespec
			FROM schedules
			WHERE uuid = ?`, id.String())
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if !r.Next() {
		return nil, nil
	}

	ann := &Schedule{}
	var this NullUUID

	if err = r.Scan(&this, &ann.Name, &ann.Summary, &ann.When); err != nil {
		return nil, err
	}
	ann.UUID = this.UUID

	return ann, nil
}

// MinionTaskLog returns the uuids of tasks which have already been
// processed by a minion
func (c *etcdMinionClient) MinionTaskLog(m uuid.UUID) ([]uuid.UUID, error) {
	logDir := filepath.Join(minion.EtcdMinionSpace, m.String(), "log")
	opts := &etcdclient.GetOptions{
		Recursive: true,
	}

	resp, err := c.kapi.Get(context.Background(), logDir, opts)
	if err != nil {
		return nil, err
	}

	var tasks []uuid.UUID
	for _, node := range resp.Node.Nodes {
		t, err := minion.EtcdUnmarshalTask(node)
		if err != nil {
			return nil, err
		}
		tasks = append(tasks, t.ID)
	}

	return tasks, nil
}

func (db *DB) PurgeArchive(id uuid.UUID) error {
	a, err := db.GetArchive(id)
	if err != nil {
		return err
	}
	// GetArchive returns (nil, nil) when no archive matches; guard against a nil dereference
	if a == nil {
		return fmt.Errorf("archive %s not found", id)
	}

	if a.Status == "valid" {
		return fmt.Errorf("Invalid attempt to purge a 'valid' archive detected")
	}

	err = db.Exec(
		`UPDATE archives SET purge_reason = (SELECT status FROM archives WHERE uuid = ?) WHERE uuid = ?`,
		id.String(), id.String(),
	)
	if err != nil {
		return err
	}

	return db.Exec(`UPDATE archives SET status = 'purged' WHERE uuid = ?`, id.String())
}

func (db *DB) AnnotateArchive(id uuid.UUID, notes string) error {
	return db.Exec(
		`UPDATE archives SET notes = ? WHERE uuid = ?`,
		notes, id.String(),
	)
}

func (db *DB) DeleteArchive(id uuid.UUID) (bool, error) {
	return true, db.Exec(
		`DELETE FROM archives WHERE uuid = ?`,
		id.String(),
	)
}

func (db *DB) ExpireArchive(id uuid.UUID) error {
	return db.Exec(`UPDATE archives SET status = 'expired' WHERE uuid = ?`, id.String())
}

func tagHasPermission(tag string, uuidBytes uuid.UUID, uuidString string) bool {
	var hasPerm bool
	uuidarr := uuidBytes.Array()

	permcacheLock.Lock()
	taginfo, ok := permcache[tag]
	if !ok {
		/* Cache Miss: never seen this token */
		taginfo = &TagInfo{name: tag, permissions: make(map[uuid.Array]bool)}
		lruListLock.Lock()
		taginfo.element = lruList.PushFront(taginfo)
		lruListLock.Unlock()
		permcache[tag] = taginfo
	} else {
		lruListLock.Lock()
		lruList.MoveToFront(taginfo.element) // most recently used
		lruListLock.Unlock()
		hasPerm, ok = taginfo.permissions[uuidBytes.Array()]
		if ok {
			/* Cache Hit */
			permcacheLock.Unlock()
			return hasPerm
		}
		/* Cache Miss: never seen this UUID */
	}
	permcacheLock.Unlock()

	/* Ask the metadata server for the metadata of the corresponding stream. */
	query := fmt.Sprintf("select * where uuid = \"%s\";", uuidString)
	mdReq, err := http.NewRequest("POST", fmt.Sprintf("%s?tags=%s", mdServer, tag), strings.NewReader(query))
	if err != nil {
		return false
	}
	mdReq.Header.Set("Content-Type", "text")
	mdReq.Header.Set("Content-Length", fmt.Sprintf("%v", len(query)))
	resp, err := http.DefaultClient.Do(mdReq)
	if err != nil {
		return false
	}

	/* If the response is [] we lack permission; if it's longer we have permission. */
	buf := make([]byte, 3)
	n, err := io.ReadFull(resp.Body, buf)
	resp.Body.Close()
	if n == 3 && buf[0] == '[' {
		hasPerm = true
	} else if n == 2 && err == io.ErrUnexpectedEOF && buf[0] == '[' && buf[1] == ']' {
		hasPerm = false
	} else {
		/* Server error. */
		fmt.Printf("Metadata server error: %v %c %c %c\n", n, buf[0], buf[1], buf[2])
		return false
	}

	/* If we didn't return early due to some kind of error, cache the result and return it. */
	permcacheLock.Lock()
	if taginfo.element != nil { // If this has been evicted from the cache, don't bother
		_, ok := taginfo.permissions[uuidarr]
		taginfo.permissions[uuidarr] = hasPerm // still update cached value
		if !ok { // If a different goroutine added it before we got here, then skip this part
			totalCached += 1
			if totalCached > MAX_CACHED {
				// Make this access return quickly, so start pruning in a new goroutine
				fmt.Println("Pruning cache")
				go pruneCache()
			}
		}
	}
	permcacheLock.Unlock()

	return hasPerm
}

func (db *DB) InvalidateArchive(id uuid.UUID) error {
	return db.Exec(`UPDATE archives SET status = 'invalid' WHERE uuid = ?`, id.String())
}