// TestFiltering tests the filtering of the logging.
func TestFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.SetFilter(func(level logger.LogLevel, info, msg string) bool {
		return level >= logger.LevelWarning && level <= logger.LevelError
	})

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 3)

	logger.UnsetFilter()
	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)
}
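// The logger tests in this section rely on a testLogger helper that isn't
// shown here. Below is a minimal sketch, assuming the logger package's Logger
// backend interface exposes one (info, msg string) method per level; check
// the actual interface definition before reusing this.
type testLogger struct {
	logs []string
}

func (tl *testLogger) Debug(info, msg string)    { tl.logs = append(tl.logs, msg) }
func (tl *testLogger) Info(info, msg string)     { tl.logs = append(tl.logs, msg) }
func (tl *testLogger) Warning(info, msg string)  { tl.logs = append(tl.logs, msg) }
func (tl *testLogger) Error(info, msg string)    { tl.logs = append(tl.logs, msg) }
func (tl *testLogger) Critical(info, msg string) { tl.logs = append(tl.logs, msg) }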
// DeleteHashes deletes all the checksum hashes given from the filestore.
func DeleteHashes(fileHashes []string) {
	if config.Config.UseMySQL {
		deleteHashesMySQL(fileHashes)
	} else if config.Config.UsePostgreSQL {
		deleteHashesPostgreSQL(fileHashes)
	} else {
		for _, ff := range fileHashes {
			delFile, err := Get(ff)
			if err != nil {
				logger.Debugf("Strange, we got an error trying to get %s to delete it.\n", ff)
				logger.Debugf(err.Error())
			} else {
				_ = delFile.Delete()
			}
			// May be able to remove this. Check that it actually deleted
			d, _ := Get(ff)
			if d != nil {
				logger.Debugf("Stranger and stranger, %s is still in the file store.\n", ff)
			}
		}
	}
	if config.Config.LocalFstoreDir != "" {
		for _, fh := range fileHashes {
			err := os.Remove(path.Join(config.Config.LocalFstoreDir, fh))
			if err != nil {
				logger.Errorf(err.Error())
			}
		}
	}
}
// GetStreamOutput gets all ShoveyRunStream objects associated with a ShoveyRun
// of the given output type.
func (sr *ShoveyRun) GetStreamOutput(outputType string, seq int) ([]*ShoveyRunStream, util.Gerror) {
	if config.UsingDB() {
		return sr.getStreamOutSQL(outputType, seq)
	}
	var streams []*ShoveyRunStream
	ds := datastore.New()
	for i := seq; ; i++ {
		streamKey := fmt.Sprintf("%s_%s_%s_%d", sr.ShoveyUUID, sr.NodeName, outputType, i)
		logger.Debugf("Getting %s", streamKey)
		s, found := ds.Get("shovey_run_stream", streamKey)
		if !found {
			break
		}
		logger.Debugf("got a stream: %v", s)
		streams = append(streams, s.(*ShoveyRunStream))
	}
	return streams, nil
}
// LogEvent writes an event of the action type, performed by the given actor,
// against the given object.
func LogEvent(doer actor.Actor, obj util.GoiardiObj, action string) error {
	if !config.Config.LogEvents {
		logger.Debugf("Not logging this event")
		return nil
	}
	logger.Debugf("Logging event")

	var actorType string
	if doer.IsUser() {
		actorType = "user"
	} else {
		actorType = "client"
	}
	le := new(LogInfo)
	le.Action = action
	le.Actor = doer
	le.ActorType = actorType
	le.ObjectName = obj.GetName()
	le.ObjectType = reflect.TypeOf(obj).String()
	le.Time = time.Now()
	extInfo, err := datastore.EncodeToJSON(obj)
	if err != nil {
		return err
	}
	le.ExtendedInfo = extInfo
	actorInfo, err := datastore.EncodeToJSON(doer)
	if err != nil {
		return err
	}
	le.ActorInfo = actorInfo

	if config.Config.SerfEventAnnounce {
		qle := make(map[string]interface{}, 4)
		qle["time"] = le.Time
		qle["action"] = le.Action
		qle["object_type"] = le.ObjectType
		qle["object_name"] = le.ObjectName
		go serfin.SendEvent("log-event", qle)
	}

	if config.UsingDB() {
		return le.writeEventSQL()
	}
	return le.writeEventInMem()
}
func (ic *IdxCollection) searchTextCollection(term string, notop bool) (map[string]Document, error) {
	results := make(map[string]Document)
	ic.m.RLock()
	defer ic.m.RUnlock()
	l := len(ic.docs)
	errCh := make(chan error, l)
	resCh := make(chan *searchRes, l)
	for k, v := range ic.docs {
		go func(k string, v *IdxDoc) {
			m, err := v.TextSearch(term)
			if err != nil {
				errCh <- err
				resCh <- nil
			} else {
				errCh <- nil
				if (m && !notop) || (!m && notop) {
					r := &searchRes{k, v}
					logger.Debugf("Adding result %s to channel", k)
					resCh <- r
				} else {
					resCh <- nil
				}
			}
		}(k, v)
	}
	for i := 0; i < l; i++ {
		e := <-errCh
		if e != nil {
			return nil, e
		}
	}
	for i := 0; i < l; i++ {
		r := <-resCh
		if r != nil {
			logger.Debugf("adding result")
			results[r.key] = Document(r.doc)
		}
	}
	rsafe := safeSearchResults(results)
	return rsafe, nil
}
func (ic *IdxCollection) searchRange(field string, start string, end string, inclusive bool) (map[string]Document, error) {
	results := make(map[string]Document)
	ic.m.RLock()
	defer ic.m.RUnlock()
	l := len(ic.docs)
	errCh := make(chan error, l)
	resCh := make(chan *searchRes, l)
	for k, v := range ic.docs {
		go func(k string, v *IdxDoc) {
			m, err := v.RangeSearch(field, start, end, inclusive)
			if err != nil {
				errCh <- err
				resCh <- nil
			} else {
				errCh <- nil
				if m {
					r := &searchRes{k, v}
					logger.Debugf("Adding result %s to channel", k)
					resCh <- r
				} else {
					resCh <- nil
				}
			}
		}(k, v)
	}
	for i := 0; i < l; i++ {
		e := <-errCh
		if e != nil {
			return nil, e
		}
	}
	for i := 0; i < l; i++ {
		r := <-resCh
		if r != nil {
			logger.Debugf("adding result")
			results[r.key] = Document(r.doc)
		}
	}
	rsafe := safeSearchResults(results)
	return rsafe, nil
}
// Test logging with the go logger.
func TestGoLogger(t *testing.T) {
	log.SetOutput(os.Stdout)
	logger.SetLevel(logger.LevelDebug)
	logger.SetLogger(logger.NewGoLogger())
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
// SendEvent sends a serf event out from goiardi.
func SendEvent(eventName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	err = Serfer.UserEvent(eventName, jsonPayload, true)
	if err != nil {
		logger.Debugf(err.Error())
	}
}
// SendQuery sends a basic, no frills query out over serf.
func SendQuery(queryName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	q := &serfclient.QueryParam{Name: queryName, Payload: jsonPayload}
	err = Serfer.Query(q)
	if err != nil {
		logger.Debugf(err.Error())
	}
}
// Test log level filtering.
func TestLogLevelFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelError)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 2)
}
func startEventMonitor(sc *serfclient.RPCClient, errch chan<- error) {
	ch := make(chan map[string]interface{}, 10)
	sh, err := sc.Stream("*", ch)
	if err != nil {
		errch <- err
		return
	}
	errch <- nil
	defer sc.Stop(sh)

	// watch the events and queries
	for e := range ch {
		logger.Debugf("Got an event: %v", e)
		eName := e["Name"]
		switch eName {
		case "node_status":
			jsonPayload := make(map[string]string)
			err = json.Unmarshal(e["Payload"].([]byte), &jsonPayload)
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			n, _ := node.Get(jsonPayload["node"])
			if n == nil {
				logger.Errorf("No node %s", jsonPayload["node"])
				continue
			}
			err = n.UpdateStatus(jsonPayload["status"])
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			r := map[string]string{"response": "ok"}
			response, _ := json.Marshal(r)
			var id uint64
			switch t := e["ID"].(type) {
			case int64:
				id = uint64(t)
			case uint64:
				id = t
			default:
				logger.Errorf("node_status ID %v type %T not int64 or uint64", e["ID"], e["ID"])
				continue
			}
			sc.Respond(id, response)
		}
	}
}
// Test logging with the syslogger.
func TestSysLogger(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	logger.SetLevel(logger.LevelDebug)
	sl, err := logger.NewSysLogger("GOAS")
	assert.Nil(err)
	logger.SetLogger(sl)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
func (sq *SolrQuery) execute() (map[string]indexer.Document, error) {
	s := sq.queryChain
	curOp := OpNotAnOp
	for s != nil {
		var r map[string]indexer.Document
		var err error
		switch s.(type) {
		case *SubQuery:
			newq, nend, nerr := extractSubQuery(s)
			if nerr != nil {
				return nil, nerr
			}
			s = nend
			var d map[string]indexer.Document
			if curOp == OpBinAnd {
				d = sq.docs
			} else {
				d = make(map[string]indexer.Document)
			}
			nsq := &SolrQuery{queryChain: newq, idxName: sq.idxName, docs: d}
			r, err = nsq.execute()
		default:
			if curOp == OpBinAnd {
				r, err = s.SearchResults(sq.docs)
			} else {
				r, err = s.SearchIndex(sq.idxName)
			}
		}
		if err != nil {
			return nil, err
		}
		if len(sq.docs) == 0 || curOp == OpBinAnd {
			// nothing in place yet
			sq.docs = r
		} else if curOp == OpBinOr {
			for k, v := range r {
				sq.docs[k] = v
			}
		} else {
			logger.Debugf("Somehow we got to what should have been an impossible state with search")
		}
		curOp = s.Op()
		s = s.Next()
	}
	return sq.docs, nil
}
// AllFilestores returns all file checksums and their contents, for exporting.
func AllFilestores() []*FileStore {
	var filestores []*FileStore
	if config.UsingDB() {
		filestores = allFilestoresSQL()
	} else {
		fileList := GetList()
		for _, f := range fileList {
			fl, err := Get(f)
			if err != nil {
				logger.Debugf("File checksum %s was in the list of files, but wasn't found when fetched. Continuing.", f)
				continue
			}
			filestores = append(filestores, fl)
		}
	}
	return filestores
}
func setLogEventPurgeTicker() {
	if config.Config.LogEventKeep != 0 {
		ticker := time.NewTicker(time.Second * time.Duration(60))
		go func() {
			for range ticker.C {
				les, _ := loginfo.GetLogInfos(nil, 0, 1)
				if len(les) != 0 {
					p, err := loginfo.PurgeLogInfos(les[0].ID - config.Config.LogEventKeep)
					if err != nil {
						logger.Errorf(err.Error())
					}
					logger.Debugf("Purged %d events automatically", p)
				}
			}
		}()
	}
}
func deleteHashesPostgreSQL(fileHashes []string) {
	if len(fileHashes) == 0 {
		return // nothing to do
	}
	tx, err := datastore.Dbh.Begin()
	if err != nil {
		log.Fatal(err)
	}
	deleteQuery := "DELETE FROM goiardi.file_checksums WHERE checksum = ANY($1::varchar(32)[])"
	_, err = tx.Exec(deleteQuery, "{"+strings.Join(fileHashes, ",")+"}")
	if err != nil && err != sql.ErrNoRows {
		logger.Debugf("Error %s trying to delete hashes", err.Error())
		tx.Rollback()
		return
	}
	tx.Commit()
}
// AddStreamOutput adds a chunk of output from the job to the output list on the
// server stored in the ShoveyRunStream objects.
func (sr *ShoveyRun) AddStreamOutput(output string, outputType string, seq int, isLast bool) util.Gerror {
	if config.UsingDB() {
		return sr.addStreamOutSQL(output, outputType, seq, isLast)
	}
	stream := &ShoveyRunStream{ShoveyUUID: sr.ShoveyUUID, NodeName: sr.NodeName, Seq: seq, OutputType: outputType, Output: output, IsLast: isLast, CreatedAt: time.Now()}
	ds := datastore.New()
	streamKey := fmt.Sprintf("%s_%s_%s_%d", sr.ShoveyUUID, sr.NodeName, outputType, seq)
	logger.Debugf("Setting %s", streamKey)
	_, found := ds.Get("shovey_run_stream", streamKey)
	if found {
		err := util.Errorf("sequence %d for %s - %s already exists", seq, sr.ShoveyUUID, sr.NodeName)
		err.SetStatus(http.StatusConflict)
		return err
	}
	ds.Set("shovey_run_stream", streamKey, stream)
	return nil
}
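// Illustrative usage sketch (not from the original source): how
// AddStreamOutput and GetStreamOutput pair up on the in-memory datastore,
// assuming a *ShoveyRun from an existing job and config.UsingDB() returning
// false. Chunks are keyed by (UUID, node, output type, seq), so
// GetStreamOutput can resume from any sequence number and stops at the first
// missing one.
func exampleStreamRoundTrip(sr *ShoveyRun) {
	_ = sr.AddStreamOutput("line one\n", "stdout", 0, false)
	_ = sr.AddStreamOutput("line two\n", "stdout", 1, true)
	// fetches seq 0 and 1, then stops because seq 2 was never stored
	streams, _ := sr.GetStreamOutput("stdout", 0)
	logger.Debugf("got %d stream chunks", len(streams))
}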
// AllCookbooks returns all the cookbooks that have been uploaded to this server.
func AllCookbooks() (cookbooks []*Cookbook) {
	if config.UsingDB() {
		cookbooks = allCookbooksSQL()
		for _, c := range cookbooks {
			// populate the versions hash
			c.sortedVersions()
		}
	} else {
		cookbookList := GetList()
		for _, c := range cookbookList {
			cb, err := Get(c)
			if err != nil {
				logger.Debugf("Curious. Cookbook %s was in the cookbook list, but wasn't found when fetched. Continuing.", c)
				continue
			}
			cookbooks = append(cookbooks, cb)
		}
	}
	return cookbooks
}
func (s *Shovey) checkCompleted() {
	if config.UsingDB() {
		s.checkCompletedSQL()
		return
	}
	srs, err := s.GetNodeRuns()
	if err != nil {
		logger.Debugf("Something went wrong checking for job completion: %s", err.Error())
		return
	}
	c := 0
	for _, sr := range srs {
		if sr.Status == "invalid" || sr.Status == "succeeded" || sr.Status == "failed" || sr.Status == "down" || sr.Status == "nacked" || sr.Status == "cancelled" {
			c++
		}
	}
	if c == len(s.NodeNames) {
		s.Status = "complete"
		s.save()
	}
}
func deleteHashesMySQL(fileHashes []string) {
	if len(fileHashes) == 0 {
		return // nothing to do
	}
	tx, err := datastore.Dbh.Begin()
	if err != nil {
		log.Fatal(err)
	}
	deleteQuery := "DELETE FROM file_checksums WHERE checksum IN(?" + strings.Repeat(",?", len(fileHashes)-1) + ")"
	delArgs := make([]interface{}, len(fileHashes))
	for i, v := range fileHashes {
		delArgs[i] = v
	}
	_, err = tx.Exec(deleteQuery, delArgs...)
	if err != nil && err != sql.ErrNoRows {
		logger.Debugf("Error %s trying to delete hashes", err.Error())
		tx.Rollback()
		return
	}
	tx.Commit()
}
// SyncMonitor updates the monitor's name on the server, creating the monitor
// if it does not exist.
func SyncMonitor(m *Monitor) error {
	monitorData := map[string]string{
		"name": m.Name,
	}
	monitorDataJson, _ := json.Marshal(monitorData)
	logger.Debugf("%s", string(monitorDataJson))
	req, err := http.NewRequest("PATCH", Config.APIUrl+"/api/monitors/"+strconv.Itoa(int(m.ComponentID))+"/", bytes.NewBuffer(monitorDataJson))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Token "+Config.APIToken)
	client := &http.Client{}
	if Config.InsecureAPI {
		client.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	/*
		if res.StatusCode == 404 {
			// no such monitor, create a new one
			monitorData["status"] = "U"
			// TODO need to write obtained monitor pk back to config
	*/
	if res.StatusCode != 200 {
		body, _ := ioutil.ReadAll(res.Body)
		return fmt.Errorf("Cannot sync monitor %d (status code %d): %s", m.ComponentID, res.StatusCode, string(body))
	}
	return nil
}
func (p *PostgresSearch) Search(idx string, q string, rows int, sortOrder string, start int, partialData map[string]interface{}) ([]map[string]interface{}, error) {
	// check that the endpoint actually exists
	sqlStmt := "SELECT 1 FROM goiardi.search_collections WHERE organization_id = $1 AND name = $2"
	stmt, serr := datastore.Dbh.Prepare(sqlStmt)
	if serr != nil {
		return nil, serr
	}
	defer stmt.Close()
	var zzz int
	serr = stmt.QueryRow(1, idx).Scan(&zzz) // don't care about zzz
	if serr != nil {
		if serr == sql.ErrNoRows {
			serr = fmt.Errorf("I don't know how to search for %s data objects.", idx)
		}
		return nil, serr
	}

	// Special case "goodness". If the search term is "*:*" with no
	// qualifiers, short circuit everything and just get a list of the
	// distinct items.
	var qresults []string

	if q == "*:*" {
		logger.Debugf("Searching '*:*' on %s, short circuiting", idx)
		var builtinIdx bool
		if idx == "node" || idx == "client" || idx == "environment" || idx == "role" {
			builtinIdx = true
			sqlStmt = fmt.Sprintf("SELECT COALESCE(ARRAY_AGG(name), '{}'::text[]) FROM goiardi.%ss WHERE organization_id = $1", idx)
		} else {
			sqlStmt = "SELECT COALESCE(ARRAY_AGG(orig_name), '{}'::text[]) FROM goiardi.data_bag_items JOIN goiardi.data_bags ON goiardi.data_bag_items.data_bag_id = goiardi.data_bags.id WHERE goiardi.data_bags.organization_id = $1 AND goiardi.data_bags.name = $2"
		}

		var res util.StringSlice
		stmt, err := datastore.Dbh.Prepare(sqlStmt)
		if err != nil {
			return nil, err
		}
		defer stmt.Close()
		if builtinIdx {
			err = stmt.QueryRow(1).Scan(&res)
		} else {
			err = stmt.QueryRow(1, idx).Scan(&res)
		}
		if err != nil && err != sql.ErrNoRows {
			return nil, err
		}
		qresults = res
	} else {
		// keep up with the ersatz solr.
		qq := &Tokenizer{Buffer: q}
		qq.Init()
		if err := qq.Parse(); err != nil {
			return nil, err
		}
		qq.Execute()
		qchain := qq.Evaluate()
		pgQ := &PgQuery{idx: idx, queryChain: qchain}

		err := pgQ.execute()
		if err != nil {
			return nil, err
		}
		qresults, err = pgQ.results()
		if err != nil {
			return nil, err
		}
	}
	// THE WRONG WAY:
	// Eventually, ordering by the keys themselves would be awesome.
	objs := getResults(idx, qresults)
	res := make([]map[string]interface{}, len(objs))
	for i, r := range objs {
		switch r := r.(type) {
		case *client.Client:
			jc := map[string]interface{}{
				"name":       r.Name,
				"chef_type":  r.ChefType,
				"json_class": r.JSONClass,
				"admin":      r.Admin,
				"public_key": r.PublicKey(),
				"validator":  r.Validator,
			}
			res[i] = jc
		default:
			res[i] = util.MapifyObject(r)
		}
	}

	/* If we're doing partial search, tease out the fields we want. */
	if partialData != nil {
		var err error
		res, err = formatPartials(res, objs, partialData)
		if err != nil {
			return nil, err
		}
	}

	// and at long last, sort
	ss := strings.Split(sortOrder, " ")
	sortKey := ss[0]
	if sortKey == "id" {
		sortKey = "name"
	}
	var ordering string
	if len(ss) > 1 {
		ordering = strings.ToLower(ss[1])
	} else {
		ordering = "asc"
	}
	sortResults := results{res, sortKey}
	if ordering == "desc" {
		sort.Sort(sort.Reverse(sortResults))
	} else {
		sort.Sort(sortResults)
	}
	res = sortResults.res

	end := start + rows
	if end > len(res) {
		end = len(res)
	}
	res = res[start:end]
	return res, nil
}
func (pq *PgQuery) execute(startTableID ...*int) error {
	p := pq.queryChain
	curOp := OpNotAnOp
	var t *int
	if len(startTableID) == 0 {
		z := 0
		t = &z
	} else {
		t = startTableID[0]
	}
	for p != nil {
		switch c := p.(type) {
		case *BasicQuery:
			// an empty field can only happen up here
			if c.field != "" {
				pq.paths = append(pq.paths, string(c.field))
			}
			args, qstr := buildBasicQuery(c.field, c.term, t, curOp)
			pq.arguments = append(pq.arguments, args...)
			pq.queryStrs = append(pq.queryStrs, qstr)
			*t++
		case *GroupedQuery:
			pq.paths = append(pq.paths, string(c.field))
			args, qstr := buildGroupedQuery(c.field, c.terms, t, curOp)
			pq.arguments = append(pq.arguments, args...)
			pq.queryStrs = append(pq.queryStrs, qstr)
			*t++
		case *RangeQuery:
			pq.paths = append(pq.paths, string(c.field))
			args, qstr := buildRangeQuery(c.field, c.start, c.end, c.inclusive, t, curOp)
			pq.arguments = append(pq.arguments, args...)
			pq.queryStrs = append(pq.queryStrs, qstr)
			*t++
		case *SubQuery:
			newq, nend, nerr := extractSubQuery(c)
			if nerr != nil {
				return nerr
			}
			p = nend
			np := &PgQuery{queryChain: newq}
			err := np.execute(t)
			if err != nil {
				return err
			}
			pq.paths = append(pq.paths, np.paths...)
			pq.arguments = append(pq.arguments, np.arguments...)
			pq.queryStrs = append(pq.queryStrs, fmt.Sprintf("%s(%s)", binOp(curOp), strings.Join(np.queryStrs, " ")))
		default:
			err := fmt.Errorf("Unknown type %T for query", c)
			return err
		}
		curOp = p.Op()
		p = p.Next()
	}
	fullQ, allArgs := craftFullQuery(1, pq.idx, pq.paths, pq.arguments, pq.queryStrs, t)
	logger.Debugf("pg search info:")
	logger.Debugf("full query: %s", fullQ)
	logger.Debugf("all %d args: %v", len(allArgs), allArgs)
	pq.fullQuery = fullQ
	pq.allArgs = allArgs
	return nil
}
func (s *Shovey) startJobs() Qerror {
	// determine if we meet the quorum
	// First is this a percentage or absolute quorum
	qnum, err := getQuorum(s.Quorum, len(s.NodeNames))
	if err != nil {
		return err
	}
	// query node statuses to see if enough are up
	upNodes, nerr := node.GetNodesByStatus(s.NodeNames, "up")
	if nerr != nil {
		return CastErr(nerr)
	}
	if len(upNodes) < qnum {
		err = Errorf("Not enough nodes were up to execute job %s - got %d, needed at least %d", s.RunID, len(upNodes), qnum)
		err.SetStatus("quorum_failed")
		// be setting up/down nodes here too
		return err
	}

	// if that all worked, send the commands
	errch := make(chan error)
	go func() {
		tagNodes := make([]string, len(upNodes))
		d := make(map[string]bool)
		for i, n := range upNodes {
			tagNodes[i] = n.Name
			d[n.Name] = true
			sr := &ShoveyRun{ShoveyUUID: s.RunID, NodeName: n.Name, Status: "created"}
			err := sr.save()
			if err != nil {
				logger.Errorf("error saving shovey run: %s", err.Error())
				errch <- err
				return
			}
		}
		for _, n := range s.NodeNames {
			if !d[n] {
				sr := &ShoveyRun{ShoveyUUID: s.RunID, NodeName: n, Status: "down", EndTime: time.Now()}
				err := sr.save()
				if err != nil {
					logger.Errorf("error saving shovey run: %s", err.Error())
					errch <- err
					return
				}
			}
		}
		// make sure this is the right amount of buffering
		payload := make(map[string]string)
		payload["run_id"] = s.RunID
		payload["command"] = s.Command
		payload["action"] = "start"
		payload["time"] = time.Now().Format(time.RFC3339)
		payload["timeout"] = fmt.Sprintf("%d", s.Timeout)
		sig, serr := s.signRequest(payload)
		if serr != nil {
			errch <- serr
			return
		}
		payload["signature"] = sig
		jsonPayload, _ := json.Marshal(payload)

		ackCh := make(chan string, len(tagNodes))
		respCh := make(chan serfclient.NodeResponse, len(tagNodes))
		q := &serfclient.QueryParam{Name: "shovey", Payload: jsonPayload, FilterNodes: tagNodes, RequestAck: true, AckCh: ackCh, RespCh: respCh}
		qerr := serfin.Serfer.Query(q)
		if qerr != nil {
			errch <- qerr
			return
		}
		errch <- nil

		srCh := make(chan *ShoveyRun, len(upNodes)*2)
		go func() {
			for sr := range srCh {
				sr.save()
			}
		}()

		for i := 0; i < len(upNodes)*2; i++ {
			select {
			case a := <-ackCh:
				if a == "" {
					continue
				}
				sr, err := s.GetRun(a)
				if err != nil {
					logger.Debugf("err with sr %s: %s", a, err.Error())
					continue
				}
				sr.AckTime = time.Now()
				srCh <- sr
			case r := <-respCh:
				logger.Debugf("got a response: %v", r)
			case <-time.After(s.Timeout * time.Second):
				logger.Debugf("timed out, might not be appropriate")
			}
		}
		close(srCh)
		logger.Debugf("out of for/select loop for shovey responses")
	}()

	grerr := <-errch
	if grerr != nil {
		return CastErr(grerr)
	}

	return nil
}
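// getQuorum is called above but not shown in this section. Below is a minimal
// sketch of the expected behavior, assuming the quorum string is either an
// absolute count ("3") or a percentage of the targeted nodes ("75%"); the
// real implementation returns a Qerror and may differ in detail. The function
// name here is hypothetical (it uses math, strconv, and strings).
func getQuorumSketch(quorum string, numNodes int) (int, error) {
	if strings.HasSuffix(quorum, "%") {
		// percentage quorum: "75%" of 4 nodes rounds up to 3
		pct, err := strconv.ParseFloat(strings.TrimSuffix(quorum, "%"), 64)
		if err != nil {
			return 0, err
		}
		return int(math.Ceil(float64(numNodes) * pct / 100)), nil
	}
	// absolute quorum: a plain integer number of nodes
	return strconv.Atoi(quorum)
}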
func shoveyHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	opUser, oerr := actor.GetReqUser(r.Header.Get("X-OPS-USERID"))
	if oerr != nil {
		jsonErrorReport(w, r, oerr.Error(), oerr.Status())
		return
	}
	if !opUser.IsAdmin() && r.Method != "PUT" {
		jsonErrorReport(w, r, "you cannot perform this action", http.StatusForbidden)
		return
	}
	if !config.Config.UseShovey {
		jsonErrorReport(w, r, "shovey is not enabled", http.StatusPreconditionFailed)
		return
	}
	pathArray := splitPath(r.URL.Path)
	pathArrayLen := len(pathArray)
	if pathArrayLen < 2 || pathArrayLen > 4 || pathArray[1] == "" {
		jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
		return
	}
	op := pathArray[1]

	shoveyResponse := make(map[string]interface{})

	switch op {
	case "jobs":
		switch r.Method {
		case "GET":
			switch pathArrayLen {
			case 4:
				shove, err := shovey.Get(pathArray[2])
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				sj, err := shove.GetRun(pathArray[3])
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				shoveyResponse, err = sj.ToJSON()
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
			case 3:
				shove, err := shovey.Get(pathArray[2])
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				shoveyResponse, err = shove.ToJSON()
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
			default:
				shoveyIDs, err := shovey.AllShoveyIDs()
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				enc := json.NewEncoder(w)
				if jerr := enc.Encode(&shoveyIDs); jerr != nil {
					jsonErrorReport(w, r, jerr.Error(), http.StatusInternalServerError)
				}
				return
			}
		case "POST":
			if pathArrayLen != 2 {
				jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
				return
			}
			shvData, err := parseObjJSON(r.Body)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
				return
			}
			logger.Debugf("shvData: %v", shvData)
			var quorum string
			var timeout int
			var ok bool
			if quorum, ok = shvData["quorum"].(string); !ok {
				quorum = "100%"
			}
			logger.Debugf("run_timeout is a %T", shvData["run_timeout"])
			if t, ok := shvData["run_timeout"].(float64); !ok {
				timeout = 300
			} else {
				timeout = int(t)
			}
			var nodeNames []string
			if shvNodes, ok := shvData["nodes"].([]interface{}); ok {
				if len(shvNodes) == 0 {
					jsonErrorReport(w, r, "no nodes provided", http.StatusBadRequest)
					return
				}
				nodeNames = make([]string, len(shvNodes))
				for i, v := range shvNodes {
					nodeNames[i] = v.(string)
				}
			} else {
				jsonErrorReport(w, r, "node list not an array", http.StatusBadRequest)
				return
			}
			s, gerr := shovey.New(shvData["command"].(string), timeout, quorum, nodeNames)
			if gerr != nil {
				jsonErrorReport(w, r, gerr.Error(), gerr.Status())
				return
			}
			gerr = s.Start()
			if gerr != nil {
				jsonErrorReport(w, r, gerr.Error(), gerr.Status())
				return
			}
			shoveyResponse["id"] = s.RunID
			shoveyResponse["uri"] = util.CustomURL(fmt.Sprintf("/shovey/jobs/%s", s.RunID))
		case "PUT":
			switch pathArrayLen {
			case 3:
				if pathArray[2] != "cancel" {
					jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
					return
				}
				cancelData, perr := parseObjJSON(r.Body)
				if perr != nil {
					jsonErrorReport(w, r, perr.Error(), http.StatusBadRequest)
					return
				}
				var nodeNames []string
				runID, ok := cancelData["run_id"].(string)
				if !ok {
					jsonErrorReport(w, r, "No shovey run ID provided, or provided id was invalid", http.StatusBadRequest)
					return
				}
				if nn, ok := cancelData["nodes"].([]interface{}); ok {
					for _, v := range nn {
						nodeNames = append(nodeNames, v.(string))
					}
				}
				shove, err := shovey.Get(runID)
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				if len(nodeNames) != 0 {
					serr := shove.CancelRuns(nodeNames)
					if serr != nil {
						logger.Debugf("Error cancelling runs: %s", serr.Error())
						jsonErrorReport(w, r, serr.Error(), serr.Status())
						return
					}
				} else {
					err = shove.Cancel()
					if err != nil {
						jsonErrorReport(w, r, err.Error(), err.Status())
						return
					}
				}
				shoveyResponse, err = shove.ToJSON()
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
			case 4:
				sjData, perr := parseObjJSON(r.Body)
				if perr != nil {
					jsonErrorReport(w, r, perr.Error(), http.StatusBadRequest)
					return
				}
				nodeName := pathArray[3]
				logger.Debugf("sjData: %v", sjData)
				shove, err := shovey.Get(pathArray[2])
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				sj, err := shove.GetRun(nodeName)
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				err = sj.UpdateFromJSON(sjData)
				if err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
				shoveyResponse["id"] = shove.RunID
				shoveyResponse["node"] = nodeName
				shoveyResponse["response"] = "ok"
			default:
				jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
				return
			}
		default:
			jsonErrorReport(w, r, "Unrecognized method", http.StatusMethodNotAllowed)
			return
		}
	case "stream":
		if pathArrayLen != 4 {
			jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
			return
		}
		switch r.Method {
		case "GET":
			var seq int
			r.ParseForm()
			if s, found := r.Form["sequence"]; found {
				if len(s) == 0 {
					jsonErrorReport(w, r, "invalid sequence", http.StatusBadRequest)
					return
				}
				var err error
				seq, err = strconv.Atoi(s[0])
				if err != nil {
					jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
					return
				}
			}
			var outType string
			if o, found := r.Form["output_type"]; found {
				if len(o) == 0 {
					jsonErrorReport(w, r, "invalid output type", http.StatusBadRequest)
					return
				}
				outType = o[0]
				if outType != "stdout" && outType != "stderr" && outType != "both" {
					jsonErrorReport(w, r, "output type must be 'stdout', 'stderr', or 'both'", http.StatusBadRequest)
					return
				}
			} else {
				outType = "stdout"
			}
			shove, err := shovey.Get(pathArray[2])
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			sj, err := shove.GetRun(pathArray[3])
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			stream, err := sj.GetStreamOutput(outType, seq)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			combinedOutput, err := sj.CombineStreamOutput(outType, seq)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			shoveyResponse["run_id"] = sj.ShoveyUUID
			shoveyResponse["node_name"] = sj.NodeName
			shoveyResponse["output_type"] = outType
			shoveyResponse["is_last"] = false
			if len(stream) != 0 {
				shoveyResponse["last_seq"] = stream[len(stream)-1].Seq
				shoveyResponse["is_last"] = stream[len(stream)-1].IsLast
			}
			shoveyResponse["output"] = combinedOutput
		case "PUT":
			streamData, serr := parseObjJSON(r.Body)
			logger.Debugf("streamData: %v", streamData)
			if serr != nil {
				jsonErrorReport(w, r, serr.Error(), http.StatusBadRequest)
				return
			}
			shove, err := shovey.Get(pathArray[2])
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			sj, err := shove.GetRun(pathArray[3])
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			output, ok := streamData["output"].(string)
			if !ok {
				oerr := util.Errorf("invalid output")
				jsonErrorReport(w, r, oerr.Error(), oerr.Status())
				return
			}
			outputType, ok := streamData["output_type"].(string)
			if !ok {
				oerr := util.Errorf("invalid output type")
				jsonErrorReport(w, r, oerr.Error(), oerr.Status())
				return
			}
			isLast, ok := streamData["is_last"].(bool)
			if !ok {
				oerr := util.Errorf("invalid is_last")
				jsonErrorReport(w, r, oerr.Error(), oerr.Status())
				return
			}
			seqFloat, ok := streamData["seq"].(float64)
			if !ok {
				oerr := util.Errorf("invalid seq")
				jsonErrorReport(w, r, oerr.Error(), oerr.Status())
				return
			}
			seq := int(seqFloat)
			err = sj.AddStreamOutput(output, outputType, seq, isLast)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			shoveyResponse["response"] = "ok"
		default:
			jsonErrorReport(w, r, "Unrecognized method", http.StatusMethodNotAllowed)
			return
		}
	default:
		jsonErrorReport(w, r, "Unrecognized operation", http.StatusBadRequest)
		return
	}

	enc := json.NewEncoder(w)
	if jerr := enc.Encode(&shoveyResponse); jerr != nil {
		jsonErrorReport(w, r, jerr.Error(), http.StatusInternalServerError)
	}
}
func (h *interceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	/* knife sometimes sends URL paths that start with //. Redirecting
	 * worked for GETs, but since it was breaking POSTs and screwing with
	 * GETs with query params, we just clean up the path and move on. */

	/* log the URL */
	// TODO: set this to verbosity level 4 or so
	logger.Debugf("Serving %s -- %s", r.URL.Path, r.Method)

	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			r.URL.Path = p
		}
	}

	/* Make configurable, I guess, but Chef wants it to be 1000000 */
	if !strings.HasPrefix(r.URL.Path, "/file_store") && r.ContentLength > config.Config.JSONReqMaxSize {
		logger.Debugf("Content length was too long for %s", r.URL.Path)
		http.Error(w, "Content-length too long!", http.StatusRequestEntityTooLarge)
		// hmm, with 1.5 it gets a broken pipe now if we don't do
		// anything with the body they're trying to send. Try copying it
		// to /dev/null. This seems crazy, but merely closing the body
		// doesn't actually work.
		io.Copy(ioutil.Discard, r.Body)
		r.Body.Close()
		return
	} else if r.ContentLength > config.Config.ObjMaxSize {
		http.Error(w, "Content-length waaaaaay too long!", http.StatusRequestEntityTooLarge)
		return
	}

	w.Header().Set("X-Goiardi", "yes")
	w.Header().Set("X-Goiardi-Version", config.Version)
	w.Header().Set("X-Chef-Version", config.ChefVersion)
	apiInfo := fmt.Sprintf("flavor=osc;version:%s;goiardi=%s", config.ChefVersion, config.Version)
	w.Header().Set("X-Ops-API-Info", apiInfo)

	userID := r.Header.Get("X-OPS-USERID")
	if rs := r.Header.Get("X-Ops-Request-Source"); rs == "web" {
		/* If use-auth is on and disable-webui is on, and this is a
		 * webui connection, it needs to fail. */
		if config.Config.DisableWebUI {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to log in through webui, but webui is disabled")
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}

		/* Check that the user in question with the web request exists.
		 * If not, fail. */
		if _, uherr := actor.GetReqUser(userID); uherr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to use invalid user %s through X-Ops-Request-Source = web", userID)
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}
		userID = "chef-webui"
	}
	/* Only perform the authorization check if that's configured. Bomb with
	 * an error if the check of the headers, timestamps, etc. fails. */
	/* No clue why /principals doesn't require authorization. Hrmph. */
	if config.Config.UseAuth && !strings.HasPrefix(r.URL.Path, "/file_store") && !(strings.HasPrefix(r.URL.Path, "/principals") && r.Method == "GET") {
		herr := authentication.CheckHeader(userID, r)
		if herr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Authorization failure: %s\n", herr.Error())
			w.Header().Set("Www-Authenticate", `X-Ops-Sign version="1.0" version="1.1" version="1.2"`)
			//http.Error(w, herr.Error(), herr.Status())
			jsonErrorReport(w, r, herr.Error(), herr.Status())
			return
		}
	}

	// Experimental: decompress gzipped requests
	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(r.Body)
		if err != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Failure decompressing gzipped request body: %s\n", err.Error())
			jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
			return
		}
		r.Body = reader
	}

	http.DefaultServeMux.ServeHTTP(w, r)
}
// NewDBItem creates a new data bag item in the associated data bag.
func (db *DataBag) NewDBItem(rawDbagItem map[string]interface{}) (*DataBagItem, util.Gerror) {
	var dbiID string
	var dbagItem *DataBagItem
	switch t := rawDbagItem["id"].(type) {
	case string:
		if t == "" {
			err := util.Errorf("Field 'id' missing")
			return nil, err
		}
		dbiID = t
	default:
		err := util.Errorf("Field 'id' missing")
		return nil, err
	}
	if err := validateDataBagName(dbiID, true); err != nil {
		return nil, err
	}
	dbiFullName := fmt.Sprintf("data_bag_item_%s_%s", db.Name, dbiID)

	if config.UsingDB() {
		d, err := db.getDBItemSQL(dbiID)
		if d != nil || (err != nil && err != sql.ErrNoRows) {
			if err != nil {
				logger.Debugf("Log real SQL error in NewDBItem: %s", err.Error())
			}
			gerr := util.Errorf("Data Bag Item '%s' already exists in Data Bag '%s'.", dbiID, db.Name)
			gerr.SetStatus(http.StatusConflict)
			return nil, gerr
		}
		if config.Config.UseMySQL {
			dbagItem, err = db.newDBItemMySQL(dbiID, rawDbagItem)
		} else if config.Config.UsePostgreSQL {
			dbagItem, err = db.newDBItemPostgreSQL(dbiID, rawDbagItem)
		}
		if err != nil {
			gerr := util.Errorf(err.Error())
			gerr.SetStatus(http.StatusInternalServerError)
			return nil, gerr
		}
	} else {
		/* Look for an existing dbag item with this name */
		if d, _ := db.GetDBItem(dbiID); d != nil {
			gerr := util.Errorf("Data Bag Item '%s' already exists in Data Bag '%s'.", dbiID, db.Name)
			gerr.SetStatus(http.StatusConflict)
			return nil, gerr
		}
		/* But should we store the raw data as a JSON string? */
		dbagItem = &DataBagItem{
			Name:        dbiFullName,
			ChefType:    "data_bag_item",
			JSONClass:   "Chef::DataBagItem",
			DataBagName: db.Name,
			RawData:     rawDbagItem,
		}
		db.DataBagItems[dbiID] = dbagItem
	}
	err := db.Save()
	if err != nil {
		gerr := util.Errorf(err.Error())
		gerr.SetStatus(http.StatusInternalServerError)
		return nil, gerr
	}
	indexer.IndexObj(dbagItem)
	return dbagItem, nil
}
// CancelRuns cancels the shovey runs given in the slice of strings with the
// node names to cancel jobs on.
func (s *Shovey) CancelRuns(nodeNames []string) util.Gerror {
	if config.UsingDB() {
		err := s.cancelRunsSQL()
		if err != nil {
			return err
		}
	} else {
		for _, n := range nodeNames {
			sr, err := s.GetRun(n)
			if err != nil {
				return err
			}
			if sr.Status != "invalid" && sr.Status != "succeeded" && sr.Status != "failed" && sr.Status != "down" && sr.Status != "nacked" {
				sr.EndTime = time.Now()
				sr.Status = "cancelled"
				err = sr.save()
				if err != nil {
					return err
				}
			}
		}
	}
	if len(nodeNames) == len(s.NodeNames) {
		sort.Strings(nodeNames)
		sort.Strings(s.NodeNames)
		if reflect.DeepEqual(nodeNames, s.NodeNames) {
			s.Status = "cancelled"
			s.save()
		}
	} else {
		s.checkCompleted()
	}

	payload := make(map[string]string)
	payload["action"] = "cancel"
	payload["run_id"] = s.RunID
	payload["time"] = time.Now().Format(time.RFC3339)
	sig, serr := s.signRequest(payload)
	if serr != nil {
		return util.CastErr(serr)
	}
	payload["signature"] = sig
	jsonPayload, _ := json.Marshal(payload)

	ackCh := make(chan string, len(nodeNames))
	q := &serfclient.QueryParam{Name: "shovey", Payload: jsonPayload, FilterNodes: nodeNames, RequestAck: true, AckCh: ackCh}
	err := serfin.Serfer.Query(q)
	if err != nil {
		return util.CastErr(err)
	}

	doneCh := make(chan struct{})
	go func() {
		for c := range ackCh {
			logger.Debugf("Received acknowledgement from %s", c)
		}
		doneCh <- struct{}{}
	}()
	select {
	case <-doneCh:
		logger.Infof("All nodes acknowledged cancellation")
		// probably do a report here?
	case <-time.After(time.Duration(60) * time.Second):
		logger.Errorf("Didn't get all acknowledgements within 60 seconds")
	}

	return nil
}