Example No. 1
func (idoc *IdxDoc) update(object Indexable) {
	idoc.m.Lock()
	defer idoc.m.Unlock()
	flattened := util.Indexify(object.Flatten())
	flatText := strings.Join(flattened, "\n")
	/* recover from horrific trie errors that seem to happen with really
	 * big values. :-/ */
	defer func() {
		if e := recover(); e != nil {
			logger.Errorf("There was a problem creating the trie: %s", fmt.Sprintln(e))
		}
	}()
	trie, err := gtrie.Create(flattened)
	if err != nil {
		logger.Errorf(err.Error())
	} else {
		var err error
		idoc.trie, err = compressTrie(trie)
		if err != nil {
			panic(err)
		}
		idoc.docText, err = compressText(flatText)
		if err != nil {
			panic(err)
		}
	}
}
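The update above combines three things: a mutex held for the whole rebuild, a deferred recover so a panic in the trie construction doesn't take the process down, and assignment of the new data only on success. Here is a stand-alone sketch of that pattern, with a hypothetical Doc type and buildIndex function standing in for IdxDoc and the gtrie/compress calls:

package main

import (
	"fmt"
	"sync"
)

// Doc is a hypothetical stand-in for IdxDoc: a mutex-guarded document.
type Doc struct {
	m    sync.Mutex
	text string
}

// buildIndex is a placeholder for the trie construction; it panics on
// oversized input to mimic the failure mode handled above.
func buildIndex(s string) string {
	if len(s) > 1<<20 {
		panic("input too large")
	}
	return s
}

// update locks the document, recovers from any panic in buildIndex, and
// only assigns the new text if construction succeeded.
func (d *Doc) update(s string) {
	d.m.Lock()
	defer d.m.Unlock()
	defer func() {
		if e := recover(); e != nil {
			fmt.Printf("problem building the index: %v\n", e)
		}
	}()
	d.text = buildIndex(s)
}

func main() {
	d := &Doc{}
	d.update("hello")
	fmt.Println(d.text)
}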
Example No. 2
func handleSignals() {
	c := make(chan os.Signal, 1)
	// SIGTERM is not exactly portable, but Go provides a synthetic version
	// of it on Windows, so listening for it here shouldn't break anything
	// when running on Windows.
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// if we receive a SIGINT or SIGTERM, do cleanup here.
	go func() {
		for sig := range c {
			if sig == os.Interrupt || sig == syscall.SIGTERM {
				logger.Infof("cleaning up...")
				if config.Config.FreezeData {
					if config.Config.DataStoreFile != "" {
						ds := datastore.New()
						if err := ds.Save(config.Config.DataStoreFile); err != nil {
							logger.Errorf(err.Error())
						}
					}
					if err := indexer.SaveIndex(config.Config.IndexFile); err != nil {
						logger.Errorf(err.Error())
					}
				}
				if config.UsingDB() {
					datastore.Dbh.Close()
				}
				os.Exit(0)
			} else if sig == syscall.SIGHUP {
				logger.Infof("Reloading configuration...")
				config.ParseConfigOptions()
			}
		}
	}()
}
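A minimal, self-contained sketch of the same dispatch-on-signal idiom using only the standard library; the cleanup and reload bodies are placeholders for the datastore/index saving and config reparsing done above:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func handleSignals() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
	go func() {
		for sig := range c {
			switch sig {
			case os.Interrupt, syscall.SIGTERM:
				log.Println("cleaning up...") // save state, close handles, etc.
				os.Exit(0)
			case syscall.SIGHUP:
				log.Println("reloading configuration...") // re-read config here
			}
		}
	}()
}

func main() {
	handleSignals()
	time.Sleep(time.Minute) // stand-in for the real server's work
}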
Example No. 3
func (mStore MetricStore) ProcessBuffer(c <-chan metricdef.IndvMetric, workerId int) {
	buf := make([]metricdef.IndvMetric, 0)

	// flush buffer every second
	t := time.NewTicker(time.Second)
	for {
		select {
		case b := <-c:
			if b.Name != "" {
				logger.Debugf("worker %d appending to buffer", workerId)
				buf = append(buf, b)
			}
		case <-t.C:
			// A possibility: it might be worth it to hack up the
			// carbon lib to allow batch submissions of metrics if
			// doing them individually proves to be too slow

			//copy contents of buffer
			currentBuf := make([]metricdef.IndvMetric, len(buf))
			copy(currentBuf, buf)
			buf = nil
			logger.Debugf("worker %d flushing %d items in buffer now", workerId, len(currentBuf))
			for _, backend := range mStore.Backends {
				if err := backend.SendMetrics(&currentBuf); err != nil {
					logger.Errorf(err.Error())
				} else {
					logger.Debugf("worker %d flushed metrics buffer to %s backend", workerId, backend.Type())
				}
			}
		}
	}
}
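The worker above batches metrics from a channel and flushes on a one-second ticker, handing each flush a copy of the buffer. A stand-alone sketch of that idiom, with a hypothetical send function in place of the carbon backends:

package main

import (
	"fmt"
	"time"
)

// send is a hypothetical stand-in for backend.SendMetrics.
func send(batch []string) {
	fmt.Printf("flushing %d items: %v\n", len(batch), batch)
}

// processBuffer accumulates items from c and flushes them once per second.
func processBuffer(c <-chan string, done <-chan struct{}) {
	var buf []string
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for {
		select {
		case item := <-c:
			if item != "" {
				buf = append(buf, item)
			}
		case <-t.C:
			if len(buf) == 0 {
				continue
			}
			// hand off a copy so this loop can keep appending safely
			batch := make([]string, len(buf))
			copy(batch, buf)
			buf = buf[:0]
			send(batch)
		case <-done:
			return
		}
	}
}

func main() {
	c := make(chan string)
	done := make(chan struct{})
	go processBuffer(c, done)
	for _, m := range []string{"cpu", "mem", "disk"} {
		c <- m
	}
	time.Sleep(1500 * time.Millisecond)
	close(done)
}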
Example No. 4
// DeleteHashes deletes all the checksum hashes given from the filestore.
func DeleteHashes(fileHashes []string) {
	if config.Config.UseMySQL {
		deleteHashesMySQL(fileHashes)
	} else if config.Config.UsePostgreSQL {
		deleteHashesPostgreSQL(fileHashes)
	} else {
		for _, ff := range fileHashes {
			delFile, err := Get(ff)
			if err != nil {
				logger.Debugf("Strange, we got an error trying to get %s to delete it.\n", ff)
				logger.Debugf(err.Error())
			} else {
				_ = delFile.Delete()
			}
			// May be able to remove this. Check that it actually deleted
			d, _ := Get(ff)
			if d != nil {
				logger.Debugf("Stranger and stranger, %s is still in the file store.\n", ff)
			}
		}
	}
	if config.Config.LocalFstoreDir != "" {
		for _, fh := range fileHashes {
			err := os.Remove(path.Join(config.Config.LocalFstoreDir, fh))
			if err != nil {
				logger.Errorf(err.Error())
			}
		}
	}
}
Example No. 5
func GetMetricDefinition(id string) (*MetricDefinition, error) {
	// Check redis first; fall back to elasticsearch on a cache miss.
	if v, err := rs.Get(id).Result(); err != nil && err != redis.Nil {
		logger.Errorf("the redis client bombed: %s", err.Error())
		return nil, err
	} else if err == nil {
		logger.Debugf("json for %s found in elasticsearch: %s", id)
		def, err := DefFromJSON([]byte(v))
		if err != nil {
			return nil, err
		}
		return def, nil
	}

	logger.Debugf("getting %s from elasticsearch", id)
	res, err := es.Get("definitions", "metric", id, nil)
	logger.Debugf("res is: %+v", res)
	if err != nil {
		return nil, err
	}
	logger.Debugf("get returned %q", res.Source)
	logger.Debugf("placing %s into redis", id)
	if rerr := rs.SetEx(id, time.Duration(300)*time.Second, string(*res.Source)).Err(); rerr != nil {
		logger.Debugf("redis err: %s", rerr.Error())
	}

	def, err := DefFromJSON(*res.Source)
	if err != nil {
		return nil, err
	}

	return def, nil
}
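GetMetricDefinition is a cache-aside read: try the fast store (redis), treat redis.Nil as a miss rather than a failure, fall back to elasticsearch, then backfill the cache. A generic sketch of the same flow, with a map standing in for redis and a hypothetical slowLookup for elasticsearch:

package main

import (
	"errors"
	"fmt"
)

var cache = map[string]string{}

// errNotFound plays the role of redis.Nil: a miss, not a failure.
var errNotFound = errors.New("not found")

func cacheGet(id string) (string, error) {
	if v, ok := cache[id]; ok {
		return v, nil
	}
	return "", errNotFound
}

// slowLookup is a hypothetical stand-in for the elasticsearch fetch.
func slowLookup(id string) (string, error) {
	return `{"id":"` + id + `"}`, nil
}

func getDefinition(id string) (string, error) {
	v, err := cacheGet(id)
	if err == nil {
		return v, nil
	}
	if err != errNotFound {
		return "", err // a real cache failure, not just a miss
	}
	v, err = slowLookup(id)
	if err != nil {
		return "", err
	}
	cache[id] = v // backfill so the next read is fast
	return v, nil
}

func main() {
	def, _ := getDefinition("metric.1")
	fmt.Println(def)
}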
Example No. 6
func jsonErrorReport(w http.ResponseWriter, r *http.Request, errorStr string, status int) {
	logger.Infof(errorStr)
	jsonError := map[string][]string{"error": {errorStr}}
	w.WriteHeader(status)
	enc := json.NewEncoder(w)
	if err := enc.Encode(&jsonError); err != nil {
		logger.Errorf(err.Error())
	}
	return
}
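Here is a hypothetical handler wired to a helper like jsonErrorReport, showing the {"error": ["..."]} body it produces; httptest drives the request so the sketch runs on its own:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

// errorReport mirrors the helper above: log, set the status, and emit
// {"error": ["..."]} as the body.
func errorReport(w http.ResponseWriter, errorStr string, status int) {
	log.Print(errorStr)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if err := json.NewEncoder(w).Encode(map[string][]string{"error": {errorStr}}); err != nil {
		log.Print(err)
	}
}

func main() {
	h := func(w http.ResponseWriter, r *http.Request) {
		errorReport(w, "no such object", http.StatusNotFound)
	}
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest("GET", "/things/42", nil))
	fmt.Println(rec.Code, rec.Body.String()) // 404 {"error":["no such object"]}
}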
Example No. 7
// SendEvent sends a serf event out from goiardi.
func SendEvent(eventName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	err = Serfer.UserEvent(eventName, jsonPayload, true)
	if err != nil {
		logger.Debugf(err.Error())
	}
	return
}
Example No. 8
func setSaveTicker() {
	if config.Config.FreezeData {
		ds := datastore.New()
		ticker := time.NewTicker(time.Second * time.Duration(config.Config.FreezeInterval))
		go func() {
			for range ticker.C {
				if config.Config.DataStoreFile != "" {
					logger.Infof("Automatically saving data store...")
					uerr := ds.Save(config.Config.DataStoreFile)
					if uerr != nil {
						logger.Errorf(uerr.Error())
					}
				}
				logger.Infof("Automatically saving index...")
				ierr := indexer.SaveIndex(config.Config.IndexFile)
				if ierr != nil {
					logger.Errorf(ierr.Error())
				}
			}
		}()
	}
}
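A stand-alone sketch of the periodic-save idiom, with a hypothetical save function and a stop channel so the ticker can be shut down cleanly (the version above runs for the life of the process, so it never needs one):

package main

import (
	"log"
	"time"
)

// save is a hypothetical stand-in for ds.Save / indexer.SaveIndex.
func save() error {
	log.Println("saving data store...")
	return nil
}

func setSaveTicker(interval time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := save(); err != nil {
					log.Println(err)
				}
			case <-stop:
				return
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	setSaveTicker(100*time.Millisecond, stop)
	time.Sleep(350 * time.Millisecond)
	close(stop)
}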
Example No. 9
// SendQuery sends a basic, no frills query out over serf.
func SendQuery(queryName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	q := &serfclient.QueryParam{Name: queryName, Payload: jsonPayload}
	err = Serfer.Query(q)
	if err != nil {
		logger.Debugf(err.Error())
	}
	return
}
Example No. 10
func reindexAll() {
	reindexObjs := make([]indexer.Indexable, 0, 100)
	// Clear the index *before* fetching the objects to reindex; anything
	// that comes in between the fetch and the reindex finishing will
	// simply be indexed as it arrives.
	indexer.ClearIndex()

	for _, v := range client.AllClients() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range node.AllNodes() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range role.AllRoles() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range environment.AllEnvironments() {
		reindexObjs = append(reindexObjs, v)
	}
	defaultEnv, _ := environment.Get("_default")
	reindexObjs = append(reindexObjs, defaultEnv)
	// data bags have to be done separately
	dbags := databag.GetList()
	for _, db := range dbags {
		dbag, err := databag.Get(db)
		if err != nil {
			continue
		}
		dbis := make([]indexer.Indexable, dbag.NumDBItems())
		i := 0
		allDBItems, derr := dbag.AllDBItems()
		if derr != nil {
			logger.Errorf(derr.Error())
			continue
		}
		for _, k := range allDBItems {
			n := k
			dbis[i] = n
			i++
		}
		reindexObjs = append(reindexObjs, dbis...)
	}
	indexer.ReIndex(reindexObjs)
	return
}
Example No. 11
func setLogEventPurgeTicker() {
	if config.Config.LogEventKeep != 0 {
		ticker := time.NewTicker(60 * time.Second)
		go func() {
			for range ticker.C {
				les, _ := loginfo.GetLogInfos(nil, 0, 1)
				if len(les) != 0 {
					p, err := loginfo.PurgeLogInfos(les[0].ID - config.Config.LogEventKeep)
					if err != nil {
						logger.Errorf(err.Error())
					}
					logger.Debugf("Purged %d events automatically", p)
				}
			}
		}()
	}
}
Example No. 12
func getResults(variety string, toGet []string) []indexer.Indexable {
	var results []indexer.Indexable
	switch variety {
	case "node":
		for _, n := range toGet {
			if node, _ := node.Get(n); node != nil {
				results = append(results, node)
			}
		}
	case "role":
		for _, r := range toGet {
			if role, _ := role.Get(r); role != nil {
				results = append(results, role)
			}
		}
	case "client":
		for _, c := range toGet {
			if client, _ := client.Get(c); client != nil {
				results = append(results, client)
			}
		}
	case "environment":
		for _, e := range toGet {
			if environment, _ := environment.Get(e); environment != nil {
				results = append(results, environment)
			}
		}
	default: // It's a data bag
		/* These may require further processing later. */
		dbag, _ := databag.Get(variety)
		if dbag != nil {
			for _, k := range toGet {
				dbi, err := dbag.GetDBItem(k)
				if err != nil {
					// log the error and skip this item rather
					// than appending a nil entry
					logger.Errorf(err.Error())
					continue
				}
				results = append(results, dbi)
			}
		}
	}
	return results
}
Example No. 13
// UpdateFromJSON updates a ShoveyRun with the given JSON from the client.
func (sr *ShoveyRun) UpdateFromJSON(srData map[string]interface{}) util.Gerror {
	if status, ok := srData["status"].(string); ok {
		if status == "invalid" || status == "succeeded" || status == "failed" || status == "nacked" {
			sr.EndTime = time.Now()
		}
		sr.Status = status
	} else {
		logger.Errorf("status isn't getting set?? type: %T status %v", srData["status"], srData["status"])
	}
	if errorStr, ok := srData["error"].(string); ok {
		sr.Error = errorStr
	}
	if exitStatus, ok := srData["exit_status"].(float64); ok {
		sr.ExitStatus = uint8(exitStatus)
	}

	err := sr.save()
	if err != nil {
		return err
	}
	go sr.notifyParent()
	return nil
}
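The comma-ok assertions above are the standard way to pull typed fields out of JSON decoded into map[string]interface{}; JSON numbers always arrive as float64, which is why exit_status is asserted that way. A self-contained sketch using the same field names as srData:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var data map[string]interface{}
	raw := []byte(`{"status": "failed", "exit_status": 2, "error": "boom"}`)
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}

	// Strings assert to string; JSON numbers always assert to float64.
	if status, ok := data["status"].(string); ok {
		fmt.Println("status:", status)
	}
	if code, ok := data["exit_status"].(float64); ok {
		fmt.Println("exit status:", uint8(code))
	}
	if errStr, ok := data["error"].(string); ok {
		fmt.Println("error:", errStr)
	}
}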
Example No. 14
func main() {
	if setting.Config.ExpvarAddr != "" {
		go func() {
			err := http.ListenAndServe(setting.Config.ExpvarAddr, nil)
			if err != nil {
				fmt.Println("Error starting expvar http listener:", err.Error())
				os.Exit(1)
			}
		}()
	}

	// First fire up a queue to consume metric def events
	mdConn, err := amqp.Dial(setting.Config.RabbitMQURL)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	defer mdConn.Close()
	logger.Debugf("connected")

	done := make(chan error, 1)

	var numCPU int
	if setting.Config.NumWorkers != 0 {
		numCPU = setting.Config.NumWorkers
	} else {
		numCPU = runtime.NumCPU()
	}

	err = qproc.ProcessQueue(mdConn, "metrics", "topic", "", "metrics.*", false, true, true, done, processMetricDefEvent, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = qproc.ProcessQueue(mdConn, "metricResults", "x-consistent-hash", "", "10", false, true, true, done, processMetrics, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = initEventProcessing(mdConn, numCPU, done)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}

	// Signal handling. If SIGQUIT is received, print out the current
	// stack. Otherwise if SIGINT or SIGTERM are received clean up and exit
	// in an orderly fashion.
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGQUIT, os.Interrupt, syscall.SIGTERM)
		buf := make([]byte, 1<<20)
		for sig := range sigs {
			if sig == syscall.SIGQUIT {
				// print out the current stack on SIGQUIT
				n := runtime.Stack(buf, true)
				log.Printf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end\n", buf[:n])
			} else {
				// finish our existing work, clean up, and exit
				// in an orderly fashion
				logger.Infof("Closing rabbitmq connection")
				cerr := mdConn.Close()
				if cerr != nil {
					logger.Errorf("Received error closing rabbitmq connection: %s", cerr.Error())
				}
				logger.Infof("Closing processing buffer channel")
				close(bufCh)
			}
		}
	}()

	// block here until one of the workers reports on the done channel and exits.
	err = <-done
	logger.Criticalf("all done!", err)
	if err != nil {
		logger.Criticalf("Had an error, aiiieeee! '%s'", err.Error())
	}
}
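A stand-alone sketch of the SIGQUIT branch above: dump all goroutine stacks without exiting. runtime.Stack reports how many bytes it wrote, so only that slice of the buffer is printed:

package main

import (
	"log"
	"os"
	"os/signal"
	"runtime"
	"syscall"
	"time"
)

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGQUIT)
	go func() {
		buf := make([]byte, 1<<20)
		for range sigs {
			// runtime.Stack returns how much of buf it filled.
			n := runtime.Stack(buf, true)
			log.Printf("=== received SIGQUIT ===\n%s\n=== end ===", buf[:n])
		}
	}()
	// Keep the process alive so it can be signalled (kill -QUIT <pid>).
	time.Sleep(time.Minute)
}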
Example No. 15
func main() {
	config.ParseConfigOptions()

	/* Here goes nothing, db... */
	if config.UsingDB() {
		var derr error
		if config.Config.UseMySQL {
			datastore.Dbh, derr = datastore.ConnectDB("mysql", config.Config.MySQL)
		} else if config.Config.UsePostgreSQL {
			datastore.Dbh, derr = datastore.ConnectDB("postgres", config.Config.PostgreSQL)
		}
		if derr != nil {
			logger.Criticalf(derr.Error())
			os.Exit(1)
		}
	}

	gobRegister()
	ds := datastore.New()
	if config.Config.FreezeData {
		if config.Config.DataStoreFile != "" {
			uerr := ds.Load(config.Config.DataStoreFile)
			if uerr != nil {
				logger.Criticalf(uerr.Error())
				os.Exit(1)
			}
		}
		ierr := indexer.LoadIndex(config.Config.IndexFile)
		if ierr != nil {
			logger.Criticalf(ierr.Error())
			os.Exit(1)
		}
	}
	setSaveTicker()
	setLogEventPurgeTicker()

	/* handle import/export */
	if config.Config.DoExport {
		fmt.Printf("Exporting data to %s....\n", config.Config.ImpExFile)
		err := exportAll(config.Config.ImpExFile)
		if err != nil {
			logger.Criticalf("Something went wrong during the export: %s", err.Error())
			os.Exit(1)
		}
		fmt.Println("All done!")
		os.Exit(0)
	} else if config.Config.DoImport {
		fmt.Printf("Importing data from %s....\n", config.Config.ImpExFile)
		err := importAll(config.Config.ImpExFile)
		if err != nil {
			logger.Criticalf("Something went wrong during the import: %s", err.Error())
			os.Exit(1)
		}
		if config.Config.FreezeData {
			if config.Config.DataStoreFile != "" {
				ds := datastore.New()
				if err := ds.Save(config.Config.DataStoreFile); err != nil {
					logger.Errorf(err.Error())
				}
			}
			if err := indexer.SaveIndex(config.Config.IndexFile); err != nil {
				logger.Errorf(err.Error())
			}
		}
		if config.UsingDB() {
			datastore.Dbh.Close()
		}
		fmt.Println("All done.")
		os.Exit(0)
	}

	/* Create default clients and users. Currently chef-validator,
	 * chef-webui, and admin. */
	createDefaultActors()
	handleSignals()

	/* Register the various handlers, found in their own source files. */
	http.HandleFunc("/authenticate_user", authenticateUserHandler)
	http.HandleFunc("/clients", listHandler)
	http.HandleFunc("/clients/", clientHandler)
	http.HandleFunc("/cookbooks", cookbookHandler)
	http.HandleFunc("/cookbooks/", cookbookHandler)
	http.HandleFunc("/data", dataHandler)
	http.HandleFunc("/data/", dataHandler)
	http.HandleFunc("/environments", environmentHandler)
	http.HandleFunc("/environments/", environmentHandler)
	http.HandleFunc("/nodes", listHandler)
	http.HandleFunc("/nodes/", nodeHandler)
	http.HandleFunc("/principals/", principalHandler)
	http.HandleFunc("/roles", listHandler)
	http.HandleFunc("/roles/", roleHandler)
	http.HandleFunc("/sandboxes", sandboxHandler)
	http.HandleFunc("/sandboxes/", sandboxHandler)
	http.HandleFunc("/search", searchHandler)
	http.HandleFunc("/search/", searchHandler)
	http.HandleFunc("/search/reindex", reindexHandler)
	http.HandleFunc("/users", listHandler)
	http.HandleFunc("/users/", userHandler)
	http.HandleFunc("/file_store/", fileStoreHandler)
	http.HandleFunc("/events", eventListHandler)
	http.HandleFunc("/events/", eventHandler)
	http.HandleFunc("/reports/", reportHandler)

	/* TODO: figure out how to handle the root & not found pages */
	http.HandleFunc("/", rootHandler)

	listenAddr := config.ListenAddr()
	var err error
	if config.Config.UseSSL {
		err = http.ListenAndServeTLS(listenAddr, config.Config.SSLCert, config.Config.SSLKey, &interceptHandler{})
	} else {
		err = http.ListenAndServe(listenAddr, &interceptHandler{})
	}
	if err != nil {
		logger.Criticalf("ListenAndServe: %s", err.Error())
		os.Exit(1)
	}
}
Example No. 16
// CancelRuns cancels the shovey runs on the nodes named in the given slice
// of node names.
func (s *Shovey) CancelRuns(nodeNames []string) util.Gerror {
	if config.UsingDB() {
		err := s.cancelRunsSQL()
		if err != nil {
			return err
		}
	} else {
		for _, n := range nodeNames {
			sr, err := s.GetRun(n)
			if err != nil {
				return err
			}
			if sr.Status != "invalid" && sr.Status != "succeeded" && sr.Status != "failed" && sr.Status != "down" && sr.Status != "nacked" {
				sr.EndTime = time.Now()
				sr.Status = "cancelled"
				err = sr.save()
				if err != nil {
					return err
				}
			}
		}
	}
	if len(nodeNames) == len(s.NodeNames) {
		sort.Strings(nodeNames)
		sort.Strings(s.NodeNames)
		if reflect.DeepEqual(nodeNames, s.NodeNames) {
			s.Status = "cancelled"
			s.save()
		}
	} else {
		s.checkCompleted()
	}

	payload := make(map[string]string)
	payload["action"] = "cancel"
	payload["run_id"] = s.RunID
	payload["time"] = time.Now().Format(time.RFC3339)
	sig, serr := s.signRequest(payload)
	if serr != nil {
		return util.CastErr(serr)
	}
	payload["signature"] = sig
	jsonPayload, _ := json.Marshal(payload)
	ackCh := make(chan string, len(nodeNames))
	q := &serfclient.QueryParam{Name: "shovey", Payload: jsonPayload, FilterNodes: nodeNames, RequestAck: true, AckCh: ackCh}
	err := serfin.Serfer.Query(q)
	if err != nil {
		return util.CastErr(err)
	}
	doneCh := make(chan struct{})
	go func() {
		for c := range ackCh {
			logger.Debugf("Received acknowledgement from %s", c)
		}
		doneCh <- struct{}{}
	}()
	select {
	case <-doneCh:
		logger.Infof("All nodes acknowledged cancellation")
		// probably do a report here?
	case <-time.After(time.Duration(60) * time.Second):
		logger.Errorf("Didn't get all acknowledgements within 60 seconds")
	}

	return nil
}
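The acknowledgement wait at the end of CancelRuns is a common channel idiom: drain the ack channel in a goroutine, close a done channel when it's exhausted, and race that against a timeout (the range loop above relies on serf closing its ack channel once the query finishes). A stand-alone sketch with a plain string channel standing in for serf's ack channel:

package main

import (
	"fmt"
	"time"
)

func main() {
	ackCh := make(chan string, 3)
	// Pretend three nodes acknowledge, then close the channel to mimic the
	// query completing.
	go func() {
		for _, n := range []string{"node1", "node2", "node3"} {
			ackCh <- n
		}
		close(ackCh)
	}()

	doneCh := make(chan struct{})
	go func() {
		for node := range ackCh {
			fmt.Println("ack from", node)
		}
		close(doneCh)
	}()

	select {
	case <-doneCh:
		fmt.Println("all nodes acknowledged")
	case <-time.After(60 * time.Second):
		fmt.Println("didn't get all acknowledgements within 60 seconds")
	}
}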
Example No. 17
func reindexHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	reindexResponse := make(map[string]interface{})
	opUser, oerr := actor.GetReqUser(r.Header.Get("X-OPS-USERID"))
	if oerr != nil {
		jsonErrorReport(w, r, oerr.Error(), oerr.Status())
		return
	}
	switch r.Method {
	case "POST":
		if !opUser.IsAdmin() {
			jsonErrorReport(w, r, "You are not allowed to perform that action.", http.StatusForbidden)
			return
		}
		reindexObjs := make([]indexer.Indexable, 0, 100)
		// Clear the index *before* fetching the objects to reindex;
		// anything that comes in between the fetch and the reindex
		// finishing will simply be indexed as it arrives.
		indexer.ClearIndex()

		for _, v := range client.AllClients() {
			reindexObjs = append(reindexObjs, v)
		}
		for _, v := range node.AllNodes() {
			reindexObjs = append(reindexObjs, v)
		}
		for _, v := range role.AllRoles() {
			reindexObjs = append(reindexObjs, v)
		}
		for _, v := range environment.AllEnvironments() {
			reindexObjs = append(reindexObjs, v)
		}
		defaultEnv, _ := environment.Get("_default")
		reindexObjs = append(reindexObjs, defaultEnv)
		// data bags have to be done separately
		dbags := databag.GetList()
		for _, db := range dbags {
			dbag, err := databag.Get(db)
			if err != nil {
				continue
			}
			dbis := make([]indexer.Indexable, dbag.NumDBItems())
			i := 0
			allDBItems, derr := dbag.AllDBItems()
			if derr != nil {
				logger.Errorf(derr.Error())
				continue
			}
			for _, k := range allDBItems {
				n := k
				dbis[i] = n
				i++
			}
			reindexObjs = append(reindexObjs, dbis...)
		}
		indexer.ReIndex(reindexObjs)
		reindexResponse["reindex"] = "OK"
	default:
		jsonErrorReport(w, r, "Method not allowed. If you're trying to do something with a data bag named 'reindex', it's not going to work I'm afraid.", http.StatusMethodNotAllowed)
		return
	}
	enc := json.NewEncoder(w)
	if err := enc.Encode(&reindexResponse); err != nil {
		jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
	}
}
Example No. 18
func environmentHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	accErr := checkAccept(w, r, "application/json")
	if accErr != nil {
		jsonErrorReport(w, r, accErr.Error(), http.StatusNotAcceptable)
		return
	}

	opUser, oerr := actor.GetReqUser(r.Header.Get("X-OPS-USERID"))
	if oerr != nil {
		jsonErrorReport(w, r, oerr.Error(), oerr.Status())
		return
	}

	pathArray := splitPath(r.URL.Path)
	envResponse := make(map[string]interface{})
	var numResults string
	r.ParseForm()
	if nrs, found := r.Form["num_versions"]; found {
		if len(nrs) < 1 {
			jsonErrorReport(w, r, "invalid num_versions", http.StatusBadRequest)
			return
		}
		numResults = nrs[0]
		err := util.ValidateNumVersions(numResults)
		if err != nil {
			jsonErrorReport(w, r, "You have requested an invalid number of versions (x >= 0 || 'all')", err.Status())
			return
		}
	}

	pathArrayLen := len(pathArray)

	if pathArrayLen == 1 {
		switch r.Method {
		case "GET":
			if opUser.IsValidator() {
				jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
				return
			}
			envList := environment.GetList()
			for _, env := range envList {
				envResponse[env] = util.CustomURL(fmt.Sprintf("/environments/%s", env))
			}
		case "POST":
			if !opUser.IsAdmin() {
				jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
				return
			}
			envData, jerr := parseObjJSON(r.Body)
			if jerr != nil {
				jsonErrorReport(w, r, jerr.Error(), http.StatusBadRequest)
				return
			}
			if _, ok := envData["name"].(string); !ok || envData["name"].(string) == "" {
				jsonErrorReport(w, r, "Environment name missing", http.StatusBadRequest)
				return
			}
			chefEnv, _ := environment.Get(envData["name"].(string))
			if chefEnv != nil {
				httperr := fmt.Errorf("Environment already exists")
				jsonErrorReport(w, r, httperr.Error(), http.StatusConflict)
				return
			}
			var eerr util.Gerror
			chefEnv, eerr = environment.NewFromJSON(envData)
			if eerr != nil {
				jsonErrorReport(w, r, eerr.Error(), eerr.Status())
				return
			}
			if err := chefEnv.Save(); err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
				return
			}
			if lerr := loginfo.LogEvent(opUser, chefEnv, "create"); lerr != nil {
				jsonErrorReport(w, r, lerr.Error(), http.StatusInternalServerError)
				return
			}
			envResponse["uri"] = util.ObjURL(chefEnv)
			w.WriteHeader(http.StatusCreated)
		default:
			jsonErrorReport(w, r, "Unrecognized method", http.StatusMethodNotAllowed)
			return
		}
	} else if pathArrayLen == 2 {
		/* All of the 2 element operations return the environment
		 * object, so we do the json encoding in this block and return
		 * out. */
		envName := pathArray[1]
		env, err := environment.Get(envName)
		delEnv := false /* Set this to delete the environment after
		 * sending the json. */
		if err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusNotFound)
			return
		}
		switch r.Method {
		case "GET", "DELETE":
			/* We don't actually have to do much here. */
			if r.Method == "DELETE" {
				if !opUser.IsAdmin() {
					jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
					return
				}
				if envName == "_default" {
					jsonErrorReport(w, r, "The '_default' environment cannot be modified.", http.StatusMethodNotAllowed)
					return
				}
				delEnv = true
			} else {
				if opUser.IsValidator() {
					jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
					return
				}
			}
		case "PUT":
			if !opUser.IsAdmin() {
				jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
				return
			}
			envData, jerr := parseObjJSON(r.Body)
			if jerr != nil {
				jsonErrorReport(w, r, jerr.Error(), http.StatusBadRequest)
				return
			}
			if envData == nil {
				jsonErrorReport(w, r, "No environment data in body at all!", http.StatusBadRequest)
				return
			}
			if _, ok := envData["name"]; !ok {
				//envData["name"] = envName
				jsonErrorReport(w, r, "Environment name missing", http.StatusBadRequest)
				return
			}
			jsonName, sterr := util.ValidateAsString(envData["name"])
			if sterr != nil {
				jsonErrorReport(w, r, sterr.Error(), sterr.Status())
				return
			} else if jsonName == "" {
				jsonErrorReport(w, r, "Environment name missing", http.StatusBadRequest)
				return
			}
			if envName != envData["name"].(string) {
				env, err = environment.Get(envData["name"].(string))
				if err == nil {
					jsonErrorReport(w, r, "Environment already exists", http.StatusConflict)
					return
				}
				var eerr util.Gerror
				env, eerr = environment.NewFromJSON(envData)
				if eerr != nil {
					jsonErrorReport(w, r, eerr.Error(), eerr.Status())
					return
				}
				w.WriteHeader(http.StatusCreated)
				oldenv, olderr := environment.Get(envName)
				if olderr == nil {
					oldenv.Delete()
				}
			} else {
				if jsonName == "" {
					envData["name"] = envName
				}
				if err := env.UpdateFromJSON(envData); err != nil {
					jsonErrorReport(w, r, err.Error(), err.Status())
					return
				}
			}
			if err := env.Save(); err != nil {
				jsonErrorReport(w, r, err.Error(), err.Status())
				return
			}
			if lerr := loginfo.LogEvent(opUser, env, "modify"); lerr != nil {
				jsonErrorReport(w, r, lerr.Error(), http.StatusInternalServerError)
				return
			}
		default:
			jsonErrorReport(w, r, "Unrecognized method", http.StatusMethodNotAllowed)
			return
		}
		enc := json.NewEncoder(w)
		if err := enc.Encode(&env); err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
			return
		}
		if delEnv {
			err := env.Delete()
			if err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
				return
			}
			if lerr := loginfo.LogEvent(opUser, env, "delete"); lerr != nil {
				jsonErrorReport(w, r, lerr.Error(), http.StatusInternalServerError)
				return
			}
		}
		return
	} else if pathArrayLen == 3 {
		envName := pathArray[1]
		op := pathArray[2]

		if op == "cookbook_versions" && r.Method != "POST" || op != "cookbook_versions" && r.Method != "GET" {
			jsonErrorReport(w, r, "Unrecognized method", http.StatusMethodNotAllowed)
			return
		}

		if opUser.IsValidator() {
			jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
			return
		}

		env, err := environment.Get(envName)
		if err != nil {
			var errMsg string
			// bleh, stupid errors
			if err.Status() == http.StatusNotFound && (op != "recipes" && op != "cookbooks") {
				errMsg = fmt.Sprintf("environment '%s' not found", envName)
			} else {
				errMsg = err.Error()
			}
			jsonErrorReport(w, r, errMsg, err.Status())
			return
		}

		switch op {
		case "cookbook_versions":
			/* Chef Server API docs aren't even remotely
			 * right here. What it actually wants is the
			 * usual hash of info for the latest or
			 * constrained version. Weird. */
			cbVer, jerr := parseObjJSON(r.Body)
			if jerr != nil {
				errmsg := jerr.Error()
				if !strings.Contains(errmsg, "Field") {
					errmsg = "invalid JSON"
				}
				jsonErrorReport(w, r, errmsg, http.StatusBadRequest)
				return
			}

			if _, ok := cbVer["run_list"]; !ok {
				jsonErrorReport(w, r, "POSTed JSON badly formed.", http.StatusMethodNotAllowed)
				return
			}
			deps, derr := cookbook.DependsCookbooks(cbVer["run_list"].([]string), env.CookbookVersions)
			if derr != nil {
				switch derr := derr.(type) {
				case *cookbook.DependsError:
					// In 1.0.0-dev, there's a
					// JSONErrorMapReport function in util.
					// Use that when moving this forward
					errMap := make(map[string][]map[string]interface{})
					errMap["error"] = make([]map[string]interface{}, 1)
					errMap["error"][0] = derr.ErrMap()
					w.WriteHeader(http.StatusPreconditionFailed)
					enc := json.NewEncoder(w)
					if jerr := enc.Encode(&errMap); jerr != nil {
						logger.Errorf(jerr.Error())
					}
				default:
					jsonErrorReport(w, r, derr.Error(), http.StatusPreconditionFailed)
				}
				return
			}
			/* Need our own encoding here too. */
			enc := json.NewEncoder(w)
			if err := enc.Encode(&deps); err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
			}
			return
		case "cookbooks":
			envResponse = env.AllCookbookHash(numResults)
		case "nodes":
			nodeList, err := node.GetFromEnv(envName)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
				return
			}
			for _, chefNode := range nodeList {
				envResponse[chefNode.Name] = util.ObjURL(chefNode)
			}
		case "recipes":
			envRecipes := env.RecipeList()
			/* And... we have to do our own json response
			 * here. Hmph. */
			/* TODO: make the JSON encoding stuff its own
			 * function. Dunno why I never thought of that
			 * before now for this. */
			enc := json.NewEncoder(w)
			if err := enc.Encode(&envRecipes); err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
			}
			return
		default:
			jsonErrorReport(w, r, "Bad request", http.StatusBadRequest)
			return

		}
	} else if pathArrayLen == 4 {
		envName := pathArray[1]
		/* op is either "cookbooks" or "roles", and opName is the name
		 * of the object op refers to. */
		op := pathArray[2]
		opName := pathArray[3]

		if r.Method != "GET" {
			jsonErrorReport(w, r, "Method not allowed", http.StatusMethodNotAllowed)
			return
		}
		if opUser.IsValidator() {
			jsonErrorReport(w, r, "You are not allowed to perform this action", http.StatusForbidden)
			return
		}
		env, err := environment.Get(envName)
		if err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusNotFound)
			return
		}

		/* Biting the bullet and not redirecting this to
		 * /roles/NAME/environments/NAME. The behavior is exactly the
		 * same, but it makes clients and chef-pedant somewhat unhappy
		 * to not have this way available. */
		if op == "roles" {
			role, err := role.Get(opName)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusNotFound)
				return
			}
			var runList []string
			if envName == "_default" {
				runList = role.RunList
			} else {
				runList = role.EnvRunLists[envName]
			}
			envResponse["run_list"] = runList
		} else if op == "cookbooks" {
			cb, err := cookbook.Get(opName)
			if err != nil {
				jsonErrorReport(w, r, err.Error(), http.StatusNotFound)
				return
			}
			/* Here and, I think, here only, if num_versions isn't
			 * set it's supposed to return ALL matching versions.
			 * API docs are wrong here. */
			if numResults == "" {
				numResults = "all"
			}
			envResponse[opName] = cb.ConstrainedInfoHash(numResults, env.CookbookVersions[opName])
		} else {
			/* Not an op we know. */
			jsonErrorReport(w, r, "Bad request - too many elements in path", http.StatusBadRequest)
			return
		}
	} else {
		/* Bad number of path elements. */
		jsonErrorReport(w, r, "Bad request - too many elements in path", http.StatusBadRequest)
		return
	}

	enc := json.NewEncoder(w)
	if err := enc.Encode(&envResponse); err != nil {
		jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
	}
}
Example No. 19
func (s *Shovey) startJobs() Qerror {
	// determine if we meet the quorum
	// First is this a percentage or absolute quorum
	qnum, err := getQuorum(s.Quorum, len(s.NodeNames))
	if err != nil {
		return err
	}
	// query node statuses to see if enough are up
	upNodes, nerr := node.GetNodesByStatus(s.NodeNames, "up")
	if nerr != nil {
		return CastErr(nerr)
	}
	if len(upNodes) < qnum {
		err = Errorf("Not enough nodes were up to execute job %s - got %d, needed at least %d", s.RunID, len(upNodes), qnum)
		err.SetStatus("quorum_failed")
		// be setting up/down nodes here too
		return err
	}

	// if that all worked, send the commands
	errch := make(chan error)
	go func() {
		tagNodes := make([]string, len(upNodes))
		d := make(map[string]bool)
		for i, n := range upNodes {
			tagNodes[i] = n.Name
			d[n.Name] = true
			sr := &ShoveyRun{ShoveyUUID: s.RunID, NodeName: n.Name, Status: "created"}
			err := sr.save()
			if err != nil {
				logger.Errorf("error saving shovey run: %s", err.Error())
				errch <- err
				return
			}
		}
		for _, n := range s.NodeNames {
			if !d[n] {
				sr := &ShoveyRun{ShoveyUUID: s.RunID, NodeName: n, Status: "down", EndTime: time.Now()}
				err := sr.save()
				if err != nil {
					logger.Errorf("error saving shovey run: %s", err.Error())
					errch <- err
					return
				}
			}
		}
		// make sure this is the right amount of buffering
		payload := make(map[string]string)
		payload["run_id"] = s.RunID
		payload["command"] = s.Command
		payload["action"] = "start"
		payload["time"] = time.Now().Format(time.RFC3339)
		payload["timeout"] = fmt.Sprintf("%d", s.Timeout)
		sig, serr := s.signRequest(payload)
		if serr != nil {
			errch <- serr
			return
		}
		payload["signature"] = sig
		jsonPayload, _ := json.Marshal(payload)
		ackCh := make(chan string, len(tagNodes))
		respCh := make(chan serfclient.NodeResponse, len(tagNodes))
		q := &serfclient.QueryParam{Name: "shovey", Payload: jsonPayload, FilterNodes: tagNodes, RequestAck: true, AckCh: ackCh, RespCh: respCh}
		qerr := serfin.Serfer.Query(q)
		if qerr != nil {
			errch <- qerr
			return
		}
		errch <- nil
		srCh := make(chan *ShoveyRun, len(upNodes)*2)

		go func() {
			for sr := range srCh {
				sr.save()
			}
		}()

	AckLoop:
		for i := 0; i < len(upNodes)*2; i++ {
			select {
			case a := <-ackCh:
				if a == "" {
					continue
				}
				sr, err := s.GetRun(a)
				if err != nil {
					logger.Debugf("err with sr %s: %s", a, err.Error())
					continue
				}
				sr.AckTime = time.Now()
				srCh <- sr
			case r := <-respCh:
				logger.Debugf("got a response: %v", r)
			case <-time.After(s.Timeout * time.Second):
				logger.Debugf("timed out, might not be appropriate")
				// a bare break only exits the select; use the label
				// to stop waiting for further acks and responses
				break AckLoop
			}
		}
		close(srCh)

		logger.Debugf("out of for/select loop for shovey responses")
	}()
	grerr := <-errch
	if grerr != nil {
		return CastErr(grerr)
	}

	return nil
}
Example No. 20
func (h *interceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	/* knife sometimes sends URL paths that start with //. Redirecting
	 * worked for GETs, but since it was breaking POSTs and screwing with
	 * GETs with query params, we just clean up the path and move on. */

	/* log the URL */
	// TODO: set this to verbosity level 4 or so
	logger.Debugf("Serving %s -- %s\n", r.URL.Path, r.Method)

	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			r.URL.Path = p
		}
	}

	/* Make configurable, I guess, but Chef wants it to be 1000000 */
	if !strings.HasPrefix(r.URL.Path, "/file_store") && r.ContentLength > config.Config.JSONReqMaxSize {
		http.Error(w, "Content-length too long!", http.StatusRequestEntityTooLarge)
		return
	} else if r.ContentLength > config.Config.ObjMaxSize {
		http.Error(w, "Content-length waaaaaay too long!", http.StatusRequestEntityTooLarge)
		return
	}

	w.Header().Set("X-Goiardi", "yes")
	w.Header().Set("X-Goiardi-Version", config.Version)
	w.Header().Set("X-Chef-Version", config.ChefVersion)
	apiInfo := fmt.Sprintf("flavor=osc;version:%s;goiardi=%s", config.ChefVersion, config.Version)
	w.Header().Set("X-Ops-API-Info", apiInfo)

	userID := r.Header.Get("X-OPS-USERID")
	if rs := r.Header.Get("X-Ops-Request-Source"); rs == "web" {
		/* If use-auth is on and disable-webui is on, and this is a
		 * webui connection, it needs to fail. */
		if config.Config.DisableWebUI {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to log in through webui, but webui is disabled")
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}

		/* Check that the user in question with the web request exists.
		 * If not, fail. */
		if _, uherr := actor.GetReqUser(userID); uherr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to use invalid user %s through X-Ops-Request-Source = web", userID)
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}
		userID = "chef-webui"
	}
	/* Only perform the authorization check if that's configured. Bomb with
	 * an error if the check of the headers, timestamps, etc. fails. */
	/* No clue why /principals doesn't require authorization. Hrmph. */
	if config.Config.UseAuth && !strings.HasPrefix(r.URL.Path, "/file_store") && !(strings.HasPrefix(r.URL.Path, "/principals") && r.Method == "GET") {
		herr := authentication.CheckHeader(userID, r)
		if herr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Authorization failure: %s\n", herr.Error())
			//http.Error(w, herr.Error(), herr.Status())
			jsonErrorReport(w, r, herr.Error(), herr.Status())
			return
		}
	}

	// Experimental: decompress gzipped requests
	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(r.Body)
		if err != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Failure decompressing gzipped request body: %s\n", err.Error())
			jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
			return
		}
		r.Body = reader
	}

	http.DefaultServeMux.ServeHTTP(w, r)
}
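A stand-alone sketch of the gzip-decompression step at the end of ServeHTTP: a wrapper handler swaps r.Body for a gzip.Reader when the client sends Content-Encoding: gzip, then delegates to the real mux. httptest drives a compressed request so the sketch runs on its own:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

type gunzipHandler struct {
	next http.Handler
}

func (h gunzipHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		r.Body = gz // *gzip.Reader is an io.ReadCloser, so this is a drop-in swap
	}
	h.next.ServeHTTP(w, r)
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		fmt.Fprintf(w, "got: %s", body)
	})

	// Build a gzipped request body and run it through the wrapper.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()
	req := httptest.NewRequest("POST", "/echo", &buf)
	req.Header.Set("Content-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h := gunzipHandler{next: mux}
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // got: hello
}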