// Update performs an update to the TUF repo as defined by the TUF spec
func (c *Client) Update() (*tuf.Repo, error) {
	// 1. Get timestamp
	//   a. If timestamp error (verification, expired, etc...) download new root and return to 1.
	// 2. Check if local snapshot is up to date
	//   a. If out of date, get updated snapshot
	//     i. If snapshot error, download new root and return to 1.
	// 3. Check if root correct against snapshot
	//   a. If incorrect, download new root and return to 1.
	// 4. Iteratively download and search targets and delegations to find target meta
	logrus.Debug("updating TUF client")
	err := c.update()
	if err != nil {
		logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
		logrus.Debug("Resetting the TUF builder...")
		c.newBuilder = c.newBuilder.BootstrapNewBuilder()
		if err := c.downloadRoot(); err != nil {
			logrus.Debug("Client Update (Root):", err)
			return nil, err
		}
		// If we error again, we now have the latest root and just want to fail
		// out as there's no expectation the problem can be resolved automatically
		logrus.Debug("retrying TUF client update")
		if err := c.update(); err != nil {
			return nil, err
		}
	}
	return c.newBuilder.Finish()
}
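// updateOrDie is a hypothetical caller sketch, not part of the original
// source: it shows the intended usage of Update, where a failure after the
// automatic root refresh above is treated as unrecoverable.
func updateOrDie(client *Client) *tuf.Repo {
	repo, err := client.Update()
	if err != nil {
		logrus.Fatalf("TUF update failed even after downloading a fresh root: %v", err)
	}
	return repo
}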
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Action() {
	case changelist.ActionCreate:
		logrus.Debug("changelist add: ", c.Path())
		meta := &data.FileMeta{}
		err = json.Unmarshal(c.Content(), meta)
		if err != nil {
			return err
		}
		files := data.Files{c.Path(): *meta}

		err = doWithRoleFallback(c.Scope(), func(role string) error {
			_, e := repo.AddTargets(role, files)
			return e
		})
		if err != nil {
			logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
		}

	case changelist.ActionDelete:
		logrus.Debug("changelist remove: ", c.Path())

		err = doWithRoleFallback(c.Scope(), func(role string) error {
			return repo.RemoveTargets(role, c.Path())
		})
		if err != nil {
			logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
		}

	default:
		logrus.Debug("action not yet supported: ", c.Action())
	}
	return err
}
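// doWithRoleFallback is referenced above but not defined in this section.
// A plausible sketch of its behavior (an assumption, not the verified
// implementation): apply the operation to the scoped role, and when TUF
// reports the role as invalid, retry against ancestor delegation roles by
// trimming path segments off the role name.
func doWithRoleFallback(scope string, op func(role string) error) error {
	role := scope
	for {
		err := op(role)
		if _, ok := err.(data.ErrInvalidRole); !ok {
			return err // nil on success, or an error fallback cannot fix
		}
		idx := strings.LastIndex(role, "/")
		if idx < 0 {
			return err // no parent delegation left to try
		}
		role = role[:idx]
	}
}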
func (p *Parser) processArgument(argument string, parameter string) (processedParam bool) {
	// Only process if this argument has the "--" prefix as expected
	if len(argument) < 3 || argument[0] != '-' || argument[1] != '-' {
		log.Debug(fmt.Sprintf("Argument [%s] does not start with '--', skipping...", argument))
		return
	}

	// See if the next "argument" is actually a parameter for this argument,
	// even if we don't find a match for the argument in the expected args.
	// An empty string means there is no next argument at all, so it cannot
	// be a parameter.
	if parameter != "" && (len(parameter) < 3 || parameter[0] != '-' || parameter[1] != '-') {
		log.Debug(fmt.Sprintf("Next argument [%s] does not start with '--', treating as a parameter", parameter))
		processedParam = true
	}

	// Now look for a match for this argument in the expected args
	argument = argument[2:]
	log.Debug(fmt.Sprintf("Looking for an args match for [%s]", argument))
	for _, expectedArg := range p.args {
		if expectedArg.LongForm == argument {
			log.Debug(fmt.Sprintf("Found a match: %s", expectedArg.String()))
			expectedArg.Present = true
			if processedParam {
				expectedArg.Param = parameter
			}
			break
		}
	}
	return
}
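// Parse is a hypothetical driver loop for processArgument (the method name
// and raw-argument plumbing are illustrative, not from the original source):
// walk the argument list in argument/parameter pairs, skipping the next slot
// whenever processArgument reports that it consumed a parameter.
func (p *Parser) Parse(rawArgs []string) {
	for i := 0; i < len(rawArgs); i++ {
		next := ""
		if i+1 < len(rawArgs) {
			next = rawArgs[i+1]
		}
		if p.processArgument(rawArgs[i], next) {
			i++ // the next slot was consumed as this argument's parameter
		}
	}
}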
// HTTP handler to allow clients to add/update schedules
func (a *API) HandlePUT(w http.ResponseWriter, r *http.Request) {
	log.Debug("Handling PUT")
	userID := r.Header.Get("user_id")
	defer r.Body.Close()

	log.Debug("going to decode schedule")
	decoder := json.NewDecoder(r.Body)
	proposal := types.Schedule{}
	err := decoder.Decode(&proposal)
	if err != nil {
		log.Warn("failed to decode proposed schedule: ", err)
		handleError(w, UnmarshalError)
		return
	}
	log.Debug("proposal: ", proposal)

	err = a.db.Put(userID, proposal)
	if err != nil {
		log.Warn("error putting: ", err)
		handleError(w, err)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// ExecIpmiToolRemote method runs ipmitool command on a remote system
func ExecIpmiToolRemote(request []byte, strct *LinuxOutOfBand, addr string) []byte {
	c, err := exec.LookPath("ipmitool")
	if err != nil {
		log.Debug("Unable to find ipmitool")
		return nil
	}
	a := []string{"-I", "lanplus", "-H", addr, "-U", strct.User, "-P", strct.Pass,
		"-b", strct.Channel, "-t", strct.Slave, "raw"}
	for i := range request {
		a = append(a, fmt.Sprintf("0x%02x", request[i]))
	}
	ret, err := exec.Command(c, a...).CombinedOutput()
	if err != nil {
		log.Debug("Unable to run ipmitool")
		return nil
	}
	// Use Fields rather than splitting on a single space: ipmitool output is
	// whitespace-separated and ends with a newline, which would otherwise
	// leave empty or newline-tainted tokens that fail to parse.
	returnStrings := strings.Fields(string(ret))
	rets := make([]byte, len(returnStrings))
	for ind, el := range returnStrings {
		value, err := strconv.ParseInt(el, 16, 0)
		if err != nil {
			log.Debug("Unable to parse ipmitool output: ", el)
			return nil
		}
		rets[ind] = byte(value)
	}
	return rets
}
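// readDeviceID is a hypothetical call site for ExecIpmiToolRemote: issue a
// raw Get Device ID request (NetFn 0x06, Cmd 0x01, i.e. "ipmitool raw 0x06
// 0x01"). The address and the LinuxOutOfBand field values are illustrative
// only; the field names are taken from the usage above.
func readDeviceID() []byte {
	bmc := &LinuxOutOfBand{User: "admin", Pass: "secret", Channel: "0", Slave: "0x20"}
	return ExecIpmiToolRemote([]byte{0x06, 0x01}, bmc, "10.0.0.42")
}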
func StartReadListening(readPort int, writePort int, defaultWriters []string, mapOperation SplitterMap) {
	// Create buffer to hold data
	queue := lane.NewQueue()
	MonitorServer(queue)

	// Start listening for writer destinations
	go StartWriteListening(queue, defaultWriters, writePort)

	socket, err := net.Listen("tcp", ":"+strconv.Itoa(readPort))
	if err != nil {
		// Without a listening socket there is nothing to accept on, so bail
		// out instead of dereferencing a nil listener below.
		logrus.Error(err)
		return
	}

	// This will block the main thread
	for {
		// Begin trying to accept connections
		logrus.Debug("Awaiting Connection...")

		// Block and wait for listeners
		conn, err := socket.Accept()
		if err != nil {
			logrus.Error(err)
		} else {
			logrus.Debug("Accepted Connection...")
			go HandleReadConnection(conn, queue, writePort, mapOperation)
		}
	}
}
// helper function to encode the build step to
// a json string. Primarily used for plugins, which
// expect a json encoded string in stdin or arg[1].
func toCommand(s *State, n *parser.DockerNode) []string {
	p := payload{
		Workspace: s.Workspace,
		Repo:      s.Repo,
		Build:     s.Build,
		Job:       s.Job,
		Vargs:     n.Vargs,
	}

	// Round-trip the vargs through YAML and JSON so the plugin receives
	// plain JSON-compatible types.
	y, err := yaml.Marshal(n.Vargs)
	if err != nil {
		log.Debug(err)
	}
	p.Vargs = map[string]interface{}{}
	err = yamljson.Unmarshal(y, &p.Vargs)
	if err != nil {
		log.Debug(err)
	}

	p.System = &plugin.System{
		Version: s.System.Version,
		Link:    s.System.Link,
	}

	b, err := json.Marshal(p)
	if err != nil {
		log.Debug(err)
	}
	return []string{"--", string(b)}
}
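// The payload struct marshalled by toCommand is not defined in this
// section. A minimal sketch inferred from the fields populated above; the
// concrete field types and JSON tags are assumptions, with interface{}
// standing in for the unknown workspace/repo/build/job types.
type payload struct {
	Workspace interface{}            `json:"workspace"`
	Repo      interface{}            `json:"repo"`
	Build     interface{}            `json:"build"`
	Job       interface{}            `json:"job"`
	System    *plugin.System         `json:"system"`
	Vargs     map[string]interface{} `json:"vargs"`
}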
// NewSnapshot initializes a SignedSnapshot with a given top level root
// and targets objects
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
	logrus.Debug("generating new snapshot...")
	targetsJSON, err := json.Marshal(targets)
	if err != nil {
		logrus.Debug("Error Marshalling Targets")
		return nil, err
	}
	rootJSON, err := json.Marshal(root)
	if err != nil {
		logrus.Debug("Error Marshalling Root")
		return nil, err
	}
	rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), NotaryDefaultHashes...)
	if err != nil {
		return nil, err
	}
	targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), NotaryDefaultHashes...)
	if err != nil {
		return nil, err
	}
	return &SignedSnapshot{
		Signatures: make([]Signature, 0),
		Signed: Snapshot{
			SignedCommon: SignedCommon{
				Type:    TUFTypes[CanonicalSnapshotRole],
				Version: 0,
				Expires: DefaultExpires(CanonicalSnapshotRole),
			},
			Meta: Files{
				CanonicalRootRole:    rootMeta,
				CanonicalTargetsRole: targetsMeta,
			},
		},
	}, nil
}
func startAgent(c Config) {
	for {
	Inner:
		for _, console := range c.Consoles {
			url := strings.Join([]string{"http://", console, ":", c.ConsolePort, "/agent"}, "")
			log.Debug("POSTing to URL: ", url)

			// JSON POST. Keep the marshalled bytes in their own variable
			// instead of shadowing the json package.
			payload, _ := json.Marshal(getData(c))
			log.Debug("POST ", string(payload))

			req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
			if err != nil {
				log.Warn("Failed to build request for ", url)
				continue Inner
			}
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")

			client := &http.Client{}
			resp, err := client.Do(req)
			if err != nil {
				log.Warn("Failed to POST to ", url)
				continue Inner
			}

			log.Info("Successful POST to ", url)
			log.Debug("Response Status: ", resp.Status)
			log.Debug("Response Headers: ", resp.Header)
			body, _ := ioutil.ReadAll(resp.Body)
			// Close immediately rather than defer: this function loops
			// forever, so deferred closes would pile up and leak connections.
			resp.Body.Close()
			log.Debug("Response Body: ", string(body))
		}

		// Sleep before the next round of POSTs
		time.Sleep(2 * time.Minute)
	}
}
func (ns *NerveService) Run(stop <-chan bool) {
	defer servicesWaitGroup.Done()
	log.Debug("Service Running [", ns.Name, "]")

Loop:
	for {
		// Run the check and report its status
		status, err := ns.Watcher.Check()
		if err != nil {
			log.Warn("Check error for Service [", ns.Name, "] [", err, "]")
		}
		ns.Reporter.Report(status)

		// Wait for the stop signal
		select {
		case hasToStop := <-stop:
			if hasToStop {
				log.Debug("Nerve: Service [", ns.Name, "] Run Close Signal Received")
			} else {
				log.Debug("Nerve: Service [", ns.Name, "] Run Close Signal Received (but a strange false one)")
			}
			break Loop
		default:
			time.Sleep(time.Millisecond * time.Duration(ns.CheckInterval))
		}
	}

	err := ns.Reporter.Destroy()
	if err != nil {
		log.Warn("Service [", ns.Name, "] has detected an error when destroying Reporter (", err, ")")
	}
	log.Debug("Service [", ns.Name, "] stopped")
}
// If we shut down without doing this stuff, we will lose some of the packet data
// still in the processing pipeline.
func gracefulShutdown(channels []chan *packetData, reChan chan tcpDataStruct, logChan chan dnsLogEntry) {
	waitTime := 3
	numprocs := len(channels)

	log.Debug("Draining TCP data...")
OUTER:
	for {
		select {
		case reassembledTcp := <-reChan:
			pd := NewTcpData(reassembledTcp)
			// Note: this fan-out only distributes evenly if numprocs is a
			// power of two, since it masks the hash rather than taking a modulo.
			channels[int(reassembledTcp.IpLayer.FastHash())&(numprocs-1)] <- pd
		case <-time.After(3 * time.Second):
			break OUTER
		}
	}

	log.Debug("Stopping packet processing...")
	for i := 0; i < numprocs; i++ {
		close(channels[i])
	}

	log.Debug("waiting for log pipeline to flush...")
	close(logChan)
	for len(logChan) > 0 {
		waitTime--
		if waitTime == 0 {
			log.Debug("exited with messages remaining in log queue!")
			return
		}
		time.Sleep(time.Second)
	}
}
// POST /events/end HTTP Handler
func EndCreateHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	rbody := &endCreateReqBody{}

	// Decode JSON
	err := decoder.Decode(rbody)
	if err != nil {
		log.Debug(err)
		http.Error(w, http.StatusText(400), 400)
		return
	}

	// Validate
	res, err := v.ValidateStruct(rbody)
	if err != nil {
		log.Debug(res)
		http.Error(w, http.StatusText(422), 422)
		return
	}

	// Publish event
	if err := events.PublishEndEvent(
		c.Env["REDIS"].(*redis.Client),
		rbody.Track,
		rbody.User); err != nil {
		log.Error(err)
		http.Error(w, http.StatusText(500), 500)
		return
	}

	// We got to the end - everything went fine!
	w.WriteHeader(201)
}
// Populate the selected database with the data scraped from the term URL.
func PopulateDB(termURL, ip, port, dbName, collectionName string) error {
	scrapeDB := db.New(ip, port, dbName, collectionName)
	err := scrapeDB.Init()
	if err != nil {
		return err
	}

	log.Debug("purging database")
	scrapeDB.Purge()

	term, err := scrape.GetXML(termURL)
	if err != nil {
		return err
	}

	courseChan := make(chan types.Class)
	go scrape.DigestAll(term, courseChan)

	for class := range courseChan {
		err = scrapeDB.Put(class)
		if err != nil {
			return err
		}
	}
	log.Debug("finished populating database")
	return nil
}
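// PopulateDB's range over courseChan only terminates once the producer
// closes the channel. A sketch of the contract scrape.DigestAll is assumed
// to satisfy (the real implementation is not shown here; the term parameter
// type and its Classes slice are illustrative):
func DigestAll(term types.Term, out chan<- types.Class) {
	// Closing the channel unblocks the consumer's range loop once all
	// classes have been sent.
	defer close(out)
	for _, class := range term.Classes {
		out <- class
	}
}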
func Start(w http.ResponseWriter, r *http.Request) {
	vars, session, err := initSession(w, r)
	if err != nil {
		return
	}
	if *startSecretKey == "" || isValidToken(vars["CardId"], *startSecretKey) {
		log.Debugf("Valid Start page: %v", vars["CardId"])
		session.Values["cardId"] = vars["CardId"]
		sessions.Save(r, w)

		var fileName string
		if session.Values["admin"] == "1" {
			log.Debug("Sending Admin UI")
			fileName = root + "/admin.html"
		} else {
			log.Debug("Sending User UI")
			fileName = root + "/public.html"
		}

		f, err := os.Open(fileName)
		if err == nil {
			// Close the file once it has been served; os.Open would
			// otherwise leak the descriptor.
			defer f.Close()
			http.ServeContent(w, r, fileName, time.Time{}, f)
		} else {
			log.Error(err.Error())
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	} else {
		log.Infof("Bad Start page: %v", vars["CardId"])
		err = templates["bad-cards.html"].Execute(w, nil)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Action() {
	case changelist.ActionCreate:
		logrus.Debug("changelist add: ", c.Path())
		meta := &data.FileMeta{}
		err = json.Unmarshal(c.Content(), meta)
		if err != nil {
			return err
		}
		files := data.Files{c.Path(): *meta}

		// Attempt to add the target to this role
		if _, err = repo.AddTargets(c.Scope(), files); err != nil {
			logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
		}

	case changelist.ActionDelete:
		logrus.Debug("changelist remove: ", c.Path())

		// Attempt to remove the target from this role
		if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
			logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
		}

	default:
		logrus.Debug("action not yet supported: ", c.Action())
	}
	return err
}
func waitPortBinding(watchedPort string, strictBinding bool) {
	for {
		p := procfs.Self()
		pids := []int{}
		if strictBinding {
			descendants, err := p.Descendants()
			if err != nil {
				log.Error(err)
				break
			}
			for _, d := range descendants {
				pids = append(pids, d.Pid)
			}
			log.Debug(pids)
		}

		binderPid, err := port.IsPortBound(watchedPort, pids)
		if err != nil {
			log.Error(err)
			break
		}
		log.Debug(binderPid)

		if binderPid != -1 {
			log.Debugf("port %s bound by pid %d (used strict check: %v)", watchedPort, binderPid, strictBinding)
			processStateChanged(notifier.StatusRunning)
			break
		}
		time.Sleep(200 * time.Millisecond)
	}
}
// Stop all socket listeners
func (app *App) stopListeners() {
	if app.TCP != nil {
		app.TCP.Stop()
		app.TCP = nil
		logrus.Debug("[tcp] finished")
	}

	if app.Pickle != nil {
		app.Pickle.Stop()
		app.Pickle = nil
		logrus.Debug("[pickle] finished")
	}

	if app.UDP != nil {
		app.UDP.Stop()
		app.UDP = nil
		logrus.Debug("[udp] finished")
	}

	if app.CarbonLink != nil {
		app.CarbonLink.Stop()
		app.CarbonLink = nil
		logrus.Debug("[carbonlink] finished")
	}
}
// ValidateUsername checks if a username is already taken or not
func (service *Service) ValidateUsername(w http.ResponseWriter, request *http.Request) {
	username := request.URL.Query().Get("username")
	response := struct {
		Valid bool   `json:"valid"`
		Error string `json:"error"`
	}{
		Valid: true,
		Error: "",
	}

	valid := user.ValidateUsername(username)
	if !valid {
		log.Debug("Invalid username format: ", username)
		response.Error = "invalid_username_format"
		response.Valid = false
		json.NewEncoder(w).Encode(&response)
		return
	}

	userMgr := user.NewManager(request)
	userExists, err := userMgr.Exists(username)
	if err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	if userExists {
		log.Debug("username ", username, " already taken")
		response.Error = "duplicate_username"
		response.Valid = false
	}
	json.NewEncoder(w).Encode(&response)
}
func StartWriteListening(readQueue *lane.Queue, defaultWriters []string, writePort int) {
	cList := NewConnectionList()

	socket, err := net.Listen("tcp", ":"+strconv.Itoa(writePort))
	if err != nil {
		// Without a listening socket there is nothing to accept on, so bail
		// out instead of dereferencing a nil listener below.
		logrus.Error(err)
		return
	}

	// Begin trying to connect to default endpoints
	for _, writer := range defaultWriters {
		logrus.Debug("Opening connection to endpoint ", writer, "...")
		conn, err := net.Dial("tcp", writer)
		if err != nil {
			logrus.Error(err)
		} else {
			logrus.Debug("Connected to endpoint...")
			cList.AddConnection(conn)
			go HandleWriteConnections(cList, readQueue)
		}
	}

	for {
		// Begin trying to accept connections
		logrus.Debug("Awaiting Connection...")

		// Block and wait for listeners
		conn, err := socket.Accept()
		if err != nil {
			logrus.Error(err)
		} else {
			logrus.Debug("Accepted Connection...")
			cList.AddConnection(conn)
			go HandleWriteConnections(cList, readQueue)
		}
	}
}
// The container index
func Agent(w http.ResponseWriter, r *http.Request) {
	log.Debug("/agent POST")

	// Make a channel to dump our requests asynchronously
	respCh := make(chan *HttpPost)

	// Make an array of hostData to feed into
	hostDataArray := []*HttpPost{}

	// Spawn a goroutine to dump the data into our channel
	go func(r *http.Request) {
		var newData HttpPost
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Error(err)
		}

		// Unmarshal the POST into .Data
		if err = json.Unmarshal(body, &newData.Data); err != nil {
			log.Error(err)
		}

		// Type assert our way to the hostname, guarding each step so a
		// malformed payload cannot panic the handler
		if host, ok := newData.Data["host"].(map[string]interface{}); ok {
			if hostname, ok := host["hostname"].(string); ok {
				newData.Host = hostname
			}
		}
		//newData.Time = string(time.Now().Format("2006010215040500"))

		respCh <- &newData
	}(r)

	// Wait on the channel for a resp
	data := <-respCh
	// log.Debug("New data from ", data.Host, "@", data.Time)
	log.Debug("New data from ", data.Host)
	log.Debug(data.Data)
	hostDataArray = append(hostDataArray, data)
	dumpToElastic(hostDataArray)
}
func cleanup(r *bytes.Buffer, e *etcd.Client, ep *testProcess, dp *testProcess) {
	if r != nil {
		log.Debug("Writing report")
		rpath := testDir + "/report.txt"
		if err := ioutil.WriteFile(rpath, r.Bytes(), 0644); err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"func":  "ioutil.WriteFile",
				"path":  rpath,
			}).Warning("Could not write report")
		}
	}

	if dp != nil {
		log.Debug("Exiting cdhcpd")
		_ = dp.finish()
		time.Sleep(time.Second)
	}

	if e != nil {
		log.Debug("Clearing test data")
		if _, err := e.Delete("/lochness", true); err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"func":  "etcd.Delete",
			}).Warning("Could not clear test-created data from etcd")
		}
		time.Sleep(time.Second)
	}

	if ep != nil {
		log.Debug("Exiting etcd")
		_ = ep.finish()
	}

	log.Info("Done")
}
// PUT /volume HTTP Handler
func VolumeUpdateHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	rbody := &volumeUpdateReqBody{}

	// Decode JSON
	err := decoder.Decode(rbody)
	if err != nil {
		log.Debug(err)
		http.Error(w, http.StatusText(400), 400)
		return
	}

	// Validate
	res, err := v.ValidateStruct(rbody)
	if err != nil {
		log.Debug(res)
		http.Error(w, http.StatusText(422), 422)
		return
	}

	// Set the vol redis keys
	if err := events.PublishVolumeEvent(c.Env["REDIS"].(*redis.Client), rbody.Level); err != nil {
		log.Error(err)
		http.Error(w, http.StatusText(500), 500)
		return
	}

	// We got here! It's alllll good.
	w.WriteHeader(200)
}
// ExecIpmiToolLocal method runs ipmitool command on a local system
func ExecIpmiToolLocal(request []byte, strct *LinuxInBandIpmitool) []byte {
	c, err := exec.LookPath("ipmitool")
	if err != nil {
		log.Debug("Unable to find ipmitool")
		return nil
	}
	stringRequest := []string{"-b", strct.Channel, "-t", strct.Slave, "raw"}
	for i := range request {
		stringRequest = append(stringRequest, fmt.Sprintf("0x%02x", request[i]))
	}
	ret, err := exec.Command(c, stringRequest...).CombinedOutput()
	if err != nil {
		log.Debug("Unable to run ipmitool")
		return nil
	}
	// Use Fields rather than splitting on a single space: ipmitool output is
	// whitespace-separated and ends with a newline, which would otherwise
	// leave empty or newline-tainted tokens that fail to parse.
	returnStrings := strings.Fields(string(ret))
	rets := make([]byte, len(returnStrings))
	for i, element := range returnStrings {
		value, err := strconv.ParseInt(element, 16, 0)
		if err != nil {
			log.Debug("Unable to parse ipmitool output: ", element)
			return nil
		}
		rets[i] = byte(value)
	}
	return rets
}
func (h *Hoverfly) ExportSimulation() ([]byte, error) {
	slingRequest, err := h.buildGetRequest(v2ApiSimulation)
	if err != nil {
		return nil, err
	}

	response, err := h.doRequest(slingRequest)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Debug(err.Error())
		return nil, errors.New("Could not export from Hoverfly")
	}

	var jsonBytes bytes.Buffer
	err = json.Indent(&jsonBytes, body, "", "\t")
	if err != nil {
		log.Debug(err.Error())
		return nil, errors.New("Could not export from Hoverfly")
	}

	return jsonBytes.Bytes(), nil
}
func (h *Healthcheck) Run(debug bool) {
	if h.isRunning {
		return
	}
	hasquit := make(chan bool)
	quit := make(chan bool)
	run := make(chan bool)
	go func() {
		// Simple and dumb runner. Runs the healthcheck and then sleeps the
		// 'Every' time. Healthchecks are expected to complete much faster
		// than the Every time!
	Loop:
		for {
			select {
			case <-quit:
				log.Debug("Healthcheck is exiting")
				break Loop
			case <-run:
				log.Debug("Healthcheck is running")
				h.PerformHealthcheck()
				log.Debug("Healthcheck has run")
				sleepAndSend(h.Every, run) // Queue the next run up
			}
		}
		hasquit <- true
		close(hasquit)
	}()
	h.hasQuitChan = hasquit
	h.quitChan = quit
	h.isRunning = true
	run <- true // Fire straight away once set running
}
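// sleepAndSend is used above but not defined in this section. A minimal
// sketch of its assumed behavior (treating Every as an interval in
// milliseconds, which is a guess): wait out the interval, then queue the
// next run without blocking the runner's select loop.
func sleepAndSend(t uint, send chan<- bool) {
	go func() {
		time.Sleep(time.Duration(t) * time.Millisecond)
		send <- true
	}()
}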
func (h *Hoverfly) stop(hoverflyDirectory HoverflyDirectory) error {
	if !h.isLocal() {
		return errors.New("hoverctl can not stop an instance of Hoverfly on a remote host")
	}

	pid, err := hoverflyDirectory.GetPid(h.AdminPort, h.ProxyPort)
	if err != nil {
		log.Debug(err.Error())
		return errors.New("Could not read Hoverfly pid file")
	}

	if pid == 0 {
		return errors.New("Hoverfly is not running")
	}

	hoverflyProcess := os.Process{Pid: pid}
	err = hoverflyProcess.Kill()
	if err != nil {
		log.Info(err.Error())
		return errors.New("Could not kill Hoverfly")
	}

	err = hoverflyDirectory.DeletePid(h.AdminPort, h.ProxyPort)
	if err != nil {
		log.Debug(err.Error())
		return errors.New("Could not delete Hoverfly pid")
	}

	return nil
}
func (db *DBFiles) Put(values []string, key ...string) error {
	record := record{
		values:  values,
		key:     key,
		basedir: db.BaseDir,
	}

	_, err := os.Stat(record.basedir)
	if os.IsNotExist(err) {
		err := db.Structure.Create(record.basedir)
		if err != nil {
			return errgo.Notef(err, "can not create structure")
		}
	}

	file, err := db.Structure.File(record.basedir, db.Driver, record.key)
	if err != nil {
		return errgo.Notef(err, "can not open file")
	}
	defer file.Close()

	err = db.Driver.Write(file, record.values)
	if err != nil {
		return errgo.Notef(err, "can not write values")
	}

	// Read the record back for debug logging. The write above leaves the
	// cursor at the end of the file, so seek back to the start first; the
	// original read into a nil slice returned no data at all.
	if _, err := file.Seek(0, io.SeekStart); err == nil {
		data, _ := ioutil.ReadAll(file)
		log.Debug("Data: ", string(data))
	}

	log.Debug("finished writing record: ", record)

	return nil
}
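// putExample is a hypothetical usage of Put: persist one record under a
// composite key (the values and key parts are illustrative only).
func putExample(db *DBFiles) error {
	return db.Put([]string{"alice", "alice@example.com"}, "users", "alice")
}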
/*
	Stop the job running under this tool; don't forget to clean up any
	file system resources, etc.
*/
func (v *johndictTasker) Quit() common.Job {
	log.WithField("Task", v.job.UUID).Debug("Attempting to quit johndict task.")

	// Update the job's status
	log.Debug("Getting status before quit")
	v.Status()

	v.mux.Lock()

	// Kill the process after a SIGHUP
	log.Debug("Sending SIGHUP before process kill")
	v.cmd.Process.Signal(syscall.SIGHUP)

	log.Debug("Sending kill signal to process")
	v.cmd.Process.Kill()

	v.mux.Unlock()

	// Wait for the program to actually exit
	log.Debug("Waiting on the process to finish")
	<-v.doneWaitChan

	// Change the status to quit
	log.Debug("Change status")
	v.mux.Lock()
	v.job.Status = common.STATUS_QUIT
	v.mux.Unlock()

	log.WithField("Task", v.job.UUID).Debug("Task has been quit successfully.")
	return v.job
}
func (q *Queue) AllTaskStatus(rpc common.RPCCall, j *[]common.Job) error {
	log.Debug("Gathering all Task Status")

	// Add a deferred catch for panics from within the tools
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("Recovered from Panic in Resource.AllTaskStatus: %v", err)
		}
	}()

	log.Debug("Gathering status on all jobs")

	// Loop through any tasks in the stack and update their status while
	// grabbing the Job object output
	var jobs []common.Job

	q.Lock()
	for i := range q.stack {
		jobs = append(jobs, q.stack[i].Status())
	}
	*j = jobs
	q.Unlock()

	return nil
}
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *Client) downloadTimestamp() error {
	logrus.Debug("Loading timestamp...")
	role := data.CanonicalTimestampRole
	consistentInfo := c.newBuilder.GetConsistentInfo(role)

	// get the cached timestamp, if it exists
	cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize)

	// always get the remote timestamp, since it supersedes the local one
	_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)

	switch {
	case remoteErr == nil:
		return nil
	case cachedErr == nil:
		logrus.Debug(remoteErr.Error())
		logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
		err := c.newBuilder.Load(role, cachedTS, 1, false)
		if err == nil {
			logrus.Debug("successfully verified cached timestamp")
		}
		return err
	default:
		logrus.Debug("no cached or remote timestamp available")
		return remoteErr
	}
}