func (serv ApiService) ApiAddContact(d Contact) {
	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READWRITE).connspec)
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	// Check for an existing contact definition
	exists, err := c.Sismember(CONTACT_LIST, []byte(CONTACT_PREFIX+":"+d.Name))
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}
	if exists {
		serv.ResponseBuilder().SetResponseCode(409).WriteAndOveride([]byte("Contact already exists"))
		return
	}

	// All clear, add the contact
	k := CONTACT_PREFIX + ":" + d.Name
	c.Sadd(CONTACT_LIST, []byte(k))
	c.Hset(k, "name", []byte(d.Name))
	c.Hset(k, "display_name", []byte(d.DisplayName))
	c.Hset(k, "email", []byte(d.EmailAddress))
	serv.ResponseBuilder().Created("/api/contacts/" + k)
}
func (serv ApiService) ApiAddHost(h HostDefinition) {
	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READWRITE).connspec)
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	// Check for an existing host definition
	exists, err := c.Sismember(HOSTS_LIST, []byte(HOST_PREFIX+":"+h.Name))
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}
	if exists {
		serv.ResponseBuilder().SetResponseCode(409).WriteAndOveride([]byte("Host already exists"))
		return
	}

	// All clear, add the host
	k := HOST_PREFIX + ":" + h.Name
	c.Sadd(HOSTS_LIST, []byte(k))
	c.Hset(k, "name", []byte(h.Name))
	c.Hset(k, "address", []byte(h.Address))
	serv.ResponseBuilder().Created("/api/hosts/" + k)
}
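// For reference, a sketch of the payload types the two handlers above and the
// getters below appear to assume. These shapes are inferred from field usage
// only; the real definitions (including any gorest marshalling tags) live
// elsewhere in the package:
//
//	type Contact struct {
//		Name         string
//		DisplayName  string
//		EmailAddress string
//	}
//
//	type HostDefinition struct {
//		Name    string
//		Address string
//		Checks  []string
//	}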
func threadAlert(threadNum int) {
	c, cerr := redis.NewSynchClientWithSpec(getConnection(REDIS_READWRITE).connspec)
	if cerr != nil {
		log.Info(fmt.Sprintf("Alert thread #%d unable to acquire db connection", threadNum))
		return
	}

	log.Info(fmt.Sprintf("Starting alert thread #%d", threadNum))
	for {
		//log.Info(fmt.Sprintf("[%d] BLPOP %s 0", threadNum, ALERT_QUEUE))
		out, oerr := c.Blpop(ALERT_QUEUE, 0)
		if oerr != nil {
			log.Err(fmt.Sprintf("[ALERT %d] %s", threadNum, oerr.Error()))
		} else if out == nil {
			log.Info(fmt.Sprintf("[ALERT %d] No output", threadNum))
		} else if len(out) == 2 {
			// BLPOP returns a [key, value] pair; the value is the alert payload
			log.Info(string(out[1]))
		}

		// Avoid potential pig-pile
		time.Sleep(10 * time.Millisecond)
	}
}
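// For context, a minimal producer-side sketch: anything that wants one of the
// threadAlert workers above to emit an alert only needs to RPUSH a payload
// onto ALERT_QUEUE. enqueueAlert is a hypothetical helper, not part of the
// original code; the worker treats the popped value as an opaque string.
func enqueueAlert(c redis.Client, msg string) {
	if e := c.Rpush(ALERT_QUEUE, []byte(msg)); e != nil {
		log.Err(e.Error())
	}
}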
func (self *FSRedis) Put(d FileStoreDescriptor, c []byte) (FileStoreDescriptor, error) {
	dU := d

	// RW connection
	conn, err := redis.NewSynchClientWithSpec(self.getConnection(REDIS_READWRITE).connspec)
	if err != nil {
		return dU, err
	}

	// Create new location
	k := "fs_" + strconv.FormatInt(d.Id, 16) + "_" + dU.Name
	l := FileStoreLocation{
		Id:       self.RwServer, // store server name, in case of migration
		Driver:   self.DriverName(),
		Created:  time.Now(),
		Location: k,
	}

	// Push out to Redis
	err = conn.Set(k, c)
	if err != nil {
		return dU, err
	}

	// Append location
	if dU.Location == nil {
		dU.Location = make([]FileStoreLocation, 0)
	}
	dU.Location = append(dU.Location, l)

	// No errors, send back
	return dU, nil
}
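// The descriptor and location types used by Put/Get/Delete are defined in the
// file store layer; this sketch of their assumed shape is inferred from the
// field usage above (Id is formatted with FormatInt, so it is taken to be an
// int64; Location accumulates one entry per driver that holds a copy):
//
//	type FileStoreLocation struct {
//		Id       string // name of the server the copy was written to
//		Driver   string // driver that owns this copy
//		Created  time.Time
//		Location string // driver-specific key or path
//	}
//
//	type FileStoreDescriptor struct {
//		Id       int64
//		Name     string
//		Location []FileStoreLocation
//	}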
func (serv ApiService) ApiGetContacts() (r []Contact) {
	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READONLY).connspec)
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	cmembers, e := c.Smembers(CONTACT_LIST)
	if e != nil {
		log.Err(e.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	ret := make([]Contact, len(cmembers))
	for i := 0; i < len(cmembers); i++ {
		cmember := string(cmembers[i])
		cdef := Contact{}

		// Grab full info from member; HGETALL returns a flat
		// [field, value, field, value, ...] list
		h, e := c.Hgetall(cmember)
		if e != nil {
			log.Err(e.Error())
		} else {
			for j := 0; j < len(h); j += 2 {
				k := string(h[j])
				v := string(h[j+1])
				switch k {
				case "name":
					cdef.Name = v
				case "display_name":
					cdef.DisplayName = v
				case "email":
					cdef.EmailAddress = v
				default:
					log.Debug("Unknown key " + k + " sighted in contact " + cmember)
				}
			}
			ret[i] = cdef
		}
	}
	return ret
}
func (serv ApiService) ApiGetStatus() (r []CheckStatus) {
	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READONLY).connspec)
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	cmembers, e := c.Smembers(CHECKS_LIST)
	if e != nil {
		log.Err(e.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	ret := make([]CheckStatus, len(cmembers))
	// TODO: FIXME: XXX: pull statuses to return!
	return ret
}
func (self *FSRedis) Delete(d FileStoreDescriptor, l FileStoreLocation) (FileStoreDescriptor, error) {
	dU := d

	// RW connection
	conn, err := redis.NewSynchClientWithSpec(self.getConnection(REDIS_READWRITE).connspec)
	if err != nil {
		return dU, err
	}

	// Delete from Redis
	_, err = conn.Del(l.Location)
	if err != nil {
		return dU, err
	}

	// Remove from the descriptor's location mapping
	RemoveLocation(dU, l)

	// No errors, send back
	return dU, nil
}
func (self *FSRedis) Get(d FileStoreDescriptor) ([]byte, FileStoreLocation, error) {
	// RO connection
	conn, err := redis.NewSynchClientWithSpec(self.getConnection(REDIS_READONLY).connspec)
	if err != nil {
		return nil, FileStoreLocation{}, err
	}

	// Find the pertinent FileStoreLocation
	l, rerr := LocationForDriver(d, self.DriverName())
	if rerr != nil {
		return nil, FileStoreLocation{}, rerr
	}

	// Retrieve the actual file data from Redis
	c, err := conn.Get(l.Location)
	if err != nil {
		return nil, l, err
	}

	// Send everything back
	return c, l, nil
}
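// LocationForDriver is defined elsewhere in the file store layer; from its
// call site above, it maps a descriptor to the copy written by a given
// driver. A minimal sketch of that lookup, under the assumed types noted
// earlier:
//
//	func LocationForDriver(d FileStoreDescriptor, driver string) (FileStoreLocation, error) {
//		for _, l := range d.Location {
//			if l.Driver == driver {
//				return l, nil
//			}
//		}
//		return FileStoreLocation{}, errors.New("no location recorded for driver " + driver)
//	}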
func (serv ApiService) ApiGetHosts() (r []HostDefinition) {
	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READONLY).connspec)
	if err != nil {
		log.Err(err.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	hmembers, e := c.Smembers(HOSTS_LIST)
	if e != nil {
		log.Err(e.Error())
		serv.ResponseBuilder().SetResponseCode(500).WriteAndOveride([]byte("Error connecting to backend"))
		return
	}

	ret := make([]HostDefinition, len(hmembers))
	for i := 0; i < len(hmembers); i++ {
		hmember := string(hmembers[i])
		hdef := HostDefinition{}

		// Grab full info from member
		h, e := c.Hgetall(hmember)
		if e != nil {
			log.Err(e.Error())
		} else {
			for j := 0; j < len(h); j += 2 {
				k := string(h[j])
				v := string(h[j+1])
				switch k {
				case "name":
					hdef.Name = v
				case "address":
					hdef.Address = v
				default:
					log.Debug("Unknown key " + k + " sighted in host " + hmember)
				}
			}

			// Get list of checks from the host's reverse index; the hash
			// holds check-key -> lastrun pairs, and we keep only the check
			// names, with the prefix stripped
			h, e := c.Hgetall(hmember + ":checks")
			if e != nil {
				log.Err(e.Error())
			} else {
				hdef.Checks = make([]string, len(h)/2)
				for j := 0; j < len(h); j += 2 {
					k := string(h[j])
					hdef.Checks[j/2] = strings.Replace(k, CHECK_PREFIX+":", "", -1)
				}
			}
			ret[i] = hdef
		}
	}
	return ret
}
func threadControl() {
	if ControlThreadRunning {
		log.Warning("Control thread start attempted, but it looks like it's already running")
		return
	}
	log.Info("Starting control thread")

	c, err := redis.NewSynchClientWithSpec(getConnection(REDIS_READWRITE).connspec)
	if err != nil {
		log.Err(err.Error())
		return
	}

	// Check to see if we need to run the control thread, or if it is
	// currently running on another host
	for {
		if grabControlThread(c) {
			// If we're actually starting up, set global running flag
			ControlThreadRunning = true
			for {
				if !ControlThreadRunning {
					// Catch shutdown
					log.Warning("ControlThreadRunning was set to false, shutting down control thread")
					return
				}

				// Endlessly attempt to schedule checks
				members, err := c.Smembers(CHECKS_LIST)
				if err != nil {
					log.Err("Unable to pull from key " + CHECKS_LIST)
				} else {
					// Pull list of all hosts/services
					for i := 0; i < len(members); i++ {
						member := string(members[i])

						// Pull last run and schedule interval to see if this
						// needs to be scheduled for another run, and push
						// onto POLL_QUEUE.
						intervalRaw, err := c.Hget(member, "interval")
						if err != nil {
							log.Err(err.Error())
							continue
						}
						interval, _ := strconv.ParseUint(string(intervalRaw), 10, 64)
						command, err := c.Hget(member, "command")
						if err != nil {
							log.Err(err.Error())
							continue
						}
						typeRaw, err := c.Hget(member, "type")
						if err != nil {
							log.Err(err.Error())
							continue
						}
						checkType, _ := strconv.ParseUint(string(typeRaw), 10, 32)
						curtime := uint64(time.Now().Unix())

						items, err := c.Hgetall(member + ":hosts")
						if err == nil {
							// HGETALL returns a flat [host, lastrun, ...] list
							for j := 0; j < len(items); j += 2 {
								host := string(items[j])
								lastrun, _ := strconv.ParseUint(string(items[j+1]), 10, 64)
								//log.Info(fmt.Sprintf("curtime = %d, lastrun = %d, diff = %d, interval = %d", curtime, lastrun, curtime-lastrun, interval))
								if curtime-lastrun >= interval {
									log.Info(fmt.Sprintf("Adding %s : %s to poll queue", member, host))

									// Set lastrun to current time
									e := c.Hset(member+":hosts", host, []byte(fmt.Sprint(curtime)))
									if e != nil {
										log.Err(e.Error())
									}
									// Also update reverse index
									e = c.Hset(host+":checks", member, []byte(fmt.Sprint(curtime)))
									if e != nil {
										log.Err(e.Error())
									}

									// Form JSON object to serialize onto the scheduler stack
									obj := PollCheck{
										Host:        host,
										CheckName:   member,
										EnqueueTime: curtime,
										Type:        uint(checkType),
										Command:     string(command),
									}
									o, err := json.Marshal(obj)
									if err == nil {
										e = c.Rpush(POLL_QUEUE, o)
										if e != nil {
											log.Err(e.Error())
										}
									} else {
										log.Err(err.Error())
									}
								}
							}
						}
					}
				}

				// Extend control thread expiry
				extendControlExpiry(c)

				// Sleep for a couple of seconds to avoid CPU piling.
				time.Sleep(2000 * time.Millisecond)
			}
		} else {
			log.Debug("ControlThread: already owned, waiting to start.")
		}

		// Sleep for a few seconds to avoid CPU piling.
		time.Sleep(15 * time.Second)
	}
}
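// grabControlThread and extendControlExpiry are defined elsewhere; the loop
// above uses them as a Redis-based leadership lock so only one node runs the
// scheduler at a time. A minimal sketch of one common shape for that pattern;
// the key name and TTL here are assumptions, not the code's actual values:
//
//	func grabControlThread(c redis.Client) bool {
//		host, _ := os.Hostname()
//		// SETNX succeeds only for the first contender; the expiry set below
//		// lets another node take over if the owner dies without cleaning up.
//		ok, err := c.Setnx("control_thread_owner", []byte(host))
//		if err != nil {
//			log.Err(err.Error())
//			return false
//		}
//		if ok {
//			extendControlExpiry(c)
//		}
//		return ok
//	}
//
//	func extendControlExpiry(c redis.Client) {
//		if _, err := c.Expire("control_thread_owner", 60); err != nil {
//			log.Err(err.Error())
//		}
//	}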