Example #1
func visSpark(data []giles.SmapMessage) {
	for _, msg := range data {
		readings := extractDataNumeric(msg)
		sparkline := spark.Line(readings)
		fmt.Printf("%s %v %v %v\n", msg.UUID, readings[0], sparkline, readings[len(readings)-1])
	}
}
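Each of these examples passes a []float64 to spark.Line, which returns the series rendered as a single-line Unicode sparkline string. A minimal, self-contained sketch of that call, assuming the spark package is github.com/joliv/spark (the sample values below are made up):

package main

import (
	"fmt"

	"github.com/joliv/spark" // assumption: the spark package used by these examples
)

func main() {
	// Made-up sample readings; spark.Line scales them into block characters.
	readings := []float64{1, 5, 22, 13, 53, 0, 27, 12}
	fmt.Println(spark.Line(readings))
}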
Example #2
func testLatency(server Testserver) error {
	// perform a full latency test
	durs, err := server.Ping(fullTestCount)
	if err != nil {
		return err
	}
	var avg, max, min uint64
	var latencies []float64
	for i := range durs {
		ms := uint64(durs[i].Nanoseconds() / 1000000)
		latencies = append(latencies, float64(ms))
		avg += ms
		if ms > max {
			max = ms
		}
		if ms < min || min == 0 {
			min = ms
		}
	}
	avg = avg / uint64(len(durs))
	median := durs[len(durs)/2].Nanoseconds() / 1000000
	sparkline := spark.Line(latencies)
	fmt.Printf("Latency: %s\t%dms avg\t%dms median\t%dms max\t%dms min\n", sparkline, avg, median, max, min)
	return nil
}
Example #3
func (m *Merki) DrawSparkline(fileName, measure string) (string, error) {
	var values []float64
	parser := NewParser(string(m.delimiter))
	go parser.ParseFile(fileName)
	err := func() error {
		for {
			select {
			case record := <-parser.Record:
				if record.Measurement == measure {
					values = append(values, record.Value)
				}
			case err := <-parser.Error:
				return err
			case <-parser.Done:
				return nil
			}
		}
	}()
	if err != nil {
		return "", err
	}
	sparkline := spark.Line(values)
	return sparkline, nil
}
Example #4
func Message(text string, source User, channel string, response MessageTarget) {
	if source.HasRights() {
		data := ParseLine(text)
		if !response.IsPublic() {
			data = append([]string{Config.Name}, data...)
		}
		forMe := false
		if data[0] == Config.Name {
			forMe = true
		}
		for _, group := range Config.Groups {
			if group == data[0] {
				forMe = true
				break
			}
		}
		if !forMe {
			return
		}
		if len(data) < 2 {
			return
		}
		if cmd, ok := Config.Commands[data[1]]; ok {
			args := cmd.Command
			for pos, param := range data {
				args = strings.Replace(args, fmt.Sprintf("$%d", pos), param, -1)
			}
			cmdexec := exec.Command("bash", "-c", args)
			output, _ := cmdexec.CombinedOutput()
			if cmd.Output {
				lines := strings.Split(string(output), "\n")
				for _, line := range lines {
					response.SendMessage(line)
				}
			}
		} else if log, ok := Config.Logs[data[1]]; ok {
			for _, line := range log.lines {
				response.SendMessage("%s", line.Text)
			}
		} else if data[1] == "get" {
			if len(data) < 3 {
				response.SendMessage("Please provide an expression to compute")
				return
			}
			c, err := Decode(data[2])
			if err != nil {
				response.SendMessage("Error parsing expression: %s", err)
				return
			}
			val, err := RunCompute(c)
			if err != nil {
				response.SendMessage("Error computing expression: %s", err)
				return
			}
			response.SendMessage("%s = %v", data[2], val)
		} else if data[1] == "monitor" {
			if len(data) < 3 {
				if response.IsPublic() {
					response.SendMessage("Responding in PM")
				}
				source.SendMessage("List of available monitors")
				for name := range Config.Monitors {
					source.SendMessage(name)
				}
				return
			}
			if monitor, ok := Config.Monitors[data[2]]; ok {
				if len(data) < 4 {
					if response.IsPublic() {
						response.SendMessage("Responding in PM")
					}
					source.SendMessage("Available monitor commands: variables, get")
					return
				}
				switch data[3] {
				case "variables":
					if response.IsPublic() {
						response.SendMessage("Responding in PM")
					}
					variables := monitor.monitor.GetVariables()
					if len(data) > 4 {
						regex, err := regexp.Compile("(?i)" + data[4])
						if err != nil {
							source.SendMessage("Error compiling regex: %s", err)
							return
						}
						newvars := []string{}
						for _, name := range variables {
							if regex.MatchString(name) {
								newvars = append(newvars, name)
							}
						}
						variables = newvars
					}

					if len(variables) > 10 {
						if len(data) > 4 {
							source.SendMessage("There are over %d variables in monitor %s matching %s, filter using `monitor %s variables <regex>`", len(variables), data[2], data[4], data[2])
						} else {
							source.SendMessage("There are over %d variables in monitor %s, filter using `monitor %s variables <regex>`", len(variables), data[2], data[2])
						}
					} else {
						if len(data) > 4 {
							source.SendMessage("List of %d variables in monitor %s matching %s", len(variables), data[2], data[4])
						} else {
							source.SendMessage("List of %d variables in monitor %s", len(variables), data[2])
						}
						for _, name := range variables {
							source.SendMessage(name)
						}
					}
				case "get":
					if len(data) < 5 {
						response.SendMessage("Please specify a variable or variables to retrieve")
						return
					}
					variables := data[4:]
					values := monitor.monitor.GetValues(variables)
					for _, variable := range variables {
						if value, ok := values[variable]; ok {
							response.SendMessage("%s = %v", variable, value)
						}
					}
				case "track":
					if len(data) < 5 {
						if response.IsPublic() {
							response.SendMessage("Responding in PM")
						}
						source.SendMessage("History tracking for %d variables", len(monitor.track.Variables))
						for variable, vt := range monitor.track.Variables {
							source.SendMessage("%s = %d items", variable, vt.History)
						}
						return
					}
					if len(data) < 6 {
						if vt, ok := monitor.track.Variables[data[4]]; ok {
							response.SendMessage("History tracking for variable %s of monitor %s set to %v items", data[4], data[2], vt.History)
						} else {
							response.SendMessage("Not tracking history for variable %s of monitor %s", data[4], data[2])
						}
						return
					}
					h, err := strconv.ParseInt(data[5], 10, 32)
					if err != nil {
						response.SendMessage("Error parsing %s: %s", data[5], err)
						return
					}
					fmt.Println(monitor.track, data[4], h)
					monitor.track.SetTrack(data[4], int(h))
					response.SendMessage("History tracking for variable %s of monitor %s set to %v items", data[4], data[3], h)
				case "interval":
					if len(data) < 5 {
						response.SendMessage("Interval for monitor %s set to %v", data[3], monitor.track.Interval)
						return
					}
					interval, err := strconv.ParseInt(data[4], 10, 32)
					if err != nil {
						response.SendMessage("Error parsing %s: %s", data[4], err)
						return
					}
					monitor.track.Interval = int(interval)
					monitor.track.timer.Reset(time.Second * time.Duration(interval))
					response.SendMessage("Interval for monitor %s set to %v", data[3], interval)
				case "spark":
					if len(data) < 5 {
						response.SendMessage("Please specify a variable to display")
						return
					}
					vt, ok := monitor.track.Variables[data[4]]
					if !ok {
						response.SendMessage("Not tracking that variable")
						return
					}
					values := make([]float64, len(vt.Data))
					high := -math.MaxFloat64
					low := math.MaxFloat64
					for i, val := range vt.Data {
						switch tt := val.(type) {
						case float64:
							values[i] = tt
						case float32:
							values[i] = float64(tt)
						case uint32:
							values[i] = float64(tt)
						case uint64:
							values[i] = float64(tt)
						case int32:
							values[i] = float64(tt)
						case int64:
							values[i] = float64(tt)
						default:
							response.SendMessage("Variable is of type %t, cannot spark", tt)
						}
						if values[i] > high {
							high = values[i]
						}
						if values[i] < low {
							low = values[i]
						}
					}
					response.SendMessage("%s: %s High: %v Low: %v", data[4], spark.Line(values), high, low)
				default:
					response.SendMessage("Monitor command `%s` not recognized", data[3])
				}

			}
		}
	}
}
Example #5
func bucketize() {
	events := make([]*RawLogEvent, 0)
	flush := time.Tick(flush_interval)

	for {
		select {

		// append incoming RawLogEvent to events[]
		case event := <-rawlog_output:
			events = append(events, &event)

		case <-flush:
			// take all the log lines since the last flush,
			// generate stats, put results in a bucket,
			// append the bucket to a slice,
			// fire off stats reporting to the UI
			// fire off an alert check/ui update
			_ip := make(map[string]int)
			_referer := make(map[string]int)
			_section := make(map[string]int)
			_status := make(map[string]int)
			_useragent := make(map[string]int)
			ip := make([]Counter, 0)
			referer := make([]Counter, 0)
			section := make([]Counter, 0)
			status := make([]Counter, 0)
			useragent := make([]Counter, 0)
			timestamp := time.Now().Local()
			var bytes int64 = 0
			var hits int = 0

			// roll up, aggregate, average out
			for _, event := range events {
				_ip[event.ip]++
				path := strings.Split(event.query, "/")[1]
				_section[path]++
				_status[strconv.Itoa(event.status)]++
				bytes += event.bytes
				_referer[event.referer]++
				_useragent[event.useragent]++
				hits++
			}

			// empty the events slice
			events = events[0:0]

			// Ugh, this needs refactoring; it's a result of learning Go while writing this code.
			// I used maps to count uniques, then learned you can't sort a map directly, but you
			// can copy it into a sortable slice of structs. So this copies the maps into Counters
			// and sorts them; ideally we'd drop the maps and use Counter directly
			// (see the sort.Slice sketch after this example).
			for k, v := range _ip {
				ip = append(ip, Counter{k, v})
			}
			sort.Sort(ByCount(ip))
			for k, v := range _section {
				section = append(section, Counter{k, v})
			}
			sort.Sort(ByCount(section))
			for k, v := range _status {
				status = append(status, Counter{k, v})
			}
			sort.Sort(ByCount(status))
			for k, v := range _referer {
				referer = append(referer, Counter{k, v})
			}
			sort.Sort(ByCount(referer))
			for k, v := range _useragent {
				useragent = append(useragent, Counter{k, v})
			}
			sort.Sort(ByCount(useragent))

			// put it in a bucket
			bucket := Bucket{ip, timestamp, section, status, bytes, referer, useragent, hits}

			// put the bucket in the time series slice
			ts = append(ts, &bucket)

			// draw stats
			go func() {
				// this should be refactored into TimeSeries methods
				sparkline_width := int(math.Abs(math.Min(float64(len(ts)-1), float64(maxX-38))))
				sparkline_start := len(ts) - sparkline_width
				top_sections := int(math.Abs(math.Min(float64(len(ts[len(ts)-1].section)), float64(maxY-17))))

				averages_message := ""
				averages_message += fmt.Sprint(" avg hits: ", ts.AverageHits(average_by))
				averages_message += fmt.Sprint("   avg bytes: ", ts.AverageBytes(average_by))
				averages_output <- averages_message

				status_message := ""
				status_message += fmt.Sprintln(" total hits:  ", ts.TotalHits())
				status_message += fmt.Sprintln(" total bytes: ", ts.TotalBytes())
				status_output <- status_message

				sparks_message := " "
				hits_history := make([]float64, 0)
				bytes_history := make([]float64, 0)
				for _, b := range ts[sparkline_start:] {
					hits_history = append(hits_history, float64(b.hits))
					bytes_history = append(bytes_history, float64(b.bytes))
				}
				sparks_message += spark.Line(hits_history)
				sparks_message += fmt.Sprint(" ", ts.LastBucket().hits, "\n ")
				sparks_message += spark.Line(bytes_history)
				sparks_message += fmt.Sprint(" ", ts.LastBucket().bytes, "\n ")
				sparks_output <- sparks_message

				message := ""
				for _, v := range ts.LastBucket().section[:top_sections] {
					message += fmt.Sprint(" /", v.name, " : ", strconv.Itoa(v.count), "\n")
				}
				main_output <- message
			}()

			// alert on crossing threshold
			go MonitorHits()
		}
	}
}
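The comment in the example above wonders whether a Go map can be sorted in place. It cannot, but the copy-then-sort step can be collapsed with sort.Slice from the standard library, which also removes the need for a ByCount helper type. A sketch under that assumption (Counter and its name/count fields are taken from the example; descending order by count is assumed, matching the "top sections" usage):

package main

import (
	"fmt"
	"sort"
)

// Counter mirrors the struct used in the example above: a name and its hit count.
type Counter struct {
	name  string
	count int
}

// sortedCounters copies a count map into a slice and sorts it by count, descending.
func sortedCounters(m map[string]int) []Counter {
	out := make([]Counter, 0, len(m))
	for k, v := range m {
		out = append(out, Counter{k, v})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].count > out[j].count })
	return out
}

func main() {
	sections := map[string]int{"api": 7, "static": 3, "admin": 1}
	for _, c := range sortedCounters(sections) {
		fmt.Println("/"+c.name, ":", c.count)
	}
}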