// Run is the block's main loop. Here we listen on the different channels we set up.
func (b *Poisson) Run() {
	var err error
	λ := 1.0
	sampler := NewPoissonSampler(λ)
	for {
		select {
		case ruleI := <-b.inrule:
			// set a parameter of the block
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("couldn't assert rule to map"))
			}
			λ, err = util.ParseFloat(rule, "Rate")
			if err != nil {
				b.Error(err)
				break
			}
			sampler = NewPoissonSampler(λ)
		case <-b.quit:
			// quit the block
			return
		case <-b.inpoll:
			// deal with a poll request
			b.out <- map[string]interface{}{
				"sample": float64(sampler()),
			}
		case c := <-b.queryrule:
			// deal with a query request
			c <- map[string]interface{}{
				"Rate": λ,
			}
		}
	}
}
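
// NewPoissonSampler is called above but not defined in this section. The
// sketch below is one plausible implementation, not necessarily the
// project's own: Knuth's multiplication method, which is simple and
// adequate for small rates. The signature (a closure returning an int) is
// inferred from how the sampler is used in Run; it assumes the standard
// "math" and "math/rand" packages are imported.
func NewPoissonSampler(λ float64) func() int {
	limit := math.Exp(-λ)
	return func() int {
		k := 0
		p := rand.Float64()
		for p > limit {
			k++
			p *= rand.Float64()
		}
		return k
	}
}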
// Run is the block's main loop. Here we listen on the different channels we set up.
func (b *Exponential) Run() {
	var err error
	λ := 1.0
	for {
		select {
		case ruleI := <-b.inrule:
			// set a parameter of the block
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("couldn't assert rule to map"))
			}
			λ, err = util.ParseFloat(rule, "rate")
			if err != nil {
				b.Error(err)
			}
		case <-b.quit:
			// quit the block
			return
		case <-b.inpoll:
			// deal with a poll request
			// rand.ExpFloat64 samples at rate 1; dividing by λ yields the configured rate
			b.out <- map[string]interface{}{
				"sample": rand.ExpFloat64() / λ,
			}
		case c := <-b.queryrule:
			// deal with a query request
			c <- map[string]interface{}{
				"rate": λ,
			}
		}
	}
}
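
// rand.ExpFloat64 draws from the exponential distribution with rate 1 and
// mean 1; per the math/rand docs, dividing by the desired rate rescales the
// sample, which is why the poll case above emits rand.ExpFloat64() / λ.
// The helper below is an illustrative sanity check only; its name and the
// idea of averaging samples are ours, not part of the block.
func approxExponentialMean(λ float64, n int) float64 {
	sum := 0.0
	for i := 0; i < n; i++ {
		sum += rand.ExpFloat64() / λ
	}
	// for large n this should be close to 1/λ
	return sum / float64(n)
}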
// Run is the block's main loop. Here we listen on the different channels we set up.
// This is actually the Zipf-Mandelbrot "law".
// http://en.wikipedia.org/wiki/Zipf%E2%80%93Mandelbrot_law
// The parameter `v` is denoted `q` on Wikipedia.
func (b *Zipf) Run() {
	var err error
	var s, v, imax float64
	s = 2.0
	v = 5.0
	imax = 99.0
	r := rand.New(rand.NewSource(12345))
	sampler := rand.NewZipf(r, s, v, uint64(imax))
	for {
		select {
		case ruleI := <-b.inrule:
			// set a parameter of the block
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("couldn't assert rule to map"))
			}
			s, err = util.ParseFloat(rule, "s")
			if err != nil {
				b.Error(err)
			}
			v, err = util.ParseFloat(rule, "v")
			if err != nil {
				b.Error(err)
			}
			imax, err = util.ParseFloat(rule, "N")
			if err != nil {
				b.Error(err)
			}
			sampler = rand.NewZipf(r, s, v, uint64(imax))
		case <-b.quit:
			// quit the block
			return
		case <-b.inpoll:
			// deal with a poll request
			b.out <- map[string]interface{}{
				"sample": float64(sampler.Uint64()),
			}
		case c := <-b.queryrule:
			// deal with a query request
			c <- map[string]interface{}{
				"s": s,
				"v": v,
				"N": imax,
			}
		}
	}
}
// Run is the block's main loop. Here we listen on the different channels we set up.
func (b *Gaussian) Run() {
	var err error
	mean := 0.0
	stddev := 1.0
	for {
		select {
		case ruleI := <-b.inrule:
			// set a parameter of the block
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("couldn't assert rule to map"))
			}
			mean, err = util.ParseFloat(rule, "Mean")
			if err != nil {
				b.Error(err)
			}
			stddev, err = util.ParseFloat(rule, "StdDev")
			if err != nil {
				b.Error(err)
			}
		case <-b.quit:
			// quit the block
			return
		case <-b.inpoll:
			// deal with a poll request
			b.out <- map[string]interface{}{
				"sample": rand.NormFloat64()*stddev + mean,
			}
		case MsgChan := <-b.queryrule:
			// deal with a query request
			out := map[string]interface{}{
				"Mean":   mean,
				"StdDev": stddev,
			}
			MsgChan <- out
		}
	}
}
// Run is the block's main loop. Here we listen on the different channels we set up.
func (b *PackByCount) Run() {
	var batch []interface{}
	var packSize int
	for {
		select {
		case ruleI := <-b.inrule:
			packSizeTmp, err := util.ParseFloat(ruleI, "MaxCount")
			if err != nil {
				b.Error("error parsing batch size")
				break
			}
			packSize = int(packSizeTmp)
			batch = nil
		case <-b.quit:
			// quit the block
			return
		case m := <-b.in:
			if packSize == 0 {
				break
			}
			// once MaxCount messages have accumulated, emit the full pack and
			// start a new one with the incoming message
			if len(batch) == packSize {
				b.out <- map[string]interface{}{
					"Pack": batch,
				}
				batch = nil
			}
			batch = append(batch, m)
		case <-b.clear:
			batch = nil
		case <-b.flush:
			b.out <- map[string]interface{}{
				"Pack": batch,
			}
			batch = nil
		case r := <-b.queryrule:
			r <- map[string]interface{}{
				"MaxCount": packSize,
			}
		}
	}
}
// Run is the block's main loop. It batches incoming messages and publishes them to an NSQ topic.
func (b *ToNSQMulti) Run() {
	var err error
	var nsqdTCPAddrs string
	var topic string
	var writer *nsq.Producer
	var batch [][]byte
	interval := time.Second
	maxBatch := 100
	conf := nsq.NewConfig()
	dump := time.NewTicker(interval)
	for {
		select {
		case <-dump.C:
			// flush whatever has accumulated since the last tick
			if writer == nil || len(batch) == 0 {
				break
			}
			err = writer.MultiPublish(topic, batch)
			if err != nil {
				b.Error(err.Error())
			}
			batch = nil
		case ruleI := <-b.inrule:
			topic, err = util.ParseString(ruleI, "Topic")
			if err != nil {
				b.Error(err)
				break
			}
			nsqdTCPAddrs, err = util.ParseString(ruleI, "NsqdTCPAddrs")
			if err != nil {
				b.Error(err)
				break
			}
			intervalS, err := util.ParseString(ruleI, "Interval")
			if err != nil {
				b.Error("bad input")
				break
			}
			dur, err := time.ParseDuration(intervalS)
			if err != nil {
				b.Error(err)
				break
			}
			if dur <= 0 {
				b.Error("interval must be positive")
				break
			}
			batchSize, err := util.ParseFloat(ruleI, "MaxBatch")
			if err != nil {
				b.Error("error parsing batch size")
				break
			}
			if writer != nil {
				writer.Stop()
			}
			maxBatch = int(batchSize)
			interval = dur
			dump.Stop()
			dump = time.NewTicker(interval)
			writer, err = nsq.NewProducer(nsqdTCPAddrs, conf)
			if err != nil {
				b.Error(err)
				break
			}
		case msg := <-b.in:
			if writer == nil {
				break
			}
			msgByte, err := json.Marshal(msg)
			if err != nil {
				b.Error(err)
				break
			}
			batch = append(batch, msgByte)
			if len(batch) > maxBatch {
				err := writer.MultiPublish(topic, batch)
				if err != nil {
					b.Error(err)
					break
				}
				batch = nil
			}
		case <-b.quit:
			if writer != nil {
				writer.Stop()
			}
			dump.Stop()
			return
		case c := <-b.queryrule:
			c <- map[string]interface{}{
				"Topic":        topic,
				"NsqdTCPAddrs": nsqdTCPAddrs,
				"MaxBatch":     maxBatch,
				"Interval":     interval.String(),
			}
		}
	}
}
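
// Illustrative only: a rule message that would configure ToNSQMulti. The
// field names are taken from the queryrule response above; the values and
// the exampleToNSQMultiRule name are hypothetical, not part of the block.
func exampleToNSQMultiRule() map[string]interface{} {
	return map[string]interface{}{
		"Topic":        "streamtools",    // NSQ topic to publish to
		"NsqdTCPAddrs": "127.0.0.1:4150", // nsqd TCP address, parsed with util.ParseString
		"MaxBatch":     100.0,            // parsed with util.ParseFloat, then truncated to int
		"Interval":     "1s",             // parsed with time.ParseDuration
	}
}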
// Run is the block's main loop. Here we listen on the different channels we set up.
func (b *Timeseries) Run() {
	var err error
	var path string
	var tree *jee.TokenTree
	var data tsData
	var numSamples float64

	// defaults
	numSamples = 1

	for {
		select {
		case ruleI := <-b.inrule:
			// set a parameter of the block
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("could not assert rule to map"))
			}
			path, err = util.ParseString(rule, "Path")
			if err != nil {
				b.Error(err)
				continue
			}
			tree, err = util.BuildTokenTree(path)
			if err != nil {
				b.Error(err)
				continue
			}
			numSamples, err = util.ParseFloat(rule, "NumSamples")
			if err != nil {
				b.Error(err)
				continue
			}
			data = tsData{
				Values: make([]tsDataPoint, int(numSamples)),
			}
		case <-b.quit:
			// quit the block
			return
		case msg := <-b.in:
			if tree == nil {
				continue
			}
			if data.Values == nil {
				continue
			}
			// deal with inbound data
			v, err := jee.Eval(tree, msg)
			if err != nil {
				b.Error(err)
				continue
			}
			var val float64
			switch v := v.(type) {
			case float32:
				val = float64(v)
			case int:
				val = float64(v)
			case float64:
				val = v
			}
			t := float64(time.Now().Unix())
			d := tsDataPoint{
				Timestamp: t,
				Value:     val,
			}
			// keep a fixed-length window: drop the oldest sample, append the newest
			data.Values = append(data.Values[1:], d)
		case MsgChan := <-b.queryrule:
			// deal with a query request
			MsgChan <- map[string]interface{}{
				"Path":       path,
				"NumSamples": numSamples,
			}
		case MsgChan := <-b.querystate:
			out := map[string]interface{}{
				"timeseries": data,
			}
			MsgChan <- out
		case <-b.inpoll:
			outArray := make([]interface{}, len(data.Values))
			for i, d := range data.Values {
				di := map[string]interface{}{
					"timestamp": d.Timestamp,
					"value":     d.Value,
				}
				outArray[i] = di
			}
			out := map[string]interface{}{
				"timeseries": outArray,
			}
			b.out <- out
		}
	}
}
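
// tsData and tsDataPoint are used above but not defined in this section.
// The definitions below are a sketch inferred from how their fields are
// accessed in Run (Values, Timestamp, Value); the actual project types may
// differ, for example in their JSON tags.
type tsDataPoint struct {
	Timestamp float64 `json:"timestamp"`
	Value     float64 `json:"value"`
}

type tsData struct {
	Values []tsDataPoint `json:"values"`
}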
// Run is the block's main loop. It connects to an NSQ topic and emits each message into streamtools.
func (b *FromNSQ) Run() {
	var reader *nsq.Reader
	var topic, channel, lookupdAddr string
	var maxInFlight float64
	var err error
	toOut := make(chan interface{})
	toError := make(chan error)
	for {
		select {
		case msg := <-toOut:
			b.out <- msg
		case err := <-toError:
			b.Error(err)
		case ruleI := <-b.inrule:
			// convert the message to a map of string interfaces,
			// i.e. keys are strings, values are empty interfaces
			rule, ok := ruleI.(map[string]interface{})
			if !ok {
				b.Error(errors.New("couldn't assert rule to map"))
				continue
			}
			topic, err = util.ParseString(rule, "ReadTopic")
			if err != nil {
				b.Error(err)
				continue
			}
			lookupdAddr, err = util.ParseString(rule, "LookupdAddr")
			if err != nil {
				b.Error(err)
				continue
			}
			maxInFlight, err = util.ParseFloat(rule, "MaxInFlight")
			if err != nil {
				b.Error(err)
				continue
			}
			channel, err = util.ParseString(rule, "ReadChannel")
			if err != nil {
				b.Error(err)
				continue
			}
			if reader != nil {
				reader.Stop()
			}
			reader, err = nsq.NewReader(topic, channel)
			if err != nil {
				b.Error(err)
				continue
			}
			reader.SetMaxInFlight(int(maxInFlight))
			h := readWriteHandler{toOut, toError}
			reader.AddHandler(h)
			err = reader.ConnectToLookupd(lookupdAddr)
			if err != nil {
				b.Error(err)
				continue
			}
		case <-b.quit:
			if reader != nil {
				reader.Stop()
			}
			return
		case c := <-b.queryrule:
			c <- map[string]interface{}{
				"ReadTopic":   topic,
				"ReadChannel": channel,
				"LookupdAddr": lookupdAddr,
				"MaxInFlight": maxInFlight,
			}
		}
	}
}
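
// readWriteHandler is used above but not defined in this section. The
// sketch below is an assumption about its shape: it holds the two channels
// created in Run and satisfies go-nsq's handler interface
// (HandleMessage(*nsq.Message) error) by decoding each message body as JSON
// and forwarding the result, or the decode error, back to the block's loop.
// It assumes the "encoding/json" and go-nsq imports already used above.
type readWriteHandler struct {
	toOut   chan interface{}
	toError chan error
}

func (h readWriteHandler) HandleMessage(message *nsq.Message) error {
	var msg interface{}
	if err := json.Unmarshal(message.Body, &msg); err != nil {
		h.toError <- err
		return err
	}
	h.toOut <- msg
	return nil
}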