func Test_dispatcherIncomingDPMessages(t *testing.T) {
	defer func() {
		// restore default output
		log.SetOutput(os.Stderr)
	}()

	fl := &fakeLogger{}
	log.SetOutput(fl)

	rcv := make(chan *cluster.Msg)
	dpCh := make(chan *IncomingDP)

	count := 0
	go func() {
		for {
			if _, ok := <-dpCh; !ok {
				break
			}
			count++
		}
	}()

	go dispatcherIncomingDPMessages(rcv, dpCh)

	// Sending a bogus message should not cause anything to be written to dpCh
	rcv <- &cluster.Msg{}
	rcv <- &cluster.Msg{} // second send ensures the loop has gone full circle
	if count > 0 {
		t.Errorf("Malformed messages should not cause data points, count: %d", count)
	}
	if !strings.Contains(string(fl.last), "decoding FAILED") {
		t.Errorf("Malformed messages should log 'decoding FAILED'")
	}

	// now we need a real message
	dp := &IncomingDP{Name: "foo", TimeStamp: time.Unix(1000, 0), Value: 123}
	m, _ := cluster.NewMsg(&cluster.Node{}, dp)
	rcv <- m
	rcv <- m
	if count < 1 {
		t.Errorf("At least 1 data point should have been sent to dpCh")
	}

	dp.Hops = 1000 // exceed maxhops (which in fakeCluster is 0?)
	m, _ = cluster.NewMsg(&cluster.Node{}, dp)
	rcv <- m // "clear" the loop
	count = 0
	rcv <- m
	rcv <- m
	if count > 0 {
		t.Errorf("Hops exceeded should not cause data points, count: %d", count)
	}
	if !strings.Contains(string(fl.last), "max hops") {
		t.Errorf("Hops exceeded messages should log 'max hops'")
	}

	// Closing the dpCh should cause the recover() to happen
	// The test here is that it doesn't panic
	close(dpCh)
	dp.Hops = 0
	m, _ = cluster.NewMsg(&cluster.Node{}, dp)
	rcv <- m

	// Closing the channel exits (not sure how to really test for that)
	go dispatcherIncomingDPMessages(rcv, dpCh)
	close(rcv)
}
func Test_aggworkerIncomingAggCmds(t *testing.T) {
	fl := &fakeLogger{}
	log.SetOutput(fl)
	defer func() {
		log.SetOutput(os.Stderr) // restore default output
	}()

	ident := "FOO"
	rcv := make(chan *cluster.Msg)
	aggCh := make(chan *aggregator.Command)

	count := 0
	go func() {
		for {
			if _, ok := <-aggCh; !ok {
				break
			}
			count++
		}
	}()

	go aggWorkerIncomingAggCmds(ident, rcv, aggCh)

	// Sending a bogus message should not cause anything to be written to aggCh
	rcv <- &cluster.Msg{}
	rcv <- &cluster.Msg{}
	if count > 0 {
		t.Errorf("aggworkerIncomingAggCmds: Malformed messages should not cause commands, count: %d", count)
	}
	if !strings.Contains(string(fl.last), "decoding FAILED") {
		t.Errorf("aggworkerIncomingAggCmds: Malformed messages should log 'decoding FAILED'")
	}

	// now we need a real message
	cmd := aggregator.NewCommand(aggregator.CmdAdd, "foo", 123)
	m, _ := cluster.NewMsg(&cluster.Node{}, cmd)
	rcv <- m
	rcv <- m
	if count < 1 {
		t.Errorf("aggworkerIncomingAggCmds: At least 1 command should have been sent to aggCh")
	}

	cmd.Hops = 1000 // exceed maxhops
	m, _ = cluster.NewMsg(&cluster.Node{}, cmd)
	rcv <- m // "clear" the loop
	count = 0
	rcv <- m
	rcv <- m
	if count > 0 {
		t.Errorf("aggworkerIncomingAggCmds: Hops exceeded should not cause commands, count: %d", count)
	}
	if !strings.Contains(string(fl.last), "max hops") {
		t.Errorf("aggworkerIncomingAggCmds: Hops exceeded messages should log 'max hops'")
	}

	// Closing the aggCh should cause the recover() to happen
	// The test here is that it doesn't panic
	close(aggCh)
	cmd.Hops = 0
	m, _ = cluster.NewMsg(&cluster.Node{}, cmd)
	rcv <- m

	// Closing the channel exits (not sure how to really test for that)
	go aggWorkerIncomingAggCmds(ident, rcv, aggCh)
	close(rcv)
}
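// Note: the fakeLogger helper used by both tests above is not shown in this
// excerpt. Based on how the tests use it (passed to log.SetOutput and read
// back via fl.last), a minimal sketch could look like the following; the
// actual helper in the repository may differ.
type fakeLogger struct {
	last []byte // most recent line written through the log package
}

// Write satisfies io.Writer so a *fakeLogger can be handed to log.SetOutput.
// It copies the buffer because the log package may reuse it between calls.
func (fl *fakeLogger) Write(p []byte) (n int, err error) {
	fl.last = append([]byte(nil), p...)
	return len(p), nil
}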
		maxHops := 2
		if dp.Hops > maxHops {
			log.Printf("dispatcher: dropping data point, max hops (%d) reached", maxHops)
			continue
		}

		dpCh <- &dp // See recover above
	}
}

var dispatcherForwardDPToNode = func(dp *IncomingDP, node *cluster.Node, snd chan *cluster.Msg) error {
	if dp.Hops == 0 { // we do not forward more than once
		if node.Ready() {
			dp.Hops++
			msg, _ := cluster.NewMsg(node, dp) // can't possibly error
			snd <- msg
		} else {
			return fmt.Errorf("dispatcherForwardDPToNode: Node is not ready")
		}
	}
	return nil
}

var dispatcherProcessOrForward = func(rds *receiverDs, clstr clusterer, workerChs workerChannels, dp *IncomingDP, snd chan *cluster.Msg) (forwarded int) {
	for _, node := range clstr.NodesForDistDatum(rds) {
		if node.Name() == clstr.LocalNode().Name() {
			workerChs.queue(dp, rds)
		} else {
			if err := dispatcherForwardDPToNode(dp, node, snd); err != nil {
				log.Printf("dispatcher: Error forwarding a data point: %v", err)