func doLoadSpans(hcl *htrace.Client, reader io.Reader) int {
	dec := json.NewDecoder(reader)
	spans := make([]*common.Span, 0, 32)
	var err error
	for {
		var span common.Span
		if err = dec.Decode(&span); err != nil {
			if err == io.EOF {
				break
			}
			fmt.Printf("Failed to decode JSON: %s\n", err.Error())
			return EXIT_FAILURE
		}
		spans = append(spans, &span)
	}
	if *verbose {
		fmt.Printf("Writing ")
		prefix := ""
		for i := range spans {
			fmt.Printf("%s%s", prefix, spans[i].ToJson())
			prefix = ", "
		}
		fmt.Printf("\n")
	}
	err = hcl.WriteSpans(&common.WriteSpansReq{
		Spans: spans,
	})
	if err != nil {
		fmt.Println(err.Error())
		return EXIT_FAILURE
	}
	return EXIT_SUCCESS
}
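// Sketch only (not part of the original tool): doLoadSpans streams JSON span
// objects from any io.Reader, so it can be fed from stdin or from a file.
// The exampleLoadSpans name and the loadFile parameter are hypothetical;
// only hcl, doLoadSpans, and EXIT_FAILURE come from the surrounding file, and
// the "os" and "bufio" imports are assumed.
func exampleLoadSpans(hcl *htrace.Client, loadFile string) int {
	if loadFile == "" {
		// With no file argument, read spans from standard input.
		return doLoadSpans(hcl, os.Stdin)
	}
	file, err := os.Open(loadFile)
	if err != nil {
		fmt.Printf("Failed to open %s: %s\n", loadFile, err.Error())
		return EXIT_FAILURE
	}
	defer file.Close()
	// Wrap the file in a bufio.Reader; doLoadSpans only needs an io.Reader.
	return doLoadSpans(hcl, bufio.NewReader(file))
}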
func testIngestedSpansMetricsImpl(t *testing.T, usePacked bool) {
	htraceBld := &MiniHTracedBuilder{Name: "TestIngestedSpansMetrics",
		DataDirs: make([]string, 2),
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	defer ht.Close()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf(), &htrace.TestHooks{
		HrpcDisabled: !usePacked,
	})
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	NUM_TEST_SPANS := 12
	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
	err = hcl.WriteSpans(allSpans)
	if err != nil {
		t.Fatalf("WriteSpans failed: %s\n", err.Error())
	}
	for {
		var stats *common.ServerStats
		stats, err = hcl.GetServerStats()
		if err != nil {
			t.Fatalf("GetServerStats failed: %s\n", err.Error())
		}
		if stats.IngestedSpans == uint64(NUM_TEST_SPANS) {
			break
		}
		time.Sleep(1 * time.Millisecond)
	}
}
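// Sketch only (not in the original test): the polling loop above relies on the
// test framework's overall timeout if the ingested-span metric never reaches
// the expected value.  A bounded wait could be factored out as below; the
// waitForCondition name and the deadline parameter are hypothetical.
func waitForCondition(t *testing.T, deadline time.Duration, cond func() bool) {
	limit := time.Now().Add(deadline)
	for !cond() {
		if time.Now().After(limit) {
			t.Fatalf("timed out after %s waiting for condition\n", deadline)
		}
		time.Sleep(1 * time.Millisecond)
	}
}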
func TestDumpAll(t *testing.T) {
	htraceBld := &MiniHTracedBuilder{Name: "TestDumpAll",
		DataDirs:     make([]string, 2),
		WrittenSpans: common.NewSemaphore(0),
		Cnf: map[string]string{
			conf.HTRACE_LOG_LEVEL: "INFO",
		},
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	defer ht.Close()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	defer hcl.Close()
	NUM_TEST_SPANS := 100
	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
	sort.Sort(allSpans)
	err = hcl.WriteSpans(allSpans)
	if err != nil {
		t.Fatalf("WriteSpans failed: %s\n", err.Error())
	}
	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
	out := make(chan *common.Span, NUM_TEST_SPANS)
	var dumpErr error
	go func() {
		dumpErr = hcl.DumpAll(3, out)
	}()
	var numSpans int
	nextLogTime := time.Now().Add(time.Millisecond * 5)
	for {
		span, channelOpen := <-out
		if !channelOpen {
			break
		}
		common.ExpectSpansEqual(t, allSpans[numSpans], span)
		numSpans++
		if testing.Verbose() {
			now := time.Now()
			if !now.Before(nextLogTime) {
				nextLogTime = now.Add(time.Millisecond * 5)
				fmt.Printf("read back %d span(s)...\n", numSpans)
			}
		}
	}
	if numSpans != len(allSpans) {
		t.Fatalf("expected to read %d spans... but only read %d\n",
			len(allSpans), numSpans)
	}
	if dumpErr != nil {
		t.Fatalf("got dump error %s\n", dumpErr.Error())
	}
}
// Tests that HRPC I/O timeouts work.
func TestHrpcIoTimeout(t *testing.T) {
	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcIoTimeout",
		DataDirs: make([]string, 2),
		Cnf: map[string]string{
			conf.HTRACE_NUM_HRPC_HANDLERS:  fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
			conf.HTRACE_HRPC_IO_TIMEOUT_MS: "1",
		},
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	defer ht.Close()
	var hcl *htrace.Client
	finishClient := make(chan interface{})
	defer func() {
		// Close the finishClient channel, if it hasn't already been closed.
		defer func() { recover() }()
		close(finishClient)
	}()
	testHooks := &htrace.TestHooks{
		HandleWriteRequestBody: func() {
			<-finishClient
		},
	}
	hcl, err = htrace.NewClient(ht.ClientConf(), testHooks)
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	// Create some random trace spans.
	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
	var wg sync.WaitGroup
	wg.Add(TEST_NUM_WRITESPANS)
	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
		go func(i int) {
			defer wg.Done()
			// Ignore the error return because there are internal retries in
			// the client which will make this succeed eventually, usually.
			// Keep in mind that we only block until we have seen
			// TEST_NUM_WRITESPANS I/O errors in the HRPC server-- after that,
			// we let requests through so that the test can exit cleanly.
			hcl.WriteSpans(allSpans[i : i+1])
		}(iter)
	}
	for {
		if ht.Hsv.GetNumIoErrors() >= TEST_NUM_WRITESPANS {
			break
		}
		time.Sleep(1000 * time.Nanosecond)
	}
	close(finishClient)
	wg.Wait()
}
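// Sketch only (not part of the original test): the deferred recover() above is
// the usual way to close a channel that may already have been closed, since a
// second close() panics.  Factored into a hypothetical helper it looks like
// this; closeOnce is not a name used elsewhere in this package.
func closeOnce(ch chan interface{}) {
	// recover() swallows the "close of closed channel" panic, making the
	// second and later calls no-ops.
	defer func() { recover() }()
	close(ch)
}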
// Tests that HRPC limits the number of simultaneous connections being processed.
func TestHrpcAdmissionsControl(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(TEST_NUM_WRITESPANS)
	var numConcurrentHrpcCalls int32
	testHooks := &hrpcTestHooks{
		HandleAdmission: func() {
			defer wg.Done()
			n := atomic.AddInt32(&numConcurrentHrpcCalls, 1)
			if n > TEST_NUM_HRPC_HANDLERS {
				t.Fatalf("The number of concurrent HRPC calls went above "+
					"%d: it's at %d\n", TEST_NUM_HRPC_HANDLERS, n)
			}
			time.Sleep(1 * time.Millisecond)
			n = atomic.AddInt32(&numConcurrentHrpcCalls, -1)
			if n >= TEST_NUM_HRPC_HANDLERS {
				t.Fatalf("The number of concurrent HRPC calls went above "+
					"%d: it was at %d\n", TEST_NUM_HRPC_HANDLERS, n+1)
			}
		},
	}
	htraceBld := &MiniHTracedBuilder{Name: "TestHrpcAdmissionsControl",
		DataDirs: make([]string, 2),
		Cnf: map[string]string{
			conf.HTRACE_NUM_HRPC_HANDLERS: fmt.Sprintf("%d", TEST_NUM_HRPC_HANDLERS),
		},
		WrittenSpans:  common.NewSemaphore(0),
		HrpcTestHooks: testHooks,
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	defer ht.Close()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	// Create some random trace spans.
	allSpans := createRandomTestSpans(TEST_NUM_WRITESPANS)
	for iter := 0; iter < TEST_NUM_WRITESPANS; iter++ {
		go func(i int) {
			err = hcl.WriteSpans(allSpans[i : i+1])
			if err != nil {
				t.Fatalf("WriteSpans failed: %s\n", err.Error())
			}
		}(iter)
	}
	wg.Wait()
	ht.Store.WrittenSpans.Waits(int64(TEST_NUM_WRITESPANS))
}
func TestReloadDataStore(t *testing.T) {
	htraceBld := &MiniHTracedBuilder{Name: "TestReloadDataStore",
		Cnf: map[string]string{
			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "30000",
		},
		DataDirs:            make([]string, 2),
		KeepDataDirsOnClose: true,
		WrittenSpans:        common.NewSemaphore(0),
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	dataDirs := make([]string, len(ht.DataDirs))
	copy(dataDirs, ht.DataDirs)
	defer func() {
		if ht != nil {
			ht.Close()
		}
		for i := range dataDirs {
			os.RemoveAll(dataDirs[i])
		}
	}()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	hcnf := ht.Cnf.Clone()
	// Create some random trace spans.
	NUM_TEST_SPANS := 5
	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
	err = hcl.WriteSpans(allSpans)
	if err != nil {
		t.Fatalf("WriteSpans failed: %s\n", err.Error())
	}
	ht.Store.WrittenSpans.Waits(int64(NUM_TEST_SPANS))
	// Look up the spans we wrote.
	var span *common.Span
	for i := 0; i < NUM_TEST_SPANS; i++ {
		span, err = hcl.FindSpan(allSpans[i].Id)
		if err != nil {
			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
		}
		common.ExpectSpansEqual(t, allSpans[i], span)
	}
	hcl.Close()
	ht.Close()
	ht = nil

	// Verify that we can reload the datastore, even if we configure the data
	// directories in a different order.
	verifySuccessfulLoad(t, allSpans, []string{dataDirs[1], dataDirs[0]})

	// If we try to reload the datastore with only one directory, it won't work
	// (we need both).
	verifyFailedLoad(t, []string{dataDirs[1]},
		"The TotalShards field of all shards is 2, but we have 1 shards.")

	// Test that we give an intelligent error message when 0 directories are
	// configured.
	verifyFailedLoad(t, []string{}, "No shard directories found.")

	// Can't specify the same directory more than once... will get "lock
	// already held by process"
	verifyFailedLoad(t, []string{dataDirs[0], dataDirs[1], dataDirs[1]},
		" already held by process.")

	// Open the datastore and modify it to have the wrong DaemonId
	dld := NewDataStoreLoader(hcnf)
	defer func() {
		if dld != nil {
			dld.Close()
			dld = nil
		}
	}()
	dld.LoadShards()
	sinfo, err := dld.shards[0].readShardInfo()
	if err != nil {
		t.Fatalf("error reading shard info for shard %s: %s\n",
			dld.shards[0].path, err.Error())
	}
	newDaemonId := sinfo.DaemonId + 1
	dld.lg.Infof("Read %s from shard %s. Changing daemonId to 0x%016x.\n",
		asJson(sinfo), dld.shards[0].path, newDaemonId)
	sinfo.DaemonId = newDaemonId
	err = dld.shards[0].writeShardInfo(sinfo)
	if err != nil {
		t.Fatalf("error writing shard info for shard %s: %s\n",
			dld.shards[0].path, err.Error())
	}
	dld.Close()
	dld = nil
	verifyFailedLoad(t, dataDirs, "DaemonId mismatch.")

	// Open the datastore and modify it to have the wrong TotalShards
	dld = NewDataStoreLoader(hcnf)
	dld.LoadShards()
	sinfo, err = dld.shards[0].readShardInfo()
	if err != nil {
		t.Fatalf("error reading shard info for shard %s: %s\n",
			dld.shards[0].path, err.Error())
	}
	newDaemonId = sinfo.DaemonId - 1
	dld.lg.Infof("Read %s from shard %s. Changing daemonId to 0x%016x, "+
		"TotalShards to 3.\n", asJson(sinfo), dld.shards[0].path, newDaemonId)
	sinfo.DaemonId = newDaemonId
	sinfo.TotalShards = 3
	err = dld.shards[0].writeShardInfo(sinfo)
	if err != nil {
		t.Fatalf("error writing shard info for shard %s: %s\n",
			dld.shards[0].path, err.Error())
	}
	dld.Close()
	dld = nil
	verifyFailedLoad(t, dataDirs, "TotalShards mismatch.")

	// Open the datastore and modify it to have the wrong LayoutVersion
	dld = NewDataStoreLoader(hcnf)
	dld.LoadShards()
	for shardIdx := range dld.shards {
		sinfo, err = dld.shards[shardIdx].readShardInfo()
		if err != nil {
			t.Fatalf("error reading shard info for shard %s: %s\n",
				dld.shards[shardIdx].path, err.Error())
		}
		dld.lg.Infof("Read %s from shard %s. Changing TotalShards to 2, "+
			"LayoutVersion to 2\n", asJson(sinfo), dld.shards[shardIdx].path)
		sinfo.TotalShards = 2
		sinfo.LayoutVersion = 2
		err = dld.shards[shardIdx].writeShardInfo(sinfo)
		if err != nil {
			t.Fatalf("error writing shard info for shard %s: %s\n",
				dld.shards[shardIdx].path, err.Error())
		}
	}
	dld.Close()
	dld = nil
	verifyFailedLoad(t, dataDirs, "The layout version of all shards is 2, "+
		"but we only support")

	// It should work with data.store.clear set.
	htraceBld = &MiniHTracedBuilder{
		Name:                "TestReloadDataStore#clear",
		DataDirs:            dataDirs,
		KeepDataDirsOnClose: true,
		Cnf:                 map[string]string{conf.HTRACE_DATA_STORE_CLEAR: "true"},
	}
	ht, err = htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
}
func TestReloadDataStore(t *testing.T) {
	htraceBld := &MiniHTracedBuilder{Name: "TestReloadDataStore",
		DataDirs: make([]string, 2), KeepDataDirsOnClose: true}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	dataDirs := make([]string, len(ht.DataDirs))
	copy(dataDirs, ht.DataDirs)
	defer func() {
		if ht != nil {
			ht.Close()
		}
		for i := range dataDirs {
			os.RemoveAll(dataDirs[i])
		}
	}()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf())
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	// Create some random trace spans.
	NUM_TEST_SPANS := 5
	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
	err = hcl.WriteSpans(&common.WriteSpansReq{
		Spans: allSpans,
	})
	if err != nil {
		t.Fatalf("WriteSpans failed: %s\n", err.Error())
	}
	// Look up the spans we wrote.
	var span *common.Span
	for i := 0; i < NUM_TEST_SPANS; i++ {
		span, err = hcl.FindSpan(allSpans[i].Id)
		if err != nil {
			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
		}
		common.ExpectSpansEqual(t, allSpans[i], span)
	}
	ht.Close()
	ht = nil
	htraceBld = &MiniHTracedBuilder{Name: "TestReloadDataStore2",
		DataDirs: dataDirs, KeepDataDirsOnClose: true}
	ht, err = htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to re-create datastore: %s", err.Error())
	}
	hcl, err = htrace.NewClient(ht.ClientConf())
	if err != nil {
		t.Fatalf("failed to re-create client: %s", err.Error())
	}
	// Look up the spans we wrote earlier.
	for i := 0; i < NUM_TEST_SPANS; i++ {
		span, err = hcl.FindSpan(allSpans[i].Id)
		if err != nil {
			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
		}
		common.ExpectSpansEqual(t, allSpans[i], span)
	}
	// Set an old datastore version number.
	for i := range ht.Store.shards {
		shard := ht.Store.shards[i]
		writeDataStoreVersion(ht.Store, shard.ldb, CURRENT_LAYOUT_VERSION-1)
	}
	ht.Close()
	ht = nil
	htraceBld = &MiniHTracedBuilder{Name: "TestReloadDataStore3",
		DataDirs: dataDirs, KeepDataDirsOnClose: true}
	ht, err = htraceBld.Build()
	if err == nil {
		t.Fatalf("expected the datastore to fail to load after setting an " +
			"incorrect version.\n")
	}
	if !strings.Contains(err.Error(), "Invalid layout version") {
		t.Fatal(`expected the loading error to contain "invalid layout version"` +
			"\n")
	}
	// It should work with data.store.clear set.
	htraceBld = &MiniHTracedBuilder{Name: "TestReloadDataStore4",
		DataDirs: dataDirs, KeepDataDirsOnClose: true,
		Cnf: map[string]string{conf.HTRACE_DATA_STORE_CLEAR: "true"}}
	ht, err = htraceBld.Build()
	if err != nil {
		t.Fatalf("expected the datastore loading to succeed after setting an "+
			"incorrect version. But it failed with error %s\n", err.Error())
	}
}
func TestClientOperations(t *testing.T) {
	htraceBld := &MiniHTracedBuilder{Name: "TestClientOperations",
		DataDirs: make([]string, 2)}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create datastore: %s", err.Error())
	}
	defer ht.Close()
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf())
	if err != nil {
		t.Fatalf("failed to create client: %s", err.Error())
	}
	// Create some random trace spans.
	NUM_TEST_SPANS := 30
	allSpans := createRandomTestSpans(NUM_TEST_SPANS)
	// Write half of the spans to htraced via the client.
	err = hcl.WriteSpans(&common.WriteSpansReq{
		Spans: allSpans[0 : NUM_TEST_SPANS/2],
	})
	if err != nil {
		t.Fatalf("WriteSpans(0:%d) failed: %s\n", NUM_TEST_SPANS/2, err.Error())
	}
	// Look up the first half of the spans. They should be found.
	var span *common.Span
	for i := 0; i < NUM_TEST_SPANS/2; i++ {
		span, err = hcl.FindSpan(allSpans[i].Id)
		if err != nil {
			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
		}
		common.ExpectSpansEqual(t, allSpans[i], span)
	}
	// Look up the second half of the spans. They should not be found.
	for i := NUM_TEST_SPANS / 2; i < NUM_TEST_SPANS; i++ {
		span, err = hcl.FindSpan(allSpans[i].Id)
		if err != nil {
			t.Fatalf("FindSpan(%d) failed: %s\n", i, err.Error())
		}
		if span != nil {
			t.Fatalf("Unexpectedly found a span we never wrote to "+
				"the server: FindSpan(%d) succeeded\n", i)
		}
	}
	// Test FindChildren
	childSpan := allSpans[1]
	parentId := childSpan.Parents[0]
	var children []common.SpanId
	children, err = hcl.FindChildren(parentId, 1)
	if err != nil {
		t.Fatalf("FindChildren(%s) failed: %s\n", parentId, err.Error())
	}
	if len(children) != 1 {
		t.Fatalf("FindChildren(%s) returned an invalid number of "+
			"children: expected %d, got %d\n", parentId, 1, len(children))
	}
	if !children[0].Equal(childSpan.Id) {
		t.Fatalf("FindChildren(%s) returned an invalid child id: expected %s, "+
			"got %s\n", parentId, childSpan.Id, children[0])
	}
	// Test FindChildren on a span that has no children
	childlessSpan := allSpans[NUM_TEST_SPANS/2]
	children, err = hcl.FindChildren(childlessSpan.Id, 10)
	if err != nil {
		t.Fatalf("FindChildren(%s) failed: %s\n", childlessSpan.Id, err.Error())
	}
	if len(children) != 0 {
		t.Fatalf("FindChildren(%s) returned an invalid number of "+
			"children: expected %d, got %d\n", childlessSpan.Id, 0, len(children))
	}
	// Test Query
	var query common.Query
	query = common.Query{Lim: 10}
	spans, err := hcl.Query(&query)
	if err != nil {
		t.Fatalf("Query({lim: %d}) failed: %s\n", 10, err.Error())
	}
	if len(spans) != 10 {
		t.Fatalf("Query({lim: %d}) returned an invalid number of "+
			"spans: expected %d, got %d\n", 10, 10, len(spans))
	}
}
func doWriteSpans(name string, N int, maxSpansPerRpc uint32, b *testing.B) {
	htraceBld := &MiniHTracedBuilder{Name: "doWriteSpans",
		Cnf: map[string]string{
			conf.HTRACE_LOG_LEVEL:         "INFO",
			conf.HTRACE_NUM_HRPC_HANDLERS: "20",
		},
		WrittenSpans: common.NewSemaphore(int64(1 - N)),
	}
	ht, err := htraceBld.Build()
	if err != nil {
		panic(err)
	}
	defer ht.Close()
	rnd := rand.New(rand.NewSource(1))
	allSpans := make([]*common.Span, N)
	for n := 0; n < N; n++ {
		allSpans[n] = test.NewRandomSpan(rnd, allSpans[0:n])
	}
	// Determine how many calls to WriteSpans we should make. Each writeSpans
	// message should be small enough so that it doesn't exceed the max RPC
	// body length limit. TODO: a production-quality golang client would do
	// this internally rather than needing us to do it here in the unit test.
	bodyLen := (4 * common.MAX_HRPC_BODY_LENGTH) / 5
	reqs := make([][]*common.Span, 0, 4)
	curReq := -1
	curReqLen := bodyLen
	var curReqSpans uint32
	mh := new(codec.MsgpackHandle)
	mh.WriteExt = true
	var mbuf [8192]byte
	buf := mbuf[:0]
	enc := codec.NewEncoderBytes(&buf, mh)
	for n := 0; n < N; n++ {
		span := allSpans[n]
		if (curReqSpans >= maxSpansPerRpc) ||
			(curReqLen >= bodyLen) {
			reqs = append(reqs, make([]*common.Span, 0, 16))
			curReqLen = 0
			curReq++
			curReqSpans = 0
		}
		buf = mbuf[:0]
		enc.ResetBytes(&buf)
		err := enc.Encode(span)
		if err != nil {
			panic(fmt.Sprintf("Error encoding span %s: %s\n",
				span.String(), err.Error()))
		}
		bufLen := len(buf)
		if bufLen > (bodyLen / 5) {
			panic(fmt.Sprintf("Span too long at %d bytes\n", bufLen))
		}
		curReqLen += bufLen
		reqs[curReq] = append(reqs[curReq], span)
		curReqSpans++
	}
	ht.Store.lg.Infof("num spans: %d. num WriteSpansReq calls: %d\n", N, len(reqs))
	var hcl *htrace.Client
	hcl, err = htrace.NewClient(ht.ClientConf(), nil)
	if err != nil {
		panic(fmt.Sprintf("failed to create client: %s", err.Error()))
	}
	defer hcl.Close()
	// Reset the timer to avoid including the time required to create new
	// random spans in the benchmark total.
	if b != nil {
		b.ResetTimer()
	}
	// Write many random spans.
	for reqIdx := range reqs {
		go func(i int) {
			err = hcl.WriteSpans(reqs[i])
			if err != nil {
				panic(fmt.Sprintf("failed to send WriteSpans request %d: %s",
					i, err.Error()))
			}
		}(reqIdx)
	}
	// Wait for all the spans to be written.
	ht.Store.WrittenSpans.Wait()
}
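// Sketch only: doWriteSpans takes an optional *testing.B so it can back both
// plain tests and Go benchmarks. A wrapper along these lines would drive it
// from `go test -bench`; the BenchmarkWriteSpans name and the use of
// math.MaxUint32 (effectively no per-RPC span cap, so only the body-length
// check chunks the requests) are illustrative assumptions, and the "math"
// import would be needed.
func BenchmarkWriteSpans(b *testing.B) {
	doWriteSpans("BenchmarkWriteSpans", b.N, math.MaxUint32, b)
}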