func TestHashJoinNilKeyFunc(t *testing.T) {
	keyFunc := func(val interface{}) interface{} {
		return val
	}

	pairs, left, right := HashJoin(JoinList{10, 11, 12}, JoinList{10, 11, 12}, nil, keyFunc)
	if len(left) > 0 {
		t.Errorf("Unexpected lefts: %s", left)
	}
	if len(right) > 0 {
		t.Errorf("Unexpected rights: %s", right)
	}
	if !eq(pairs, []Pair{{10, 10}, {11, 11}, {12, 12}}) {
		t.Error(spew.Sprintf("Unexpected pairs: %s", pairs))
	}

	pairs, left, right = HashJoin(JoinList{10, 11, 12}, JoinList{13, 11, 2}, keyFunc, nil)
	if len(left) != 2 {
		t.Error(spew.Sprintf("Unexpected left: %s", left))
	}
	if len(right) != 2 {
		t.Error(spew.Sprintf("Unexpected right: %s", right))
	}
	if !eq(pairs, []Pair{{11, 11}}) {
		t.Error(spew.Sprintf("Unexpected pairs: %s", pairs))
	}
}
func TestJoin(t *testing.T) {
	score := func(left, right interface{}) int {
		return left.(int) - right.(int)
	}

	pairs, left, right := Join([]int{10, 11, 12}, []int{10, 11, 12}, score)
	if len(left) > 0 {
		t.Errorf("Unexpected lefts: %s", left)
	}
	if len(right) > 0 {
		t.Errorf("Unexpected rights: %s", right)
	}
	if !eq(pairs, []Pair{{10, 10}, {11, 11}, {12, 12}}) {
		t.Error(spew.Sprintf("Unexpected pairs: %s", pairs))
	}

	pairs, left, right = Join([]int{10, 11, 12}, []int{13, 1, 2}, score)
	if !eq(left, []interface{}{12}) {
		t.Error(spew.Sprintf("Unexpected left: %s", left))
	}
	if !eq(right, []interface{}{13}) {
		t.Error(spew.Sprintf("Unexpected right: %s", right))
	}
	if !eq(pairs, []Pair{{10, 2}, {11, 1}}) {
		t.Error(spew.Sprintf("Unexpected pairs: %s", pairs))
	}
}
func valueTest(actual, expected interface{}) {
	if expected == nil {
		Convey(`... should create no value.`, func() {
			So(actual, ShouldBeNil)
		})
	} else {
		Convey(`... should create the right value.`, func() {
			So(fmt.Sprintf("%T", actual), ShouldEqual, fmt.Sprintf("%T", expected))
			So(spew.Sprintf("%v", actual), ShouldEqual, spew.Sprintf("%v", expected))
		})
	}
}
func TestGetDirectory(t *testing.T) {
	store := NewMock()

	paths := []string{
		"/a/b",
		"/a/c/d/e",
	}
	for _, p := range paths {
		if err := store.Set(p, p); err != nil {
			t.Fatal(err)
		}
	}

	dir, err := getDirectory(store, "/")
	if err != nil {
		t.Fatal(err)
	}
	exp := make(directory)
	exp["a"] = map[string]string{"b": "/a/b", "c": ""}
	if !eq(dir, exp) {
		t.Error(spew.Sprintf("\nGet Directory:\n%s\n\nExpected:\n%s\n", dir, exp))
	}

	dir, err = getDirectory(store, "/a")
	if err != nil {
		t.Fatal(err)
	}
	exp = make(directory)
	exp["b"] = map[string]string{}
	exp["c"] = map[string]string{"d": ""}
	if !eq(dir, exp) {
		t.Error(spew.Sprintf("\nGet Directory:\n%s\n\nExpected:\n%s\n", dir, exp))
	}

	dir, err = getDirectory(store, "/a/c")
	if err != nil {
		t.Fatal(err)
	}
	exp = make(directory)
	exp["d"] = map[string]string{"e": "/a/c/d/e"}
	if !eq(dir, exp) {
		t.Error(spew.Sprintf("\nGetDirectory:\n%s\n\nExpected:\n%s\n", dir, exp))
	}

	if val, err := getDirectory(store, "/junk"); err == nil {
		t.Error(spew.Sprintf("Expected error, got %s", val))
	}
}
func (v *ValidationError) Error() string {
	var s string
	switch v.Type {
	case ValidationErrorTypeRequired:
		s = spew.Sprintf("%s: %s", v.Field, v.Type)
	default:
		s = spew.Sprintf("%s: %s '%+v'", v.Field, v.Type, v.BadValue)
	}
	if len(v.Detail) != 0 {
		s += fmt.Sprintf(": %s", v.Detail)
	}
	return s
}
// ErrorBody returns the error message without the field name. This is useful
// for building nice-looking higher-level error reporting.
func (v *Error) ErrorBody() string {
	var s string
	switch v.Type {
	case ErrorTypeRequired, ErrorTypeTooLong, ErrorTypeInternal:
		s = spew.Sprintf("%s", v.Type)
	default:
		s = spew.Sprintf("%s '%+v'", v.Type, v.BadValue)
	}
	if len(v.Detail) != 0 {
		s += fmt.Sprintf(", Details: %s", v.Detail)
	}
	return s
}
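// A minimal usage sketch (an addition, not part of the original package): the
// point of ErrorBody over Error() is that a caller can prefix its own context
// instead of repeating the field name. It assumes the Error type also carries
// a Field string (as the doc comment implies) and that an ErrorTypeInvalid
// constant exists; the concrete values are illustrative only. Assumed import: "fmt".
func exampleErrorBodyUsage() string {
	err := &Error{
		Type:     ErrorTypeInvalid, // assumed constant, for illustration only
		Field:    "spec.replicas",
		BadValue: -1,
		Detail:   "must be non-negative",
	}
	// Yields something like: spec.replicas is invalid: <type> '-1', Details: must be non-negative
	return fmt.Sprintf("%s is invalid: %s", err.Field, err.ErrorBody())
}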
func TestSyncDB(t *testing.T) {
	spew := spew.NewDefaultConfig()
	spew.MaxDepth = 2

	checkSyncDB := func(cloudMachines []provider.Machine, databaseMachines []db.Machine,
		expectedBoot, expectedStop []provider.Machine) {
		_, bootResult, stopResult := syncDB(cloudMachines, databaseMachines)
		if !emptySlices(bootResult, expectedBoot) && !reflect.DeepEqual(bootResult, expectedBoot) {
			t.Error(spew.Sprintf(
				"booted wrong machines. Expected %v, got %v.",
				expectedBoot, bootResult))
		}
		if !emptySlices(stopResult, expectedStop) && !reflect.DeepEqual(stopResult, expectedStop) {
			t.Error(spew.Sprintf(
				"stopped wrong machines. Expected %v, got %v.",
				expectedStop, stopResult))
		}
	}

	var noMachines []provider.Machine
	dbNoSize := db.Machine{Provider: FakeAmazon}
	cmNoSize := provider.Machine{Provider: FakeAmazon}
	dbLarge := db.Machine{Provider: FakeAmazon, Size: "m4.large"}
	cmLarge := provider.Machine{Provider: FakeAmazon, Size: "m4.large"}

	// Test boot with no size
	checkSyncDB(noMachines, []db.Machine{dbNoSize}, []provider.Machine{cmNoSize}, noMachines)

	// Test boot with size
	checkSyncDB(noMachines, []db.Machine{dbLarge}, []provider.Machine{cmLarge}, noMachines)

	// Test mixed boot
	checkSyncDB(noMachines, []db.Machine{dbNoSize, dbLarge},
		[]provider.Machine{cmNoSize, cmLarge}, noMachines)

	// Test partial boot
	checkSyncDB([]provider.Machine{cmNoSize}, []db.Machine{dbNoSize, dbLarge},
		[]provider.Machine{cmLarge}, noMachines)

	// Test stop
	checkSyncDB([]provider.Machine{cmNoSize}, []db.Machine{}, noMachines,
		[]provider.Machine{cmNoSize})

	// Test partial stop
	checkSyncDB([]provider.Machine{cmNoSize, cmLarge}, []db.Machine{}, noMachines,
		[]provider.Machine{cmNoSize, cmLarge})
}
func TestInto(t *testing.T) {
	cache, err := cachei.Open("redis", cachei.DataSource{})
	if err != nil {
		t.Fatal(err)
	}

	for i, tt := range cacheTests {
		cacheMiss := false
		missFunc := func() (interface{}, error) {
			cacheMiss = true
			return tt.value, nil
		}

		verr, cerr := cache.OutSetFn(fmt.Sprintf("__kdarcacheinto_redis_test_%d", i), 1, tt.into, missFunc)
		if verr != nil {
			t.Fatal(verr)
		}
		if cerr != nil {
			t.Fatal(cerr)
		}
		if !cacheMiss {
			t.Fatalf("%d-1. Expected a cache miss, but instead found cache", i)
		}
		if !reflect.DeepEqual(tt.value, reflect.ValueOf(tt.into).Elem().Interface()) {
			t.Fatalf("%d-1. Expected:\n%s,\ngot:\n%s", i,
				spew.Sprintf("%#v", &tt.value), spew.Sprintf("%#v", tt.into))
		}

		cacheMiss = false
		verr, cerr = cache.OutSetFn(fmt.Sprintf("__kdarcacheinto_redis_test_%d", i), 1, tt.into, missFunc)
		if verr != nil {
			t.Fatal(verr)
		}
		if cerr != nil {
			t.Fatal(cerr)
		}
		if cacheMiss {
			t.Fatalf("%d-2. Expected cache, but instead got a cache miss.", i)
		}
		if !reflect.DeepEqual(tt.value, reflect.ValueOf(tt.into).Elem().Interface()) {
			t.Fatalf("%d-2. Expected: %s, got: %s", i,
				spew.Sprintf("%#v", tt.value), spew.Sprintf("%#v", tt.into))
		}
	}
}
func testReadContainerTransact(t *testing.T, view db.Database) {
	minion := view.InsertMinion()
	minion.Role = db.Worker
	minion.Self = true
	view.Commit(minion)

	for _, id := range []string{"a", "b"} {
		container := view.InsertContainer()
		container.DockerID = id
		view.Commit(container)
	}

	container := view.InsertContainer()
	container.DockerID = "c"
	container.IP = "junk"
	view.Commit(container)

	dir := directory(map[string]map[string]string{
		"a": {"IP": "1.0.0.0", "Labels": `["e"]`},
		"b": {"IP": "2.0.0.0", "Labels": `["e", "f"]`},
	})

	readContainerTransact(view, dir)

	ipMap := map[string]string{}
	labelMap := map[string][]string{}
	for _, c := range view.SelectFromContainer(nil) {
		ipMap[c.DockerID] = c.IP
		labelMap[c.DockerID] = c.Labels
	}

	expIPMap := map[string]string{
		"a": "1.0.0.0",
		"b": "2.0.0.0",
		"c": "",
	}
	if !eq(ipMap, expIPMap) {
		t.Error(spew.Sprintf("Found %s, Expected: %s", ipMap, expIPMap))
	}

	expLabelMap := map[string][]string{
		"a": {"e"},
		"b": {"e", "f"},
		"c": nil,
	}
	if !eq(labelMap, expLabelMap) {
		t.Error(spew.Sprintf("Found %s, Expected: %s", labelMap, expLabelMap))
	}
}
func TestGet(t *testing.T) {
	cache, err := cachei.Open("redis", cachei.DataSource{})
	if err != nil {
		t.Fatal(err)
	}

	for i, tt := range cacheTests {
		cacheMiss := false
		missFunc := func() (interface{}, error) {
			cacheMiss = true
			return tt.value, nil
		}

		v1, verr, cerr := cache.GetSetFn(fmt.Sprintf("__kdarcacheget_redis_test_%d", i), 1, missFunc)
		if verr != nil {
			t.Fatal(verr)
		}
		if cerr != nil {
			t.Fatal(cerr)
		}
		if !cacheMiss {
			t.Fatalf("%d-1. Expected a cache miss, but instead found cache", i)
		}
		if !reflect.DeepEqual(tt.value, v1) {
			t.Fatalf("%d-1. Expected: %s, got: %s", i,
				spew.Sprintf("%#v", tt.value), spew.Sprintf("%#v", v1))
		}

		cacheMiss = false
		v2, verr, cerr := cache.GetSetFn(fmt.Sprintf("__kdarcacheget_redis_test_%d", i), 1, missFunc)
		if verr != nil {
			t.Fatal(verr)
		}
		if cerr != nil {
			t.Fatal(cerr)
		}
		if cacheMiss {
			t.Fatalf("%d-2. Expected cache, but instead got a cache miss.", i)
		}
		if !reflect.DeepEqual(v1, v2) {
			t.Fatalf("%d-2. Expected: %s, got: %s", i,
				spew.Sprintf("%#v", v1), spew.Sprintf("%#v", v2))
		}
	}
}
func TestSyncLabels(t *testing.T) {
	store := NewMock()
	store.Mkdir("/test/a")
	store.Mkdir("/test/b")
	store.Mkdir("/test/c")
	dir, _ := getDirectory(store, "/test")

	containers := []db.Container{
		{DockerID: "a", Labels: []string{"d", "c"}},
		{DockerID: "b", Labels: []string{}},
		{DockerID: "c", Labels: nil},
	}

	syncLabels(store, dir, "/test", containers)
	newDir, _ := getDirectory(store, "/test")
	if !eq(dir, newDir) {
		t.Error(spew.Sprintf("syncLabels did not update dir.\n"+
			"Found %s\nExpected %s", dir, newDir))
	}

	expDir := directory(map[string]map[string]string{
		"a": {"Labels": `["c","d"]`},
		"b": {"Labels": "[]"},
		"c": {"Labels": "[]"},
	})
	if !eq(dir, expDir) {
		t.Error(spew.Sprintf("syncLabels Found %s\nExpected %s", dir, expDir))
	}

	containers = []db.Container{
		{DockerID: "a", Labels: []string{"d", "c"}},
	}

	syncLabels(store, dir, "/test", containers)
	newDir, _ = getDirectory(store, "/test")
	if !eq(dir, newDir) {
		t.Error(spew.Sprintf("syncLabels did not update dir.\n"+
			"Found %s\nExpected %s", dir, newDir))
	}

	expDir = directory(map[string]map[string]string{
		"a": {"Labels": `["c","d"]`},
		"b": {"Labels": `[]`},
		"c": {"Labels": `[]`},
	})
	if !eq(dir, expDir) {
		t.Error(spew.Sprintf("syncLabels Found %s\nExpected %s", dir, expDir))
	}
}
func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *api.Volume, podRef *api.ObjectReference) (volume.Builder, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spew.Sprintf("%#v", *spec), err)
	}
	if plugin == nil {
		// Not found but not an error
		return nil, nil
	}
	builder, err := plugin.NewBuilder(spec, podRef)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate volume plugin for %s: %v", spew.Sprintf("%#v", *spec), err)
	}
	glog.V(3).Infof("Used volume plugin %q for %s", plugin.Name(), spew.Sprintf("%#v", *spec))
	return builder, nil
}
func TestKeys(t *testing.T) {
	getGithubKeys = func(username string) ([]string, error) {
		return []string{username}, nil
	}

	checkKeys := func(code, expectedCode string, expected ...string) {
		ctx := parseTest(t, code, expectedCode)
		machineResult := Stitch{"", ctx}.QueryMachines()
		if len(machineResult) == 0 {
			t.Error("no machine found")
			return
		}
		if !reflect.DeepEqual(machineResult[0].SSHKeys, expected) {
			t.Error(spew.Sprintf("test: %s, result: %s, expected: %s",
				code, machineResult[0].SSHKeys, expected))
		}
	}

	code := `(machine (sshkey "key"))`
	checkKeys(code, code, "key")

	code = `(machine (githubKey "user"))`
	checkKeys(code, code, "user")

	code = `(machine (githubKey "user") (sshkey "key"))`
	checkKeys(code, code, "user", "key")
}
func (kl *Qinglet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spew.Sprintf("%#v", *spec), err)
	}
	if plugin == nil {
		// Not found but not an error
		return nil, nil
	}
	builder, err := plugin.NewBuilder(spec, pod, opts, mounter)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate volume plugin for %s: %v", spew.Sprintf("%#v", *spec), err)
	}
	glog.V(3).Infof("Used volume plugin %q for %s", plugin.Name(), spew.Sprintf("%#v", *spec))
	return builder, nil
}
// ES: Yes, it isn't great. Whatever. Go fix something else.
func dump(obj interface{}) string {
	x, err := json.Marshal(obj)
	if err != nil {
		return spew.Sprintf("%+v", obj)
	}
	return string(x)
}
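// A small usage sketch of the dump helper above (an addition, not original
// code): json.Marshal fails on values containing channels or functions, so
// dump falls back to spew's rendering in that case. Assumed import: "fmt";
// the struct type here is purely illustrative.
func exampleDumpFallback() {
	// Marshals cleanly, so dump returns compact JSON: {"answer":42}
	fmt.Println(dump(map[string]int{"answer": 42}))

	// json.Marshal returns an error for channel fields, so dump falls back to spew.
	type withChan struct {
		Name string
		C    chan int
	}
	fmt.Println(dump(withChan{Name: "x"}))
}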
func (m *MessageAggregator) Write(envelope *events.Envelope) {
	// TODO: don't call for every message if throughput becomes a problem
	m.cleanupOrphanedHTTPStart()

	if envelope.EventType == nil {
		m.outputWriter.Write(envelope)
		return
	}

	switch envelope.GetEventType() {
	case events.Envelope_HttpStart:
		m.handleHTTPStart(envelope)
	case events.Envelope_HttpStop:
		startStopMessage := m.handleHTTPStop(envelope)
		if startStopMessage != nil {
			m.outputWriter.Write(startStopMessage)
		}
	case events.Envelope_CounterEvent:
		counterEventMessage := m.handleCounter(envelope)
		m.outputWriter.Write(counterEventMessage)
	default:
		m.incrementCounter(&m.uncategorizedEventCount)
		m.logger.Debugf("passing through message %v", spew.Sprintf("%v", envelope))
		m.outputWriter.Write(envelope)
	}
}
func TestInvalidBST(t *testing.T) {
	tree := Node(1, Leaf(2), Leaf(3))
	if tree.IsBST() {
		t.Error(spew.Sprintf("%v should not be a bst", tree))
	}
}
func (v *ValidationError) Error() string {
	s := spew.Sprintf("%s: %s '%+v'", v.Field, v.Type, v.BadValue)
	if v.Detail != "" {
		s += fmt.Sprintf(": %s", v.Detail)
	}
	return s
}
func TestDeserializeLines(t *testing.T) {
	config.produce.partitioner = "hashCode"

	data := []struct {
		in             string
		literal        bool
		partition      int32
		partitionCount int32
		expected       message
	}{
		{
			in:             "",
			literal:        false,
			partitionCount: 1,
			expected:       newMessage("", "", 0),
		},
		{
			in:             `{"key":"hans","value":"123"}`,
			literal:        false,
			partitionCount: 4,
			expected:       newMessage("hans", "123", hashCodePartition("hans", 4)),
		},
		{
			in:             `{"key":"hans","value":"123","partition":1}`,
			literal:        false,
			partitionCount: 3,
			expected:       newMessage("hans", "123", 1),
		},
		{
			in:             `{"other":"json","values":"avail"}`,
			literal:        true,
			partition:      2,
			partitionCount: 4,
			expected:       newMessage("", `{"other":"json","values":"avail"}`, 2),
		},
		{
			in:             `so lange schon`,
			literal:        false,
			partitionCount: 3,
			expected:       newMessage("", "so lange schon", 0),
		},
	}

	for _, d := range data {
		in := make(chan string, 1)
		out := make(chan message)

		config.produce.literal = d.literal
		config.produce.partition = d.partition

		go deserializeLines(in, out, d.partitionCount)
		in <- d.in

		select {
		case <-time.After(50 * time.Millisecond):
			t.Errorf("did not receive output in time")
		case actual := <-out:
			if !reflect.DeepEqual(d.expected, actual) {
				t.Errorf(spew.Sprintf("\nexpected %#v\nactual %#v", d.expected, actual))
			}
		}
	}
}
func TestValidBST(t *testing.T) {
	for _, tree := range valid {
		if !tree.IsBST() {
			t.Error(spew.Sprintf("%v should be a bst", tree))
		}
	}
}
func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *api.Volume, podUID types.UID) volume.Builder {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		glog.Warningf("Can't use volume plugins for %s: %v", spew.Sprintf("%#v", *spec), err)
		return nil
	}
	if plugin == nil {
		glog.Errorf("No error, but nil volume plugin for %s", spew.Sprintf("%#v", *spec))
		return nil
	}
	builder, err := plugin.NewBuilder(spec, podUID)
	if err != nil {
		glog.Warningf("Error instantiating volume plugin for %s: %v", spew.Sprintf("%#v", *spec), err)
		return nil
	}
	glog.V(3).Infof("Used volume plugin %q for %s", plugin.Name(), spew.Sprintf("%#v", *spec))
	return builder
}
func asInfo(srv string, prt uint) (*map[string]nsStatType, error) {
	conn, err := net.DialTimeout("tcp", spew.Sprintf("%s:%d", srv, prt), time.Second)
	if err != nil {
		return nil, err
	}
	defer func() { conn.Close() }()

	err = conn.SetDeadline(time.Now().Add(time.Second))
	if err != nil {
		return nil, err
	}

	_, err = conn.Write([]byte("namespaces\n"))
	if err != nil {
		return nil, err
	}

	connbuf := bufio.NewReader(conn)
	str, err := connbuf.ReadString('\n')
	if err != nil {
		return nil, err
	}

	nsList := strings.Split(strings.Trim(str, "\n"), ";")
	info := make(map[string]nsStatType, len(nsList)+1)
	for _, nsName := range nsList {
		_, err = conn.Write([]byte("namespace/" + nsName + "\n"))
		if err != nil {
			return nil, err
		}
		str, err = connbuf.ReadString('\n')
		if err != nil {
			return nil, err
		}
		info["namespace."+nsName] = splitStatistics(str)
	}

	_, err = conn.Write([]byte("statistics\n"))
	if err != nil {
		return nil, err
	}
	str, err = connbuf.ReadString('\n')
	if err != nil {
		return nil, err
	}
	info["statistics"] = splitStatistics(str)

	return &info, nil
}
func logInfo(trid int, sTime time.Time, msg string, params ...interface{}) {
	// timestamp spentTime peer x-real-ip method status 'request URI' message
	spew.Fprintf(os.Stderr, "Thread %d: %s %d %s\n",
		trid,
		sTime.Local().Format("2006-01-02-15-04-05.000"),
		int(time.Now().Sub(sTime).Seconds()*1000),
		spew.Sprintf(msg, params...),
	)
}
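// A usage sketch for logInfo above (an addition, not original code): each call
// writes one line to stderr of the form "Thread <id>: <local timestamp> <elapsed ms> <message>",
// with the message itself expanded through spew.Sprintf. The call site and its
// arguments are illustrative assumptions. Assumed import: "time".
func exampleLogInfo() {
	start := time.Now()
	// ... handle a request ...
	logInfo(7, start, "GET %s -> %d", "/health", 200)
	// e.g. "Thread 7: 2024-05-01-12-30-45.123 12 GET /health -> 200"
}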
func (m *MessageAggregator) handleHTTPStart(envelope *events.Envelope) {
	m.incrementCounter(&m.httpStartReceivedCount)

	m.logger.Debugf("handling HTTP start message %v", spew.Sprintf("%v", envelope))

	startEvent := envelope.GetHttpStart()
	requestID := startEvent.RequestId.String()
	event := eventID{requestID: requestID, peerType: startEvent.GetPeerType()}
	m.startEventsByEventID[event] = startEventEntry{startEvent: startEvent, entryTime: time.Now()}
}
func writeStringTables(directory string, tick int, t string) {
	err := os.MkdirAll(directory, os.ModePerm|os.ModeDir)
	if err != nil {
		panic(err)
	}

	path := spew.Sprintf("%s/tick_%010d.txt", directory, tick)
	err = ioutil.WriteFile(path, []byte(t), 0644)
	if err != nil {
		panic(err)
	}
}
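// A small sketch of the path layout produced by writeStringTables above (an
// addition, not original code): the tick is zero-padded to ten digits, so the
// files sort lexicographically in tick order. The directory name here is an
// illustrative assumption.
func exampleWriteStringTables() {
	// Creates "stringtables/tick_0000000042.txt" containing the given text.
	writeStringTables("stringtables", 42, "table contents")
}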
func TestSyncDir(t *testing.T) {
	store := NewMock()
	store.Mkdir("/test")
	dir, _ := getDirectory(store, "/test")

	ids := []string{"a", "b", "c"}
	syncDir(store, dir, "/test", ids)
	newDir, _ := getDirectory(store, "/test")
	if !eq(dir, newDir) {
		t.Error(spew.Sprintf("syncDir did not update dir.\n"+
			"Found %s\nExpected %s", dir, newDir))
	}

	keySet := dirToKeySet(dir)
	expKeySet := sliceToSet(ids)
	if !eq(keySet, expKeySet) {
		t.Error(spew.Sprintf("\nKeys: %s\nExpected: %s", keySet, expKeySet))
	}

	store.Set("/test/a/IP", "foo")
	store.Set("/test/b/IP", "bar")
	dir, _ = getDirectory(store, "/test")

	ids = []string{"b", "c", "d"}
	syncDir(store, dir, "/test", ids)
	newDir, _ = getDirectory(store, "/test")
	if !eq(dir, newDir) {
		t.Error(spew.Sprintf("syncDir did not update dir.\n"+
			"Found %s\nExpected %s", dir, newDir))
	}

	keySet = dirToKeySet(dir)
	expKeySet = sliceToSet(ids)
	if !eq(keySet, expKeySet) {
		t.Error(spew.Sprintf("\nKeys: %s\nExpected: %s", keySet, expKeySet))
	}

	if _, ok := dir["b"]["IP"]; !ok {
		t.Error("Key b is missing IP")
	}
}
// GetAll retrieves a list of rooms
//
// Response
// [
//   {
//     "id" : "1468fbcd-3ca6-4c6f-a742-ab91221e5462",
//     "things" : [
//       {
//         "type" : "light",
//         "id" : "4b518a5d-f855-4e21-86e0-6e91f6772bea",
//         "device" : "2864dd823a",
//         "name" : "Hue Lamp 2",
//         "location" : "1468fbcd-3ca6-4c6f-a742-ab91221e5462"
//       },
//       {
//         "type" : "light",
//         "id" : "525425b8-7d8e-4da9-9317-a38dd447ece7",
//         "device" : "2df71ceb74",
//         "name" : "Hue Lamp 1",
//         "location" : "1468fbcd-3ca6-4c6f-a742-ab91221e5462"
//       },
//       {
//         "device" : "076ca89411",
//         "id" : "8252f0e2-43d5-4dd2-bf13-834af1b789ca",
//         "type" : "light",
//         "name" : "Hue Lamp",
//         "location" : "1468fbcd-3ca6-4c6f-a742-ab91221e5462"
//       }
//     ],
//     "name" : "Living Room"
//   }
// ]
func (lr *RoomRouter) GetAll(w http.ResponseWriter, roomModel *models.RoomModel, conn redis.Conn) {
	rooms, err := roomModel.FetchAll(conn)
	log.Infof(spew.Sprintf("room: %v", rooms))
	if err != nil {
		WriteServerErrorResponse("Unable to retrieve rooms", http.StatusInternalServerError, w)
		return
	}

	WriteServerResponse(rooms, http.StatusOK, w)
}
func (m *EventMarshaller) Write(message *events.Envelope) {
	messageBytes, err := proto.Marshal(message)
	if err != nil {
		m.logger.Errorf("eventMarshaller: marshal error %v for message %v", err, message)
		metrics.BatchIncrementCounter("dropsondeMarshaller.marshalErrors")
		return
	}

	m.logger.Debugf("eventMarshaller: marshalled message %v", spew.Sprintf("%v", message))

	m.incrementMessageCount(message.GetEventType())
	m.outputWriter.Write(messageBytes)
}
func (m *MessageAggregator) handleHTTPStop(envelope *events.Envelope) *events.Envelope {
	if m.emitMetrics {
		metrics.BatchIncrementCounter("MessageAggregator.httpStopReceived")
	}
	atomic.AddUint64(&m.httpStopReceivedCount, 1)

	m.logger.Debugf("handling HTTP stop message %v", spew.Sprintf("%v", envelope))

	stopEvent := envelope.GetHttpStop()
	requestID := stopEvent.RequestId.String()
	event := eventID{requestID: requestID, peerType: stopEvent.GetPeerType()}

	startEventEntry, ok := m.startEventsByEventID[event]
	if !ok {
		m.logger.Warnf("no matching HTTP start message found for %v", event)
		if m.emitMetrics {
			metrics.BatchIncrementCounter("MessageAggregator.httpUnmatchedStopReceived")
		}
		atomic.AddUint64(&m.httpUnmatchedStopReceivedCount, 1)
		return nil
	}

	if m.emitMetrics {
		metrics.BatchIncrementCounter("MessageAggregator.httpStartStopEmitted")
	}
	atomic.AddUint64(&m.httpStartStopEmittedCount, 1)

	delete(m.startEventsByEventID, event)
	startEvent := startEventEntry.startEvent

	return &events.Envelope{
		Origin:    envelope.Origin,
		Timestamp: stopEvent.Timestamp,
		EventType: events.Envelope_HttpStartStop.Enum(),
		HttpStartStop: &events.HttpStartStop{
			StartTimestamp:  startEvent.Timestamp,
			StopTimestamp:   stopEvent.Timestamp,
			RequestId:       startEvent.RequestId,
			PeerType:        startEvent.PeerType,
			Method:          startEvent.Method,
			Uri:             startEvent.Uri,
			RemoteAddress:   startEvent.RemoteAddress,
			UserAgent:       startEvent.UserAgent,
			StatusCode:      stopEvent.StatusCode,
			ContentLength:   stopEvent.ContentLength,
			ParentRequestId: startEvent.ParentRequestId,
			ApplicationId:   stopEvent.ApplicationId,
			InstanceIndex:   startEvent.InstanceIndex,
			InstanceId:      startEvent.InstanceId,
		},
	}
}
func (u *LegacyUnmarshaller) unmarshalMessage(message []byte) (*logmessage.LogEnvelope, error) {
	envelope := &logmessage.LogEnvelope{}
	err := proto.Unmarshal(message, envelope)
	if err != nil {
		u.logger.Debugf("legacyUnmarshaller: unmarshal error %v for message %v", err, message)
		metrics.BatchIncrementCounter("legacyUnmarshaller.unmarshalErrors")
		return nil, err
	}

	u.logger.Debugf("legacyUnmarshaller: received message %v", spew.Sprintf("%v", envelope))
	return envelope, nil
}