// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
func (c *context) flushLog(force bool) (flushed bool) {
	c.pendingLogs.Lock()
	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
	n, rem := 0, 30<<20
	for ; n < len(c.pendingLogs.lines); n++ {
		ll := c.pendingLogs.lines[n]
		// Each log line will require about 3 bytes of overhead.
		nb := proto.Size(ll) + 3
		if nb > rem {
			break
		}
		rem -= nb
	}
	lines := c.pendingLogs.lines[:n]
	c.pendingLogs.lines = c.pendingLogs.lines[n:]
	c.pendingLogs.Unlock()

	if len(lines) == 0 && !force {
		// Nothing to flush.
		return false
	}

	rescueLogs := false
	defer func() {
		if rescueLogs {
			c.pendingLogs.Lock()
			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
			c.pendingLogs.Unlock()
		}
	}()

	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
		LogLine: lines,
	})
	if err != nil {
		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
		rescueLogs = true
		return false
	}

	req := &logpb.FlushRequest{
		Logs: buf,
	}
	res := &basepb.VoidProto{}
	c.pendingLogs.Lock()
	c.pendingLogs.flushes++
	c.pendingLogs.Unlock()
	if err := c.Call("logservice", "Flush", req, res, nil); err != nil {
		log.Printf("internal.flushLog: Flush RPC: %v", err)
		rescueLogs = true
		return false
	}
	return true
}
func CreateHekaStream(msgBytes []byte, outBytes *[]byte,
	msc *message.MessageSigningConfig) error {

	h := &message.Header{}
	h.SetMessageLength(uint32(len(msgBytes)))
	if msc != nil {
		h.SetHmacSigner(msc.Name)
		h.SetHmacKeyVersion(msc.Version)
		var hm hash.Hash
		switch msc.Hash {
		case "sha1":
			hm = hmac.New(sha1.New, []byte(msc.Key))
			h.SetHmacHashFunction(message.Header_SHA1)
		default:
			hm = hmac.New(md5.New, []byte(msc.Key))
		}
		hm.Write(msgBytes)
		h.SetHmac(hm.Sum(nil))
	}
	headerSize := proto.Size(h)
	requiredSize := message.HEADER_FRAMING_SIZE + headerSize + len(msgBytes)
	if requiredSize > message.MAX_RECORD_SIZE {
		return fmt.Errorf("Message too big, requires %d (MAX_RECORD_SIZE = %d)",
			requiredSize, message.MAX_RECORD_SIZE)
	}
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize)
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = uint8(headerSize)
	// This looks odd but is correct; it effectively "seeks" the initial write
	// position for the protobuf output to be at the
	// `(*outBytes)[message.HEADER_DELIMITER_SIZE]` position.
	pbuf := proto.NewBuffer((*outBytes)[message.HEADER_DELIMITER_SIZE:message.HEADER_DELIMITER_SIZE])
	if err := pbuf.Marshal(h); err != nil {
		return err
	}
	(*outBytes)[headerSize+message.HEADER_DELIMITER_SIZE] = message.UNIT_SEPARATOR
	copy((*outBytes)[message.HEADER_FRAMING_SIZE+headerSize:], msgBytes)
	return nil
}
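For context, here is a minimal sketch of reading back a record framed by CreateHekaStream above. readHekaStream is a hypothetical helper, not part of the original code; it assumes the same message package constants (RECORD_SEPARATOR, UNIT_SEPARATOR, HEADER_DELIMITER_SIZE, HEADER_FRAMING_SIZE) and the standard generated GetMessageLength getter on message.Header.

// readHekaStream is an illustrative decoder for the framing produced by
// CreateHekaStream: record separator, one-byte header length, protobuf
// header, unit separator, then the raw message bytes.
func readHekaStream(stream []byte) (h *message.Header, msgBytes []byte, err error) {
	if len(stream) < message.HEADER_FRAMING_SIZE || stream[0] != message.RECORD_SEPARATOR {
		return nil, nil, fmt.Errorf("not a framed Heka record")
	}
	// Byte 1 holds the length of the protobuf-encoded header.
	headerSize := int(stream[1])
	headerStart := message.HEADER_DELIMITER_SIZE
	h = &message.Header{}
	if err = proto.Unmarshal(stream[headerStart:headerStart+headerSize], h); err != nil {
		return nil, nil, err
	}
	if stream[headerStart+headerSize] != message.UNIT_SEPARATOR {
		return nil, nil, fmt.Errorf("missing unit separator")
	}
	msgBytes = stream[message.HEADER_FRAMING_SIZE+headerSize:]
	if int(h.GetMessageLength()) != len(msgBytes) {
		return nil, nil, fmt.Errorf("message length mismatch")
	}
	return h, msgBytes, nil
}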
func createStream(msgBytes []byte, encoding message.Header_MessageEncoding,
	outBytes *[]byte, msc *message.MessageSigningConfig) error {

	h := &message.Header{}
	h.SetMessageLength(uint32(len(msgBytes)))
	if encoding != message.Default_Header_MessageEncoding {
		h.SetMessageEncoding(encoding)
	}
	if msc != nil {
		h.SetHmacSigner(msc.Name)
		h.SetHmacKeyVersion(msc.Version)
		var hm hash.Hash
		switch msc.Hash {
		case "sha1":
			hm = hmac.New(sha1.New, []byte(msc.Key))
			h.SetHmacHashFunction(message.Header_SHA1)
		default:
			hm = hmac.New(md5.New, []byte(msc.Key))
		}
		hm.Write(msgBytes)
		h.SetHmac(hm.Sum(nil))
	}
	headerSize := uint8(proto.Size(h))
	requiredSize := int(3 + headerSize)
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize, requiredSize+len(msgBytes))
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = headerSize
	// Start the protobuf write position just past the two framing bytes.
	pbuf := proto.NewBuffer((*outBytes)[2:2])
	if err := pbuf.Marshal(h); err != nil {
		return err
	}
	(*outBytes)[headerSize+2] = message.UNIT_SEPARATOR
	*outBytes = append(*outBytes, msgBytes...)
	return nil
}
func OutputsSpec(c gs.Context) {
	t := new(ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	oth := NewOutputTestHelper(ctrl)
	var wg sync.WaitGroup
	inChan := make(chan *PipelinePack, 1)
	pConfig := NewPipelineConfig(nil)

	c.Specify("A FileOutput", func() {
		fileOutput := new(FileOutput)
		tmpFileName := fmt.Sprintf("fileoutput-test-%d", time.Now().UnixNano())
		tmpFilePath := fmt.Sprint(os.TempDir(), string(os.PathSeparator), tmpFileName)

		config := fileOutput.ConfigStruct().(*FileOutputConfig)
		config.Path = tmpFilePath

		msg := getTestMessage()
		pack := NewPipelinePack(pConfig.inputRecycleChan)
		pack.Message = msg
		pack.Decoded = true

		toString := func(outData interface{}) string {
			return string(*(outData.(*[]byte)))
		}

		c.Specify("correctly formats text output", func() {
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 20)

			c.Specify("by default", func() {
				fileOutput.handleMessage(pack, &outData)
				c.Expect(toString(&outData), gs.Equals, *msg.Payload+"\n")
			})

			c.Specify("w/ a prepended timestamp when specified", func() {
				fileOutput.prefix_ts = true
				fileOutput.handleMessage(pack, &outData)
				// Test will fail if date flips btn handleMessage call and
				// todayStr calculation... should be extremely rare.
				todayStr := time.Now().Format("[2006/Jan/02:")
				strContents := toString(&outData)
				payload := *msg.Payload
				c.Expect(strContents, ts.StringContains, payload)
				c.Expect(strContents, ts.StringStartsWith, todayStr)
			})
		})

		c.Specify("correctly formats JSON output", func() {
			config.Format = "json"
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 200)

			c.Specify("when specified", func() {
				fileOutput.handleMessage(pack, &outData)
				msgJson, err := json.Marshal(pack.Message)
				c.Assume(err, gs.IsNil)
				c.Expect(toString(&outData), gs.Equals, string(msgJson)+"\n")
			})

			c.Specify("and with a timestamp", func() {
				fileOutput.prefix_ts = true
				fileOutput.handleMessage(pack, &outData)
				// Test will fail if date flips btn handleMessage call and
				// todayStr calculation... should be extremely rare.
				todayStr := time.Now().Format("[2006/Jan/02:")
				strContents := toString(&outData)
				msgJson, err := json.Marshal(pack.Message)
				c.Assume(err, gs.IsNil)
				c.Expect(strContents, ts.StringContains, string(msgJson)+"\n")
				c.Expect(strContents, ts.StringStartsWith, todayStr)
			})
		})

		c.Specify("correctly formats protocol buffer stream output", func() {
			config.Format = "protobufstream"
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 200)

			c.Specify("when specified and timestamp ignored", func() {
				fileOutput.prefix_ts = true
				err := fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				b := []byte{30, 2, 8, uint8(proto.Size(pack.Message)), 31, 10, 16}
				// sanity check the header and the start of the protocol buffer
				c.Expect(bytes.Equal(b, outData[:len(b)]), gs.IsTrue)
			})
		})

		c.Specify("processes incoming messages", func() {
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			// Save for comparison.
			payload := fmt.Sprintf("%s\n", pack.Message.GetPayload())

			oth.MockOutputRunner.EXPECT().InChan().Return(inChan)
			wg.Add(1)
			go fileOutput.receiver(oth.MockOutputRunner, &wg)
			inChan <- pack
			close(inChan)
			outBatch := <-fileOutput.batchChan
			wg.Wait()
			c.Expect(string(outBatch), gs.Equals, payload)
		})

		c.Specify("Init halts if basedirectory is not writable", func() {
			tmpdir := filepath.Join(os.TempDir(), "tmpdir")
			err := os.MkdirAll(tmpdir, 0400)
			c.Assume(err, gs.IsNil)
			config.Path = tmpdir
			err = fileOutput.Init(config)
			c.Assume(err, gs.Not(gs.IsNil))
		})

		c.Specify("commits to a file", func() {
			outStr := "Write me out to the log file"
			outBytes := []byte(outStr)

			c.Specify("with default settings", func() {
				err := fileOutput.Init(config)
				defer os.Remove(tmpFilePath)
				c.Assume(err, gs.IsNil)

				// Start committer loop
				wg.Add(1)
				go fileOutput.committer(oth.MockOutputRunner, &wg)

				// Feed and close the batchChan
				go func() {
					fileOutput.batchChan <- outBytes
					_ = <-fileOutput.backChan // clear backChan to prevent blocking
					close(fileOutput.batchChan)
				}()

				wg.Wait()
				// Wait for the file close operation to happen.
				//for ; err == nil; _, err = fileOutput.file.Stat() {
				//}

				tmpFile, err := os.Open(tmpFilePath)
				defer tmpFile.Close()
				c.Assume(err, gs.IsNil)
				contents, err := ioutil.ReadAll(tmpFile)
				c.Assume(err, gs.IsNil)
				c.Expect(string(contents), gs.Equals, outStr)
			})

			c.Specify("with different Perm settings", func() {
				config.Perm = "600"
				err := fileOutput.Init(config)
				defer os.Remove(tmpFilePath)
				c.Assume(err, gs.IsNil)

				// Start committer loop
				wg.Add(1)
				go fileOutput.committer(oth.MockOutputRunner, &wg)

				// Feed and close the batchChan
				go func() {
					fileOutput.batchChan <- outBytes
					_ = <-fileOutput.backChan // clear backChan to prevent blocking
					close(fileOutput.batchChan)
				}()

				wg.Wait()
				// Wait for the file close operation to happen.
				//for ; err == nil; _, err = fileOutput.file.Stat() {
				//}

				tmpFile, err := os.Open(tmpFilePath)
				defer tmpFile.Close()
				c.Assume(err, gs.IsNil)
				fileInfo, err := tmpFile.Stat()
				c.Assume(err, gs.IsNil)
				fileMode := fileInfo.Mode()
				if runtime.GOOS == "windows" {
					c.Expect(fileMode.String(), ts.StringContains, "-rw-rw-rw-")
				} else {
					// 7 consecutive dashes implies no perms for group or other
					c.Expect(fileMode.String(), ts.StringContains, "-------")
				}
			})
		})
	})

	c.Specify("A TcpOutput", func() {
		tcpOutput := new(TcpOutput)
		config := tcpOutput.ConfigStruct().(*TcpOutputConfig)
		tcpOutput.connection = ts.NewMockConn(ctrl)

		msg := getTestMessage()
		pack := NewPipelinePack(pConfig.inputRecycleChan)
		pack.Message = msg
		pack.Decoded = true

		c.Specify("correctly formats protocol buffer stream output", func() {
			outBytes := make([]byte, 0, 200)
			err := createProtobufStream(pack, &outBytes)
			c.Expect(err, gs.IsNil)
			b := []byte{30, 2, 8, uint8(proto.Size(pack.Message)), 31, 10, 16}
			// sanity check the header and the start of the protocol buffer
			c.Expect(bytes.Equal(b, (outBytes)[:len(b)]), gs.IsTrue)
		})

		c.Specify("writes out to the network", func() {
			inChanCall := oth.MockOutputRunner.EXPECT().InChan().AnyTimes()
			inChanCall.Return(inChan)

			collectData := func(ch chan string) {
				ln, err := net.Listen("tcp", "localhost:9125")
				if err != nil {
					ch <- err.Error()
				}
				ch <- "ready"
				conn, err := ln.Accept()
				if err != nil {
					ch <- err.Error()
				}
				b := make([]byte, 1000)
				n, _ := conn.Read(b)
				ch <- string(b[0:n])
			}
			ch := make(chan string, 1) // don't block on put
			go collectData(ch)
			result := <-ch // wait for server

			err := tcpOutput.Init(config)
			c.Assume(err, gs.IsNil)

			outStr := "Write me out to the network"
			pack.Message.SetPayload(outStr)
			go func() {
				wg.Add(1)
				tcpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				wg.Done()
			}()
			inChan <- pack
			close(inChan)
			wg.Wait() // wait for close to finish, prevents intermittent test failures

			matchBytes := make([]byte, 0, 1000)
			err = createProtobufStream(pack, &matchBytes)
			c.Expect(err, gs.IsNil)

			result = <-ch
			c.Expect(result, gs.Equals, string(matchBytes))
		})
	})

	c.Specify("Runner restarts a plugin on the first time only", func() {
		pc := new(PipelineConfig)
		var pluginGlobals PluginGlobals
		pluginGlobals.Retries = RetryOptions{
			MaxDelay:   "1us",
			Delay:      "1us",
			MaxJitter:  "1us",
			MaxRetries: 1,
		}
		pw := &PluginWrapper{
			name:          "stoppingOutput",
			configCreator: func() interface{} { return nil },
			pluginCreator: func() interface{} { return new(StoppingOutput) },
		}
		output := new(StoppingOutput)
		pc.outputWrappers = make(map[string]*PluginWrapper)
		pc.outputWrappers["stoppingOutput"] = pw
		oRunner := NewFORunner("stoppingOutput", output, &pluginGlobals)
		var wg sync.WaitGroup
		cfgCall := oth.MockHelper.EXPECT().PipelineConfig()
		cfgCall.Return(pc)
		wg.Add(1)
		oRunner.Start(oth.MockHelper, &wg) // no panic => success
		wg.Wait()
		c.Expect(stopoutputTimes, gs.Equals, 2)
	})

	c.Specify("Runner restarts plugin and resumes feeding it", func() {
		pc := new(PipelineConfig)
		var pluginGlobals PluginGlobals
		pluginGlobals.Retries = RetryOptions{
			MaxDelay:   "1us",
			Delay:      "1us",
			MaxJitter:  "1us",
			MaxRetries: 4,
		}
		pw := &PluginWrapper{
			name:          "stoppingresumeOutput",
			configCreator: func() interface{} { return nil },
			pluginCreator: func() interface{} { return new(StopResumeOutput) },
		}
		output := new(StopResumeOutput)
		pc.outputWrappers = make(map[string]*PluginWrapper)
		pc.outputWrappers["stoppingresumeOutput"] = pw
		oRunner := NewFORunner("stoppingresumeOutput", output, &pluginGlobals)
		var wg sync.WaitGroup
		cfgCall := oth.MockHelper.EXPECT().PipelineConfig()
		cfgCall.Return(pc)
		wg.Add(1)
		oRunner.Start(oth.MockHelper, &wg) // no panic => success
		wg.Wait()
		c.Expect(stopresumerunTimes, gs.Equals, 3)
		c.Expect(len(stopresumeHolder), gs.Equals, 2)
		c.Expect(stopresumeHolder[1], gs.Equals, "woot")
		c.Expect(oRunner.retainPack, gs.IsNil)
	})
}
func FileOutputSpec(c gs.Context) {
	t := new(pipeline_ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	oth := plugins_ts.NewOutputTestHelper(ctrl)
	var wg sync.WaitGroup
	inChan := make(chan *PipelinePack, 1)
	pConfig := NewPipelineConfig(nil)

	c.Specify("A FileOutput", func() {
		fileOutput := new(FileOutput)
		tmpFileName := fmt.Sprintf("fileoutput-test-%d", time.Now().UnixNano())
		tmpFilePath := fmt.Sprint(os.TempDir(), string(os.PathSeparator), tmpFileName)

		config := fileOutput.ConfigStruct().(*FileOutputConfig)
		config.Path = tmpFilePath

		msg := pipeline_ts.GetTestMessage()
		pack := NewPipelinePack(pConfig.InputRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		toString := func(outData interface{}) string {
			return string(*(outData.(*[]byte)))
		}

		c.Specify("correctly formats text output", func() {
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 20)

			c.Specify("by default", func() {
				err = fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				c.Expect(toString(&outData), gs.Equals, *msg.Payload+"\n")
			})

			c.Specify("w/ a prepended timestamp when specified", func() {
				fileOutput.prefix_ts = true
				err = fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				// Test will fail if date flips btn handleMessage call and
				// todayStr calculation... should be extremely rare.
				todayStr := time.Now().Format("[2006/Jan/02:")
				strContents := toString(&outData)
				payload := *msg.Payload
				c.Expect(strContents, pipeline_ts.StringContains, payload)
				c.Expect(strContents, pipeline_ts.StringStartsWith, todayStr)
			})

			c.Specify("even when payload is nil", func() {
				pack.Message.Payload = nil
				err = fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				strContents := toString(&outData)
				c.Expect(strContents, gs.Equals, "\n")
			})

			c.Specify("payload is nil and with a timestamp", func() {
				pack.Message.Payload = nil
				fileOutput.prefix_ts = true
				err = fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				// Test will fail if date flips btn handleMessage call and
				// todayStr calculation... should be extremely rare.
				todayStr := time.Now().Format("[2006/Jan/02:")
				strContents := toString(&outData)
				c.Expect(strings.HasPrefix(strContents, todayStr), gs.IsTrue)
				c.Expect(strings.HasSuffix(strContents, " \n"), gs.IsTrue)
			})
		})

		c.Specify("correctly formats JSON output", func() {
			config.Format = "json"
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 200)

			c.Specify("when specified", func() {
				fileOutput.handleMessage(pack, &outData)
				msgJson, err := json.Marshal(pack.Message)
				c.Assume(err, gs.IsNil)
				c.Expect(toString(&outData), gs.Equals, string(msgJson)+"\n")
			})

			c.Specify("and with a timestamp", func() {
				fileOutput.prefix_ts = true
				fileOutput.handleMessage(pack, &outData)
				// Test will fail if date flips btn handleMessage call and
				// todayStr calculation... should be extremely rare.
				todayStr := time.Now().Format("[2006/Jan/02:")
				strContents := toString(&outData)
				msgJson, err := json.Marshal(pack.Message)
				c.Assume(err, gs.IsNil)
				c.Expect(strContents, pipeline_ts.StringContains, string(msgJson)+"\n")
				c.Expect(strContents, pipeline_ts.StringStartsWith, todayStr)
			})
		})

		c.Specify("correctly formats protocol buffer stream output", func() {
			config.Format = "protobufstream"
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			outData := make([]byte, 0, 200)

			c.Specify("when specified and timestamp ignored", func() {
				fileOutput.prefix_ts = true
				err := fileOutput.handleMessage(pack, &outData)
				c.Expect(err, gs.IsNil)
				b := []byte{30, 2, 8, uint8(proto.Size(pack.Message)), 31, 10, 16}
				// sanity check the header and the start of the protocol buffer
				c.Expect(bytes.Equal(b, outData[:len(b)]), gs.IsTrue)
			})
		})

		c.Specify("processes incoming messages", func() {
			err := fileOutput.Init(config)
			defer os.Remove(tmpFilePath)
			c.Assume(err, gs.IsNil)
			// Save for comparison.
			payload := fmt.Sprintf("%s\n", pack.Message.GetPayload())

			oth.MockOutputRunner.EXPECT().InChan().Return(inChan)
			wg.Add(1)
			go fileOutput.receiver(oth.MockOutputRunner, &wg)
			inChan <- pack
			close(inChan)
			outBatch := <-fileOutput.batchChan
			wg.Wait()
			c.Expect(string(outBatch), gs.Equals, payload)
		})

		c.Specify("Init halts if basedirectory is not writable", func() {
			tmpdir := filepath.Join(os.TempDir(), "tmpdir")
			err := os.MkdirAll(tmpdir, 0400)
			c.Assume(err, gs.IsNil)
			config.Path = tmpdir
			err = fileOutput.Init(config)
			c.Assume(err, gs.Not(gs.IsNil))
		})

		c.Specify("commits to a file", func() {
			outStr := "Write me out to the log file"
			outBytes := []byte(outStr)

			c.Specify("with default settings", func() {
				err := fileOutput.Init(config)
				defer os.Remove(tmpFilePath)
				c.Assume(err, gs.IsNil)

				// Start committer loop
				wg.Add(1)
				go fileOutput.committer(oth.MockOutputRunner, &wg)

				// Feed and close the batchChan
				go func() {
					fileOutput.batchChan <- outBytes
					_ = <-fileOutput.backChan // clear backChan to prevent blocking
					close(fileOutput.batchChan)
				}()

				wg.Wait()
				// Wait for the file close operation to happen.
				//for ; err == nil; _, err = fileOutput.file.Stat() {
				//}

				tmpFile, err := os.Open(tmpFilePath)
				defer tmpFile.Close()
				c.Assume(err, gs.IsNil)
				contents, err := ioutil.ReadAll(tmpFile)
				c.Assume(err, gs.IsNil)
				c.Expect(string(contents), gs.Equals, outStr)
			})

			c.Specify("with different Perm settings", func() {
				config.Perm = "600"
				err := fileOutput.Init(config)
				defer os.Remove(tmpFilePath)
				c.Assume(err, gs.IsNil)

				// Start committer loop
				wg.Add(1)
				go fileOutput.committer(oth.MockOutputRunner, &wg)

				// Feed and close the batchChan
				go func() {
					fileOutput.batchChan <- outBytes
					_ = <-fileOutput.backChan // clear backChan to prevent blocking
					close(fileOutput.batchChan)
				}()

				wg.Wait()
				// Wait for the file close operation to happen.
				//for ; err == nil; _, err = fileOutput.file.Stat() {
				//}

				tmpFile, err := os.Open(tmpFilePath)
				defer tmpFile.Close()
				c.Assume(err, gs.IsNil)
				fileInfo, err := tmpFile.Stat()
				c.Assume(err, gs.IsNil)
				fileMode := fileInfo.Mode()
				if runtime.GOOS == "windows" {
					c.Expect(fileMode.String(), pipeline_ts.StringContains, "-rw-rw-rw-")
				} else {
					// 7 consecutive dashes implies no perms for group or other
					c.Expect(fileMode.String(), pipeline_ts.StringContains, "-------")
				}
			})
		})

		c.Specify("honors folder_perm setting", func() {
			config.FolderPerm = "750"
			config.Path = filepath.Join(tmpFilePath, "subfile")
			err := fileOutput.Init(config)
			defer os.Remove(config.Path)
			c.Assume(err, gs.IsNil)

			fi, err := os.Stat(tmpFilePath)
			c.Expect(fi.IsDir(), gs.IsTrue)
			c.Expect(fi.Mode().Perm(), gs.Equals, os.FileMode(0750))
		})

		c.Specify("that starts receiving w/ a flush interval", func() {
			config.FlushInterval = 100000000 // We'll trigger the timer manually.
			inChan := make(chan *PipelinePack)
			oth.MockOutputRunner.EXPECT().InChan().Return(inChan)
			timerChan := make(chan time.Time)

			msg2 := pipeline_ts.GetTestMessage()
			pack2 := NewPipelinePack(pConfig.InputRecycleChan())
			pack2.Message = msg2

			recvWithConfig := func(config *FileOutputConfig) {
				err := fileOutput.Init(config)
				c.Assume(err, gs.IsNil)
				wg.Add(1)
				go fileOutput.receiver(oth.MockOutputRunner, &wg)
				runtime.Gosched() // Yield so we can overwrite the timerChan.
				fileOutput.timerChan = timerChan
			}

			cleanUp := func() {
				close(inChan)
				wg.Done()
			}

			c.Specify("honors flush interval", func() {
				recvWithConfig(config)
				defer cleanUp()
				inChan <- pack
				select {
				case _ = <-fileOutput.batchChan:
					c.Expect("", gs.Equals, "fileOutput.batchChan should NOT have fired yet")
				default:
				}
				timerChan <- time.Now()
				select {
				case _ = <-fileOutput.batchChan:
				default:
					c.Expect("", gs.Equals, "fileOutput.batchChan SHOULD have fired by now")
				}
			})

			c.Specify("honors flush interval AND flush count", func() {
				config.FlushCount = 2
				recvWithConfig(config)
				defer cleanUp()
				inChan <- pack
				select {
				case <-fileOutput.batchChan:
					c.Expect("", gs.Equals, "fileOutput.batchChan should NOT have fired yet")
				default:
				}
				timerChan <- time.Now()
				select {
				case <-fileOutput.batchChan:
					c.Expect("", gs.Equals, "fileOutput.batchChan should NOT have fired yet")
				default:
				}
				inChan <- pack2
				runtime.Gosched()
				select {
				case <-fileOutput.batchChan:
				default:
					c.Expect("", gs.Equals, "fileOutput.batchChan SHOULD have fired by now")
				}
			})

			c.Specify("honors flush interval OR flush count", func() {
				config.FlushCount = 2
				config.FlushOperator = "OR"
				recvWithConfig(config)
				defer cleanUp()
				inChan <- pack
				select {
				case <-fileOutput.batchChan:
					c.Expect("", gs.Equals, "fileOutput.batchChan should NOT have fired yet")
				default:
				}

				c.Specify("when interval triggers first", func() {
					timerChan <- time.Now()
					select {
					case <-fileOutput.batchChan:
					default:
						c.Expect("", gs.Equals, "fileOutput.batchChan SHOULD have fired by now")
					}
				})

				c.Specify("when count triggers first", func() {
					inChan <- pack2
					runtime.Gosched()
					select {
					case <-fileOutput.batchChan:
					default:
						c.Expect("", gs.Equals, "fileOutput.batchChan SHOULD have fired by now")
					}
				})
			})
		})
	})
}
func (self *Request) Size() int {
	return proto.Size(self)
}
func (self *Response) Size() int {
	return proto.Size(self)
}
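For context, a caller might use these generated Size helpers to pre-allocate an output buffer before marshaling. This is only an illustrative sketch: marshalWithPrealloc is hypothetical and the concrete Request fields are not shown in the original code.

// marshalWithPrealloc sizes the buffer up front using the Size helper, so the
// subsequent marshal should not need to grow it.
func marshalWithPrealloc(req *Request) ([]byte, error) {
	buf := proto.NewBuffer(make([]byte, 0, req.Size()))
	if err := buf.Marshal(req); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}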
func main() {
	uj := &Pmd.UserJsMessageForwardUserPmd_CS{}
	uj.Msgbytes = []byte(`{"whj":111}`)
	fmt.Println("XXXXX", uj.String(), proto.MarshalTextString(uj))

	rc := &Pmd.ReconnectErrorLoginUserPmd_S{}
	rc.Desc = proto.String(`{"whj":111}`)
	fmt.Println("xxxxx", rc.String(), proto.MarshalTextString(rc), *rc.Desc)

	m := make(map[int]string)
	m[1] = "wabghaijun"
	a := 1
	fmt.Println(protobuf.Encode(&a))

	mset := make(map[int32]proto.Extension)
	//mset[1] = proto.Extension{enc: []byte("sss")}
	//umset, err := proto.MarshalMessageSet(mset)

	var b []byte
	fmt.Println(len(b))

	nmd := &Pmd.ForwardNullUserPmd_CS{}
	nmd1 := &Pmd.ForwardNullUserPmd_CS{}
	nmd2 := &Pmd.ForwardNullUserPmd_CS{}
	cmd3 := &Pmd.RequestCloseNullUserPmd_CS{}
	cmd4 := &Pmd.RequestCloseNullUserPmd_CS{}
	cmd3.Reason = proto.String("2222")
	fmt.Println(proto.GetProperties(reflect.TypeOf(cmd3).Elem()))

	cmd3byte, err1 := proto.Marshal(cmd3)
	if err1 != nil {
		fmt.Println("xxxxxxxxxxx", err1)
	}
	fmt.Println(proto.Unmarshal(cmd3byte, cmd3))
	fmt.Println(mset)

	cmd3test := proto.MarshalTextString(cmd3)
	fmt.Println(cmd3test)
	fmt.Println(proto.UnmarshalText(cmd3test, nmd))

	//nmd.Prototype = proto.Uint64(2)
	nmd.ByCmd = proto.Uint32(0)
	//nmd.ByParam = proto.Uint32(0)
	//nmd.ByCmd = append(nmd.ByCmd, 0)
	//nmd.ByParam = append(nmd.ByParam, 0)

	sendbuf := proto.NewBuffer(nil)
	err := sendbuf.Marshal(nmd)
	if err != nil {
		fmt.Println("1", err)
	}
	nmd.Data = sendbuf.Bytes()
	fmt.Println(nmd, proto.Size(nmd), len(sendbuf.Bytes()))
	fmt.Println(len(sendbuf.Bytes()), sendbuf.Bytes())

	//data := sendbuf.Bytes()
	err = sendbuf.Marshal(cmd3)
	if err != nil {
		fmt.Println("2", err)
	}
	fmt.Println(len(sendbuf.Bytes()), sendbuf.Bytes())

	data := sendbuf.Bytes()
	fmt.Println(len(data), data)
	//data = append(data, byte(1))

	databuf := proto.NewBuffer(data)
	err = databuf.Unmarshal(nmd1)
	if err != nil {
		fmt.Println("3", err)
	}

	//err = databuf.Unmarshal(nmd2)
	err = proto.Unmarshal(data[:2], nmd2)
	if err != nil {
		fmt.Println("4", err)
	}
	err = proto.Unmarshal(data[2:], cmd4)
	//err = databuf.Unmarshal(cmd4)
	if err != nil {
		fmt.Println("5", err)
	}

	fmt.Println(nmd, proto.Size(nmd))
	fmt.Println(nmd1)
	fmt.Println(nmd2)
	fmt.Println(cmd4)
}