// bigIntToNetIPv6 is a helper function that returns a net.IP with correctly
// padded values.
func bigIntToNetIPv6(bi *big.Int) *net.IP {
	x := make(net.IP, IPv6len)
	ipv6Bytes := bi.Bytes()

	// It's possible for ipv6Bytes to be fewer than IPv6len bytes in size. If
	// they are different sizes we need to pad the response.
	if len(ipv6Bytes) < IPv6len {
		buf := new(bytes.Buffer)
		buf.Grow(IPv6len)

		for i := len(ipv6Bytes); i < IPv6len; i++ {
			if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil {
				panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err))
			}
		}
		for _, b := range ipv6Bytes {
			if err := binary.Write(buf, binary.BigEndian, b); err != nil {
				panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err))
			}
		}

		ipv6Bytes = buf.Bytes()
	}
	i := copy(x, ipv6Bytes)
	if i != IPv6len {
		panic("IPv6 wrong size")
	}
	return &x
}
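// Hedged usage sketch for bigIntToNetIPv6 above (assumes IPv6len equals
// net.IPv6len in this package; the helper name is illustrative only). Small
// integers such as ::1 yield fewer than 16 bytes from bi.Bytes(), which is
// exactly the case the left-padding loop handles.
func exampleBigIntToNetIPv6() {
	bi := new(big.Int).SetBytes(net.ParseIP("2001:db8::1").To16())
	fmt.Println(bigIntToNetIPv6(bi).String()) // 2001:db8::1

	one := big.NewInt(1) // Bytes() returns a single byte, so it gets padded
	fmt.Println(bigIntToNetIPv6(one).String()) // ::1
}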
func csv2String() string {
	buf := new(bytes.Buffer)
	for _, pub := range csv2.pubkeys {
		buf.WriteString(fmt.Sprintf("%s,\n", pub))
	}
	return string(buf.Bytes())
}
func TestDumper(t *testing.T) {
	var in [40]byte
	for i := range in {
		in[i] = byte(i + 30)
	}

	for stride := 1; stride < len(in); stride++ {
		var out bytes.Buffer
		dumper := Dumper(&out)
		done := 0
		for done < len(in) {
			todo := done + stride
			if todo > len(in) {
				todo = len(in)
			}
			dumper.Write(in[done:todo])
			done = todo
		}
		dumper.Close()
		if !bytes.Equal(out.Bytes(), expectedHexDump) {
			t.Errorf("stride: %d failed. got:\n%s\nwant:\n%s", stride, out.Bytes(), expectedHexDump)
		}
	}
}
func (client *Client) HandleLoadGroupOffline(lh *LoadGroupOffline) {
	messages := storage.LoadGroupOfflineMessage(lh.appid, lh.gid, lh.uid, lh.device_id, GROUP_OFFLINE_LIMIT)

	result := &MessageResult{status: 0}
	buffer := new(bytes.Buffer)

	var count int16 = 0
	for _, emsg := range messages {
		if emsg.msg.cmd == MSG_GROUP_IM {
			im := emsg.msg.body.(*IMMessage)
			if im.sender == lh.uid && emsg.device_id == lh.device_id {
				continue
			}
		}
		count += 1
	}

	binary.Write(buffer, binary.BigEndian, count)

	for _, emsg := range messages {
		if emsg.msg.cmd == MSG_GROUP_IM {
			im := emsg.msg.body.(*IMMessage)
			if im.sender == lh.uid && emsg.device_id == lh.device_id {
				continue
			}
		}
		ebuf := client.WriteEMessage(emsg)
		var size int16 = int16(len(ebuf))
		binary.Write(buffer, binary.BigEndian, size)
		buffer.Write(ebuf)
	}

	result.content = buffer.Bytes()
	msg := &Message{cmd: MSG_RESULT, body: result}
	SendMessage(client.conn, msg)
}
func (options *Html) Smartypants(out *bytes.Buffer, text []byte) {
	smrt := smartypantsData{false, false}

	// first do normal entity escaping
	var escaped bytes.Buffer
	attrEscape(&escaped, text)
	text = escaped.Bytes()

	mark := 0
	for i := 0; i < len(text); i++ {
		if action := options.smartypants[text[i]]; action != nil {
			if i > mark {
				out.Write(text[mark:i])
			}

			previousChar := byte(0)
			if i > 0 {
				previousChar = text[i-1]
			}
			i += action(out, &smrt, previousChar, text[i:])
			mark = i + 1
		}
	}

	if mark < len(text) {
		out.Write(text[mark:])
	}
}
func TestPixels(t *testing.T) {
	rgba := decodeFile(t, "rgba.png")
	allExpected := []*Pixel{
		{Red: 255},
		{Green: 255},
		{Blue: 255},
		{Opacity: 127},
	}
	leftExpected := []*Pixel{
		allExpected[0],
		allExpected[2],
	}
	rightExpected := []*Pixel{
		allExpected[1],
		allExpected[3],
	}
	runPixelTests := func() {
		px1, err := rgba.Pixels(rgba.Rect())
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(px1, allExpected) {
			t.Errorf("expecting pixels %v, got %v instead", allExpected, px1)
		}
		px2, err := rgba.Pixels(Rect{Width: 1, Height: 2})
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(px2, leftExpected) {
			t.Errorf("expecting left pixels %v, got %v instead", leftExpected, px2)
		}
		px3, err := rgba.Pixels(Rect{X: 1, Width: 1, Height: 2})
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(px3, rightExpected) {
			t.Errorf("expecting right pixels %v, got %v instead", rightExpected, px3)
		}
	}
	// First test with the image as it's loaded
	runPixelTests()
	// Change the green pixel to blue and test again
	if err := rgba.SetPixel(1, 0, &Pixel{Blue: 255}); err != nil {
		t.Fatal(err)
	}
	allExpected[1].Green = 0
	allExpected[1].Blue = 255
	runPixelTests()
	// Encode the image, decode it and check again
	var buf bytes.Buffer
	if err := rgba.Encode(&buf, nil); err != nil {
		t.Fatal(err)
	}
	var err error
	rgba, err = DecodeData(buf.Bytes())
	if err != nil {
		t.Fatal(err)
	}
	runPixelTests()
}
// loadNotificationConfig loads the notification config for a given bucket, if
// any, and returns the structured notification config.
func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationConfig, error) {
	// Construct the notification config path.
	ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)

	// Acquire a read lock on notification config before reading it.
	objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath)
	objLock.RLock()
	defer objLock.RUnlock()

	var buffer bytes.Buffer
	err := objAPI.GetObject(minioMetaBucket, ncPath, 0, -1, &buffer) // Read everything.
	if err != nil {
		// If 'notification.xml' is not found, return
		// 'errNoSuchNotifications'. This is the default when no
		// bucket notifications are found on the bucket.
		if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
			return nil, errNoSuchNotifications
		}
		errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
		// Return the error for all other cases.
		return nil, err
	}

	// Unmarshal notification bytes.
	notificationConfigBytes := buffer.Bytes()
	notificationCfg := &notificationConfig{}
	if err = xml.Unmarshal(notificationConfigBytes, &notificationCfg); err != nil {
		return nil, err
	}

	// Return success.
	return notificationCfg, nil
}
func main() {
	var (
		config Config
	)

	configFileName := flag.String("config", "", "Config file")
	headerReport := flag.Bool("headerReport", false, "Produce a report of header mappings")
	flag.Parse()

	configFile, err := os.Open(*configFileName)
	if err != nil {
		fmt.Println("Error opening config:", err)
		return
	}
	defer configFile.Close()

	configBuf := new(bytes.Buffer)
	configBuf.ReadFrom(configFile)
	xml.Unmarshal(configBuf.Bytes(), &config)

	// Parse templates
	textTemplates, err := makeTemplates(&config.TemplateConfig)
	if err != nil {
		panic(err)
	}

	// Process each input file config
	for _, fileConfig := range config.FileConfig {
		if *headerReport {
			processHeader(fileConfig, textTemplates)
			continue
		}
		processFile(fileConfig, textTemplates)
	}
}
func init() {
	buf := new(bytes.Buffer)
	for i := 0; i < 1e6; i++ {
		fmt.Fprintf(buf, "%d\n", i)
	}
	digits = buf.Bytes()
}
func Compile(filename, pkg, input string) ([]byte, error) {
	ast, errors := parse.Parse(filename, input)
	if len(errors) > 0 {
		return nil, fmt.Errorf("parse errors\n%v", errors)
	}

	ctx := &context{make(map[string]string)}
	err := ctx.generateTypes(ast.Scope)
	if err != nil {
		return nil, err
	}

	types := make([]string, 0)
	for _, v := range ctx.types {
		types = append(types, v)
	}

	tmplData := struct {
		Package string
		Types   []string
	}{
		Package: pkg,
		Types:   types,
	}

	var buf bytes.Buffer
	err = packageTmpl.Execute(&buf, tmplData)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
func toCache(root, node, id string, content []byte, mods *service.CacheMods) error {
	// Write deps to filesystem.
	thisDep := service.CacheDep{Node: node, Cache: id}
	if mods != nil {
		for _, dep := range mods.Deps {
			err := appendRdeps(root, dep, []service.CacheDep{thisDep})
			if err != nil {
				return fmt.Errorf("Could not write rdeps: %v", err)
			}
		}
	}

	// Write cache to filesystem.
	nodePath := filepath.Join(root, node[1:])
	path := filepath.Join(nodePath, ".data", filepath.Base(id))
	if err := os.MkdirAll(filepath.Dir(path), 0770); err != nil {
		return fmt.Errorf("Could not create node cache directory: %v", err)
	}

	if mods != nil {
		mods.Deps = nil
	}
	var raw bytes.Buffer
	enc := gob.NewEncoder(&raw)
	data := cacheData{Data: content, CacheMods: mods}
	if err := enc.Encode(&data); err != nil {
		return fmt.Errorf("Could not encode cache data: %v", err)
	}
	if err := ioutil.WriteFile(path, raw.Bytes(), 0660); err != nil {
		return fmt.Errorf("Could not write node cache: %v", err)
	}

	return nil
}
func verifyPrint(filename string, ast1 *File) {
	var buf1 bytes.Buffer
	_, err := Fprint(&buf1, ast1, true)
	if err != nil {
		panic(err)
	}

	ast2, err := ParseBytes(buf1.Bytes(), nil, nil, 0)
	if err != nil {
		panic(err)
	}

	var buf2 bytes.Buffer
	_, err = Fprint(&buf2, ast2, true)
	if err != nil {
		panic(err)
	}

	if bytes.Compare(buf1.Bytes(), buf2.Bytes()) != 0 {
		fmt.Printf("--- %s ---\n", filename)
		fmt.Printf("%s\n", buf1.Bytes())
		fmt.Println()
		fmt.Printf("--- %s ---\n", filename)
		fmt.Printf("%s\n", buf2.Bytes())
		fmt.Println()
		panic("not equal")
	}
}
func main() {
	var file *os.File
	var err error
	if file, err = os.Open("files/sample.tar.bz2"); err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	reader := tar.NewReader(bzip2.NewReader(file))
	var header *tar.Header
	for {
		header, err = reader.Next()
		if err == io.EOF {
			// end of the archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		buf := new(bytes.Buffer)
		if _, err = io.Copy(buf, reader); err != nil {
			log.Fatalln(err)
		}
		if err = ioutil.WriteFile("output/"+header.Name, buf.Bytes(), 0755); err != nil {
			log.Fatal(err)
		}
	}
}
// Test that the line splitter errors out on a long line.
func TestScanLineTooLong(t *testing.T) {
	const smallMaxTokenSize = 256 // Much smaller for more efficient testing.
	// Build a buffer of lots of line lengths, growing well past smallMaxTokenSize.
	tmp := new(bytes.Buffer)
	buf := new(bytes.Buffer)
	lineNum := 0
	j := 0
	for i := 0; i < 2*smallMaxTokenSize; i++ {
		genLine(tmp, lineNum, j, true)
		j++
		buf.Write(tmp.Bytes())
		lineNum++
	}
	s := NewScanner(&slowReader{3, buf})
	s.Split(ScanLines)
	s.MaxTokenSize(smallMaxTokenSize)
	j = 0
	for lineNum := 0; s.Scan(); lineNum++ {
		genLine(tmp, lineNum, j, false)
		if j < smallMaxTokenSize {
			j++
		} else {
			j--
		}
		line := tmp.Bytes()
		if !bytes.Equal(s.Bytes(), line) {
			t.Errorf("%d: bad line: %d %d\n%.100q\n%.100q\n",
				lineNum, len(s.Bytes()), len(line), s.Bytes(), line)
		}
	}
	err := s.Err()
	if err != ErrTooLong {
		t.Fatalf("expected ErrTooLong; got %s", err)
	}
}
// stack returns a nicely formatted stack frame, skipping skip frames
func stack(skip int) []byte {
	buf := new(bytes.Buffer) // the returned data
	// As we loop, we open files and read them. These variables record the currently
	// loaded file.
	var lines [][]byte
	var lastFile string
	for i := skip; ; i++ { // Skip the expected number of frames
		pc, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		// Print this much at least. If we can't find the source, it won't show.
		fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
		if file != lastFile {
			data, err := ioutil.ReadFile(file)
			if err != nil {
				continue
			}
			lines = bytes.Split(data, []byte{'\n'})
			lastFile = file
		}
		fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
	}
	return buf.Bytes()
}
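// Hypothetical usage of stack above, e.g. inside a panic-recovery wrapper; the
// skip value of 3 is an assumption about how deep this helper sits below the
// panicking frame.
func recoverAndLog() {
	if err := recover(); err != nil {
		log.Printf("panic: %v\n%s", err, stack(3))
	}
}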
// SaveGameState saves the current gamestate for a user. Does not delete old gamestates.
func (db *GameStateDB) SaveGameState(
	tx *sqlx.Tx, userRow UserRow, gameState libgame.GameState) error {
	var binarizedState bytes.Buffer
	encoder := gob.NewEncoder(&binarizedState)
	if err := encoder.Encode(gameState); err != nil {
		logrus.Warning("error encoding game state:", err)
		return err
	}
	dataStruct := GameStateRow{}
	dataStruct.UserID = userRow.ID
	dataStruct.BinarizedState = binarizedState.Bytes()

	dataMap := make(map[string]interface{})
	dataMap["user_id"] = dataStruct.UserID
	dataMap["binarized_state"] = dataStruct.BinarizedState
	insertResult, err := db.InsertIntoTable(tx, dataMap)
	if err != nil {
		logrus.Warning("error saving game state:", err)
		return err
	}
	rowsAffected, err := insertResult.RowsAffected()
	if err != nil || rowsAffected != 1 {
		return fmt.Errorf("expected to change 1 row, changed %d", rowsAffected)
	}
	id, err := insertResult.LastInsertId()
	logrus.Infof("Saved new gamestate (id %d) to db", id)
	return nil
}
func (pipeline *Pipeline) JSONFeed(since, limit int) ([]byte, error) {
	pipeline.mutex.RLock()
	defer pipeline.mutex.RUnlock()

	var lineRaw []byte
	var line *PipelineFeedLine
	var buffer bytes.Buffer
	var err error

	data := pipeline.data[since:]
	count := 0
	for seq, msg := range data {
		line = &PipelineFeedLine{
			Seq: seq + since,
			Msg: msg.Outgoing,
		}
		lineRaw, err = json.Marshal(line)
		if err != nil {
			return nil, err
		}
		buffer.Write(lineRaw)
		buffer.WriteString("\n")

		count++
		if limit > 0 && count >= limit {
			break
		}
	}

	return buffer.Bytes(), nil
}
func (p *parser) table(out *bytes.Buffer, data []byte) int {
	var header bytes.Buffer
	i, columns := p.tableHeader(&header, data)
	if i == 0 {
		return 0
	}

	var body bytes.Buffer

	for i < len(data) {
		pipes, rowStart := 0, i
		for ; data[i] != '\n'; i++ {
			if data[i] == '|' {
				pipes++
			}
		}

		if pipes == 0 {
			i = rowStart
			break
		}

		// include the newline in data sent to tableRow
		i++
		p.tableRow(&body, data[rowStart:i], columns, false)
	}

	p.r.Table(out, header.Bytes(), body.Bytes(), columns)

	return i
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgAlert) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
	var err error
	var serializedpayload []byte
	if msg.Payload != nil {
		// try to Serialize Payload if possible
		r := new(bytes.Buffer)
		err = msg.Payload.Serialize(r, pver)
		if err != nil {
			// Serialize failed - ignore & fallback
			// to SerializedPayload
			serializedpayload = msg.SerializedPayload
		} else {
			serializedpayload = r.Bytes()
		}
	} else {
		serializedpayload = msg.SerializedPayload
	}
	slen := uint64(len(serializedpayload))
	if slen == 0 {
		return messageError("MsgAlert.BtcEncode", "empty serialized payload")
	}
	err = WriteVarBytes(w, pver, serializedpayload)
	if err != nil {
		return err
	}
	return WriteVarBytes(w, pver, msg.Signature)
}
// parse a blockquote fragment
func (p *parser) quote(out *bytes.Buffer, data []byte) int {
	var raw bytes.Buffer
	beg, end := 0, 0
	for beg < len(data) {
		end = beg
		for data[end] != '\n' {
			end++
		}
		end++

		if pre := p.quotePrefix(data[beg:]); pre > 0 {
			// skip the prefix
			beg += pre
		} else if p.isEmpty(data[beg:]) > 0 &&
			(end >= len(data) ||
				(p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0)) {
			// blockquote ends with at least one blank line
			// followed by something without a blockquote prefix
			break
		}

		// this line is part of the blockquote
		raw.Write(data[beg:end])
		beg = end
	}

	var cooked bytes.Buffer
	p.block(&cooked, raw.Bytes())
	p.r.BlockQuote(out, cooked.Bytes())
	return end
}
func (s *charmsSuite) TestGetUsesCache(c *gc.C) {
	// Add a fake charm archive in the cache directory.
	cacheDir := filepath.Join(s.DataDir(), "charm-get-cache")
	err := os.MkdirAll(cacheDir, 0755)
	c.Assert(err, jc.ErrorIsNil)

	// Create and save a bundle in it.
	charmDir := testcharms.Repo.ClonedDir(c.MkDir(), "dummy")
	testPath := filepath.Join(charmDir.Path, "utils.js")
	contents := "// blah blah"
	err = ioutil.WriteFile(testPath, []byte(contents), 0755)
	c.Assert(err, jc.ErrorIsNil)
	var buffer bytes.Buffer
	err = charmDir.ArchiveTo(&buffer)
	c.Assert(err, jc.ErrorIsNil)
	charmArchivePath := filepath.Join(
		cacheDir, charm.Quote("local:trusty/django-42")+".zip")
	err = ioutil.WriteFile(charmArchivePath, buffer.Bytes(), 0644)
	c.Assert(err, jc.ErrorIsNil)

	// Ensure the cached contents are properly retrieved.
	uri := s.charmsURI(c, "?url=local:trusty/django-42&file=utils.js")
	resp, err := s.authRequest(c, "GET", uri, "", nil)
	c.Assert(err, jc.ErrorIsNil)
	s.assertGetFileResponse(c, resp, contents, "application/javascript")
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
	if src != nil {
		switch s := src.(type) {
		case string:
			return []byte(s), nil
		case []byte:
			return s, nil
		case *bytes.Buffer:
			// is io.Reader, but src is already available in []byte form
			if s != nil {
				return s.Bytes(), nil
			}
		case io.Reader:
			var buf bytes.Buffer
			_, err := io.Copy(&buf, s)
			if err != nil {
				return nil, err
			}
			return buf.Bytes(), nil
		default:
			return nil, errors.New("invalid source")
		}
	}
	return ioutil.ReadFile(filename)
}
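// Illustrative calls showing the src forms readSource accepts (a sketch; the
// helper name and the file name "example.go" are placeholders, not part of the
// original API).
func readSourceExamples() error {
	if _, err := readSource("", "package main"); err != nil { // string
		return err
	}
	if _, err := readSource("", []byte("package main")); err != nil { // []byte
		return err
	}
	if _, err := readSource("", bytes.NewBufferString("package main")); err != nil { // *bytes.Buffer
		return err
	}
	_, err := readSource("example.go", nil) // src == nil: read from the named file
	return err
}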
func dl_balance(w http.ResponseWriter, r *http.Request) {
	if !ipchecker(r) {
		return
	}
	wallet.UpdateBalanceFolder()
	buf := new(bytes.Buffer)
	zi := zip.NewWriter(buf)
	filepath.Walk("balance/", func(path string, fi os.FileInfo, err error) error {
		if !fi.IsDir() {
			f, _ := zi.Create(path)
			if f != nil {
				da, _ := ioutil.ReadFile(path)
				f.Write(da)
			}
		}
		return nil
	})
	if zi.Close() == nil {
		w.Header()["Content-Type"] = []string{"application/zip"}
		w.Write(buf.Bytes())
	} else {
		w.Write([]byte("Error"))
	}
}
func fixAndDecodeGif(data []byte, try int) (*Image, error) {
	if gifsicleCmd == "" {
		return nil, errNoGifsicle
	}
	args := []string{"--careful"}
	if try > 0 {
		args = append(args, "--unoptimize")
	}
	data, err := runGifsicle(data, args)
	if err != nil {
		return nil, err
	}
	if try > 1 {
		if convertCmd == "" {
			return nil, errNoConvert
		}
		cmd := exec.Command(convertCmd, "-", "-")
		cmd.Stdin = bytes.NewReader(data)
		var out bytes.Buffer
		cmd.Stdout = &out
		err := cmd.Run()
		if err != nil {
			return nil, fmt.Errorf("error running convert: %s", err)
		}
		data = out.Bytes()
	}
	return decodeData(data, try+1)
}
func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) {
	marker := out.Len()
	doubleSpace(out)

	if id != "" {
		out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id))
	} else if options.flags&HTML_TOC != 0 {
		// headerCount is incremented in htmlTocHeader
		out.WriteString(fmt.Sprintf("<h%d id=\"toc_%d\">", level, options.headerCount))
	} else {
		out.WriteString(fmt.Sprintf("<h%d>", level))
	}

	tocMarker := out.Len()
	if !text() {
		out.Truncate(marker)
		return
	}

	// are we building a table of contents?
	if options.flags&HTML_TOC != 0 {
		options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id)
	}

	out.WriteString(fmt.Sprintf("</h%d>\n", level))
}
// Get gets the properties of a light. lightId is the ID of the light.
// properties is the returned properties.
// response is the raw response from the hue bridge or nil if communication
// failed. This function may return both a non-nil response and an error
// if the response from the hue bridge indicates an error. For most
// applications, it is enough just to look at properties and err.
func (c *Context) Get(lightId int) (
	properties *LightProperties, response []byte, err error) {
	request := &http.Request{
		Method: "GET",
		URL:    c.getLightUrl(lightId),
	}
	client := c.client
	var resp *http.Response
	if resp, err = client.Do(request); err != nil {
		return
	}
	defer resp.Body.Close()
	var respBuffer bytes.Buffer
	if _, err = respBuffer.ReadFrom(resp.Body); err != nil {
		return
	}
	response = respBuffer.Bytes()
	var jsonProps json_structs.LightState
	if err = json.Unmarshal(response, &jsonProps); err != nil {
		err = toError(response)
		return
	}
	if jsonProps.State != nil && len(jsonProps.State.XY) == 2 {
		state := jsonProps.State
		jsonColor := state.XY
		properties = &LightProperties{
			C:   NewMaybeColor(NewColor(jsonColor[0], jsonColor[1])),
			Bri: maybe.NewUint8(state.Bri),
			On:  maybe.NewBool(state.On)}
	} else {
		err = GeneralError
	}
	return
}
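// Hedged usage sketch for Context.Get above; the light ID 1, the function name,
// and how the Context value is obtained are illustrative assumptions.
func printLight(c *Context) {
	props, raw, err := c.Get(1)
	if err != nil {
		// The raw bridge response may still be available alongside the error.
		fmt.Printf("bridge error: %v (raw response: %s)\n", err, raw)
		return
	}
	fmt.Printf("light 1 properties: %+v\n", props)
}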
func csv1String() string {
	buf := new(bytes.Buffer)
	for i, pub := range csv1.pubkeys {
		buf.WriteString(fmt.Sprintf("%s,%d,%s,%d,%d\n",
			pub, csv1.amts[i], csv1.names[i], csv1.perms[i], csv1.setbits[i]))
	}
	return string(buf.Bytes())
}
func TestEncoding(t *testing.T) {
	msgs := []raftpb.Message{
		{To: 1, From: 1},
		{To: 2, From: 2, Entries: []raftpb.Entry{{Term: 2, Data: []byte{2}}}},
		{To: 3, From: 3},
		{To: 4, From: 4, Entries: []raftpb.Entry{{Term: 2, Data: []byte{4, 4}}}},
	}

	s := 0
	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	for _, m := range msgs {
		s += m.Size()
		if err := enc.Encode(m); err != nil {
			t.Fatalf("cannot encode message: %v", err)
		}
	}
	b := buf.Bytes()
	if len(b) != s+len(msgs)*4 {
		t.Errorf("invalid number of bytes: actual=%d want=%d", len(b), s+len(msgs)*4)
	}

	dec := NewDecoder(&buf)
	dmsgs := make([]raftpb.Message, len(msgs))
	for i := range dmsgs {
		if err := dec.Decode(&dmsgs[i]); err != nil {
			t.Fatalf("cannot decode: %v", err)
		}
		if !reflect.DeepEqual(msgs[i], dmsgs[i]) {
			t.Errorf("invalid decoded message: actual=%#v want=%#v", msgs[i], dmsgs[i])
		}
	}
}
func (sh *StreamHandler) writeToRequests(eventBytes []byte) error {
	var b bytes.Buffer
	_, err := b.Write([]byte("data:"))
	if err != nil {
		return err
	}
	_, err = b.Write(eventBytes)
	if err != nil {
		return err
	}
	_, err = b.Write([]byte("\n\n"))
	if err != nil {
		return err
	}
	dataBytes := b.Bytes()

	sh.mu.RLock()
	for _, requestEvents := range sh.requests {
		select {
		case requestEvents <- dataBytes:
		default:
		}
	}
	sh.mu.RUnlock()
	return nil
}
func (client *Client) WriteEMessage(emsg *EMessage) []byte {
	buffer := new(bytes.Buffer)
	binary.Write(buffer, binary.BigEndian, emsg.msgid)
	binary.Write(buffer, binary.BigEndian, emsg.device_id)
	SendMessage(buffer, emsg.msg)
	return buffer.Bytes()
}