func (s *Slot) WriteTo(w io.Writer) (n int64, err error) { var rw must.ReadWriter rw.WriteInt16(w, s.BlockId) if s.BlockId == -1 { return rw.Result() } rw.WriteInt8(w, int8(s.Amount)) rw.WriteInt16(w, s.Damage) if s.Enchantments == nil { rw.WriteInt16(w, -1) return rw.Result() } var bn, bw bytes.Buffer rw.Check(nbt.Write(&bn, s.Enchantments)) gw := gzip.NewWriter(&bw) rw.Must(io.Copy(gw, &bn)) rw.Check(gw.Close()) rw.WriteInt16(w, int16(bw.Len())) rw.WriteByteArray(w, bw.Bytes()) return rw.Result() }
func (s *Slot) ReadFrom(r io.Reader) (n int64, err error) { var rw must.ReadWriter s.BlockId = rw.ReadInt16(r) if s.BlockId == -1 { return rw.Result() } s.Amount = byte(rw.ReadInt8(r)) s.Damage = rw.ReadInt16(r) Length := rw.ReadInt16(r) if Length == -1 { return rw.Result() } var br bytes.Buffer // Copy gzip'd NBT Compound gs := rw.ReadByteArray(r, int(Length)) bn := bytes.NewBuffer(gs) // Ungzip byte array gr, err := gzip.NewReader(bn) rw.Check(err) rw.Must(io.Copy(&br, gr)) rw.Check(gr.Close()) // Read NBT Compound s.Enchantments, err = nbt.Read(&br) rw.Check(err) return rw.Result() }
// ReadFrom decodes entity metadata entries from r until the 0x7f
// terminator byte is encountered.
//
// Each entry starts with one header byte packing the payload type in the
// top 3 bits and the entry index in the low 5 bits; the payload format is
// selected by EntryFrom(typ) and decoded by the entry itself.
//
// NOTE(review): if a read fails and rw.ReadInt8 keeps yielding 0, key
// never becomes 0x7f and this loops forever — confirm that must.ReadWriter
// short-circuits subsequent reads (or panics) after the first error.
func (m Metadata) ReadFrom(r io.Reader) (n int64, err error) {
	var rw must.ReadWriter
	var key byte
	for key != 0x7f {
		// Read type+key
		key = byte(rw.ReadInt8(r))
		if key == 0x7f {
			// End-of-metadata sentinel.
			break
		}
		var (
			typ     byte  = key & 0xE0 >> 5 // payload type, top 3 bits
			index   byte  = key & 0x1F      // entry index, low 5 bits
			payload Entry = EntryFrom(typ)
		)
		// Read payload
		rw.Must(payload.ReadFrom(r))
		m.Entries[index] = payload
	}
	return rw.Result()
}
func (e *EntryVector) WriteTo(w io.Writer) (n int64, err error) { var rw must.ReadWriter for i := 0; i < len(e.Data); i++ { rw.Must(e.Data[i].WriteTo(w)) } return rw.Result() }
func (e *EntryVector) ReadFrom(r io.Reader) (n int64, err error) { var rw must.ReadWriter for i := 0; i < len(e.Data); i++ { rw.Must(e.Data[i].ReadFrom(r)) } return rw.Result() }
func (re *Region) ReadFrom(r io.Reader) (n int64, err error) { var rw must.ReadWriter // Copy everything to a buffer. Max size: 4MB + 8KB var all bytes.Buffer rw.Must(io.Copy(&all, r)) // Read chunk positions. for i := 0; i < len(re.Pos); i++ { // Read 4KB offset from file start. Only first 3 bytes needed. re.Pos[i] = rw.ReadInt32(&all) >> 8 // Fourth byte is a 4KB section counter which is ignored because we // already know the length of chunk data. // // More info here: // http://www.minecraftwiki.net/wiki/Region_file_format#Structure // // " The remainder of the file consists of data for up to 1024 chunks, // interspersed with an arbitrary amount of unused space. " // // TLDR: Just another idiotic/bad designed spec. } // Read chunk timestamps. // // Last modification time of a chunk. Unit: unknown, seconds? // // NOTE: Does something use this? MCEdit maybe? for i := 0; i < len(re.Mod); i++ { re.Mod[i] = rw.ReadInt32(&all) } // Read chunk data. for i := 0; i < len(re.Data); i++ { re.Data[i].Length = rw.ReadInt32(&all) re.Data[i].Compression = byte(rw.ReadInt8(&all)) var buf bytes.Buffer io.CopyN(&buf, &all, length-1) switch scheme { case Gzip: panic("Alpha chunk format not implemented.") case Zlib: zr := zlib.NewReader(&all) io.Copy(&buf, zr) } re.Data[i].Chunk = buf.Bytes() } return rw.Result() }
func (m Metadata) WriteTo(w io.Writer) (n int64, err error) { var rw must.ReadWriter var buf bytes.Buffer for index, payload := range m.Entries { buf.Reset() typ := payload.Type() rw.Check(buf.WriteByte(typ<<5 | (index & 0x1F))) // Write type+key & payload rw.Must(buf.WriteTo(w)) rw.Must(payload.WriteTo(w)) } buf.Reset() rw.Check(buf.WriteByte(0x7f)) rw.Must(buf.WriteTo(w)) return rw.Result() }