// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
//
// The actual writing happens in a goroutine gated by server.HandlerToken, so this
// function returns quickly after scheduling the work.  wg1 and wg2 are signaled via
// Done() when the goroutine finishes (success or error).  Puts are committed in
// batches of KVWriteSize, and an IngestBlockEvent sync message is sent per block.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}
	preCompress, postCompress := 0, 0 // byte totals before/after compression, for the debug log

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

	<-server.HandlerToken // throttle: acquire a handler token before spawning the writer
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks. Before %s: %d bytes. After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1 // return the token
		}()
		mutID := d.NewMutationID()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			preCompress += len(block.V)
			postCompress += len(serialization)
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit -- flush every KVWriteSize puts to bound batch size.
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		// Commit any remaining puts.
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
// Goroutine that handles splits across a lot of blocks for one label. func (d *Data) splitBlock(ctx *datastore.VersionedCtx, op splitOp) { defer d.MutDone(op.mutID) store, err := d.GetOrderedKeyValueDB() if err != nil { dvid.Errorf("Data type labelblk had error initializing store: %v\n", err) return } // Read the block. tk := NewTKeyByCoord(op.block) data, err := store.Get(ctx, tk) if err != nil { dvid.Errorf("Error on GET of labelblk with coord string %v\n", []byte(op.block)) return } if data == nil { dvid.Errorf("nil label block where split was done, coord %v\n", []byte(op.block)) return } blockData, _, err := dvid.DeserializeData(data, true) if err != nil { dvid.Criticalf("unable to deserialize label block in '%s' key %v: %v\n", d.DataName(), []byte(op.block), err) return } blockBytes := int(d.BlockSize().Prod() * 8) if len(blockData) != blockBytes { dvid.Criticalf("splitBlock: coord %v got back %d bytes, expected %d bytes\n", []byte(op.block), len(blockData), blockBytes) return } // Modify the block using either voxel-level changes or coarser block-level mods. if op.rles != nil { if err := d.storeRLEs(blockData, op.newLabel, op.block, op.rles); err != nil { dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.newLabel, op.block, err) return } } else { // We are doing coarse split and will replace all if err := d.replaceLabel(blockData, op.oldLabel, op.newLabel); err != nil { dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.oldLabel, op.newLabel, op.block, err) return } } // Write the modified block. serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum()) if err != nil { dvid.Criticalf("Unable to serialize block %s in %q: %v\n", op.block, d.DataName(), err) return } if err := store.Put(ctx, tk, serialization); err != nil { dvid.Errorf("Error in putting key %v: %v\n", tk, err) } // Notify any downstream downres instance. d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData) }
// Goroutine that handles relabeling of blocks during a merge operation. // Since the same block coordinate always gets mapped to the same goroutine, we handle // concurrency by serializing GET/PUT for a particular block coordinate. func (d *Data) mergeBlock(in <-chan mergeOp) { store, err := storage.MutableStore() if err != nil { dvid.Errorf("Data type labelblk had error initializing store: %v\n", err) return } blockBytes := int(d.BlockSize().Prod() * 8) for op := range in { tk := NewTKeyByCoord(op.izyx) data, err := store.Get(op.ctx, tk) if err != nil { dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.izyx) op.wg.Done() continue } if data == nil { dvid.Errorf("nil label block where merge was done!\n") op.wg.Done() continue } blockData, _, err := dvid.DeserializeData(data, true) if err != nil { dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err) op.wg.Done() continue } if len(blockData) != blockBytes { dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes) op.wg.Done() continue } // Iterate through this block of labels and relabel if label in merge. for i := 0; i < blockBytes; i += 8 { label := binary.LittleEndian.Uint64(blockData[i : i+8]) if _, merged := op.Merged[label]; merged { binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target) } } // Store this block. serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum()) if err != nil { dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err) op.wg.Done() continue } if err := store.Put(op.ctx, tk, serialization); err != nil { dvid.Errorf("Error in putting key %v: %v\n", tk, err) } op.wg.Done() } }
// serializeVertex serializes a dvid.GraphVertex (compression turned off for now) func (db *GraphKeyValueDB) serializeVertex(vert dvid.GraphVertex) []byte { // encode: vertex id, vertex weight, num vertices, vertex array, // num properties, property array total_size := 24 + 8*len(vert.Vertices) + 8 // find size for property strings (account for null character) for propname, _ := range vert.Properties { total_size += len(propname) + 1 } // create byte buffer buf := make([]byte, total_size, total_size) // encode vertex id start := 0 binary.LittleEndian.PutUint64(buf[start:], uint64(vert.Id)) start += 8 // encode vertex weight floatbits := math.Float64bits(vert.Weight) binary.LittleEndian.PutUint64(buf[start:], floatbits) start += 8 // encode number of vertices binary.LittleEndian.PutUint64(buf[start:], uint64(len(vert.Vertices))) start += 8 // encode vertex partners for _, vertpartner := range vert.Vertices { binary.LittleEndian.PutUint64(buf[start:], uint64(vertpartner)) start += 8 } // encode number of properties binary.LittleEndian.PutUint64(buf[start:], uint64(len(vert.Properties))) start += 8 // encode property strings for propname, _ := range vert.Properties { for _, propchar := range propname { buf[start] = byte(propchar) start += 1 } buf[start] = byte(0) start += 1 } // encode DVID related info (currenlty compression is disabled) compression, _ := dvid.NewCompression(dvid.Uncompressed, dvid.DefaultCompression) finalbuf, _ := dvid.SerializeData(buf, compression, dvid.NoChecksum) return finalbuf }
// handles relabeling of blocks during a merge operation. func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) { defer d.MutDone(op.mutID) store, err := d.GetKeyValueDB() if err != nil { dvid.Errorf("Data type labelblk had error initializing store: %v\n", err) return } tk := NewTKeyByCoord(op.block) data, err := store.Get(ctx, tk) if err != nil { dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.block) return } if data == nil { dvid.Errorf("nil label block where merge was done!\n") return } blockData, _, err := dvid.DeserializeData(data, true) if err != nil { dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err) return } blockBytes := int(d.BlockSize().Prod() * 8) if len(blockData) != blockBytes { dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes) return } // Iterate through this block of labels and relabel if label in merge. for i := 0; i < blockBytes; i += 8 { label := binary.LittleEndian.Uint64(blockData[i : i+8]) if _, merged := op.Merged[label]; merged { binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target) } } // Store this block. serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum()) if err != nil { dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err) return } if err := store.Put(ctx, tk, serialization); err != nil { dvid.Errorf("Error in putting key %v: %v\n", tk, err) } // Notify any downstream downres instance. d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData) }
// PutData puts a key-value at a given uuid func (d *Data) PutData(ctx storage.Context, keyStr string, value []byte) error { db, err := storage.MutableStore() if err != nil { return err } serialization, err := dvid.SerializeData(value, d.Compression(), d.Checksum()) if err != nil { return fmt.Errorf("Unable to serialize data: %v\n", err) } tk, err := NewTKey(keyStr) if err != nil { return err } return db.Put(ctx, tk, serialization) }
// serializeEdge serializes a dvid.GraphEdge (compression turned off for now) func (db *GraphKeyValueDB) serializeEdge(edge dvid.GraphEdge) []byte { // encode: vertex1 id, vertex2 id, weight, // num properties, property array total_size := 32 // find size for property strings (account for null character) for propname, _ := range edge.Properties { total_size += len(propname) + 1 } // create byte buffer buf := make([]byte, total_size, total_size) // encode vertex 1 start := 0 binary.LittleEndian.PutUint64(buf[start:], uint64(edge.Vertexpair.Vertex1)) start += 8 // encode vertex 2 binary.LittleEndian.PutUint64(buf[start:], uint64(edge.Vertexpair.Vertex2)) start += 8 // encode weight floatbits := math.Float64bits(edge.Weight) binary.LittleEndian.PutUint64(buf[start:], floatbits) start += 8 // encode number of properties binary.LittleEndian.PutUint64(buf[start:], uint64(len(edge.Properties))) start += 8 // encode property strings for propname, _ := range edge.Properties { for _, propchar := range propname { buf[start] = byte(propchar) start += 1 } buf[start] = byte(0) start += 1 } // encode DVID related info (currenlty compression is disabled) compression, _ := dvid.NewCompression(dvid.Uncompressed, dvid.DefaultCompression) finalbuf, _ := dvid.SerializeData(buf, compression, dvid.NoChecksum) return finalbuf }
// serializes octant contents. Writes octants into preallocated block buffer that may have old data, // and then returns a serialized data slice suitable for storage. func (d *Data) serializeOctants(oct octant, blockBuf []byte) ([]byte, error) { blockSize := d.BlockSize() nx := blockSize.Value(0) nxy := blockSize.Value(1) * nx halfx := blockSize.Value(0) >> 1 halfy := blockSize.Value(1) >> 1 halfz := blockSize.Value(2) >> 1 sectorbytes := int(halfx * halfy * halfz * 8) xbytes := halfx * 8 for sector, data := range oct { if len(data) > 0 { if len(data) != sectorbytes { dvid.Criticalf("Expected %d bytes in octant for %d, instead got %d bytes.\n", sectorbytes, d.DataName(), len(data)) } // Get the corner voxel (in block coordinates) for this sector. iz := sector >> 2 sector -= iz * 4 iy := sector >> 1 ix := sector % 2 ox := int32(ix) * halfx oy := int32(iy) * halfy oz := int32(iz) * halfz // Copy data from octant into larger block buffer. var oi int32 for z := oz; z < oz+halfz; z++ { for y := oy; y < oy+halfy; y++ { di := (z*nxy + y*nx + ox) * 8 copy(blockBuf[di:di+xbytes], data[oi:oi+xbytes]) oi += xbytes } } } } return dvid.SerializeData(blockBuf, d.Compression(), d.Checksum()) }
// handelPropertyTransaction allows gets/posts (really puts) of edge or vertex properties. func (d *Data) handlePropertyTransaction(ctx *datastore.VersionedCtx, db storage.GraphDB, w http.ResponseWriter, r *http.Request, path []string, method string) error { if len(path) < 2 { return fmt.Errorf("Must specify edges or vertices in URI and property name") } if method == "delete" { return fmt.Errorf("Transactional delete not supported") } edgemode := false if path[0] == "edges" { edgemode = true } else if path[0] != "vertices" { return fmt.Errorf("Must specify edges or vertices in URI") } propertyname := path[1] readonly := false if method == "get" { readonly = true } data, err := ioutil.ReadAll(r.Body) // only allow 1000 vertices to be locked transactions, start, err := d.transaction_log.createTransactionGroupBinary(data, readonly) defer transactions.closeTransaction() if err != nil { return fmt.Errorf("Failed to create property transaction: %v", err) } returned_data := transactions.exportTransactionsBinary() if method == "post" { // deserialize transaction (vertex or edge) -- use URI? 
num_properties := binary.LittleEndian.Uint64(data[start:]) start += 8 for i := uint64(0); i < num_properties; i++ { temp := binary.LittleEndian.Uint64(data[start:]) id := dvid.VertexID(temp) var id2 dvid.VertexID start += 8 if edgemode { temp = binary.LittleEndian.Uint64(data[start:]) id2 = dvid.VertexID(temp) start += 8 } data_size := binary.LittleEndian.Uint64(data[start:]) start += 8 data_begin := start start += int(data_size) data_end := start if data_begin == data_end { continue } // check if post is possible if _, ok := transactions.locked_ids[id]; !ok { continue } if edgemode { if _, ok := transactions.locked_ids[id2]; !ok { continue } } // execute post serialization, err := dvid.SerializeData(data[data_begin:data_end], d.Compression(), d.Checksum()) if err != nil { return fmt.Errorf("Unable to serialize data: %v\n", err) } if edgemode { err = db.SetEdgeProperty(ctx, id, id2, propertyname, serialization) } else { err = db.SetVertexProperty(ctx, id, propertyname, serialization) } if err != nil { return fmt.Errorf("Failed to add property %s: %v\n", propertyname, err) } } } else { num_properties := binary.LittleEndian.Uint64(data[start:]) start += 8 num_properties_loc := len(returned_data) longbuf := make([]byte, 8, 8) binary.LittleEndian.PutUint64(longbuf, 0) returned_data = append(returned_data, longbuf...) 
num_executed_transactions := uint64(0) // read the vertex or edge properties desired for i := uint64(0); i < num_properties; i++ { temp := binary.LittleEndian.Uint64(data[start:]) id := dvid.VertexID(temp) var id2 dvid.VertexID start += 8 if edgemode { temp := binary.LittleEndian.Uint64(data[start:]) id2 = dvid.VertexID(temp) start += 8 } // check if post is possible if _, ok := transactions.locked_ids[id]; !ok { continue } if edgemode { if _, ok := transactions.locked_ids[id2]; !ok { continue } } // execute get command var dataout []byte if edgemode { dataout, err = db.GetEdgeProperty(ctx, id, id2, propertyname) } else { dataout, err = db.GetVertexProperty(ctx, id, propertyname) } // serialize return data only if there is return data and no error; // otherwise return just return the id and size of 0 var data_serialized []byte if (err == nil) && len(dataout) > 0 { uncompress := true data_serialized, _, err = dvid.DeserializeData(dataout, uncompress) if err != nil { return fmt.Errorf("Unable to deserialize data for property '%s': %v\n", propertyname, err) } } // save transaction num_executed_transactions += 1 binary.LittleEndian.PutUint64(longbuf, uint64(id)) returned_data = append(returned_data, longbuf...) if edgemode { binary.LittleEndian.PutUint64(longbuf, uint64(id2)) returned_data = append(returned_data, longbuf...) } binary.LittleEndian.PutUint64(longbuf, uint64(len(data_serialized))) returned_data = append(returned_data, longbuf...) returned_data = append(returned_data, data_serialized...) } // update the number of transactions binary.LittleEndian.PutUint64(returned_data[num_properties_loc:], num_executed_transactions) } w.Header().Set("Content-Type", "application/octet-stream") _, err = w.Write(returned_data) return err }
// Goroutine that handles splits across a lot of blocks for one label. func (d *Data) splitBlock(in <-chan splitOp) { store, err := storage.MutableStore() if err != nil { dvid.Errorf("Data type labelblk had error initializing store: %v\n", err) return } batcher, ok := store.(storage.KeyValueBatcher) if !ok { err = fmt.Errorf("Data type labelblk requires batch-enabled store, which %q is not\n", store) return } blockBytes := int(d.BlockSize().Prod() * 8) for op := range in { // Iterate through all the modified blocks, inserting the new label using the RLEs for that block. timedLog := dvid.NewTimeLog() splitCache.Incr(op.ctx.InstanceVersion(), op.OldLabel) batch := batcher.NewBatch(&op.ctx) for _, zyxStr := range op.SortedBlocks { // Read the block. tk := NewTKeyByCoord(zyxStr) data, err := store.Get(&op.ctx, tk) if err != nil { dvid.Errorf("Error on GET of labelblk with coord string %v\n", []byte(zyxStr)) continue } if data == nil { dvid.Errorf("nil label block where split was done, coord %v\n", []byte(zyxStr)) continue } bdata, _, err := dvid.DeserializeData(data, true) if err != nil { dvid.Criticalf("unable to deserialize label block in '%s' key %v: %v\n", d.DataName(), []byte(zyxStr), err) continue } if len(bdata) != blockBytes { dvid.Criticalf("splitBlock: coord %v got back %d bytes, expected %d bytes\n", []byte(zyxStr), len(bdata), blockBytes) continue } // Modify the block using either voxel-level changes or coarser block-level mods. 
if op.Split != nil { rles, found := op.Split[zyxStr] if !found { dvid.Errorf("split block %s not present in block RLEs\n", zyxStr.Print()) continue } if err := d.storeRLEs(bdata, op.NewLabel, zyxStr, rles); err != nil { dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.NewLabel, zyxStr.Print(), err) continue } } else { // We are doing coarse split and will replace all if err := d.replaceLabel(bdata, op.OldLabel, op.NewLabel); err != nil { dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.OldLabel, op.NewLabel, zyxStr.Print(), err) continue } } // Write the modified block. serialization, err := dvid.SerializeData(bdata, d.Compression(), d.Checksum()) if err != nil { dvid.Criticalf("Unable to serialize block %s in %q: %v\n", zyxStr.Print(), d.DataName(), err) continue } batch.Put(tk, serialization) } if err := batch.Commit(); err != nil { dvid.Errorf("Batch PUT during %q block split of %d: %v\n", d.DataName(), op.OldLabel, err) } splitCache.Decr(op.ctx.InstanceVersion(), op.OldLabel) timedLog.Debugf("labelblk sync complete for split of %d -> %d", op.OldLabel, op.NewLabel) } }
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks persists blocks of voxel data asynchronously using batch writes.
//
// The actual writing happens in a goroutine gated by server.HandlerToken, so this
// function returns quickly after scheduling the work.  wg1 and wg2 are signaled via
// Done() when the goroutine finishes (success or error).  Puts are committed in
// batches of KVWriteSize, and a ChangeBlockEvent sync message is sent per block.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	store, err := storage.BigDataStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type imageblk requires batch-enabled store, which %q is not\n", store)
	}
	preCompress, postCompress := 0, 0 // byte totals before/after compression, for the debug log

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataName(), ChangeBlockEvent}

	<-server.HandlerToken // throttle: acquire a handler token before spawning the writer
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks. Before %s: %d bytes. After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1 // return the token
		}()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			preCompress += len(block.V)
			postCompress += len(serialization)
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{v, Block{indexZYX, block.V}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit -- flush every KVWriteSize puts to bound batch size.
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		// Commit any remaining puts.
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
func (d *Data) putChunk(chunk *storage.Chunk) { defer func() { // After processing a chunk, return the token. server.HandlerToken <- 1 // Notify the requestor that this chunk is done. if chunk.Wg != nil { chunk.Wg.Done() } }() op, ok := chunk.Op.(*putOperation) if !ok { log.Fatalf("Illegal operation passed to ProcessChunk() for data %s\n", d.DataName()) } // Make sure our received chunk is valid. if chunk == nil { dvid.Errorf("Received nil chunk in ProcessChunk. Ignoring chunk.\n") return } if chunk.K == nil { dvid.Errorf("Received nil chunk key in ProcessChunk. Ignoring chunk.\n") return } // Initialize the block buffer using the chunk of data. For voxels, this chunk of // data needs to be uncompressed and deserialized. var blockData []byte var err error if chunk.V == nil { blockData = d.BackgroundBlock() } else { blockData, _, err = dvid.DeserializeData(chunk.V, true) if err != nil { dvid.Errorf("Unable to deserialize block in '%s': %v\n", d.DataName(), err) return } } // Perform the operation. block := &storage.TKeyValue{K: chunk.K, V: blockData} if err = op.voxels.WriteBlock(block, d.BlockSize()); err != nil { dvid.Errorf("Unable to WriteBlock() in %q: %v\n", d.DataName(), err) return } serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum()) if err != nil { dvid.Errorf("Unable to serialize block in %q: %v\n", d.DataName(), err) return } store, err := storage.BigDataStore() if err != nil { dvid.Errorf("Data type imageblk had error initializing store: %v\n", err) return } ctx := datastore.NewVersionedCtx(d, op.version) if err := store.Put(ctx, chunk.K, serialization); err != nil { dvid.Errorf("Unable to PUT voxel data for key %v: %v\n", chunk.K, err) return } // Notify any subscribers that you've changed block. 
evt := datastore.SyncEvent{d.DataName(), ChangeBlockEvent} msg := datastore.SyncMessage{op.version, Block{&op.indexZYX, block.V}} if err := datastore.NotifySubscribers(evt, msg); err != nil { dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName()) } }
// PutBlocks stores blocks of data in a span along X.
//
// It reads span blocks' worth of raw voxel data from the stream, serializes each
// block, and batches puts (committed every BatchSize blocks and at the end).  A
// ChangeBlockEvent sync message is sent to subscribers per block.
func (d *Data) PutBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int, data io.ReadCloser) error {
	store, err := storage.BigDataStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type imageblk requires batch-enabled store, which %q is not\n", store)
	}
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Read blocks from the stream until we can output a batch put.
	const BatchSize = 1000
	var readBlocks int
	numBlockBytes := d.BlockSize().Prod()
	chunkPt := start
	buf := make([]byte, numBlockBytes) // reused for every block read
	for {
		// Read a block's worth of data, looping until the buffer is full since
		// Read may return fewer bytes than requested.
		readBytes := int64(0)
		for {
			n, err := data.Read(buf[readBytes:])
			readBytes += int64(n)
			if readBytes == numBlockBytes {
				break
			}
			if err == io.EOF {
				return fmt.Errorf("Block data ceased before all block data read")
			}
			if err != nil {
				return fmt.Errorf("Error reading blocks: %v\n", err)
			}
		}

		if readBytes != numBlockBytes {
			return fmt.Errorf("Expected %d bytes in block read, got %d instead! Aborting.", numBlockBytes, readBytes)
		}

		serialization, err := dvid.SerializeData(buf, d.Compression(), d.Checksum())
		if err != nil {
			return err
		}
		zyx := dvid.IndexZYX(chunkPt)
		batch.Put(NewTKey(&zyx), serialization)

		// Notify any subscribers that you've changed block.
		// NOTE(review): buf is reused for the next block's read; if any subscriber
		// retains this slice asynchronously it will be overwritten -- confirm
		// NotifySubscribers consumes the data synchronously.
		evt := datastore.SyncEvent{d.DataName(), ChangeBlockEvent}
		msg := datastore.SyncMessage{v, Block{&zyx, buf}}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

		// Advance to next block
		chunkPt[0]++
		readBlocks++
		finish := (readBlocks == span)
		if finish || readBlocks%BatchSize == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch commit, block %d: %v\n", readBlocks, err)
			}
			batch = batcher.NewBatch(ctx)
		}
		if finish {
			break
		}
	}
	return nil
}
// createCompositeChunk builds a 4-byte-per-voxel composite block from a label
// block (chunk.V) and its corresponding grayscale block, then stores the result
// into the composite data instance.  A server handler token is returned and
// chunk.Wg signaled when done.
func (d *Data) createCompositeChunk(chunk *storage.Chunk) {
	defer func() {
		// After processing a chunk, return the token.
		server.HandlerToken <- 1

		// Notify the requestor that this chunk is done.
		if chunk.Wg != nil {
			chunk.Wg.Done()
		}
	}()

	op := chunk.Op.(*compositeOp)

	// Get the spatial index associated with this chunk.
	zyx, err := imageblk.DecodeTKey(chunk.K)
	if err != nil {
		dvid.Errorf("Error in %s.ChunkApplyMap(): %v", d.Data.DataName(), err)
		return
	}

	// Track the highest Z seen so far, for progress logging only.
	curZMutex.Lock()
	if zyx[2] > curZ {
		curZ = zyx[2]
		minZ := zyx.MinPoint(d.BlockSize()).Value(2)
		maxZ := zyx.MaxPoint(d.BlockSize()).Value(2)
		dvid.Debugf("Now creating composite blocks for Z %d to %d\n", minZ, maxZ)
	}
	curZMutex.Unlock()

	// Initialize the label buffers. For voxels, this data needs to be uncompressed and deserialized.
	labelData, _, err := dvid.DeserializeData(chunk.V, true)
	if err != nil {
		dvid.Infof("Unable to deserialize block in '%s': %v\n", d.DataName(), err)
		return
	}
	blockBytes := len(labelData)
	if blockBytes%8 != 0 { // labels are 8 bytes per voxel
		dvid.Infof("Retrieved, deserialized block is wrong size: %d bytes\n", blockBytes)
		return
	}

	// Get the corresponding grayscale block.
	store, err := op.grayscale.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Unable to retrieve store for %q: %v\n", op.grayscale.DataName(), err)
		return
	}
	grayscaleCtx := datastore.NewVersionedCtx(op.grayscale, op.versionID)
	blockData, err := store.Get(grayscaleCtx, chunk.K)
	if err != nil {
		dvid.Errorf("Error getting grayscale block for index %s\n", zyx)
		return
	}
	grayscaleData, _, err := dvid.DeserializeData(blockData, true)
	if err != nil {
		dvid.Errorf("Unable to deserialize block in '%s': %v\n", op.grayscale.DataName(), err)
		return
	}

	// Compute the composite block.
	// TODO -- Exploit run lengths, use cache of hash?
	// 8-byte labels become 4-byte composite values, so the output is half the label bytes.
	compositeBytes := blockBytes / 2
	compositeData := make([]byte, compositeBytes, compositeBytes)
	compositeI := 0
	labelI := 0
	hashBuf := make([]byte, 4, 4)
	for _, grayscale := range grayscaleData {
		//murmurhash3(labelData[labelI:labelI+8], hashBuf)
		//hashBuf[3] = grayscale
		writePseudoColor(grayscale, labelData[labelI:labelI+8], hashBuf)
		copy(compositeData[compositeI:compositeI+4], hashBuf)
		compositeI += 4
		labelI += 8
	}

	// Store the composite block into the rgba8 data.
	// NOTE(review): this reuses the grayscale store handle for the composite put --
	// confirm both instances share the same backing store.
	serialization, err := dvid.SerializeData(compositeData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Errorf("Unable to serialize composite block %s: %v\n", zyx, err)
		return
	}
	compositeCtx := datastore.NewVersionedCtx(op.composite, op.versionID)
	err = store.Put(compositeCtx, chunk.K, serialization)
	if err != nil {
		dvid.Errorf("Unable to PUT composite block %s: %v\n", zyx, err)
		return
	}
}
// PutBlocks stores blocks of data in a span along X.
//
// It reads span blocks' worth of raw voxel data from the stream and batches puts
// (committed every BatchSize blocks and at the end).  When mutate is true, the
// previous version of each block is loaded so subscribers receive a MutatedBlock
// delta under MutateBlockEvent; otherwise an IngestBlockEvent with a Block delta
// is sent.
func (d *Data) PutBlocks(v dvid.VersionID, mutID uint64, start dvid.ChunkPoint3d, span int, data io.ReadCloser, mutate bool) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Read blocks from the stream until we can output a batch put.
	const BatchSize = 1000
	var readBlocks int
	numBlockBytes := d.BlockSize().Prod()
	chunkPt := start
	buf := make([]byte, numBlockBytes) // reused for every block read
	for {
		// Read a block's worth of data, looping until the buffer is full since
		// Read may return fewer bytes than requested.
		readBytes := int64(0)
		for {
			n, err := data.Read(buf[readBytes:])
			readBytes += int64(n)
			if readBytes == numBlockBytes {
				break
			}
			if err == io.EOF {
				return fmt.Errorf("Block data ceased before all block data read")
			}
			if err != nil {
				return fmt.Errorf("Error reading blocks: %v\n", err)
			}
		}

		if readBytes != numBlockBytes {
			return fmt.Errorf("Expected %d bytes in block read, got %d instead! Aborting.", numBlockBytes, readBytes)
		}

		serialization, err := dvid.SerializeData(buf, d.Compression(), d.Checksum())
		if err != nil {
			return err
		}
		zyx := dvid.IndexZYX(chunkPt)
		tk := NewTKey(&zyx)

		// If we are mutating, get the previous block of data.
		var oldBlock []byte
		if mutate {
			oldBlock, err = d.loadOldBlock(v, tk)
			if err != nil {
				return fmt.Errorf("Unable to load previous block in %q, key %v: %v\n", d.DataName(), tk, err)
			}
		}

		// Write the new block
		batch.Put(tk, serialization)

		// Notify any subscribers that you've changed block.
		// NOTE(review): buf is reused for the next block's read; if any subscriber
		// retains the delta asynchronously it will be overwritten -- confirm
		// NotifySubscribers consumes the data synchronously.
		var event string
		var delta interface{}
		if mutate {
			event = MutateBlockEvent
			delta = MutatedBlock{&zyx, oldBlock, buf, mutID}
		} else {
			event = IngestBlockEvent
			delta = Block{&zyx, buf, mutID}
		}
		evt := datastore.SyncEvent{d.DataUUID(), event}
		msg := datastore.SyncMessage{event, v, delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

		// Advance to next block
		chunkPt[0]++
		readBlocks++
		finish := (readBlocks == span)
		if finish || readBlocks%BatchSize == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch commit, block %d: %v\n", readBlocks, err)
			}
			batch = batcher.NewBatch(ctx)
		}
		if finish {
			break
		}
	}
	return nil
}
// handleProperty retrieves or deletes properties that can be added to a vertex or edge -- data posted // or retrieved uses default compression func (d *Data) handleProperty(ctx *datastore.VersionedCtx, db storage.GraphDB, w http.ResponseWriter, r *http.Request, path []string, method string) error { edgemode := false var propertyname string if len(path) == 3 { edgemode = true propertyname = path[2] } else if len(path) != 2 { return fmt.Errorf("Incorrect number of parameters specified for handling properties") } else { propertyname = path[1] } temp, err := strconv.Atoi(path[0]) if err != nil { return fmt.Errorf("Vertex number not provided") } id1 := dvid.VertexID(temp) id2 := dvid.VertexID(0) if edgemode { temp, err := strconv.Atoi(path[1]) if err != nil { return fmt.Errorf("Vertex number not provided") } id2 = dvid.VertexID(temp) } // remove a property from a vertex or edge if method == "delete" { if edgemode { db.RemoveEdgeProperty(ctx, id1, id2, propertyname) if err != nil { return fmt.Errorf("Failed to remove edge property %d-%d %s: %v\n", id1, id2, propertyname, err) } } else { db.RemoveVertexProperty(ctx, id1, propertyname) if err != nil { return fmt.Errorf("Failed to remove vertex property %d %s: %v\n", id1, propertyname, err) } } } else if method == "get" { var data []byte if edgemode { data, err = db.GetEdgeProperty(ctx, id1, id2, propertyname) } else { data, err = db.GetVertexProperty(ctx, id1, propertyname) } if err != nil { return fmt.Errorf("Failed to get property %s: %v\n", propertyname, err) } uncompress := true value, _, e := dvid.DeserializeData(data, uncompress) if e != nil { err = fmt.Errorf("Unable to deserialize data for property '%s': %v\n", propertyname, e.Error()) return err } w.Header().Set("Content-Type", "application/octet-stream") _, err = w.Write(value) if err != nil { return err } } else if method == "post" { // read as binary and load into propertyname data, err := ioutil.ReadAll(r.Body) if err != nil { return err } serialization, err 
:= dvid.SerializeData(data, d.Compression(), d.Checksum()) if err != nil { return fmt.Errorf("Unable to serialize data: %v\n", err) } if edgemode { err = db.SetEdgeProperty(ctx, id1, id2, propertyname, serialization) } else { err = db.SetVertexProperty(ctx, id1, propertyname, serialization) } if err != nil { return fmt.Errorf("Failed to add property %s: %v\n", propertyname, err) } } return err }
func (d *Data) putChunk(chunk *storage.Chunk, putbuffer storage.RequestBuffer) { defer func() { // After processing a chunk, return the token. server.HandlerToken <- 1 // Notify the requestor that this chunk is done. if chunk.Wg != nil { chunk.Wg.Done() } }() op, ok := chunk.Op.(*putOperation) if !ok { log.Fatalf("Illegal operation passed to ProcessChunk() for data %s\n", d.DataName()) } // Make sure our received chunk is valid. if chunk == nil { dvid.Errorf("Received nil chunk in ProcessChunk. Ignoring chunk.\n") return } if chunk.K == nil { dvid.Errorf("Received nil chunk key in ProcessChunk. Ignoring chunk.\n") return } // Initialize the block buffer using the chunk of data. For voxels, this chunk of // data needs to be uncompressed and deserialized. var blockData []byte var err error if chunk.V == nil { blockData = d.BackgroundBlock() } else { blockData, _, err = dvid.DeserializeData(chunk.V, true) if err != nil { dvid.Errorf("Unable to deserialize block in %q: %v\n", d.DataName(), err) return } } // If we are mutating, get the previous block of data. var oldBlock []byte if op.mutate { oldBlock, err = d.loadOldBlock(op.version, chunk.K) if err != nil { dvid.Errorf("Unable to load previous block in %q, key %v: %v\n", d.DataName(), chunk.K, err) return } } // Perform the operation. block := &storage.TKeyValue{K: chunk.K, V: blockData} if err = op.voxels.WriteBlock(block, d.BlockSize()); err != nil { dvid.Errorf("Unable to WriteBlock() in %q: %v\n", d.DataName(), err) return } serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum()) if err != nil { dvid.Errorf("Unable to serialize block in %q: %v\n", d.DataName(), err) return } store, err := d.GetOrderedKeyValueDB() if err != nil { dvid.Errorf("Data type imageblk had error initializing store: %v\n", err) return } ready := make(chan error, 1) callback := func() { // Notify any subscribers that you've changed block. 
resperr := <-ready if resperr != nil { dvid.Errorf("Unable to PUT voxel data for key %v: %v\n", chunk.K, resperr) return } var event string var delta interface{} if op.mutate { event = MutateBlockEvent delta = MutatedBlock{&op.indexZYX, oldBlock, block.V, op.mutID} } else { event = IngestBlockEvent delta = Block{&op.indexZYX, block.V, op.mutID} } evt := datastore.SyncEvent{d.DataUUID(), event} msg := datastore.SyncMessage{event, op.version, delta} if err := datastore.NotifySubscribers(evt, msg); err != nil { dvid.Errorf("Unable to notify subscribers of event %s in %s\n", event, d.DataName()) } } // put data -- use buffer if available ctx := datastore.NewVersionedCtx(d, op.version) if putbuffer != nil { go callback() putbuffer.PutCallback(ctx, chunk.K, serialization, ready) } else { if err := store.Put(ctx, chunk.K, serialization); err != nil { dvid.Errorf("Unable to PUT voxel data for key %v: %v\n", chunk.K, err) return } ready <- nil callback() } }