func (s *DynamoStore) makeNamespacedKey(h hash.Hash) []byte { // This is semantically `return append(s.namespace, r.DigestSlice()...)`, but it seemed like we'd be doing this a LOT, and we know how much space we're going to need anyway. So, pre-allocate a slice and then copy into it. hashSlice := h.DigestSlice() key := make([]byte, s.namespaceLen+len(hashSlice)) copy(key, s.namespace) copy(key[s.namespaceLen:], hashSlice) return key }
func (l *internalLevelDBStore) updateRootByKey(key []byte, current, last hash.Hash) bool { l.mu.Lock() defer l.mu.Unlock() if last != l.rootByKey(key) { return false } // Sync: true write option should fsync memtable data to disk err := l.db.Put(key, []byte(current.String()), &opt.WriteOptions{Sync: true}) d.Chk.NoError(err) return true }
// toDbKey takes a refHeight and a hash and returns a binary key suitable for use with LevelDB. The default sort order used by LevelDB ensures that these keys (and their associated values) will be iterated in ref-height order. func toDbKey(refHeight uint64, hash hash.Hash) []byte { digest := hash.DigestSlice() buf := bytes.NewBuffer(make([]byte, 0, uint64Size+binary.Size(digest))) err := binary.Write(buf, binary.BigEndian, refHeight) d.Chk.NoError(err) err = binary.Write(buf, binary.BigEndian, digest) d.Chk.NoError(err) return buf.Bytes() }
func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool { s.requestWg.Wait() putArgs := dynamodb.PutItemInput{ TableName: aws.String(s.table), Item: map[string]*dynamodb.AttributeValue{ refAttr: {B: s.rootKey}, chunkAttr: {B: current.DigestSlice()}, compAttr: {S: aws.String(noneValue)}, }, } if last.IsEmpty() { putArgs.ConditionExpression = aws.String(valueNotExistsExpression) } else { putArgs.ConditionExpression = aws.String(valueEqualsExpression) putArgs.ExpressionAttributeValues = map[string]*dynamodb.AttributeValue{ ":prev": {B: last.DigestSlice()}, } } _, err := s.ddbsvc.PutItem(&putArgs) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "ConditionalCheckFailedException" { return false } d.Chk.NoError(awsErr) } else { d.Chk.NoError(err) } } return true }
func (bhcs *httpBatchStore) requestRoot(method string, current, last hash.Hash) *http.Response { u := *bhcs.host u.Path = httprouter.CleanPath(bhcs.host.Path + constants.RootPath) if method == "POST" { d.Exp.False(current.IsEmpty()) params := u.Query() params.Add("last", last.String()) params.Add("current", current.String()) u.RawQuery = params.Encode() } req := newRequest(method, bhcs.auth, u.String(), nil, nil) res, err := bhcs.httpClient.Do(req) d.Chk.NoError(err) return res }
func EnsureHash(h *hash.Hash, v Value) hash.Hash { if h.IsEmpty() { *h = getHash(v) } return *h }
func (l *LevelDBStore) toChunkKey(r hash.Hash) []byte { digest := r.DigestSlice() out := make([]byte, len(l.chunkPrefix), len(l.chunkPrefix)+len(digest)) copy(out, l.chunkPrefix) return append(out, digest...) }
func (rv *rollingValueHasher) writeHash(h hash.Hash) { digest := h.Digest() for _, b := range digest[:] { rv.HashByte(b) } }
// writeHash emits h in its textual (string) form.
func (w *nomsTestWriter) writeHash(h hash.Hash) {
	str := h.String()
	w.writeString(str)
}
func assertInputNotInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) { data := s.Get(h) assert.Nil(data, "Shouldn't have gotten data for %s", h.String()) }
func assertInputInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) { chunk := s.Get(h) assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", h.String()) assert.Equal(input, string(chunk.Data())) }
func (b *binaryNomsWriter) writeHash(h hash.Hash) { b.ensureCapacity(sha1.Size) digest := h.Digest() copy(b.buff[b.offset:], digest[:]) b.offset += sha1.Size }
// CreateHashSpecString builds a hash spec of the form
// "protocol:path::#hash".
func CreateHashSpecString(protocol, path string, h hash.Hash) string {
	hashStr := h.String()
	return fmt.Sprintf("%s:%s::#%s", protocol, path, hashStr)
}
func serializeHash(w io.Writer, h hash.Hash) { digest := h.Digest() n, err := io.Copy(w, bytes.NewReader(digest[:])) d.Chk.NoError(err) d.Chk.True(int64(hash.ByteLen) == n) }
func (b *binaryNomsWriter) writeHash(h hash.Hash) { b.ensureCapacity(hash.ByteLen) digest := h.Digest() copy(b.buff[b.offset:], digest[:]) b.offset += hash.ByteLen }