func (s *DynamoStore) makeNamespacedKey(r ref.Ref) []byte {
	// This is semantically `return append(s.namespace, r.DigestSlice()...)`, but it
	// seemed like we'd be doing this a LOT, and we know how much space we're going
	// to need anyway. So, pre-allocate a slice and then copy into it.
	refSlice := r.DigestSlice()
	key := make([]byte, s.namespaceLen+len(refSlice))
	copy(key, s.namespace)
	copy(key[s.namespaceLen:], refSlice)
	return key
}
func (l *internalLevelDBStore) updateRootByKey(key []byte, current, last ref.Ref) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if last != l.rootByKey(key) {
		return false
	}

	// Sync: true write option should fsync memtable data to disk
	err := l.db.Put(key, []byte(current.String()), &opt.WriteOptions{Sync: true})
	d.Chk.NoError(err)
	return true
}
func (h *HTTPStore) requestRef(r ref.Ref, method string, body io.Reader) *http.Response {
	url := *h.host
	url.Path = httprouter.CleanPath(h.host.Path + constants.RefPath)
	if !r.IsEmpty() {
		url.Path = path.Join(url.Path, r.String())
	}

	header := http.Header{}
	if body != nil {
		header.Set("Content-Type", "application/octet-stream")
	}
	req := h.newRequest(method, url.String(), body, header)

	res, err := h.httpClient.Do(req)
	d.Chk.NoError(err)
	return res
}
func main() {
	cpuCount := runtime.NumCPU()
	runtime.GOMAXPROCS(cpuCount)
	flag.Parse()

	sourceStore, ok := sourceStoreFlags.CreateDataStore()
	sink := sinkDsFlags.CreateDataset()
	if !ok || sink == nil || *p == 0 || *sourceObject == "" {
		flag.Usage()
		return
	}
	defer sourceStore.Close()
	defer sink.Store().Close()

	err := d.Try(func() {
		if util.MaybeStartCPUProfile() {
			defer util.StopCPUProfile()
		}

		sourceRef := ref.Ref{}
		if r, ok := ref.MaybeParse(*sourceObject); ok {
			if sourceStore.Has(r) {
				sourceRef = r
			}
		} else {
			if c, ok := sourceStore.MaybeHead(*sourceObject); ok {
				sourceRef = c.Ref()
			}
		}
		d.Exp.False(sourceRef.IsEmpty(), "Unknown source object: %s", *sourceObject)

		var err error
		*sink, err = sink.Pull(sourceStore, sourceRef, int(*p))
		util.MaybeWriteMemProfile()
		d.Exp.NoError(err)
	})
	if err != nil {
		log.Fatal(err)
	}
}
// CopyReachableChunksP copies to |sink| all chunks reachable from (and including)
// |sourceRef|, but that are not in the subtree rooted at |exclude|.
func CopyReachableChunksP(source, sink DataStore, sourceRef, exclude ref.Ref, concurrency int) {
	excludeRefs := map[ref.Ref]bool{}

	if !exclude.IsEmpty() {
		mu := sync.Mutex{}
		excludeCallback := func(r ref.Ref) bool {
			mu.Lock()
			excludeRefs[r] = true
			mu.Unlock()
			return false
		}

		walk.SomeChunksP(exclude, source, excludeCallback, concurrency)
	}

	copyCallback := func(r ref.Ref) bool {
		return excludeRefs[r]
	}
	copyWorker(source, sink.transitionalChunkSink(), sourceRef, copyCallback, concurrency)
}
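// A hedged usage sketch (not part of this codebase): when pulling |newHead| from
// |source| into |sink|, passing the sink's current head as |exclude| skips every
// chunk the sink already has, so only the delta is copied. pullNewCommits and its
// arguments are hypothetical; the DataStores and refs are assumed to be obtained
// elsewhere.
func pullNewCommits(source, sink DataStore, newHead, sinkHead ref.Ref, concurrency int) {
	// Chunks reachable from |sinkHead| are collected first, then skipped while
	// walking from |newHead|.
	CopyReachableChunksP(source, sink, newHead, sinkHead, concurrency)
}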
func (s *DynamoStore) UpdateRoot(current, last ref.Ref) bool {
	s.requestWg.Wait()

	putArgs := dynamodb.PutItemInput{
		TableName: aws.String(s.table),
		Item: map[string]*dynamodb.AttributeValue{
			refAttr:   {B: s.rootKey},
			chunkAttr: {B: current.DigestSlice()},
			compAttr:  {S: aws.String(noneValue)},
		},
	}

	// Require either that no root exists yet, or that the stored root still equals |last|.
	if last.IsEmpty() {
		putArgs.ConditionExpression = aws.String(valueNotExistsExpression)
	} else {
		putArgs.ConditionExpression = aws.String(valueEqualsExpression)
		putArgs.ExpressionAttributeValues = map[string]*dynamodb.AttributeValue{
			":prev": {B: last.DigestSlice()},
		}
	}

	_, err := s.ddbsvc.PutItem(&putArgs)
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "ConditionalCheckFailedException" {
				return false
			}
			d.Chk.NoError(awsErr)
		} else {
			d.Chk.NoError(err)
		}
	}
	return true
}
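// A minimal caller-side sketch of the compare-and-set contract that both
// updateRootByKey and UpdateRoot implement: pass the root you last observed and
// retry if another writer committed first. A RootTracker with Root()/UpdateRoot()
// is assumed; advanceRoot and mergeRoots are hypothetical.
func advanceRoot(rt RootTracker, mergeRoots func(last ref.Ref) ref.Ref) {
	for {
		last := rt.Root()           // the root as we last observed it
		current := mergeRoots(last) // compute the new root we want to publish
		if rt.UpdateRoot(current, last) {
			return // no concurrent writer moved the root; we're done
		}
		// UpdateRoot returned false: the stored root no longer equals |last|.
		// Re-read it and try again.
	}
}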
func (h *HTTPStore) requestRoot(method string, current, last ref.Ref) *http.Response {
	u := *h.host
	u.Path = httprouter.CleanPath(h.host.Path + constants.RootPath)
	if method == "POST" {
		d.Exp.False(current.IsEmpty())
		params := url.Values{}
		params.Add("last", last.String())
		params.Add("current", current.String())
		u.RawQuery = params.Encode()
	}

	req := h.newRequest(method, u.String(), nil, nil)

	res, err := h.httpClient.Do(req)
	d.Chk.NoError(err)
	return res
}
func assertInputInStore(input string, ref ref.Ref, s ChunkStore, assert *assert.Assertions) {
	chunk := s.Get(ref)
	assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", ref.String())
	assert.Equal(input, string(chunk.Data()))
}
func assertInputNotInStore(input string, ref ref.Ref, s ChunkStore, assert *assert.Assertions) {
	data := s.Get(ref)
	assert.Nil(data, "Shouldn't have gotten data for %s", ref.String())
}
func (w *jsonArrayWriter) writeRef(r ref.Ref) {
	w.write(r.String())
}
func (l *LevelDBStore) toChunkKey(r ref.Ref) []byte {
	digest := r.DigestSlice()
	out := make([]byte, len(l.chunkPrefix), len(l.chunkPrefix)+len(digest))
	copy(out, l.chunkPrefix)
	return append(out, digest...)
}
// ToTag truncates r.String() to 12 characters and replaces "-" with "_",
// so the result can be used in a Go identifier.
// TODO: replace other illegal chars as well?
func ToTag(r ref.Ref) string {
	return strings.Replace(r.String()[0:12], "-", "_", -1)
}
// RefToJSIdentfierName generates an identifier name representing a Ref, e.g. `sha1_abc1234`.
func (gen *Generator) RefToJSIdentfierName(r ref.Ref) string {
	return strings.Replace(r.String(), "-", "_", 1)[0:12]
}
// EnsureRef returns the ref cached in |r|, computing and caching it first if |r| is empty.
func EnsureRef(r *ref.Ref, v Value) ref.Ref {
	if r.IsEmpty() {
		*r = getRef(v)
	}
	return *r
}
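// A hedged sketch (not from this codebase) of the calling convention EnsureRef
// supports: a value type keeps a cached *ref.Ref and lets EnsureRef fill it on
// first use. exampleValue is hypothetical and is assumed to satisfy the package's
// Value interface; the rest of that implementation is elided.
type exampleValue struct {
	data []byte
	ref  *ref.Ref // shared cache; stays empty until Ref() is first called
}

func (v exampleValue) Ref() ref.Ref {
	// The first call computes the ref via getRef and writes it through |v.ref|;
	// later calls return the cached value without rehashing.
	return EnsureRef(v.ref, v)
}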