// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
//	func timedOperation(ctx Context) {
//		ctx, done := WithTrace(ctx)
//		defer done("this will be the log message")
//		// ... function body ...
//	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
//	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.timedOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package, and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}
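// A minimal usage sketch, not part of the original source: it assumes this
// package is imported as dcontext, and parentOperation/childOperation are
// hypothetical names. It shows how nested spans link through
// "trace.parent.id".
func parentOperation() {
	ctx, done := dcontext.WithTrace(dcontext.Background())
	defer done("parent operation complete")

	childOperation(ctx)
}

func childOperation(ctx dcontext.Context) {
	// The child's span records the parent's "trace.id" as "trace.parent.id",
	// so the two emitted log lines can be correlated afterwards.
	ctx, done := dcontext.WithTrace(ctx)
	defer done("child operation complete")

	_ = ctx // ... real work against ctx would go here ...
}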
func TestPurgeOnlyUploads(t *testing.T) {
	oldUploadCount := 5
	oneHourAgo := time.Now().Add(-1 * time.Hour)
	fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo)

	// Create a directory tree outside _uploads and ensure
	// these files aren't deleted.
	dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
	if err != nil {
		t.Fatalf("%v", err)
	}
	nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1)
	if strings.Contains(nonUploadPath, "_upload") {
		t.Fatalf("Non-upload path not created correctly")
	}

	nonUploadFile := path.Join(nonUploadPath, "file")
	if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil {
		t.Fatalf("Unable to write data file: %v", err)
	}

	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
	if len(errs) != 0 {
		t.Error("Unexpected errors", errs)
	}
	for _, file := range deleted {
		if !strings.Contains(file, "_upload") {
			t.Errorf("Non-upload file deleted: %s", file)
		}
	}
}
// createEvent returns a new event, timestamped, with the specified action.
func createEvent(action string) *Event {
	return &Event{
		ID:        uuid.Generate().String(),
		Timestamp: time.Now(),
		Action:    action,
	}
}
// Create begins a blob write session, returning a handle.
func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
	context.GetLogger(ctx).Debug("(*linkedBlobStore).Create")

	uuid := uuid.Generate().String()
	startedAt := time.Now().UTC()

	path, err := lbs.blobStore.pm.path(uploadDataPathSpec{
		name: lbs.repository.Name(),
		id:   uuid,
	})
	if err != nil {
		return nil, err
	}

	startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{
		name: lbs.repository.Name(),
		id:   uuid,
	})
	if err != nil {
		return nil, err
	}

	// Write a startedat file for this upload
	if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
		return nil, err
	}

	return lbs.newBlobUpload(ctx, uuid, path, startedAt)
}
func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
	d := inmemory.New()
	ctx := context.Background()
	for i := 0; i < numUploads; i++ {
		addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
	}
	return d, ctx
}
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
	if ctx.Value("http.request") != nil {
		// NOTE(stevvooe): This needs to be considered a programming error. It
		// is unlikely that we'd want to have more than one request in
		// context.
		panic("only one request per context")
	}

	return &httpRequestContext{
		Context:   ctx,
		startedAt: time.Now(),
		id:        uuid.Generate().String(),
		r:         r,
	}
}
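// A hypothetical handler sketch, not from the original source: it assumes
// this package is imported as dcontext and relies only on the documented
// "http.request.id" key and the GetLogger helper shown earlier.
func handleExample(w http.ResponseWriter, r *http.Request) {
	ctx := dcontext.WithRequest(dcontext.Background(), r)

	// WithRequest assigns a unique id, retrievable at "http.request.id",
	// which is handy for correlating all log lines of a single request.
	if id, ok := ctx.Value("http.request.id").(string); ok {
		dcontext.GetLogger(ctx).Debugf("handling request %s", id)
	}

	w.WriteHeader(http.StatusNoContent)
}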
func TestPurgeAll(t *testing.T) {
	uploadCount := 10
	oneHourAgo := time.Now().Add(-1 * time.Hour)
	fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo)

	// Ensure > 1 repos are purged
	addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)
	uploadCount++

	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
	if len(errs) != 0 {
		t.Error("Unexpected errors:", errs)
	}
	fileCount := uploadCount
	if len(deleted) != fileCount {
		t.Errorf("Unexpectedly deleted file count %d != %d", len(deleted), fileCount)
	}
}
func TestPurgeSome(t *testing.T) {
	oldUploadCount := 5
	oneHourAgo := time.Now().Add(-1 * time.Hour)
	fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo)

	newUploadCount := 4
	for i := 0; i < newUploadCount; i++ {
		addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour))
	}

	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
	if len(errs) != 0 {
		t.Error("Unexpected errors:", errs)
	}
	if len(deleted) != oldUploadCount {
		t.Errorf("Unexpectedly deleted file count %d != %d", len(deleted), oldUploadCount)
	}
}
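// A hedged sketch of how PurgeUploads might be driven outside the tests,
// e.g. on a schedule; purgeLoop, the one-week retention window, and the
// daily sleep are assumptions for illustration, not part of the original code.
func purgeLoop(ctx context.Context, d driver.StorageDriver) {
	for {
		// Remove uploads whose startedat timestamp is more than a week old.
		olderThan := time.Now().Add(-7 * 24 * time.Hour)
		deleted, errs := PurgeUploads(ctx, d, olderThan, true)
		for _, err := range errs {
			context.GetLogger(ctx).Errorf("upload purge error: %v", err)
		}
		context.GetLogger(ctx).Infof("purged %d upload files", len(deleted))

		time.Sleep(24 * time.Hour)
	}
}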
"github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/libtrust" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/manifest" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/uuid" ) var ( // common environment for expected manifest events. repo = "test/repo" source = SourceRecord{ Addr: "remote.test", InstanceID: uuid.Generate().String(), } ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/")) actor = ActorRecord{ Name: "test", } request = RequestRecord{} m = manifest.Manifest{ Name: repo, Tag: "latest", } sm *manifest.SignedManifest payload []byte dgst digest.Digest
// generateOid generates a blob identifier.
func (d *driver) generateOid() string {
	return objectBlobPrefix + uuid.Generate().String()
}
type instanceContext struct {
	Context
	id string // id of context, logged as "instance.id"
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
	id:      uuid.Generate().String(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}
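// A minimal sketch, not part of the original source: values attached with
// WithValue are readable through Value, and any context derived from
// Background still resolves "instance.id" to the same process-wide id.
// Assumes this package is imported as dcontext.
func valueExample() {
	ctx := dcontext.WithValue(dcontext.Background(), "color", "mauve")

	fmt.Println(ctx.Value("color")) // request-scoped value set above

	// Provided by the background context; identical for every context
	// derived from it within this process.
	fmt.Println(ctx.Value("instance.id"))
}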
func TestBlobUploadMonolithic(t *testing.T) {
	dgst, b1 := newRandomBlob(1024)
	var m testutil.RequestResponseMap
	repo := "test.example.com/uploadrepo"
	uploadID := uuid.Generate().String()
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "POST",
			Route:  "/v2/" + repo + "/blobs/uploads/",
		},
		Response: testutil.Response{
			StatusCode: http.StatusAccepted,
			Headers: http.Header(map[string][]string{
				"Content-Length":     {"0"},
				"Location":           {"/v2/" + repo + "/blobs/uploads/" + uploadID},
				"Docker-Upload-UUID": {uploadID},
				"Range":              {"0-0"},
			}),
		},
	})
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "PATCH",
			Route:  "/v2/" + repo + "/blobs/uploads/" + uploadID,
			Body:   b1,
		},
		Response: testutil.Response{
			StatusCode: http.StatusAccepted,
			Headers: http.Header(map[string][]string{
				"Location":              {"/v2/" + repo + "/blobs/uploads/" + uploadID},
				"Docker-Upload-UUID":    {uploadID},
				"Content-Length":        {"0"},
				"Docker-Content-Digest": {dgst.String()},
				"Range":                 {fmt.Sprintf("0-%d", len(b1)-1)},
			}),
		},
	})
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "PUT",
			Route:  "/v2/" + repo + "/blobs/uploads/" + uploadID,
			QueryParams: map[string][]string{
				"digest": {dgst.String()},
			},
		},
		Response: testutil.Response{
			StatusCode: http.StatusCreated,
			Headers: http.Header(map[string][]string{
				"Content-Length":        {"0"},
				"Docker-Content-Digest": {dgst.String()},
				"Content-Range":         {fmt.Sprintf("0-%d", len(b1)-1)},
			}),
		},
	})
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "HEAD",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(b1))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})

	e, c := testServer(m)
	defer c()

	ctx := context.Background()
	r, err := NewRepository(ctx, repo, e, nil)
	if err != nil {
		t.Fatal(err)
	}
	l := r.Blobs(ctx)

	upload, err := l.Create(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if upload.ID() != uploadID {
		t.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID)
	}

	n, err := upload.ReadFrom(bytes.NewReader(b1))
	if err != nil {
		t.Fatal(err)
	}
	if n != int64(len(b1)) {
		t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1))
	}

	blob, err := upload.Commit(ctx, distribution.Descriptor{
		Digest: dgst,
		Size:   int64(len(b1)),
	})
	if err != nil {
		t.Fatal(err)
	}

	if blob.Size != int64(len(b1)) {
		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
	}
}
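// A hypothetical helper distilled from the test above, not part of the
// original source: it pushes a blob in one shot via the client-side
// BlobStore returned by Repository.Blobs (Create, ReadFrom, Commit).
// Error handling is abbreviated.
func pushBlob(ctx context.Context, blobs distribution.BlobStore, dgst digest.Digest, payload []byte) (distribution.Descriptor, error) {
	w, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if _, err := w.ReadFrom(bytes.NewReader(payload)); err != nil {
		return distribution.Descriptor{}, err
	}

	// Commit checks the uploaded content against the expected digest/size.
	return w.Commit(ctx, distribution.Descriptor{
		Digest: dgst,
		Size:   int64(len(payload)),
	})
}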
func TestBlobUploadChunked(t *testing.T) {
	dgst, b1 := newRandomBlob(1024)
	var m testutil.RequestResponseMap
	chunks := [][]byte{
		b1[0:256],
		b1[256:512],
		b1[512:513],
		b1[513:1024],
	}
	repo := "test.example.com/uploadrepo"
	uuids := []string{uuid.Generate().String()}
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "POST",
			Route:  "/v2/" + repo + "/blobs/uploads/",
		},
		Response: testutil.Response{
			StatusCode: http.StatusAccepted,
			Headers: http.Header(map[string][]string{
				"Content-Length":     {"0"},
				"Location":           {"/v2/" + repo + "/blobs/uploads/" + uuids[0]},
				"Docker-Upload-UUID": {uuids[0]},
				"Range":              {"0-0"},
			}),
		},
	})
	offset := 0
	for i, chunk := range chunks {
		uuids = append(uuids, uuid.Generate().String())
		newOffset := offset + len(chunk)
		m = append(m, testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "PATCH",
				Route:  "/v2/" + repo + "/blobs/uploads/" + uuids[i],
				Body:   chunk,
			},
			Response: testutil.Response{
				StatusCode: http.StatusAccepted,
				Headers: http.Header(map[string][]string{
					"Content-Length":     {"0"},
					"Location":           {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]},
					"Docker-Upload-UUID": {uuids[i+1]},
					"Range":              {fmt.Sprintf("%d-%d", offset, newOffset-1)},
				}),
			},
		})
		offset = newOffset
	}
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "PUT",
			Route:  "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1],
			QueryParams: map[string][]string{
				"digest": {dgst.String()},
			},
		},
		Response: testutil.Response{
			StatusCode: http.StatusCreated,
			Headers: http.Header(map[string][]string{
				"Content-Length":        {"0"},
				"Docker-Content-Digest": {dgst.String()},
				"Content-Range":         {fmt.Sprintf("0-%d", offset-1)},
			}),
		},
	})
	m = append(m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "HEAD",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(offset)},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})

	e, c := testServer(m)
	defer c()

	ctx := context.Background()
	r, err := NewRepository(ctx, repo, e, nil)
	if err != nil {
		t.Fatal(err)
	}
	l := r.Blobs(ctx)

	upload, err := l.Create(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if upload.ID() != uuids[0] {
		t.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0])
	}

	for _, chunk := range chunks {
		n, err := upload.Write(chunk)
		if err != nil {
			t.Fatal(err)
		}
		if n != len(chunk) {
			t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk))
		}
	}

	blob, err := upload.Commit(ctx, distribution.Descriptor{
		Digest: dgst,
		Size:   int64(len(b1)),
	})
	if err != nil {
		t.Fatal(err)
	}

	if blob.Size != int64(len(b1)) {
		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
	}
}