func (s *Disk) getObjectIDPath(id *types.ObjectID) string {
	//!TODO: redo this with more []byte appending (we know how big it will be)
	// and less string concatenation
	h := id.StrHash()

	// Disk objects are written 2 levels deep with a maximum of 256 folders in each.
	if s.skipCacheKeyInPath {
		return filepath.Join(s.path, h[0:2], h[2:4], h)
	}
	return filepath.Join(s.path, id.CacheKey(), h[0:2], h[2:4], h)
}
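// A minimal, hypothetical sketch (not part of this package) of the layout that
// getObjectIDPath produces: objects are sharded two levels deep by the first
// four hex characters of the hash, giving at most 256 folders per level. The
// root, cache key, and hash values below are made up for illustration.
func sketchObjectIDPathLayout() string {
	root := "/var/cache/example" // hypothetical storage root (s.path)
	cacheKey := "1"              // hypothetical id.CacheKey()
	h := "ab12cd34ef56"          // hypothetical id.StrHash() result

	// Mirrors filepath.Join(s.path, id.CacheKey(), h[0:2], h[2:4], h) above
	// and yields "/var/cache/example/1/ab/12/ab12cd34ef56".
	return filepath.Join(root, cacheKey, h[0:2], h[2:4], h)
}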
// Tests the storage headers map under concurrent (multithreaded) usage. An
// error would be triggered by a race condition, which may or may not happen,
// so the absence of failures does not mean there are no problems. But it may
// catch an error from time to time; in fact it does quite often for me.
//
// Most of the time the test fails with a panic, and most of the time the
// panic is in the runtime. So instead of an error message via t.Error,
// the test fails with a panic.
func TestStorageHeadersFunctionWithManyGoroutines(t *testing.T) {
	up, cz, ca, goroutines := setup()

	pathFunc := func(i int) string {
		return fmt.Sprintf("/%d", i)
	}
	headerKeyFunc := func(i int) string {
		return fmt.Sprintf("X-Header-%d", i)
	}
	headerValueFunc := func(i int) string {
		return fmt.Sprintf("value-%d", i)
	}

	// Set up the responses.
	for i := 0; i < 100; i++ {
		var headers = make(http.Header)
		headers.Add(headerKeyFunc(i), headerValueFunc(i))

		up.addFakeResponse(pathFunc(i),
			fakeResponse{
				Status:       "200",
				ResponseTime: 10 * time.Nanosecond,
				Headers:      headers,
			},
		)
	}

	storage := New(cz, ca, newStdLogger())
	defer storage.Close()
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	concurrentTestHelper(t, goroutines, 100, func(t *testing.T, i, j int) {
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = pathFunc(i)

		header, err := storage.Headers(ctx, oid)
		if err != nil {
			t.Errorf("Got error from storage.Headers on %d, %d: %s", j, i, err)
		}
		value := header.Get(headerKeyFunc(i))
		if value != headerValueFunc(i) {
			t.Errorf("Expected header [%s] to have value [%s] but it had value %s for %d, %d",
				headerKeyFunc(i), headerValueFunc(i), value, j, i)
		}
	})
}
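// A hedged sketch of what the concurrentTestHelper used above might look like;
// the real helper is defined elsewhere in this package and may differ. The
// assumption is that it starts `goroutines` workers (index j), each of which
// invokes the callback once for every i in [0, iterations), and then waits for
// all of them to finish before returning.
func concurrentTestHelperSketch(t *testing.T, goroutines, iterations int,
	test func(t *testing.T, i, j int)) {

	var wg sync.WaitGroup
	wg.Add(goroutines)
	for j := 0; j < goroutines; j++ {
		go func(j int) {
			defer wg.Done()
			for i := 0; i < iterations; i++ {
				test(t, i, j)
			}
		}(j)
	}
	wg.Wait()
}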
func TestDiskCloseReturnesAfterFinishingRequests(t *testing.T) {
	expected := "awesome"
	fakeup, cz, ca, _ := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())

	up := newBlockingUpstream(fakeup)
	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 0,
			Response:     expected,
		})

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	wCh := make(chan struct{})

	go func(ch chan struct{}) {
		defer close(ch)
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = "/path"

		resp, err := storage.GetFullFile(ctx, oid)
		if err != nil {
			t.Errorf("Got Error on a GetFullFile on closing storage:\n%s", err)
		}
		b, err := ioutil.ReadAll(resp)
		if err != nil {
			t.Errorf("Got Error on a ReadAll on an already closed storage:\n%s", err)
		}
		if string(b) != expected {
			t.Errorf("Expected read from GetFullFile was %s but got %s", expected, string(b))
		}
	}(wCh)

	go close(<-up.requestPartial)

	err := storage.Close()
	if err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}

	<-wCh
}
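// A hedged sketch of the synchronization pattern newBlockingUpstream appears to
// rely on; the real implementation lives elsewhere in this package. The guess,
// based only on `go close(<-up.requestPartial)` above, is that the upstream
// publishes a fresh barrier channel for every incoming request and blocks until
// the test closes it, which lets the test keep a request in flight while it
// calls storage.Close(). All names below are hypothetical.
type blockingUpstreamSketch struct {
	requestPartial chan chan struct{} // one barrier per partial (body) request
	requestHeader  chan chan struct{} // one barrier per headers request
}

func (b *blockingUpstreamSketch) blockOnPartial() {
	barrier := make(chan struct{})
	b.requestPartial <- barrier // hand the barrier to the test...
	<-barrier                   // ...and stay blocked until the test closes it
}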
func TestStorageSimultaneousGets(t *testing.T) {
	fakeup, cz, ca, goroutines := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())

	up := &countingUpstream{
		fakeUpstream: fakeup,
	}

	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 10 * time.Millisecond,
			Response:     "awesome",
		})

	storage := New(cz, ca, newStdLogger())
	defer storage.Close()
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	concurrentTestHelper(t, goroutines, 1, func(t *testing.T, i, j int) {
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = "/path"

		file, err := storage.GetFullFile(ctx, oid)
		if err != nil {
			t.Errorf("Got error from storage.Get on %d, %d: %s", j, i, err)
		}
		b, err := ioutil.ReadAll(file)
		if err != nil {
			t.Errorf("Got error while reading response on %d, %d: %s", j, i, err)
		}
		if string(b) != "awesome" {
			t.Errorf("The response was expected to be 'awesome' but it was %s", string(b))
		}
	})

	if up.called != 1 {
		t.Errorf("Expected upstream.GetRequest to be called once but it was called %d times", up.called)
	}
}
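// A hedged, self-contained sketch of the idea behind the countingUpstream used
// above (the real type is defined elsewhere in this package): wrap the upstream
// behind a small interface and atomically count how many requests actually
// reach it, so the test can assert that many concurrent Gets for the same
// object collapse into a single upstream request. The interface, method name,
// and signature here are hypothetical.
type upstreamSketch interface {
	GetRequest(path string) (io.ReadCloser, error) // hypothetical upstream call
}

type countingUpstreamSketch struct {
	upstreamSketch       // the wrapped upstream
	called         int32 // number of requests that reached the upstream
}

func (c *countingUpstreamSketch) GetRequest(path string) (io.ReadCloser, error) {
	atomic.AddInt32(&c.called, 1) // count every request that actually goes upstream
	return c.upstreamSketch.GetRequest(path)
}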
func TestDiskCloseReturnesAfterFinishingHeaders(t *testing.T) {
	var expected = make(http.Header)
	var expectedHeader, expectedValue = "awesome-header", "awesome-value"
	expected.Add(expectedHeader, expectedValue)

	fakeup, cz, ca, _ := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())

	up := newBlockingUpstream(fakeup)
	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 0,
			Headers:      expected,
		})

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	wCh := make(chan struct{})

	go func(ch chan struct{}) {
		defer close(ch)
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = "/path"

		headers, err := storage.Headers(ctx, oid)
		if err != nil {
			t.Errorf("Got Error on a Headers on closing storage:\n%s", err)
		}
		if headers.Get(expectedHeader) != expectedValue { //!TODO: better check
			t.Errorf("The returned headers:\n%s\ndid not match the expected headers:\n%s", headers, expected)
		}
	}(wCh)

	go close(<-up.requestHeader)

	err := storage.Close()
	if err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}

	<-wCh
}