Example #1
0
// TestRestoreFromDisk stores a single object, closes the storage, then opens
// a fresh storage instance over the same directory and checks that the object
// is served from disk without the upstream being contacted even once.
func TestRestoreFromDisk(t *testing.T) {
	expected := "awesome"
	up, cz, _, _ := setup()
	ca := NewFakeCacheAlgorithm()
	runtime.GOMAXPROCS(runtime.NumCPU())
	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 0,
			Response:     expected,
		})

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)

	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})
	oid := types.ObjectID{
		CacheKey: "1",
		Path:     "/path",
	}
	index := types.ObjectIndex{
		ObjID: oid,
		Part:  0,
	}
	// First pass: a cache miss (Lookup == false) so the object is fetched
	// from the upstream and persisted to disk.
	ca.AddFakeReplies(index, FakeReplies{
		Lookup:     LookupFalse,
		ShouldKeep: ShouldKeepTrue,
		AddObject:  AddObjectNil,
	})

	makeAndCheckGetFullFile(t, ctx, storage, oid, expected)

	if err := storage.Close(); err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}

	ca.AddFakeReplies(index, FakeReplies{
		Lookup:     LookupTrue, // change to True now that we supposedly will read it from disk
		ShouldKeep: ShouldKeepTrue,
		AddObject:  AddObjectNil,
	})
	// Wrap the upstream in a counter; after the restore, any call to it is
	// a test failure.
	coup := &countingUpstream{
		fakeUpstream: up,
	}

	storage = New(cz, ca, newStdLogger())
	ctx = contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: coup}) // counting upstream: every call it receives is recorded and checked below
	makeAndCheckGetFullFile(t, ctx, storage, oid, expected)
	if err := storage.Close(); err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}
	if coup.called != 0 {
		t.Errorf("Storage should've not called even once upstream but it did %d times", coup.called)
	}
}
Example #2
0
// ServeHTTP dispatches the request to the virtual host that matches it.
// When no virtual host is found, it replies with 404 Not Found.
func (app *Application) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
	host := app.findVirtualHost(req)
	if host == nil {
		http.NotFound(writer, req)
		return
	}

	vhCtx := contexts.NewVhostContext(app.ctx, host)
	host.Handler.RequestHandle(vhCtx, writer, req, host)
}
Example #3
0
// Tests the storage headers map in multithreading usage. An error will be
// triggered by a race condition. This may or may not happen. So no failures
// in the test do not mean there are no problems. But it may catch an error
// from time to time. In fact it does quite often for me.
//
// Most of the time the test fails with a panic. And most of the time
// the panic is in the runtime. So isntead of a error message via t.Error
// the test fails with a panic.
func TestStorageHeadersFunctionWithManyGoroutines(t *testing.T) {
	up, cz, ca, goroutines := setup()

	pathFunc := func(i int) string {
		return fmt.Sprintf("/%d", i)
	}

	headerKeyFunc := func(i int) string {
		return fmt.Sprintf("X-Header-%d", i)
	}

	headerValueFunc := func(i int) string {
		return fmt.Sprintf("value-%d", i)
	}

	// setup the response
	for i := 0; i < 100; i++ {
		var headers = make(http.Header)
		headers.Add(headerKeyFunc(i), headerValueFunc(i))
		up.addFakeResponse(pathFunc(i),
			fakeResponse{
				Status:       "200",
				ResponseTime: 10 * time.Nanosecond,
				Headers:      headers,
			},
		)
	}
	storage := New(cz, ca, newStdLogger())
	defer storage.Close()
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	concurrentTestHelper(t, goroutines, 100, func(t *testing.T, i, j int) {
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = pathFunc(i)

		header, err := storage.Headers(ctx, oid)
		if err != nil {
			t.Errorf("Got error from storage.Headers on %d, %d: %s", j, i, err)
		}
		value := header.Get(headerKeyFunc(i))
		if value != headerValueFunc(i) {
			t.Errorf("Expected header [%s] to have value [%s] but it had value %s for %d, %d", headerKeyFunc(i), headerValueFunc(i), value, j, i)
		}
	})

}
Example #4
0
// TestDiskCloseReturnesAfterFinishingRequests checks that Storage.Close()
// blocks until an in-flight GetFullFile request completes, and that the
// request still succeeds with the full expected body.
func TestDiskCloseReturnesAfterFinishingRequests(t *testing.T) {
	expected := "awesome"
	fakeup, cz, ca, _ := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())
	// blockingUpstream parks each request on a channel until the test
	// explicitly releases it, letting us overlap the request with Close().
	up := newBlockingUpstream(fakeup)
	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 0,
			Response:     expected,
		})

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)

	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})
	// wCh is closed when the reader goroutine below finishes all its checks.
	wCh := make(chan struct{})
	go func(ch chan struct{}) {
		defer close(ch)
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = "/path"
		resp, err := storage.GetFullFile(ctx, oid)
		if err != nil {
			t.Errorf("Got Error on a GetFullFile on closing storage:\n%s", err)
		}
		b, err := ioutil.ReadAll(resp)
		if err != nil {
			t.Errorf("Got Error on a ReadAll on an already closed storage:\n%s", err)
		}
		if string(b) != expected {
			t.Errorf("Expected read from GetFullFile was %s but got %s", expected, string(b))
		}

	}(wCh)
	// Release the blocked partial request so the in-flight read can proceed
	// while Close() is waiting for it.
	go close(<-up.requestPartial)
	err := storage.Close()
	if err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}
	// Wait for the reader goroutine, so its t.Errorf calls cannot fire after
	// the test has returned.
	<-wCh
}
Example #5
0
// TestStorageSimultaneousGets issues many concurrent GetFullFile requests
// for the same object and verifies that the upstream is asked exactly once —
// i.e. the simultaneous misses are coalesced into a single fetch.
func TestStorageSimultaneousGets(t *testing.T) {
	fakeup, cz, ca, goroutines := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())
	up := &countingUpstream{fakeUpstream: fakeup}

	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 10 * time.Millisecond,
			Response:     "awesome",
		})

	storage := New(cz, ca, newStdLogger())
	defer storage.Close()
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	concurrentTestHelper(t, goroutines, 1, func(t *testing.T, i, j int) {
		oid := types.ObjectID{
			CacheKey: "1",
			Path:     "/path",
		}
		file, err := storage.GetFullFile(ctx, oid)
		if err != nil {
			t.Errorf("Got error from storage.Get on %d, %d: %s", j, i, err)
		}
		contents, err := ioutil.ReadAll(file)
		if err != nil {
			t.Errorf("Got error while reading response on %d, %d: %s", j, i, err)
		}
		if string(contents) != "awesome" {
			t.Errorf("The response was expected to be 'awesome' but it was %s", string(contents))
		}
	})

	if up.called != 1 {
		t.Errorf("Expected upstream.GetRequest to be called once it got called %d", up.called)
	}
}
Example #6
0
// TestDiskCloseReturnesAfterFinishingHeaders checks that Storage.Close()
// blocks until an in-flight Headers request completes, and that the request
// still returns the expected header.
func TestDiskCloseReturnesAfterFinishingHeaders(t *testing.T) {
	var expected = make(http.Header)
	var expectedHeader, expectedValue = "awesome-header", "awesome-value"
	expected.Add(expectedHeader, expectedValue)
	fakeup, cz, ca, _ := setup()
	runtime.GOMAXPROCS(runtime.NumCPU())
	// blockingUpstream parks each request on a channel until the test
	// explicitly releases it, letting us overlap the request with Close().
	up := newBlockingUpstream(fakeup)
	up.addFakeResponse("/path",
		fakeResponse{
			Status:       "200",
			ResponseTime: 0,
			Headers:      expected,
		})

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)

	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})
	// wCh is closed when the checking goroutine below finishes.
	wCh := make(chan struct{})
	go func(ch chan struct{}) {
		defer close(ch)
		oid := types.ObjectID{}
		oid.CacheKey = "1"
		oid.Path = "/path"

		headers, err := storage.Headers(ctx, oid)
		if err != nil {
			t.Errorf("Got Error on a Headers on closing storage:\n%s", err)
		}
		if headers.Get(expectedHeader) != expectedValue { //!TODO: better check
			t.Errorf("The returned headers:\n%s\n Did not match the expected headers\n%s", headers, expected)
		}

	}(wCh)
	// Release the blocked header request so the in-flight call can proceed
	// while Close() is waiting for it.
	go close(<-up.requestHeader)
	err := storage.Close()
	if err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}
	// Wait for the goroutine, so its t.Errorf calls cannot fire after the
	// test has returned.
	<-wCh
}
Example #7
0
// TestRestoreFromDisk2 stores a batch of files, closes the storage and opens
// a new instance over the same directory. It verifies that the cache
// algorithm's ShouldKeep is consulted exactly once per restored file and that
// every subsequent read is served from disk without touching the upstream.
func TestRestoreFromDisk2(t *testing.T) {
	// fileNumber follows Go MixedCaps naming (was file_number).
	fileNumber := 64
	expected := "awesome"
	up, cz, _, _ := setup()
	ca := NewFakeCacheAlgorithm()
	// First pass: cache misses so every object is fetched and written to disk.
	ca.AddFakeReplies(types.ObjectIndex{}, FakeReplies{
		Lookup:     LookupFalse,
		ShouldKeep: ShouldKeepTrue,
		AddObject:  AddObjectNil,
	})

	runtime.GOMAXPROCS(runtime.NumCPU())
	for i := 0; i < fileNumber; i++ {
		up.addFakeResponse(fmt.Sprintf("/path/%02d", i),
			fakeResponse{
				Status:       "200",
				ResponseTime: 0,
				Response:     expected,
			})
	}

	storage := New(cz, ca, newStdLogger())
	defer os.RemoveAll(storage.path)
	ctx := contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: up})

	// Populate the cache.
	for i := 0; i < fileNumber; i++ {
		oid := types.ObjectID{
			CacheKey: "1",
			Path:     fmt.Sprintf("/path/%02d", i),
		}

		makeAndCheckGetFullFile(t, ctx, storage, oid, expected)
	}

	if err := storage.Close(); err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}

	// Counting upstream: any call it receives after the restore is a failure.
	coup := &countingUpstream{
		fakeUpstream: up,
	}

	// Incremented atomically in case the restore examines stored objects
	// from more than one goroutine.
	var shouldKeepCount int32
	ca.AddFakeReplies(types.ObjectIndex{}, FakeReplies{
		Lookup: LookupTrue,
		ShouldKeep: func(_ types.ObjectIndex) bool {
			atomic.AddInt32(&shouldKeepCount, 1)
			return true
		},
		AddObject: AddObjectNil,
	})

	storage = New(cz, ca, newStdLogger())
	if int(shouldKeepCount) != fileNumber {
		t.Errorf("Algorithm.ShouldKeep should've been called for each file a total of %d but was called %d times.", fileNumber, shouldKeepCount)
	}

	ctx = contexts.NewVhostContext(context.Background(), &types.VirtualHost{Upstream: coup})
	for i := 0; i < fileNumber; i++ {
		oid := types.ObjectID{
			CacheKey: "1",
			Path:     fmt.Sprintf("/path/%02d", i),
		}

		makeAndCheckGetFullFile(t, ctx, storage, oid, expected)
	}
	if err := storage.Close(); err != nil {
		t.Errorf("Storage.Close() shouldn't have returned error\n%s", err)
	}
	if coup.called != 0 {
		t.Errorf("Storage should've not called even once upstream but it did %d times", coup.called)
	}
}