Example #1
0
// TestReduceError verifies that a reduce-phase error is reported through the
// result URL once retries are exhausted, and that a job whose reducer starts
// succeeding partway through (succeedThreshold) still completes normally.
func (mrt *MapreduceTests) TestReduceError(c *ck.C) {
	// Skip explicitly instead of a silent bare return so the disabled test
	// is visible in the run summary (matches TestJobStageComplete's style).
	c.Skip("test disabled")
	u := testReduceError{}
	job := mrt.setup(&u, &u.SimpleTasks)
	defer u.SimpleTasks.gather()
	ds := appwrap.NewLocalDatastore()

	_, err := Run(appwrap.StubContext(), ds, job)
	c.Check(err, ck.Equals, nil)

	resultUrl := <-u.SimpleTasks.done

	// "parsed" avoids shadowing the net/url package.
	parsed, err := url.Parse(resultUrl)
	c.Check(err, ck.IsNil)
	fields := parsed.Query()

	c.Check(fields["status"][0], ck.Equals, "error")
	// "maxium" [sic] matches the message text actually produced by the retry logic.
	c.Check(fields["error"][0], ck.Equals, "error retrying: maxium retries exceeded (task failed due to: reduce had an error)")

	// see if we handle retries properly
	v := testReduceError{succeedThreshold: u.count / 2}
	job = mrt.setup(&v, &v.SimpleTasks)
	defer v.SimpleTasks.gather()

	_, err = Run(appwrap.StubContext(), ds, job)
	c.Check(err, ck.Equals, nil)

	resultUrl = <-v.SimpleTasks.done
	// "status=done" is expected at a fixed offset in the result URL.
	c.Check(strings.Index(resultUrl, "status=done"), ck.Equals, 6)
}
Example #2
0
// TestWaitForStageCompletion checks that doWaitForStageCompletion keeps
// polling the stage-check callback until it reports a stage change, and that
// a task failure posts a status update and surfaces as an error.
func (mrt *MapreduceTests) TestWaitForStageCompletion(c *ck.C) {
	ds := appwrap.NewLocalDatastore()
	ctx := appwrap.StubContext()

	jobKey, err := createJob(ds, "prefix", []string{}, "complete", false, "", 5)
	c.Assert(err, ck.IsNil)

	taskMock := &taskInterfaceMock{}

	// The callback reports "no change" five times before signalling success.
	attempts := 0
	job, err := doWaitForStageCompletion(ctx, ds, taskMock, jobKey, StageMapping, StageReducing, 1*time.Millisecond,
		func(c context.Context, ds appwrap.Datastore, jobKey *datastore.Key, tasks []*datastore.Key, expectedStage, nextStage JobStage) (stageChanged bool, job JobInfo, finalErr error) {
			if attempts == 5 {
				return true, JobInfo{UrlPrefix: "foo"}, nil
			}
			attempts++
			return false, JobInfo{}, nil
		},
		time.Minute)
	c.Assert(err, ck.IsNil)
	c.Assert(job.UrlPrefix, ck.Equals, "foo")

	// A failing task should trigger exactly one status post and return an error.
	taskMock.On("PostStatus", ctx, mock.Anything).Return(nil).Once()

	_, err = doWaitForStageCompletion(ctx, ds, taskMock, jobKey, StageMapping, StageReducing, 1*time.Millisecond,
		func(c context.Context, ds appwrap.Datastore, jobKey *datastore.Key, tasks []*datastore.Key, expectedStage, nextStage JobStage) (stageChanged bool, job JobInfo, finalErr error) {
			// this is what happens when a task fails
			return true, JobInfo{Stage: StageFailed}, taskError{"some failure"}
		},
		time.Minute)
	c.Assert(err, ck.NotNil)
	taskMock.AssertExpectations(c)
}
Example #3
0
// TestWordCount runs a unique-word-count job with SeparateReduceItems enabled
// and expects the job to finish with a "status=done" result URL.
func (mrt *MapreduceTests) TestWordCount(c *ck.C) {
	// Skip explicitly instead of a silent bare return so the disabled test
	// is visible in the run summary (matches TestJobStageComplete's style).
	c.Skip("test disabled")
	u := testUniqueWordCount{}
	job := mrt.setup(&u, &u.SimpleTasks)
	job.SeparateReduceItems = true
	defer u.SimpleTasks.gather()
	ds := appwrap.NewLocalDatastore()

	_, err := Run(appwrap.StubContext(), ds, job)
	c.Assert(err, ck.Equals, nil)

	resultUrl := <-u.SimpleTasks.done
	// "status=done" is expected at a fixed offset in the result URL.
	c.Check(strings.Index(resultUrl, "status=done"), ck.Equals, 6)
}
Example #4
0
// TestReduceFatal verifies that a fatal reduce error fails the job without
// retrying and reports "failed task" through the result URL.
func (mrt *MapreduceTests) TestReduceFatal(c *ck.C) {
	// Skip explicitly instead of a silent bare return so the disabled test
	// is visible in the run summary (matches TestJobStageComplete's style).
	c.Skip("test disabled")
	u := testReduceError{fatal: true}
	job := mrt.setup(&u, &u.SimpleTasks)
	ds := appwrap.NewLocalDatastore()
	defer u.SimpleTasks.gather()

	_, err := Run(appwrap.StubContext(), ds, job)
	c.Check(err, ck.Equals, nil)

	resultUrl := <-u.SimpleTasks.done

	// "parsed" avoids shadowing the net/url package.
	parsed, err := url.Parse(resultUrl)
	c.Check(err, ck.IsNil)
	fields := parsed.Query()

	c.Check(fields["status"][0], ck.Equals, "error")
	c.Check(fields["error"][0], ck.Equals, "failed task: reduce had an error")
}
Example #5
0
// TestMapError verifies that a map-phase error exhausts its retries and is
// reported as an error through the result URL.
func (mrt *MapreduceTests) TestMapError(c *ck.C) {
	// Skip explicitly instead of a silent bare return so the disabled test
	// is visible in the run summary (matches TestJobStageComplete's style).
	c.Skip("test disabled")
	u := testMapError{}
	job := mrt.setup(&u, &u.SimpleTasks)
	defer u.SimpleTasks.gather()
	ds := appwrap.NewLocalDatastore()

	_, err := Run(appwrap.StubContext(), ds, job)
	c.Check(err, ck.Equals, nil)

	resultUrl := <-u.SimpleTasks.done

	// "parsed" avoids shadowing the net/url package.
	parsed, err := url.Parse(resultUrl)
	c.Check(err, ck.IsNil)
	fields := parsed.Query()

	// Leftover debug print removed; use c.Logf if the raw URL is needed again.
	c.Check(fields["status"][0], ck.Equals, "error")
	// "maxium" [sic] matches the message text actually produced by the retry logic.
	c.Check(fields["error"][0], ck.Equals, "error retrying: maxium retries exceeded (task failed due to: map had an error)")
}
// TestIntermediateMerge writes five interleaved runs of mapped data (keys
// j*5+i for source i) into an in-memory intermediate store and verifies that
// mergeIntermediate produces one fully sorted stream of all 5000 keys.
func (mrt *MapreduceTests) TestIntermediateMerge(c *ck.C) {
	memStorage := &memoryIntermediateStorage{}
	ctx := appwrap.StubContext()

	handler := struct {
		Int64KeyHandler
		Int64ValueHandler
	}{}

	merger := newMerger(handler)
	for i := 0; i < 5; i++ {
		// Previously ignored errors are now asserted so a storage failure
		// fails the test instead of silently corrupting the merge input.
		w, err := memStorage.CreateIntermediate(ctx, handler)
		c.Assert(err, ck.IsNil)
		for j := 0; j < 1000; j++ {
			// each source holds every 5th key, offset by i
			c.Assert(w.WriteMappedData(MappedData{Key: int64(j*5 + i), Value: int64(i)}), ck.IsNil)
		}

		c.Assert(w.Close(ctx), ck.IsNil)

		iterator, err := memStorage.Iterator(ctx, w.ToName(), handler)
		c.Assert(err, ck.IsNil)
		merger.addSource(iterator)
	}

	w, err := memStorage.CreateIntermediate(ctx, handler)
	c.Assert(err, ck.IsNil)
	err = mergeIntermediate(w, handler, merger)
	c.Assert(err, ck.Equals, nil)
	err = w.Close(ctx)
	c.Assert(err, ck.Equals, nil)

	iter, err := memStorage.Iterator(ctx, w.ToName(), handler)
	c.Assert(err, ck.IsNil)

	// The merged output must be exactly 0..4999 in ascending order.
	next := int64(0)
	data, valid, err := iter.Next()
	for valid && err == nil {
		c.Assert(data.Key, ck.Equals, next)
		next++
		data, valid, err = iter.Next()
	}
	// The loop also stops on an iterator error; make that failure visible
	// instead of letting it masquerade as a short result.
	c.Assert(err, ck.IsNil)
	c.Assert(next, ck.Equals, int64(5000))
}
Example #7
0
// TestJobStageComplete walks a job through its lifecycle and verifies that
// jobStageComplete only advances the stage once every task is done, and that
// a failed task moves the job to StageFailed instead of the next stage.
func (mrt *MapreduceTests) TestJobStageComplete(c *ck.C) {
	c.Skip("YOU SHALL NOT PASS! (Because the dual monitor patch broke it)")
	ds := appwrap.NewLocalDatastore()
	ctx := appwrap.StubContext()

	jobKey, err := createJob(ds, "prefix", []string{}, "complete", false, "", 5)
	c.Assert(err, ck.IsNil)

	// checkStage reloads the job entity and asserts its current stage.
	checkStage := func(expected JobStage) {
		var job JobInfo
		err := ds.Get(jobKey, &job)
		c.Assert(err, ck.IsNil)
		c.Assert(job.Stage, ck.Equals, expected)
	}

	checkStage(StageFormation)

	// Two running map tasks: the stage must not advance while either is open.
	taskKeys := make([]*datastore.Key, 2)
	tasks := make([]JobTask, len(taskKeys))
	for i := range taskKeys {
		taskKeys[i] = ds.NewKey(TaskEntity, "", int64(i+1), jobKey)
		tasks[i].Status = TaskStatusRunning
		tasks[i].Type = TaskTypeMap
	}

	err = createTasks(ds, jobKey, taskKeys, tasks, StageMapping)
	c.Assert(err, ck.IsNil)
	checkStage(StageMapping)

	advanced, _, err := jobStageComplete(ctx, ds, jobKey, taskKeys, StageMapping, StageReducing)
	c.Assert(err, ck.IsNil)
	c.Assert(advanced, ck.Equals, false)

	// Completing only the first task is still not enough to advance.
	tasks[0].Status = TaskStatusDone
	tasks[0].Done = jobKey
	_, err = ds.Put(taskKeys[0], &tasks[0])
	c.Assert(err, ck.IsNil)
	advanced, _, err = jobStageComplete(ctx, ds, jobKey, taskKeys, StageMapping, StageReducing)
	c.Assert(err, ck.IsNil)
	c.Assert(advanced, ck.Equals, false)

	// With both tasks done, the stage should advance to reducing.
	tasks[1].Status = TaskStatusDone
	tasks[1].Done = jobKey
	_, err = ds.Put(taskKeys[1], &tasks[1])
	c.Assert(err, ck.IsNil)

	// this uses an index query, which is eventually consistent
	advanced, _, err = jobStageComplete(ctx, ds, jobKey, taskKeys, StageMapping, StageReducing)
	c.Assert(err, ck.IsNil)
	c.Assert(advanced, ck.Equals, true)
	checkStage(StageReducing)

	// we're already at StageReducing, so nothing should happen here
	advanced, _, err = jobStageComplete(ctx, ds, jobKey, taskKeys, StageMapping, StageReducing)
	c.Assert(err, ck.IsNil)
	c.Assert(advanced, ck.Equals, false)
	checkStage(StageReducing)

	// let's fail a reducer and see what happens
	reduceKeys := make([]*datastore.Key, 2)
	reduceTasks := make([]JobTask, len(reduceKeys))
	reduceKeys[0] = datastore.NewKey(ctx, TaskEntity, "", int64(1001), jobKey)
	reduceKeys[1] = datastore.NewKey(ctx, TaskEntity, "", int64(1002), jobKey)
	reduceTasks[0] = JobTask{
		Status: TaskStatusFailed,
		Info:   "reason for failure",
		Done:   jobKey,
		Type:   TaskTypeReduce,
	}
	reduceTasks[1] = JobTask{
		Status: TaskStatusDone,
		Done:   jobKey,
		Type:   TaskTypeReduce,
	}
	err = createTasks(ds, jobKey, reduceKeys, reduceTasks, StageReducing)
	c.Assert(err, ck.IsNil)

	// this uses an index query, which is eventually consistent
	advanced, checkJob, err := jobStageComplete(ctx, ds, jobKey, reduceKeys, StageReducing, StageDone)
	// err was previously assigned but never checked here.
	c.Assert(err, ck.IsNil)
	c.Assert(advanced, ck.Equals, true)
	c.Assert(checkJob.Stage, ck.Equals, StageFailed)
	checkStage(StageFailed)
}
Example #8
0
// ContextFn returns a stub context for any request, ignoring the request
// itself. NOTE(review): presumably this satisfies a context-factory hook used
// when wiring HTTP handlers in tests — confirm against the callers.
func (mrt *MapreduceTests) ContextFn(*http.Request) context.Context {
	return appwrap.StubContext()
}