Example #1
func TestStateTransferredToOldPoint(t *testing.T) {
	skipped := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			skipped = true
		},
		invalidateStateImpl: func() {},
	}, &inertTimerFactory{})
	instance.moveWatermarks(90)
	instance.updateHighStateTarget(&stateUpdateTarget{
		checkpointMessage: checkpointMessage{
			seqNo: 100,
			id:    []byte("onehundred"),
		},
	})

	events.SendEvent(instance, stateUpdatedEvent{
		chkpt: &checkpointMessage{
			seqNo: 10,
		},
	})

	if !skipped {
		t.Fatalf("Expected state transfer to be kicked off once execution completed")
	}
}
Example #2
// Test for issue #1119
func TestSendQueueThrottling(t *testing.T) {
	prePreparesSent := 0

	mock := &omniProto{}
	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	instance.consumer = &omniProto{
		validateImpl: func(p []byte) error { return nil },
		broadcastImpl: func(p []byte) {
			prePreparesSent++
		},
	}
	defer instance.close()

	for j := 0; j < 4; j++ {
		events.SendEvent(instance, &Request{
			Timestamp: &gp.Timestamp{Seconds: int64(j), Nanos: 0},
			Payload:   []byte(fmt.Sprintf("%d", j)),
		})
	}

	expected := 2
	if prePreparesSent != expected {
		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
	}
}
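The throttling exercised above can be shown in isolation. The sketch below is an assumption about the general idea, not the pbftCore implementation: a primary that only assigns sequence numbers in the first half of its log (up to h+L/2) and queues the rest, which is why only two of the four requests produce pre-prepares.

// Illustrative sketch only (assumed throttle rule, hypothetical helper; not part of pbftCore).
package main

import "fmt"

// nextSeqNo returns the next sequence number to pre-prepare, or false when the
// request should be queued because the log is already half full.
func nextSeqNo(h, L, lastAssigned uint64) (uint64, bool) {
	n := lastAssigned + 1
	if n > h+L/2 {
		return 0, false
	}
	return n, true
}

func main() {
	var h, L uint64 = 0, 4
	last, sent := uint64(0), 0
	for i := 0; i < 4; i++ {
		if n, ok := nextSeqNo(h, L, last); ok {
			last = n
			sent++
		}
	}
	fmt.Println("pre-prepares sent:", sent) // prints 2, matching the test's expectation
}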
Example #3
func (mock *mockEventManager) process() {
	for {
		select {
		case ev := <-mock.bufferedChannel:
			events.SendEvent(mock.target, ev)
		default:
			return
		}
	}
}
Example #4
// This test is designed to ensure state transfer occurs if our checkpoint does not match a quorum cert
func TestCheckpointDiffersFromQuorum(t *testing.T) {
	invalidated := false
	skipped := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		//broadcastImpl:       func(b []byte) { viewChangeSent = true },
		//signImpl:            func(b []byte) ([]byte, error) { return b, nil },
		//verifyImpl:          func(senderID uint64, signature []byte, message []byte) error { return nil },
		invalidateStateImpl: func() { invalidated = true },
		skipToImpl:          func(s uint64, id []byte, replicas []uint64) { skipped = true },
	}, &inertTimerFactory{})

	seqNo := uint64(10)

	badChkpt := &Checkpoint{
		SequenceNumber: 10,
		Id:             base64.StdEncoding.EncodeToString([]byte("WRONG")),
		ReplicaId:      0,
	}
	instance.chkpts[seqNo] = badChkpt.Id // This is done via the exec path, shortcut it here
	events.SendEvent(instance, badChkpt)

	for i := uint64(1); i <= 3; i++ {
		events.SendEvent(instance, &Checkpoint{
			SequenceNumber: 10,
			Id:             base64.StdEncoding.EncodeToString([]byte("CORRECT")),
			ReplicaId:      i,
		})
	}

	if instance.h != 10 {
		t.Fatalf("Replica should have moved its watermarks but did not")
	}

	if !instance.skipInProgress {
		t.Fatalf("Replica should be attempting state transfer")
	}

	if !invalidated || !skipped {
		t.Fatalf("Replica should have invalidated its state and skipped")
	}
}
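A minimal sketch of the decision this test exercises, using hypothetical names rather than the pbftCore fields: when a quorum of matching checkpoints disagrees with our own checkpoint at the same sequence number, the replica has to treat its state as invalid and fall back to state transfer.

// Sketch only (assumed logic, hypothetical types; not pbftCore).
package main

import "fmt"

// mustStateTransfer reports whether ownID conflicts with at least quorumSize
// matching checkpoint ids from other replicas for the same sequence number.
func mustStateTransfer(ownID string, others []string, quorumSize int) bool {
	counts := make(map[string]int)
	for _, id := range others {
		counts[id]++
	}
	for id, n := range counts {
		if n >= quorumSize && id != ownID {
			return true // the network agreed on a state we do not have
		}
	}
	return false
}

func main() {
	others := []string{"CORRECT", "CORRECT", "CORRECT"}
	fmt.Println(mustStateTransfer("WRONG", others, 3)) // true
}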
Example #5
func TestReplicaPersistQSet(t *testing.T) {
	persist := make(map[string][]byte)

	stack := &omniProto{
		validateImpl: func(b []byte) error {
			return nil
		},
		broadcastImpl: func(msg []byte) {
		},
		StoreStateImpl: func(key string, value []byte) error {
			persist[key] = value
			return nil
		},
		DelStateImpl: func(key string) {
			delete(persist, key)
		},
		ReadStateImpl: func(key string) ([]byte, error) {
			if val, ok := persist[key]; ok {
				return val, nil
			}
			return nil, fmt.Errorf("key not found")
		},
		ReadStateSetImpl: func(prefix string) (map[string][]byte, error) {
			r := make(map[string][]byte)
			for k, v := range persist {
				if len(k) >= len(prefix) && k[0:len(prefix)] == prefix {
					r[k] = v
				}
			}
			return r, nil
		},
	}
	p := newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
	req := &Request{
		Timestamp: &gp.Timestamp{Seconds: 1, Nanos: 0},
		Payload:   []byte("foo"),
		ReplicaId: uint64(0),
	}
	events.SendEvent(p, &PrePrepare{
		View:           0,
		SequenceNumber: 1,
		RequestDigest:  hashReq(req),
		Request:        req,
		ReplicaId:      uint64(0),
	})
	p.close()

	p = newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
	if !p.prePrepared(hashReq(req), 0, 1) {
		t.Errorf("did not restore qset properly")
	}
}
Example #6
// Test for issue #1091
// Once the primary ran out of sequence numbers, it would queue requests into a map and resubmit them in arbitrary order.
// This is incorrect; they must be resubmitted in the order of their timestamps.
func TestSendQueueOrdering(t *testing.T) {
	prePreparesSent := 0

	mock := &omniProto{}
	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 100
	lastTime := &gp.Timestamp{Seconds: 0, Nanos: 0}

	instance.consumer = &omniProto{
		validateImpl: func(p []byte) error { return nil },
		broadcastImpl: func(p []byte) {
			msg := &Message{}
			err := proto.Unmarshal(p, msg)
			if err != nil {
				t.Fatalf("Error unmarshaling payload")
				return
			}
			prePrep := msg.GetPrePrepare()
			if prePrep == nil {
				// not a preprepare, ignoring
				return
			}
			req := prePrep.Request
			if lastTime.Seconds > req.Timestamp.Seconds {
				t.Fatalf("Did not arrive in order, got %d after %d", req.Timestamp.Seconds, lastTime.Seconds)
			}
			lastTime = req.Timestamp

			// As each pre-prepare is sent, delete it from the outstanding requests, as though it had executed
			delete(instance.outstandingReqs, prePrep.RequestDigest)
			prePreparesSent++
		},
	}
	defer instance.close()

	for j := 1; j <= 100; j++ {
		events.SendEvent(instance, &Request{
			Timestamp: &gp.Timestamp{Seconds: int64(j), Nanos: 0},
			Payload:   []byte(fmt.Sprintf("%d", j)),
		})
	}

	instance.moveWatermarks(50)

	expected := 100
	if prePreparesSent != expected {
		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
	}
}
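The ordering requirement behind issue #1091 can be illustrated on its own. This is a sketch under assumed types, not the pbftCore request queue; the point is only that draining a Go map yields an arbitrary order, so queued requests must be sorted by timestamp before resubmission.

// Illustrative sketch only (hypothetical types): drain a map of queued
// requests in timestamp order rather than map iteration order.
package main

import (
	"fmt"
	"sort"
)

type queuedRequest struct {
	seconds int64
	payload string
}

func drainInOrder(queue map[string]queuedRequest) []queuedRequest {
	reqs := make([]queuedRequest, 0, len(queue))
	for digest, req := range queue {
		reqs = append(reqs, req)
		delete(queue, digest)
	}
	sort.Slice(reqs, func(i, j int) bool { return reqs[i].seconds < reqs[j].seconds })
	return reqs
}

func main() {
	queue := map[string]queuedRequest{
		"b": {seconds: 2, payload: "second"},
		"a": {seconds: 1, payload: "first"},
		"c": {seconds: 3, payload: "third"},
	}
	for _, r := range drainInOrder(queue) {
		fmt.Println(r.seconds, r.payload) // always 1, 2, 3
	}
}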
Example #7
// From issue #687
func TestWitnessCheckpointOutOfBounds(t *testing.T) {
	mock := &omniProto{}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	defer instance.close()

	events.SendEvent(instance, &Checkpoint{
		SequenceNumber: 6,
		ReplicaId:      0,
	})

	instance.moveWatermarks(6)

	// This causes the list of high checkpoints to grow to f+1,
	// even though f+1 checkpoints have not been witnessed outside our range.
	// Historically, this caused an index out of bounds error.
	events.SendEvent(instance, &Checkpoint{
		SequenceNumber: 10,
		ReplicaId:      3,
	})
}
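A sketch of the kind of guard this regression test pins down, with hypothetical names rather than the pbftCore fields: before indexing into the list of checkpoints witnessed above the high watermark, verify that at least f+1 of them actually exist.

// Sketch only (assumed logic): count checkpoints witnessed above the high
// watermark H and bail out unless f+1 replicas are ahead of us, so the slice
// indexing below cannot go out of bounds.
package main

import (
	"fmt"
	"sort"
)

func networkIsAhead(hChkpts map[uint64]uint64, H uint64, f int) (uint64, bool) {
	above := make([]uint64, 0, len(hChkpts))
	for _, seqNo := range hChkpts {
		if seqNo > H {
			above = append(above, seqNo)
		}
	}
	if len(above) < f+1 {
		return 0, false // not enough evidence that the network has moved past us
	}
	sort.Slice(above, func(i, j int) bool { return above[i] < above[j] })
	// Safe now: the (f+1)-th highest witnessed sequence number exists.
	return above[len(above)-(f+1)], true
}

func main() {
	chkpts := map[uint64]uint64{0: 6, 3: 10} // replica ID -> highest checkpoint seqNo seen
	fmt.Println(networkIsAhead(chkpts, 6, 1)) // 0 false: only one checkpoint above H
}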
Example #8
// From issue #687
func TestWitnessFallBehindMissingPrePrepare(t *testing.T) {
	mock := &omniProto{}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	defer instance.close()

	events.SendEvent(instance, &Commit{
		SequenceNumber: 2,
		ReplicaId:      0,
	})

	// Historically, the lack of a prePrepare associated with the commit would cause
	// a nil pointer dereference
	instance.moveWatermarks(6)
}
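A minimal sketch of the guard this test protects, with hypothetical types: while garbage-collecting certificates below the new low watermark, an entry may hold a commit but no pre-prepare, so the pre-prepare has to be nil-checked before it is dereferenced.

// Sketch only (hypothetical types; not pbftCore): tolerate certificates that
// witnessed a commit but never received a pre-prepare when moving watermarks.
package main

import "fmt"

type prePrepare struct{ digest string }

type cert struct {
	prePrep *prePrepare
	commits int
}

func gcBelow(certs map[uint64]*cert, h uint64) {
	for n, c := range certs {
		if n > h {
			continue
		}
		digest := "<none>"
		if c.prePrep != nil { // the guard the original code was missing
			digest = c.prePrep.digest
		}
		fmt.Printf("dropping cert for seqNo %d (digest %q)\n", n, digest)
		delete(certs, n)
	}
}

func main() {
	certs := map[uint64]*cert{2: {commits: 1}} // commit witnessed, no pre-prepare
	gcBelow(certs, 6)                          // must not panic
}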
Example #9
func TestIncompletePayload(t *testing.T) {
	mock := &omniProto{}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	defer instance.close()
	instance.replicaCount = 5

	broadcaster := uint64(generateBroadcaster(instance.replicaCount))

	checkMsg := func(msg *Message, errMsg string, args ...interface{}) {
		mock.broadcastImpl = func(msgPayload []byte) {
			t.Errorf(errMsg, args...)
		}
		events.SendEvent(instance, pbftMessageEvent{msg: msg, sender: broadcaster})
	}

	checkMsg(&Message{}, "Expected to reject empty message")
	checkMsg(&Message{Payload: &Message_PrePrepare{PrePrepare: &PrePrepare{ReplicaId: broadcaster}}}, "Expected to reject empty pre-prepare")
}
Example #10
func TestMaliciousPrePrepare(t *testing.T) {
	mock := &omniProto{
		broadcastImpl: func(msgPayload []byte) {
			t.Fatalf("Expected to ignore malicious pre-prepare")
		},
	}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	defer instance.close()
	instance.replicaCount = 5

	pbftMsg := &Message_PrePrepare{&PrePrepare{
		View:           0,
		SequenceNumber: 1,
		BatchDigest:    hash(createPbftReqBatch(1, 1)),
		RequestBatch:   createPbftReqBatch(1, 2),
		ReplicaId:      0,
	}}
	events.SendEvent(instance, pbftMsg)
}
Example #11
func TestStateNetworkMovesOnDuringSlowStateTransfer(t *testing.T) {
	instance := newPbftCore(3, loadConfig(), &omniProto{
		skipToImpl:          func(s uint64, id []byte, replicas []uint64) {},
		invalidateStateImpl: func() {},
	}, &inertTimerFactory{})
	instance.skipInProgress = true

	seqNo := uint64(20)

	for i := uint64(0); i < 3; i++ {
		events.SendEvent(instance, &Checkpoint{
			SequenceNumber: seqNo,
			ReplicaId:      i,
			Id:             base64.StdEncoding.EncodeToString([]byte("twenty")),
		})
	}

	if instance.h != seqNo {
		t.Fatalf("Expected watermark movement to %d because of state transfer, but low watermark is %d", seqNo, instance.h)
	}
}
Example #12
func TestMaliciousPrePrepare(t *testing.T) {
	mock := &omniProto{
		broadcastImpl: func(msgPayload []byte) {
			t.Fatalf("Expected to ignore malicious pre-prepare")
		},
	}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	defer instance.close()
	instance.replicaCount = 5

	digest1 := "hi there"
	request2 := &Request{Payload: []byte("other"), ReplicaId: uint64(generateBroadcaster(instance.replicaCount))}

	pbftMsg := &Message_PrePrepare{&PrePrepare{
		View:           0,
		SequenceNumber: 1,
		RequestDigest:  digest1,
		Request:        request2,
		ReplicaId:      0,
	}}
	events.SendEvent(instance, pbftMsg)
}
Example #13
// Test for issue #1119
func TestSendQueueThrottling(t *testing.T) {
	prePreparesSent := 0

	mock := &omniProto{}
	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	instance.consumer = &omniProto{
		broadcastImpl: func(p []byte) {
			prePreparesSent++
		},
	}
	defer instance.close()

	for j := 0; j < 4; j++ {
		events.SendEvent(instance, createPbftReqBatch(int64(j), 0)) // replica ID for req doesn't matter
	}

	expected := 2
	if prePreparesSent != expected {
		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
	}
}
Example #14
func TestOutstandingReqsResubmission(t *testing.T) {
	omni := &omniProto{}
	config := loadConfig()
	config.Set("general.batchsize", 2)
	b := newObcBatch(0, config, omni)
	defer b.Close() // The broadcasting threads only cause problems here... but this test stalls without them

	transactionsBroadcast := 0
	omni.ExecuteImpl = func(tag interface{}, txs []*pb.Transaction) {
		transactionsBroadcast += len(txs)
		logger.Debugf("\nExecuting %d transactions (%v)\n", len(txs), txs)
		nextExec := b.pbft.lastExec + 1
		b.pbft.currentExec = &nextExec
		b.manager.Inject(executedEvent{tag: tag})
	}

	omni.CommitImpl = func(tag interface{}, meta []byte) {
		b.manager.Inject(committedEvent{})
	}

	omni.UnicastImpl = func(ocMsg *pb.Message, dest *pb.PeerID) error {
		return nil
	}

	reqs := make([]*Request, 8)
	for i := 0; i < len(reqs); i++ {
		reqs[i] = createPbftReq(int64(i), 0)
	}

	// Add four requests, with a batch size of 2
	b.reqStore.storeOutstanding(reqs[0])
	b.reqStore.storeOutstanding(reqs[1])
	b.reqStore.storeOutstanding(reqs[2])
	b.reqStore.storeOutstanding(reqs[3])

	executed := make(map[string]struct{})
	execute := func() {
		for d, reqBatch := range b.pbft.outstandingReqBatches {
			if _, ok := executed[d]; ok {
				continue
			}
			executed[d] = struct{}{}
			b.execute(b.pbft.lastExec+1, reqBatch)
		}
	}

	tmp := uint64(1)
	b.pbft.currentExec = &tmp
	events.SendEvent(b, committedEvent{})
	execute()

	if b.reqStore.outstandingRequests.Len() != 0 {
		t.Fatalf("All request batches should have been executed and deleted after exec")
	}

	// Simulate changing views, with a request in the qSet, and one outstanding which is not
	wreqsBatch := &RequestBatch{Batch: []*Request{reqs[4]}}
	prePrep := &PrePrepare{
		View:           0,
		SequenceNumber: b.pbft.lastExec + 1,
		BatchDigest:    "foo",
		RequestBatch:   wreqsBatch,
	}

	b.pbft.certStore[msgID{v: prePrep.View, n: prePrep.SequenceNumber}] = &msgCert{prePrepare: prePrep}

	// Add the request, which is already pre-prepared, to be outstanding, and one outstanding not pending, not prepared
	b.reqStore.storeOutstanding(reqs[4]) // already pre-prepared via the certStore entry above
	b.reqStore.storeOutstanding(reqs[5])
	b.reqStore.storeOutstanding(reqs[6])
	b.reqStore.storeOutstanding(reqs[7])

	events.SendEvent(b, viewChangedEvent{})
	execute()

	if b.reqStore.hasNonPending() {
		t.Errorf("All requests should have been resubmitted after view change")
	}

	// We should have one request in batch which has not been sent yet
	expected := 6
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}

	events.SendEvent(b, batchTimerEvent{})
	execute()

	// If the already prepared request were to be resubmitted, we would get count 8 here
	expected = 7
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}
}
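The broadcast counts above (7 rather than 8) come down to not resubmitting a request that is already covered by a pre-prepare after a view change. A sketch under assumed types, not the obcBatch code:

// Sketch only (hypothetical types): after a view change, resubmit outstanding
// requests except those already covered by a pre-prepare in the cert store.
package main

import "fmt"

func toResubmit(outstanding []string, prePrepared map[string]bool) []string {
	var resubmit []string
	for _, digest := range outstanding {
		if prePrepared[digest] {
			continue // already in the qSet; resubmitting would double-count it
		}
		resubmit = append(resubmit, digest)
	}
	return resubmit
}

func main() {
	outstanding := []string{"req4", "req5", "req6", "req7"}
	prePrepared := map[string]bool{"req4": true}
	fmt.Println(toResubmit(outstanding, prePrepared)) // [req5 req6 req7]
}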
Example #15
func TestViewChangeDuringExecution(t *testing.T) {
	skipped := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		viewChangeImpl: func(v uint64) {},
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			skipped = true
		},
		invalidateStateImpl: func() {},
		broadcastImpl:       func(b []byte) {},
		signImpl:            func(b []byte) ([]byte, error) { return b, nil },
		verifyImpl:          func(senderID uint64, signature []byte, message []byte) error { return nil },
	}, &inertTimerFactory{})
	instance.activeView = false
	instance.view = 1
	instance.lastExec = 1
	nextExec := uint64(2)
	instance.currentExec = &nextExec

	vset := make([]*ViewChange, 3)

	cset := []*ViewChange_C{
		{
			SequenceNumber: 100,
			Id:             base64.StdEncoding.EncodeToString([]byte("onehundred")),
		},
	}

	// Replica 0 sent checkpoints for 100
	vset[0] = &ViewChange{
		H:    90,
		Cset: cset,
	}

	// Replica 1 sent checkpoints for 100
	vset[1] = &ViewChange{
		H:    90,
		Cset: cset,
	}

	// Replica 2 sent checkpoints for 100
	vset[2] = &ViewChange{
		H:    90,
		Cset: cset,
	}

	xset := make(map[uint64]string)
	xset[101] = ""

	instance.newViewStore[1] = &NewView{
		View:      1,
		Vset:      vset,
		Xset:      xset,
		ReplicaId: 1,
	}

	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
		t.Fatalf("Failed to successfully process new view")
	}

	if skipped {
		t.Fatalf("Expected state transfer not to be kicked off until execution completes")
	}

	events.SendEvent(instance, execDoneEvent{})

	if !skipped {
		t.Fatalf("Expected state transfer to be kicked off once execution completed")
	}
}
Example #16
// TestViewChangeCanExecuteToCheckpoint tests a replica that is mid-execution when it receives a view change whose checkpoint is above its watermarks,
// but for which it already holds enough commit certificates to reach that checkpoint. State transfer should not occur; the pending executions should complete the view change.
func TestViewChangeCanExecuteToCheckpoint(t *testing.T) {
	instance := newPbftCore(3, loadConfig(), &omniProto{
		broadcastImpl: func(b []byte) {},
		getStateImpl:  func() []byte { return []byte("state") },
		signImpl:      func(b []byte) ([]byte, error) { return b, nil },
		verifyImpl:    func(senderID uint64, signature []byte, message []byte) error { return nil },
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			t.Fatalf("Should not have performed state transfer, should have caught up via execution")
		},
	}, &inertTimerFactory{})
	instance.activeView = false
	instance.view = 1
	instance.lastExec = 5
	newViewBaseSeqNo := uint64(10)
	nextExec := uint64(6)
	instance.currentExec = &nextExec

	for i := nextExec + 1; i <= newViewBaseSeqNo; i++ {
		commit := &Commit{View: 0, SequenceNumber: i}
		prepare := &Prepare{View: 0, SequenceNumber: i}
		instance.certStore[msgID{v: 0, n: i}] = &msgCert{
			digest:     "", // null request
			prePrepare: &PrePrepare{View: 0, SequenceNumber: i},
			prepare:    []*Prepare{prepare, prepare, prepare},
			commit:     []*Commit{commit, commit, commit},
		}
	}

	vset := make([]*ViewChange, 3)

	cset := []*ViewChange_C{
		{
			SequenceNumber: newViewBaseSeqNo,
			Id:             base64.StdEncoding.EncodeToString([]byte("Ten")),
		},
	}

	for i := 0; i < 3; i++ {
		// Each replica sent a checkpoint for seqNo 10 (newViewBaseSeqNo)
		vset[i] = &ViewChange{
			H:    newViewBaseSeqNo,
			Cset: cset,
		}
	}

	xset := make(map[uint64]string)
	xset[11] = ""

	instance.lastExec = 9

	instance.newViewStore[1] = &NewView{
		View:      1,
		Vset:      vset,
		Xset:      xset,
		ReplicaId: 1,
	}

	if instance.processNewView() != nil {
		t.Fatalf("Should not have processed the new view")
	}

	events.SendEvent(instance, execDoneEvent{})

	if !instance.activeView {
		t.Fatalf("Should have finished processing new view after executions")
	}
}
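A sketch of the decision rule this test relies on, stated with hypothetical names: if the replica already holds a commit certificate for every sequence number between its last execution and the new view's base checkpoint, it can execute its way to that checkpoint instead of transferring state.

// Sketch only (assumed rule, hypothetical helper; not pbftCore): prefer
// executing held commit certificates over state transfer when they reach
// the new-view base checkpoint.
package main

import "fmt"

func canExecuteToCheckpoint(lastExec, base uint64, haveCert func(seqNo uint64) bool) bool {
	for n := lastExec + 1; n <= base; n++ {
		if !haveCert(n) {
			return false // a gap in the certificates forces state transfer
		}
	}
	return true
}

func main() {
	certs := map[uint64]bool{7: true, 8: true, 9: true, 10: true}
	ok := canExecuteToCheckpoint(6, 10, func(n uint64) bool { return certs[n] })
	fmt.Println(ok) // true: execution alone can catch the replica up to seqNo 10
}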