Example 1
func TestViewChangeResend(t *testing.T) {
	viewChangeSent := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		broadcastImpl: func(b []byte) { viewChangeSent = true },
		signImpl:      func(b []byte) ([]byte, error) { return b, nil },
		verifyImpl:    func(senderID uint64, signature []byte, message []byte) error { return nil },
	}, &inertTimerFactory{})
	instance.activeView = true

	events.SendEvent(instance, viewChangeResendTimerEvent{})

	if viewChangeSent {
		t.Fatalf("Should not have resent the view change while in an active view")
	}

	oldView := uint64(2)
	instance.activeView = false
	instance.view = oldView
	events.SendEvent(instance, viewChangeResendTimerEvent{})

	if !viewChangeSent {
		t.Fatalf("Should have resent the view change while waiting on a view change")
	}

	if instance.activeView {
		t.Fatalf("Should still be in an inactive view")
	}

	if instance.view != oldView {
		t.Fatalf("Should still be waiting for the same view (%d) got %d", oldView, instance.view)
	}
}
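
The invariants this test pins down can be summarized in a small self-contained sketch; replica and onViewChangeResendTimer below are hypothetical stand-ins, not the actual pbftCore API:

package main

import "fmt"

// replica is a hypothetical stand-in for pbftCore, carrying only the two
// fields the test above inspects.
type replica struct {
	activeView bool
	view       uint64
}

// onViewChangeResendTimer sketches the behavior under test: the timer is
// ignored in an active view; otherwise the view change is re-broadcast for
// the *same* view, never advancing it.
func (r *replica) onViewChangeResendTimer(broadcast func(view uint64)) {
	if r.activeView {
		return // nothing to resend
	}
	broadcast(r.view) // resend for the view we are still waiting on
}

func main() {
	r := &replica{activeView: false, view: 2}
	r.onViewChangeResendTimer(func(v uint64) {
		fmt.Println("resent view change for view", v)
	})
}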
Example 2
// Test for issue #1119
func TestSendQueueThrottling(t *testing.T) {
	prePreparesSent := 0

	mock := &omniProto{}
	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1 // tolerate one faulty replica
	instance.K = 2 // checkpoint interval
	instance.L = 4 // log size (watermark window)
	instance.consumer = &omniProto{
		validateImpl: func(p []byte) error { return nil },
		broadcastImpl: func(p []byte) {
			prePreparesSent++
		},
	}
	defer instance.close()

	for j := 0; j < 4; j++ {
		events.SendEvent(instance, &Request{
			Timestamp: &gp.Timestamp{Seconds: int64(j), Nanos: 0},
			Payload:   []byte(fmt.Sprintf("%d", j)),
		})
	}

	expected := 2
	if prePreparesSent != expected {
		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
	}
}
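
The expected count of 2 reflects the throttling rule under test: the primary only assigns sequence numbers in the first portion of its log window and queues the rest. A minimal sketch, assuming an h + L/2 cutoff inferred from the test's expectations (the real bound lives inside pbftCore):

package main

import "fmt"

// canAssign sketches the send-queue throttle: the primary only assigns
// sequence numbers up to h+L/2, queueing later requests for resubmission.
// The exact cutoff is an assumption inferred from the test above.
func canAssign(n, h, L uint64) bool {
	return n > h && n <= h+L/2
}

func main() {
	// With h = 0 and L = 4, only sequence numbers 1 and 2 qualify,
	// matching the two pre-prepares the test expects.
	for n := uint64(1); n <= 4; n++ {
		fmt.Printf("seqNo %d assignable: %v\n", n, canAssign(n, 0, 4))
	}
}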
Example 3
func TestStateTransferredToOldPoint(t *testing.T) {
	skipped := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			skipped = true
		},
		invalidateStateImpl: func() {},
	}, &inertTimerFactory{})
	instance.moveWatermarks(90)
	instance.updateHighStateTarget(&stateUpdateTarget{
		checkpointMessage: checkpointMessage{
			seqNo: 100,
			id:    []byte("onehundred"),
		},
	})

	events.SendEvent(instance, stateUpdatedEvent{
		chkpt: &checkpointMessage{
			seqNo: 10,
		},
	})

	if !skipped {
		t.Fatalf("Expected state transfer to be re-initiated, since the state update only reached an old checkpoint")
	}
}
Example 4
// process synchronously drains all buffered events, dispatching each one to
// the mock's target; the default case returns as soon as the channel is empty.
func (mock *mockEventManager) process() {
	for {
		select {
		case ev := <-mock.bufferedChannel:
			events.SendEvent(mock.target, ev)
		default:
			return
		}
	}
}
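
The same drain-until-empty pattern is shown below as a self-contained sketch; the event type and channel setup are hypothetical stand-ins for the mock's fields:

package main

import "fmt"

// event stands in for the manager's event type.
type event interface{}

// drain delivers buffered events one at a time until the channel is empty,
// then returns without blocking, mirroring mockEventManager.process above.
func drain(buffered chan event, deliver func(event)) {
	for {
		select {
		case ev := <-buffered:
			deliver(ev)
		default:
			return // channel empty: stop without blocking
		}
	}
}

func main() {
	buffered := make(chan event, 4)
	buffered <- "first"
	buffered <- "second"
	drain(buffered, func(ev event) { fmt.Println("delivered:", ev) })
}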
Example 5
func TestReplicaPersistQSet(t *testing.T) {
	persist := make(map[string][]byte)

	stack := &omniProto{
		validateImpl: func(b []byte) error {
			return nil
		},
		broadcastImpl: func(msg []byte) {
		},
		StoreStateImpl: func(key string, value []byte) error {
			persist[key] = value
			return nil
		},
		DelStateImpl: func(key string) {
			delete(persist, key)
		},
		ReadStateImpl: func(key string) ([]byte, error) {
			if val, ok := persist[key]; ok {
				return val, nil
			}
			return nil, fmt.Errorf("key not found")
		},
		ReadStateSetImpl: func(prefix string) (map[string][]byte, error) {
			r := make(map[string][]byte)
			for k, v := range persist {
				if strings.HasPrefix(k, prefix) {
					r[k] = v
				}
			}
			return r, nil
		},
	}
	p := newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
	req := &Request{
		Timestamp: &gp.Timestamp{Seconds: 1, Nanos: 0},
		Payload:   []byte("foo"),
		ReplicaId: uint64(0),
	}
	events.SendEvent(p, &PrePrepare{
		View:           0,
		SequenceNumber: 1,
		RequestDigest:  hashReq(req),
		Request:        req,
		ReplicaId:      uint64(0),
	})
	p.close()

	p = newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
	if !p.prePrepared(hashReq(req), 0, 1) {
		t.Errorf("did not restore qset properly")
	}
}
Example 6
// Test for issue #1091
// Once the primary ran out of sequence numbers, it would queue requests in a map
// and resubmit them in arbitrary order. This is incorrect: they must be resubmitted
// in timestamp order (see the sketch after this test)
func TestSendQueueOrdering(t *testing.T) {
	prePreparesSent := 0

	mock := &omniProto{}
	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 100
	lastTime := &gp.Timestamp{Seconds: 0, Nanos: 0}

	instance.consumer = &omniProto{
		validateImpl: func(p []byte) error { return nil },
		broadcastImpl: func(p []byte) {
			msg := &Message{}
			err := proto.Unmarshal(p, msg)
			if err != nil {
				t.Fatalf("Error unmarshaling payload: %v", err)
			}
			prePrep := msg.GetPrePrepare()
			if prePrep == nil {
				// not a preprepare, ignoring
				return
			}
			req := prePrep.Request
			if lastTime.Seconds > req.Timestamp.Seconds {
				t.Fatalf("Did not arrive in order, got %d after %d", req.Timestamp.Seconds, lastTime.Seconds)
			}
			lastTime = req.Timestamp

			// As each pre-prepare is sent, delete it from the outstanding requests, like it executed
			delete(instance.outstandingReqs, prePrep.RequestDigest)
			prePreparesSent++
		},
	}
	defer instance.close()

	for j := 1; j <= 100; j++ {
		events.SendEvent(instance, &Request{
			Timestamp: &gp.Timestamp{Seconds: int64(j), Nanos: 0},
			Payload:   []byte(fmt.Sprintf("%d", j)),
		})
	}

	instance.moveWatermarks(50)

	expected := 100
	if prePreparesSent != expected {
		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
	}
}
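
The ordering requirement stated above amounts to sorting the queued requests by timestamp before resubmission. A minimal self-contained sketch (the real resubmission path in pbftCore is more involved, and the names below are hypothetical):

package main

import (
	"fmt"
	"sort"
)

// queuedRequest is a hypothetical stand-in for a queued Request, reduced
// to the timestamp the ordering requirement cares about.
type queuedRequest struct {
	seconds int64
	payload string
}

// resubmitInOrder drains a map of queued requests in timestamp order,
// rather than in Go's arbitrary map-iteration order.
func resubmitInOrder(queued map[string]queuedRequest, submit func(queuedRequest)) {
	reqs := make([]queuedRequest, 0, len(queued))
	for _, r := range queued {
		reqs = append(reqs, r)
	}
	sort.Slice(reqs, func(i, j int) bool { return reqs[i].seconds < reqs[j].seconds })
	for _, r := range reqs {
		submit(r)
	}
}

func main() {
	queued := map[string]queuedRequest{
		"b": {seconds: 2, payload: "second"},
		"a": {seconds: 1, payload: "first"},
		"c": {seconds: 3, payload: "third"},
	}
	resubmitInOrder(queued, func(r queuedRequest) { fmt.Println(r.payload) })
}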
Example 7
// From issue #687
func TestWitnessCheckpointOutOfBounds(t *testing.T) {
	mock := &omniProto{}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	defer instance.close()

	events.SendEvent(instance, &Checkpoint{
		SequenceNumber: 6,
		ReplicaId:      0,
	})

	instance.moveWatermarks(6)

	// This causes the list of high checkpoints to grow to f+1,
	// even though there are not f+1 checkpoints witnessed outside our range.
	// Historically, this caused an index out of bounds error (see the sketch
	// after this test)
	events.SendEvent(instance, &Checkpoint{
		SequenceNumber: 10,
		ReplicaId:      3,
	})
}
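
The historical bug referenced in this test can be illustrated with a small sketch: selecting the f+1-th highest out-of-range checkpoint is only safe after checking that f+1 such checkpoints actually exist (all names below are hypothetical):

package main

import (
	"fmt"
	"sort"
)

// weakCertTarget returns the f+1-th highest checkpoint sequence number
// witnessed above the high watermark, guarding against the case the test
// above exercises: fewer than f+1 such checkpoints, where blind indexing
// would run out of bounds.
func weakCertTarget(aboveHigh []uint64, f int) (uint64, bool) {
	if len(aboveHigh) < f+1 {
		return 0, false // not enough witnesses yet
	}
	sort.Slice(aboveHigh, func(i, j int) bool { return aboveHigh[i] > aboveHigh[j] })
	return aboveHigh[f], true
}

func main() {
	fmt.Println(weakCertTarget([]uint64{10}, 1))     // 0 false: only one witness
	fmt.Println(weakCertTarget([]uint64{10, 12}, 1)) // 10 true: f+1 witnesses
}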
Example 8
// From issue #687
func TestWitnessFallBehindMissingPrePrepare(t *testing.T) {
	mock := &omniProto{}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	instance.f = 1
	instance.K = 2
	instance.L = 4
	defer instance.close()

	events.SendEvent(instance, &Commit{
		SequenceNumber: 2,
		ReplicaId:      0,
	})

	// Historically, the lack of a prePrepare associated with the commit would
	// cause a nil pointer dereference (see the sketch after this test)
	instance.moveWatermarks(6)
}
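
A sketch of the defensive check the fix required; the types below mirror only the field involved, and all names are hypothetical:

package main

import "fmt"

// prePrepareSketch and certSketch mirror just enough structure to show the
// hazard: a commit can create a cert entry whose prePrepare is still nil.
type prePrepareSketch struct{ sequenceNumber uint64 }
type certSketch struct{ prePrepare *prePrepareSketch }

// seqNoOf dereferences the pre-prepare only after a nil check, which is
// the guard whose absence caused the historical nil pointer dereference.
func seqNoOf(cert *certSketch) (uint64, bool) {
	if cert == nil || cert.prePrepare == nil {
		return 0, false
	}
	return cert.prePrepare.sequenceNumber, true
}

func main() {
	fmt.Println(seqNoOf(&certSketch{})) // 0 false: commit without pre-prepare
}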
Example 9
func TestMaliciousPrePrepare(t *testing.T) {
	mock := &omniProto{
		broadcastImpl: func(msgPayload []byte) {
			t.Fatalf("Expected to ignore malicious pre-prepare")
		},
	}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	defer instance.close()
	instance.replicaCount = 5

	// The digest deliberately does not match the request, simulating a malicious primary
	digest1 := "hi there"
	request2 := &Request{Payload: []byte("other"), ReplicaId: uint64(generateBroadcaster(instance.replicaCount))}

	pbftMsg := &Message_PrePrepare{&PrePrepare{
		View:           0,
		SequenceNumber: 1,
		RequestDigest:  digest1,
		Request:        request2,
		ReplicaId:      0,
	}}
	events.SendEvent(instance, pbftMsg)
}
Example 10
func TestIncompletePayload(t *testing.T) {
	mock := &omniProto{
		validateImpl: func(msg []byte) error {
			return nil
		},
	}
	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
	defer instance.close()
	instance.replicaCount = 5

	broadcaster := uint64(generateBroadcaster(instance.replicaCount))

	checkMsg := func(msg *Message, errMsg string, args ...interface{}) {
		mock.broadcastImpl = func(msgPayload []byte) {
			t.Errorf(errMsg, args...)
		}
		events.SendEvent(instance, pbftMessageEvent{msg: msg, sender: broadcaster})
	}

	checkMsg(&Message{}, "Expected to reject empty message")
	checkMsg(&Message{&Message_Request{&Request{ReplicaId: broadcaster}}}, "Expected to reject empty request")
	checkMsg(&Message{&Message_PrePrepare{&PrePrepare{ReplicaId: broadcaster}}}, "Expected to reject empty pre-prepare")
}
Example 11
func TestStateNetworkMovesOnDuringSlowStateTransfer(t *testing.T) {
	instance := newPbftCore(3, loadConfig(), &omniProto{
		skipToImpl:          func(s uint64, id []byte, replicas []uint64) {},
		invalidateStateImpl: func() {},
	}, &inertTimerFactory{})
	instance.skipInProgress = true

	seqNo := uint64(20)

	for i := uint64(0); i < 3; i++ {
		events.SendEvent(instance, &Checkpoint{
			SequenceNumber: seqNo,
			ReplicaId:      i,
			Id:             base64.StdEncoding.EncodeToString([]byte("twenty")),
		})
	}

	if instance.h != seqNo {
		t.Fatalf("Expected watermark movement to %d because of state transfer, but low watermark is %d", seqNo, instance.h)
	}
}
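
The watermark movement asserted here follows the usual PBFT checkpoint rule: with N = 3f+1 replicas, 2f+1 matching checkpoint messages form a stable certificate, and the three messages sent above are exactly 2f+1 for the default f = 1. A one-line sketch of that threshold (assuming the default four-replica configuration):

package main

import "fmt"

// isStableCheckpoint sketches the certificate threshold: 2f+1 matching
// checkpoint messages make a checkpoint stable, letting the low watermark
// advance even while state transfer is still in progress.
func isStableCheckpoint(matching, f int) bool {
	return matching >= 2*f+1
}

func main() {
	fmt.Println(isStableCheckpoint(3, 1)) // true: the three messages sent above suffice
}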
Example 12
func TestViewChangeDuringExecution(t *testing.T) {
	skipped := false
	instance := newPbftCore(3, loadConfig(), &omniProto{
		viewChangeImpl: func(v uint64) {},
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			skipped = true
		},
		invalidateStateImpl: func() {},
		broadcastImpl:       func(b []byte) {},
		signImpl:            func(b []byte) ([]byte, error) { return b, nil },
		verifyImpl:          func(senderID uint64, signature []byte, message []byte) error { return nil },
	}, &inertTimerFactory{})
	instance.activeView = false
	instance.view = 1
	instance.lastExec = 1
	nextExec := uint64(2)
	instance.currentExec = &nextExec

	vset := make([]*ViewChange, 3)

	cset := []*ViewChange_C{
		{
			SequenceNumber: 100,
			Id:             base64.StdEncoding.EncodeToString([]byte("onehundred")),
		},
	}

	// Replicas 0, 1, and 2 all sent checkpoints for sequence number 100
	for i := 0; i < 3; i++ {
		vset[i] = &ViewChange{
			H:    90,
			Cset: cset,
		}
	}

	xset := make(map[uint64]string)
	xset[101] = ""

	instance.newViewStore[1] = &NewView{
		View:      1,
		Vset:      vset,
		Xset:      xset,
		ReplicaId: 1,
	}

	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
		t.Fatalf("Failed to successfully process new view")
	}

	if skipped {
		t.Fatalf("Expected state transfer not to be kicked off until execution completes")
	}

	events.SendEvent(instance, execDoneEvent{})

	if !skipped {
		t.Fatalf("Expected state transfer to be kicked off once execution completed")
	}
}
Example 13
// TestViewChangeCanExecuteToCheckpoint tests a replica mid-execution which receives a new view whose checkpoint is above its watermarks,
// but which holds enough commit certificates to reach that checkpoint. State transfer should not occur; completing the pending
// executions should finish the view change instead (see the sketch after this test)
func TestViewChangeCanExecuteToCheckpoint(t *testing.T) {
	instance := newPbftCore(3, loadConfig(), &omniProto{
		broadcastImpl: func(b []byte) {},
		getStateImpl:  func() []byte { return []byte("state") },
		signImpl:      func(b []byte) ([]byte, error) { return b, nil },
		verifyImpl:    func(senderID uint64, signature []byte, message []byte) error { return nil },
		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
			t.Fatalf("Should not have performed state transfer, should have caught up via execution")
		},
	}, &inertTimerFactory{})
	instance.activeView = false
	instance.view = 1
	instance.lastExec = 5
	newViewBaseSeqNo := uint64(10)
	nextExec := uint64(6)
	instance.currentExec = &nextExec

	for i := nextExec + 1; i <= newViewBaseSeqNo; i++ {
		commit := &Commit{View: 0, SequenceNumber: i}
		prepare := &Prepare{View: 0, SequenceNumber: i}
		instance.certStore[msgID{v: 0, n: i}] = &msgCert{
			digest:     "", // null request
			prePrepare: &PrePrepare{View: 0, SequenceNumber: i},
			prepare:    []*Prepare{prepare, prepare, prepare},
			commit:     []*Commit{commit, commit, commit},
		}
	}

	vset := make([]*ViewChange, 3)

	cset := []*ViewChange_C{
		{
			SequenceNumber: newViewBaseSeqNo,
			Id:             base64.StdEncoding.EncodeToString([]byte("Ten")),
		},
	}

	for i := 0; i < 3; i++ {
		// Each replica sent a checkpoint for sequence number 10
		vset[i] = &ViewChange{
			H:    newViewBaseSeqNo,
			Cset: cset,
		}
	}

	xset := make(map[uint64]string)
	xset[11] = ""

	instance.lastExec = 9

	instance.newViewStore[1] = &NewView{
		View:      1,
		Vset:      vset,
		Xset:      xset,
		ReplicaId: 1,
	}

	if instance.processNewView() != nil {
		t.Fatalf("Should not have processed the new view")
	}

	events.SendEvent(instance, execDoneEvent{})

	if !instance.activeView {
		t.Fatalf("Should have finished processing new view after executions")
	}
}
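
The decision this test pins down can be sketched as: if commit certificates cover every sequence number up to the new view's checkpoint, catch up by executing; only a gap forces state transfer. The names below are hypothetical:

package main

import "fmt"

// canExecuteToCheckpoint reports whether commit certificates cover every
// sequence number from lastExec+1 through the new view's checkpoint; if
// so, the replica catches up by execution instead of state transfer.
func canExecuteToCheckpoint(lastExec, checkpoint uint64, hasCert func(n uint64) bool) bool {
	for n := lastExec + 1; n <= checkpoint; n++ {
		if !hasCert(n) {
			return false // gap: state transfer required
		}
	}
	return true
}

func main() {
	certs := map[uint64]bool{7: true, 8: true, 9: true, 10: true}
	fmt.Println(canExecuteToCheckpoint(6, 10, func(n uint64) bool { return certs[n] }))
}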
Example 14
func TestOutstandingReqsResubmission(t *testing.T) {
	omni := &omniProto{}
	b := newObcBatch(0, loadConfig(), omni)
	defer b.Close() // The broadcasting threads only cause problems here... but this test stalls without them

	transactionsBroadcast := 0
	omni.ExecuteImpl = func(tag interface{}, txs []*pb.Transaction) {
		transactionsBroadcast += len(txs)
		logger.Debugf("\nExecuting %d transactions (%v)\n", len(txs), txs)
		nextExec := b.pbft.lastExec + 1
		b.pbft.currentExec = &nextExec
		b.manager.Inject(executedEvent{tag: tag})
	}

	omni.CommitImpl = func(tag interface{}, meta []byte) {
		b.manager.Inject(committedEvent{})
	}

	omni.UnicastImpl = func(ocMsg *pb.Message, dest *pb.PeerID) error {
		return nil
	}

	reqs := make([]*Request, 8)
	for i := 0; i < len(reqs); i++ {
		reqs[i] = createPbftRequestWithChainTx(int64(i), 0)
	}

	// Add four requests, with a batch size of 2
	b.reqStore.storeOutstanding(reqs[0])
	b.reqStore.storeOutstanding(reqs[1])
	b.reqStore.storeOutstanding(reqs[2])
	b.reqStore.storeOutstanding(reqs[3])

	executed := make(map[string]struct{})
	execute := func() {
		for d, req := range b.pbft.outstandingReqs {
			if _, ok := executed[d]; ok {
				continue
			}
			executed[d] = struct{}{}
			b.execute(b.pbft.lastExec+1, req.Payload)
		}
	}

	tmp := uint64(1)
	b.pbft.currentExec = &tmp
	events.SendEvent(b, committedEvent{})
	execute()

	if b.reqStore.outstandingRequests.Len() != 0 {
		t.Fatalf("All requests should have been executed and deleted after exec")
	}

	// Simulate changing views, with a request in the qSet, and one outstanding which is not
	wreq := reqs[4]

	reqsPacked, err := proto.Marshal(&RequestBlock{[]*Request{wreq}})
	if err != nil {
		t.Fatalf("Unable to pack block for new batch request")
	}

	breq := &Request{Payload: reqsPacked}
	prePrep := &PrePrepare{
		View:           0,
		SequenceNumber: b.pbft.lastExec + 1,
		RequestDigest:  "foo",
		Request:        breq,
	}

	b.pbft.certStore[msgID{v: prePrep.View, n: prePrep.SequenceNumber}] = &msgCert{prePrepare: prePrep}

	// Re-add the already pre-prepared request as outstanding, along with three more
	// outstanding requests which are neither pending nor prepared
	b.reqStore.storeOutstanding(wreq) // reqs[4], already in the qSet
	b.reqStore.storeOutstanding(reqs[5])
	b.reqStore.storeOutstanding(reqs[6])
	b.reqStore.storeOutstanding(reqs[7])

	events.SendEvent(b, viewChangedEvent{})
	execute()

	if b.reqStore.hasNonPending() {
		t.Errorf("All requests should have been resubmitted after view change")
	}

	// One request should remain batched but not yet sent
	expected := 6
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}

	events.SendEvent(b, batchTimerEvent{})
	execute()

	// If the already pre-prepared request were resubmitted, the count here would be 8
	expected = 7
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}
}