Example 1
func TestMultipleInitiators(t *testing.T) {
	t.Parallel()
	// Scenario: inst1, inst2 and inst3 all start the protocol with inst4 at the same time.
	// Expected outcome: inst4 successfully transfers state to all of them
	peers := make(map[string]*pullTestInstance)
	inst1 := newPushPullTestInstance("p1", peers)
	inst2 := newPushPullTestInstance("p2", peers)
	inst3 := newPushPullTestInstance("p3", peers)
	inst4 := newPushPullTestInstance("p4", peers)
	defer inst1.stop()
	defer inst2.stop()
	defer inst3.stop()
	defer inst4.stop()

	inst4.Add("1", "2", "3", "4")
	inst1.setNextPeerSelection([]string{"p4"})
	inst2.setNextPeerSelection([]string{"p4"})
	inst3.setNextPeerSelection([]string{"p4"})

	time.Sleep(2 * time.Second)

	for _, inst := range []*pullTestInstance{inst1, inst2, inst3} {
		assert.True(t, util.IndexInSlice(inst.state.ToArray(), "1", Strcmp) != -1)
		assert.True(t, util.IndexInSlice(inst.state.ToArray(), "2", Strcmp) != -1)
		assert.True(t, util.IndexInSlice(inst.state.ToArray(), "3", Strcmp) != -1)
		assert.True(t, util.IndexInSlice(inst.state.ToArray(), "4", Strcmp) != -1)
	}

}
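The assertions throughout these tests rely on util.IndexInSlice together with a Strcmp comparator, neither of which is shown here. Below is a minimal, self-contained sketch of the assumed contract: an index lookup driven by a caller-supplied equality function that returns -1 when the item is absent. The real util.IndexInSlice may be implemented differently (e.g. via reflection over arbitrary slice types); this is only an illustration.

package main

import "fmt"

// Strcmp treats both arguments as strings and compares them for equality,
// mirroring how the tests use it.
func Strcmp(a interface{}, b interface{}) bool {
	return a.(string) == b.(string)
}

// IndexInSlice returns the index of o in array according to equals, or -1
// if o is absent. This is a simplified stand-in for util.IndexInSlice.
func IndexInSlice(array []interface{}, o interface{}, equals func(a interface{}, b interface{}) bool) int {
	for i, v := range array {
		if equals(v, o) {
			return i
		}
	}
	return -1
}

func main() {
	state := []interface{}{"1", "2", "3"}
	fmt.Println(IndexInSlice(state, "2", Strcmp)) // 1
	fmt.Println(IndexInSlice(state, "9", Strcmp)) // -1
}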
Example 2
// resurrectMember moves a member that was previously considered dead back
// into the alive membership view, refreshing its timestamps and cached data.
func (d *gossipDiscoveryImpl) resurrectMember(am *proto.AliveMessage, t proto.PeerTime) {
	d.logger.Info("Entering, AliveMessage:", am, "t:", t)
	defer d.logger.Info("Exiting")
	d.lock.Lock()
	defer d.lock.Unlock()

	member := am.Membership
	pkiID := member.PkiID
	d.aliveLastTS[string(pkiID)] = &timestamp{
		lastSeen: time.Now(),
		seqNum:   t.SeqNum,
		incTime:  tsToTime(t.IncNumber),
	}

	d.id2Member[string(pkiID)] = &NetworkMember{
		Endpoint: member.Endpoint,
		Metadata: member.Metadata,
		PKIid:    member.PkiID,
	}
	delete(d.deadLastTS, string(pkiID))
	aliveMsgWithID := &proto.AliveMessage{
		Membership: &proto.Member{PkiID: pkiID},
	}

	i := util.IndexInSlice(d.cachedMembership.Dead, aliveMsgWithID, samePKIidAliveMessage)
	if i != -1 {
		d.cachedMembership.Dead = append(d.cachedMembership.Dead[:i], d.cachedMembership.Dead[i+1:]...)
	}

	if util.IndexInSlice(d.cachedMembership.Alive, am, samePKIidAliveMessage) == -1 {
		d.cachedMembership.Alive = append(d.cachedMembership.Alive, am)
	}
}
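resurrectMember locates the member's cached AliveMessage by PKI-ID alone, which is why aliveMsgWithID carries nothing but the PkiID. The samePKIidAliveMessage comparator is not shown here; the following sketch only illustrates the contract the lookups above assume, namely that two AliveMessages match when their Membership.PkiID fields are equal (PkiID is assumed to be a byte slice, so the sketch uses bytes.Equal):

// samePKIidAliveMessage reports whether two AliveMessages refer to the same
// member, comparing only their PKI-IDs (assumed shape; requires "bytes").
func samePKIidAliveMessage(a interface{}, b interface{}) bool {
	id1 := a.(*proto.AliveMessage).Membership.PkiID
	id2 := b.(*proto.AliveMessage).Membership.PkiID
	return bytes.Equal(id1, id2)
}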
Example 3
func TestBiDiUpdates(t *testing.T) {
	t.Parallel()
	// Scenario: inst1 has {1,3} and inst2 has {0,2}, and both initiate to the other at the same time.
	// Expected outcome: both end up with {0,1,2,3}
	peers := make(map[string]*pullTestInstance)
	inst1 := newPushPullTestInstance("p1", peers)
	inst2 := newPushPullTestInstance("p2", peers)
	defer inst1.stop()
	defer inst2.stop()

	inst1.Add("1", "3")
	inst2.Add("0", "2")

	inst1.setNextPeerSelection([]string{"p2"})
	inst2.setNextPeerSelection([]string{"p1"})

	time.Sleep(2 * time.Second)

	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "0", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "1", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "2", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "3", Strcmp) != -1)

	assert.True(t, util.IndexInSlice(inst2.state.ToArray(), "0", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst2.state.ToArray(), "1", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst2.state.ToArray(), "2", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst2.state.ToArray(), "3", Strcmp) != -1)

}
Example 4
func TestLatePeers(t *testing.T) {
	t.Parallel()
	// Scenario: inst1 initiates to inst2 (items: {1,2,3,4}) and inst3 (items: {5,6,7,8}),
	// but inst2 is too slow to respond, so inst1 should end up
	// with inst3's items only.
	peers := make(map[string]*pullTestInstance)
	inst1 := newPushPullTestInstance("p1", peers)
	inst2 := newPushPullTestInstance("p2", peers)
	inst3 := newPushPullTestInstance("p3", peers)
	defer inst1.stop()
	defer inst2.stop()
	defer inst3.stop()
	inst2.Add("1", "2", "3", "4")
	inst3.Add("5", "6", "7", "8")
	inst2.hook(func(m interface{}) {
		time.Sleep(600 * time.Millisecond)
	})
	inst1.setNextPeerSelection([]string{"p2", "p3"})

	time.Sleep(2 * time.Second)

	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "1", Strcmp) == -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "2", Strcmp) == -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "3", Strcmp) == -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "4", Strcmp) == -1)

	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "5", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "6", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "7", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "8", Strcmp) != -1)

}
Example 5
func TestByzantineResponder(t *testing.T) {
	t.Parallel()
	// Scenario: inst1 sends hello to inst2, but inst3 is byzantine and attempts to send a digest and a response to inst1.
	// Expected outcome: inst1 does not process updates from inst3.
	peers := make(map[string]*pullTestInstance)
	inst1 := newPushPullTestInstance("p1", peers)
	inst2 := newPushPullTestInstance("p2", peers)
	inst3 := newPushPullTestInstance("p3", peers)
	defer inst1.stop()
	defer inst2.stop()
	defer inst3.stop()

	receivedDigestFromInst3 := int32(0)

	inst2.Add("1", "2", "3")
	inst3.Add("1", "6", "7")

	inst2.hook(func(m interface{}) {
		if _, isHello := m.(*helloMsg); isHello {
			inst3.SendDigest([]string{"5", "6", "7"}, 0, "p1")
		}
	})

	inst1.hook(func(m interface{}) {
		if dig, isDig := m.(*digestMsg); isDig {
			if dig.source == "p3" {
				atomic.StoreInt32(&receivedDigestFromInst3, int32(1))
				time.AfterFunc(150*time.Millisecond, func() {
					inst3.SendRes([]string{"5", "6", "7"}, "p1", 0)
				})
			}
		}

		if res, isRes := m.(*resMsg); isRes {
			// the response is from p3
			if util.IndexInSlice(res.items, "6", Strcmp) != -1 {
				// inst1 is currently accepting responses
				assert.Equal(t, int32(1), atomic.LoadInt32(&(inst1.acceptingResponses)), "inst1 is not accepting responses")
			}
		}
	})

	inst1.setNextPeerSelection([]string{"p2"})

	time.Sleep(time.Second)

	assert.Equal(t, int32(1), atomic.LoadInt32(&receivedDigestFromInst3), "inst1 hasn't received a digest from inst3")

	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "1", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "2", Strcmp) != -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "3", Strcmp) != -1)

	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "5", Strcmp) == -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "6", Strcmp) == -1)
	assert.True(t, util.IndexInSlice(inst1.state.ToArray(), "7", Strcmp) == -1)

}
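The assertion on inst1.acceptingResponses suggests the engine gates incoming responses on an atomic flag that is raised only for the window between sending requests and the response deadline. Below is a minimal sketch of that gating pattern; the type and method names are illustrative assumptions, not the engine's actual API:

package main

import (
	"fmt"
	"sync/atomic"
)

// engine drops any response that arrives outside the response window.
type engine struct {
	acceptingResponses int32
}

// onRes accepts items only while the response window is open; unsolicited or
// late responses, such as those from a byzantine peer, are ignored.
func (e *engine) onRes(items []string) []string {
	if atomic.LoadInt32(&e.acceptingResponses) == 0 {
		return nil
	}
	return items // in a real engine, merge into local state here
}

func main() {
	e := &engine{}
	fmt.Println(e.onRes([]string{"5", "6", "7"})) // window closed: []
	atomic.StoreInt32(&e.acceptingResponses, 1)
	fmt.Println(e.onRes([]string{"5", "6", "7"})) // window open: [5 6 7]
}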
Example 6
// learnExistingMembers refreshes the data and aliveness bookkeeping of
// members that are already known to the discovery layer.
func (d *gossipDiscoveryImpl) learnExistingMembers(aliveArr []*proto.AliveMessage) {
	d.logger.Infof("Entering: learnedMembers={%v}", aliveArr)
	defer d.logger.Debug("Exiting")

	d.lock.Lock()
	defer d.lock.Unlock()

	for _, am := range aliveArr {
		d.logger.Debug("updating", am)
		// update member's data
		member := d.id2Member[string(am.Membership.PkiID)]
		member.Endpoint = am.Membership.Endpoint
		member.Metadata = am.Membership.Metadata

		if _, isKnownAsDead := d.deadLastTS[string(am.Membership.PkiID)]; isKnownAsDead {
			d.logger.Warning(am.Membership.Endpoint, "has already expired")
			continue
		}

		if _, isKnownAsAlive := d.aliveLastTS[string(am.Membership.PkiID)]; !isKnownAsAlive {
			d.logger.Warning(am.Membership.Endpoint, "is not known as alive")
			continue
		}

		d.logger.Debug("Updating aliveness data:", am)
		// update existing aliveness data
		alive := d.aliveLastTS[string(am.Membership.PkiID)]
		alive.incTime = tsToTime(am.Timestamp.IncNumber)
		alive.lastSeen = time.Now()
		alive.seqNum = am.Timestamp.SeqNum

		i := util.IndexInSlice(d.cachedMembership.Alive, am, samePKIidAliveMessage)
		if i == -1 {
			d.logger.Debug("Appended", am, "to d.cachedMembership.Alive")
			d.cachedMembership.Alive = append(d.cachedMembership.Alive, am)
		} else {
			d.logger.Debug("Replaced", am, "in d.cachedMembership.Alive")
			d.cachedMembership.Alive[i] = am
		}
	}
}
Example 7
// expireDeadMembers moves the given members from the alive view to the dead
// view and closes their connections.
func (d *gossipDiscoveryImpl) expireDeadMembers(dead []common.PKIidType) {
	d.logger.Warning("Entering", dead)
	defer d.logger.Warning("Exiting")

	var deadMembers2Expire []*NetworkMember

	d.lock.Lock()

	for _, pkiID := range dead {
		lastTS, isAlive := d.aliveLastTS[string(pkiID)]
		if !isAlive {
			continue
		}
		deadMembers2Expire = append(deadMembers2Expire, d.id2Member[string(pkiID)])
		// move the last-seen timestamp from the alive map to the dead map
		d.deadLastTS[string(pkiID)] = lastTS
		delete(d.aliveLastTS, string(pkiID))

		aliveMsgWithPKIid := &proto.AliveMessage{
			Membership: &proto.Member{PkiID: pkiID},
		}
		aliveMemberIndex := util.IndexInSlice(d.cachedMembership.Alive, aliveMsgWithPKIid, samePKIidAliveMessage)
		if aliveMemberIndex != -1 {
			// Move the alive member to the dead members
			d.cachedMembership.Dead = append(d.cachedMembership.Dead, d.cachedMembership.Alive[aliveMemberIndex])
			// Delete the alive member from the cached membership
			d.cachedMembership.Alive = append(d.cachedMembership.Alive[:aliveMemberIndex], d.cachedMembership.Alive[aliveMemberIndex+1:]...)
		}
	}

	d.lock.Unlock()

	for _, member2Expire := range deadMembers2Expire {
		d.logger.Warning("Closing connection to", member2Expire.Endpoint)
		d.comm.CloseConn(member2Expire)
	}
}
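Note the lock scope here: the members to expire are collected while d.lock is held, but the CloseConn calls run only after d.lock.Unlock(). Closing a connection may block on network I/O, so holding the discovery lock across those calls could stall every other discovery operation.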
Example 8
func TestPullEngineSelectiveUpdates(t *testing.T) {
	t.Parallel()
	// Scenario: inst1 has {1,3} and inst2 has {0,1,2,3}.
	// inst1 initiates to inst2.
	// Expected outcome: inst1 requests only 0 and 2, and inst2 sends only 0 and 2
	peers := make(map[string]*pullTestInstance)
	inst1 := newPushPullTestInstance("p1", peers)
	inst2 := newPushPullTestInstance("p2", peers)
	defer inst1.stop()
	defer inst2.stop()

	inst1.Add("1", "3")
	inst2.Add("0", "1", "2", "3")

	// Ensure inst2 sent a proper digest to inst1
	inst1.hook(func(m interface{}) {
		if dig, isDig := m.(*digestMsg); isDig {
			assert.True(t, util.IndexInSlice(dig.digest, "0", Strcmp) != -1)
			assert.True(t, util.IndexInSlice(dig.digest, "1", Strcmp) != -1)
			assert.True(t, util.IndexInSlice(dig.digest, "2", Strcmp) != -1)
			assert.True(t, util.IndexInSlice(dig.digest, "3", Strcmp) != -1)
		}
	})

	// Ensure inst1 requested only needed updates from inst2
	inst2.hook(func(m interface{}) {
		if req, isReq := m.(*reqMsg); isReq {
			assert.True(t, util.IndexInSlice(req.items, "1", Strcmp) == -1)
			assert.True(t, util.IndexInSlice(req.items, "3", Strcmp) == -1)

			assert.True(t, util.IndexInSlice(req.items, "0", Strcmp) != -1)
			assert.True(t, util.IndexInSlice(req.items, "2", Strcmp) != -1)
		}
	})

	// Ensure inst1 received only needed updates from inst2
	inst1.hook(func(m interface{}) {
		if res, isRes := m.(*resMsg); isRes {
			assert.True(t, util.IndexInSlice(res.items, "1", Strcmp) == -1)
			assert.True(t, util.IndexInSlice(res.items, "3", Strcmp) == -1)

			assert.True(t, util.IndexInSlice(res.items, "0", Strcmp) != -1)
			assert.True(t, util.IndexInSlice(res.items, "2", Strcmp) != -1)
		}
	})

	inst1.setNextPeerSelection([]string{"p2"})

	time.Sleep(2 * time.Second)
	assert.Equal(t, len(inst2.state.ToArray()), len(inst1.state.ToArray()))
}
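This test registers two separate hooks on inst1 (one checking digests, one checking responses), which implies the harness keeps a list of hooks and invokes each of them for every incoming message. A plausible sketch of that registration, assuming a msgHooks slice guarded by the instance's lock (these field names are assumptions, not the harness's actual ones):

// hook appends f to the instance's message hooks; each registered hook is
// assumed to be invoked for every message the instance receives.
func (p *pullTestInstance) hook(f func(m interface{})) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.msgHooks = append(p.msgHooks, f)
}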