Example #1
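// newTestServerWithDefaults builds a two-node test ring, wraps it in a
// MockRingBuilderThings backed by placeholder ring/builder bytes, and returns
// a test server whose netlimits are restricted to 10.0.0.0/24 and 1.2.3.0/24.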
func newTestServerWithDefaults() (*Server, *MockRingBuilderThings) {
	b := ring.NewBuilder(64)
	b.SetReplicaCount(3)
	b.AddNode(true, 1, []string{"server1", "zone1"}, []string{"1.2.3.4:56789"}, "server1|meta one", []byte("Conf Thing1"))
	b.AddNode(true, 1, []string{"dummy1", "zone42"}, []string{"1.42.42.42:56789"}, "dummy1|meta one", []byte("Dummy Conf"))
	ring := b.Ring()

	rbytes := []byte("imnotaring")
	bbytes := []byte("imnotbuilder")

	mock := &MockRingBuilderThings{
		builderPath:  "/tmp/test.builder",
		ringPath:     "/tmp/test.ring",
		ring:         ring,
		ringbytes:    &rbytes,
		builder:      b,
		builderbytes: &bbytes,
		managedNodes: make(map[uint64]ManagedNode, 0),
		slaves:       make([]*RingSlave, 0),
		changeChan:   make(chan *changeMsg, 1),
	}
	s := newTestServer(&Config{}, "test", mock)
	_, netblock, _ := net.ParseCIDR("10.0.0.0/24")
	s.netlimits = append(s.netlimits, netblock)
	_, netblock, _ = net.ParseCIDR("1.2.3.0/24")
	s.netlimits = append(s.netlimits, netblock)
	return s, mock
}
Example #2
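// getTestRing returns a builder populated with three nodes (the third one
// inactive with zero capacity) along with the ring built from it.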
func getTestRing() (*ring.Builder, ring.Ring) {
	b := ring.NewBuilder(64)
	b.SetReplicaCount(3)
	b.AddNode(true, 1, []string{"server1", "zone1"}, []string{"1.2.3.4:56789"}, "Meta One", []byte("Conf"))
	b.AddNode(true, 1, []string{"server2", "zone1"}, []string{"1.2.3.5:56789", "1.2.3.5:9876"}, "Meta Four", []byte("Conf"))
	b.AddNode(false, 0, []string{"server3", "zone1"}, []string{"1.2.3.6:56789"}, "Meta Three", []byte("Conf"))
	return b, b.Ring()
}
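// TestGroupBulkSetAckMsgIncoming writes a value, then feeds a bulk-set-ack
// for that key through the store's incoming ack channel; since the local node
// is not responsible for the key, the ack should leave the value marked as
// locally removed.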
func TestGroupBulkSetAckMsgIncoming(t *testing.T) {
	b := ring.NewBuilder(64)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID() + 1) // so we're not responsible for anything
	m := &msgRingPlaceholder{ring: r}
	cfg := lowMemGroupStoreConfig()
	cfg.MsgRing = m
	cfg.InBulkSetAckWorkers = 1
	cfg.InBulkSetAckMsgs = 1
	store, _, err := NewGroupStore(cfg)
	if err != nil {
		t.Fatal(err)
	}
	store.EnableAll()
	defer store.DisableAll()
	ts, err := store.write(1, 2, 3, 4, 0x500, []byte("testing"), true)
	if err != nil {
		t.Fatal(err)
	}
	if ts != 0 {
		t.Fatal(ts)
	}
	// just double check the item is there
	ts2, v, err := store.read(1, 2, 3, 4, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ts2 != 0x500 {
		t.Fatal(ts2)
	}
	if string(v) != "testing" {
		t.Fatal(string(v))
	}
	bsam := <-store.bulkSetAckState.inFreeMsgChan
	bsam.body = bsam.body[:0]
	if !bsam.add(1, 2, 3, 4, 0x500) {
		t.Fatal("bsam.add failed")
	}
	store.bulkSetAckState.inMsgChan <- bsam
	// only one of these, so if we get it back we know the previous data was
	// processed
	<-store.bulkSetAckState.inFreeMsgChan
	// Make sure the item is gone
	ts2, v, err = store.read(1, 2, 3, 4, nil)
	if err != ErrNotFound {
		t.Fatal(err)
	}
	if ts2 != 0x500|_TSB_LOCAL_REMOVAL {
		t.Fatal(ts2)
	}
	if string(v) != "" {
		t.Fatal(string(v))
	}
}
Example #4
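// TestBulkSetAckMsgIncoming is the value-store counterpart of the group store
// test above: an incoming bulk-set-ack for a key the local node is not
// responsible for should leave the value locally removed.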
func TestBulkSetAckMsgIncoming(t *testing.T) {
	b := ring.NewBuilder(64)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID() + 1) // so we're not responsible for anything
	m := &msgRingPlaceholder{ring: r}
	vs := New(&Config{
		MsgRing:             m,
		InBulkSetAckWorkers: 1,
		InBulkSetAckMsgs:    1,
	})
	vs.EnableAll()
	defer vs.DisableAll()
	ts, err := vs.write(1, 2, 0x300, []byte("testing"))
	if err != nil {
		t.Fatal(err)
	}
	if ts != 0 {
		t.Fatal(ts)
	}
	// just double check the item is there
	ts2, v, err := vs.read(1, 2, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ts2 != 0x300 {
		t.Fatal(ts2)
	}
	if string(v) != "testing" {
		t.Fatal(string(v))
	}
	bsam := <-vs.bulkSetAckState.inFreeMsgChan
	bsam.body = bsam.body[:0]
	if !bsam.add(1, 2, 0x300) {
		t.Fatal("bsam.add failed")
	}
	vs.bulkSetAckState.inMsgChan <- bsam
	// only one of these, so if we get it back we know the previous data was
	// processed
	<-vs.bulkSetAckState.inFreeMsgChan
	// Make sure the item is gone
	ts2, v, err = vs.read(1, 2, nil)
	if err != ErrNotFound {
		t.Fatal(err)
	}
	if ts2 != 0x300|_TSB_LOCAL_REMOVAL {
		t.Fatal(ts2)
	}
	if string(v) != "" {
		t.Fatal(string(v))
	}
}
Example #5
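// TestBulkSetMsgOutDefaultsToFromLocalNode checks that a freshly created
// outgoing bulk-set message carries the local node's ID in its header.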
func TestBulkSetMsgOutDefaultsToFromLocalNode(t *testing.T) {
	b := ring.NewBuilder()
	n := b.AddNode(true, 1, nil, nil, "", nil)
	r := b.Ring()
	r.SetLocalNode(n.ID())
	vs := New(&Config{MsgRing: &msgRingPlaceholder{ring: r}})
	bsm := vs.newOutBulkSetMsg()
	if binary.BigEndian.Uint64(bsm.header) != n.ID() {
		t.Fatal(bsm)
	}
}
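// TestGroupBulkSetMsgWithAck pushes a bulk-set message marked as coming from
// node 123 through the incoming channel, then verifies the value becomes
// readable and that exactly one ack message was sent back to node 123.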
func TestGroupBulkSetMsgWithAck(t *testing.T) {
	b := ring.NewBuilder(64)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID())
	m := &msgRingPlaceholder{ring: r}
	cfg := lowMemGroupStoreConfig()
	cfg.MsgRing = m
	cfg.InBulkSetWorkers = 1
	cfg.InBulkSetMsgs = 1
	store, _, err := NewGroupStore(cfg)
	if err != nil {
		t.Fatal(err)
	}
	store.EnableAll()
	defer store.DisableAll()
	bsm := <-store.bulkSetState.inFreeMsgChan
	binary.BigEndian.PutUint64(bsm.header, 123)
	bsm.body = bsm.body[:0]
	if !bsm.add(1, 2, 3, 4, 0x500, []byte("testing")) {
		t.Fatal("bsm.add failed")
	}
	store.bulkSetState.inMsgChan <- bsm
	// only one of these, so if we get it back we know the previous data was
	// processed
	<-store.bulkSetState.inFreeMsgChan
	ts, v, err := store.Read(1, 2, 3, 4, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ts != 5 { // the bottom 8 bits are discarded for the public Read
		t.Fatal(ts)
	}
	if string(v) != "testing" {
		t.Fatal(string(v))
	}
	m.lock.Lock()
	v2 := len(m.msgToNodeIDs)
	m.lock.Unlock()
	if v2 != 1 {
		t.Fatal(v2)
	}
	m.lock.Lock()
	v3 := m.msgToNodeIDs[0]
	m.lock.Unlock()
	if v3 != 123 {
		t.Fatal(v3)
	}
}
Example #7
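// TestServer_ParseConfig builds a mock-backed test server and verifies that
// parseConfig fills every unset field with its default (ports, ring dir,
// cert/key files, net and tier filters).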
func TestServer_ParseConfig(t *testing.T) {
	b := ring.NewBuilder(64)
	b.SetReplicaCount(3)
	b.AddNode(true, 1, []string{"server1", "zone1"}, []string{"1.2.3.4:56789"}, "server1|meta one", []byte("Conf Thing1"))
	b.AddNode(true, 1, []string{"dummy1", "zone42"}, []string{"1.42.42.42:56789"}, "dummy1|meta one", []byte("Dummy Conf"))
	ring := b.Ring()

	rbytes := []byte("imnotaring")
	bbytes := []byte("imnotbuilder")
	mock := &MockRingBuilderThings{
		builderPath:  "/tmp/test.builder",
		ringPath:     "/tmp/test.ring",
		ring:         ring,
		ringbytes:    &rbytes,
		builder:      b,
		builderbytes: &bbytes,
		managedNodes: make(map[uint64]ManagedNode, 0),
		slaves:       make([]*RingSlave, 0),
		changeChan:   make(chan *changeMsg, 1),
	}
	s := newTestServer(&Config{}, "test", mock)
	s.parseConfig()

	if s.cfg.NetFilter == nil {
		t.Errorf("Failed to set default NetFilter")
	}
	if s.cfg.TierFilter == nil {
		t.Errorf("Failed to set default TierFilter")
	}
	if s.cfg.Port != DefaultPort {
		t.Errorf("Failed to set default Port: %#v", s.cfg.Port)
	}
	if s.cfg.MsgRingPort != DefaultMsgRingPort {
		t.Errorf("Failed to set default MsgRingPort: %#v", s.cfg.MsgRingPort)
	}
	if s.cfg.CmdCtrlPort != DefaultCmdCtrlPort {
		t.Errorf("Failed to set default CmdCtrlPort: %#v", s.cfg.CmdCtrlPort)
	}
	if s.cfg.RingDir != filepath.Join(DefaultRingDir, s.servicename) {
		t.Errorf("Failed to set default RingDir: %#v", s.cfg.RingDir)
	}
	if s.cfg.CertFile != DefaultCertFile {
		t.Errorf("Failed to set default CertFile: %#v", s.cfg.CertFile)
	}
	if s.cfg.KeyFile != DefaultCertKey {
		t.Errorf("Failed to set default KeyFile: %#v", s.cfg.KeyFile)
	}
}
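// TestGroupPullReplicationSimple writes a value on a two-replica ring, runs
// an outgoing pull-replication pass, and asserts that at least one of the
// emitted bloom filters may contain the written key.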
func TestGroupPullReplicationSimple(t *testing.T) {
	b := ring.NewBuilder(64)
	b.SetReplicaCount(2)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	_, err = b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID())
	m := &msgRingGroupPullReplicationTester{ring: r}
	cfg := lowMemGroupStoreConfig()
	cfg.MsgRing = m
	store, _, err := NewGroupStore(cfg)
	if err != nil {
		t.Fatal(err)
	}
	store.EnableAll()
	defer store.DisableAll()
	_, err = store.write(1, 2, 3, 4, 0x500, []byte("testing"), false)
	if err != nil {
		t.Fatal(err)
	}
	store.OutPullReplicationPass()
	m.lock.Lock()
	v := len(m.headerToPartitions)
	m.lock.Unlock()
	if v == 0 {
		t.Fatal(v)
	}
	mayHave := false
	m.lock.Lock()
	for i := 0; i < len(m.headerToPartitions); i++ {
		prm := &groupPullReplicationMsg{store: store, header: m.headerToPartitions[i], body: m.bodyToPartitions[i]}
		bf := prm.ktBloomFilter()
		if bf.mayHave(1, 2, 3, 4, 0x500) {
			mayHave = true
		}
	}
	m.lock.Unlock()
	if !mayHave {
		t.Fatal("no pull replication message may have the written key")
	}
}
Example #9
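// TestBulkSetMsgWithAck is the value-store counterpart of the group store
// test above: an incoming bulk-set from node 123 should store the value and
// trigger exactly one ack back to node 123.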
func TestBulkSetMsgWithAck(t *testing.T) {
	b := ring.NewBuilder()
	n := b.AddNode(true, 1, nil, nil, "", nil)
	r := b.Ring()
	r.SetLocalNode(n.ID())
	m := &msgRingPlaceholder{ring: r}
	vs := New(&Config{
		MsgRing:          m,
		InBulkSetWorkers: 1,
		InBulkSetMsgs:    1,
	})
	vs.EnableAll()
	defer vs.DisableAll()
	bsm := <-vs.bulkSetState.inFreeMsgChan
	binary.BigEndian.PutUint64(bsm.header, 123)
	bsm.body = bsm.body[:0]
	if !bsm.add(1, 2, 0x300, []byte("testing")) {
		t.Fatal("bsm.add failed")
	}
	vs.bulkSetState.inMsgChan <- bsm
	// only one of these, so if we get it back we know the previous data was
	// processed
	<-vs.bulkSetState.inFreeMsgChan
	ts, v, err := vs.Read(1, 2, nil)
	if err != nil {
		t.Fatal(err)
	}
	if ts != 3 { // the bottom 8 bits are discarded for the public Read
		t.Fatal(ts)
	}
	if string(v) != "testing" {
		t.Fatal(string(v))
	}
	m.lock.Lock()
	v2 := len(m.msgToNodeIDs)
	m.lock.Unlock()
	if v2 != 1 {
		t.Fatal(v2)
	}
	m.lock.Lock()
	v3 := m.msgToNodeIDs[0]
	m.lock.Unlock()
	if v3 != 123 {
		t.Fatal(v3)
	}
}
Example #10
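// TestGroupBulkSetMsgOutDefaultsToFromLocalNode checks that the group store's
// freshly created outgoing bulk-set message carries the local node's ID in
// its header.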
func TestGroupBulkSetMsgOutDefaultsToFromLocalNode(t *testing.T) {
	b := ring.NewBuilder(64)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID())
	cfg := lowMemGroupStoreConfig()
	cfg.MsgRing = &msgRingPlaceholder{ring: r}
	store, _, err := NewGroupStore(cfg)
	if err != nil {
		t.Fatal(err)
	}
	bsm := store.newOutBulkSetMsg()
	if binary.BigEndian.Uint64(bsm.header) != n.ID() {
		t.Fatal(bsm)
	}
}
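// TestPullReplicationSimple is the value-store counterpart of
// TestGroupPullReplicationSimple: after a write and an outgoing
// pull-replication pass, some emitted bloom filter should report that it may
// have the written key.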
func TestPullReplicationSimple(t *testing.T) {
	b := ring.NewBuilder(64)
	b.SetReplicaCount(2)
	n, err := b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	_, err = b.AddNode(true, 1, nil, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := b.Ring()
	r.SetLocalNode(n.ID())
	m := &msgRingPullReplicationTester{ring: r}
	vs := New(&Config{MsgRing: m})
	vs.EnableAll()
	defer vs.DisableAll()
	_, err = vs.write(1, 2, 0x300, []byte("testing"))
	if err != nil {
		t.Fatal(err)
	}
	vs.OutPullReplicationPass()
	m.lock.Lock()
	v := len(m.headerToPartitions)
	m.lock.Unlock()
	if v == 0 {
		t.Fatal(v)
	}
	mayHave := false
	m.lock.Lock()
	for i := 0; i < len(m.headerToPartitions); i++ {
		prm := &pullReplicationMsg{vs: vs, header: m.headerToPartitions[i], body: m.bodyToPartitions[i]}
		bf := prm.ktBloomFilter()
		if bf.mayHave(1, 2, 0x300) {
			mayHave = true
		}
	}
	m.lock.Unlock()
	if !mayHave {
		t.Fatal("no pull replication message may have the written key")
	}
}
Example #12
File: main.go Project: wreese/valuestore
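// createCmd creates a new ring builder file at filename, refusing to
// overwrite an existing one. Arguments are assumed to arrive as name=value
// pairs (replicas, points-allowed, max-partition-bits, move-wait); values are
// clamped to their valid ranges before the builder is persisted.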
func createCmd(filename string, args []string) error {
	replicaCount := 3
	pointsAllowed := 1
	maxPartitionBitCount := 23
	moveWait := 60
	var err error
	for _, arg := range args {
		// Each arg is assumed to be a name=value pair; parse the value,
		// not the option name itself (requires the strings package).
		sarg := strings.SplitN(arg, "=", 2)
		if len(sarg) != 2 {
			continue
		}
		switch sarg[0] {
		case "replicas":
			if replicaCount, err = strconv.Atoi(sarg[1]); err != nil {
				return err
			}
			if replicaCount < 1 {
				replicaCount = 1
			}
		case "points-allowed":
			if pointsAllowed, err = strconv.Atoi(sarg[1]); err != nil {
				return err
			}
			if pointsAllowed < 0 {
				pointsAllowed = 0
			} else if pointsAllowed > 255 {
				pointsAllowed = 255
			}
		case "max-partition-bits":
			if maxPartitionBitCount, err = strconv.Atoi(sarg[1]); err != nil {
				return err
			}
			if maxPartitionBitCount < 1 {
				maxPartitionBitCount = 1
			} else if maxPartitionBitCount > 64 {
				maxPartitionBitCount = 64
			}
		case "move-wait":
			if moveWait, err = strconv.Atoi(sarg[1]); err != nil {
				return err
			}
			if moveWait < 0 {
				moveWait = 0
			} else if moveWait > math.MaxUint16 {
				moveWait = math.MaxUint16
			}
		}
	}
	if _, err = os.Stat(filename); err == nil {
		return fmt.Errorf("file already exists")
	}
	if !os.IsNotExist(err) {
		return err
	}
	var f *os.File
	if f, err = os.Create(filename); err != nil {
		return err
	}
	b := ring.NewBuilder()
	b.SetReplicaCount(replicaCount)
	b.SetPointsAllowed(byte(pointsAllowed))
	b.SetMaxPartitionBitCount(uint16(maxPartitionBitCount))
	b.SetMoveWait(uint16(moveWait))
	if err = b.Persist(f); err != nil {
		return err
	}
	if err = f.Close(); err != nil {
		return err
	}
	return nil
}