Example #1
// Leases returns a map of all assigned leases, keyed by each lease's IP
// address in string form (lease.IP.String()).
func (p *LeasePool) Leases() (map[string]Lease, error) {
	p.dataLock.Lock()
	defer p.dataLock.Unlock()
	leases := make(map[string]Lease, 10)

	ctxGet, cancelGet := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancelGet()
	response, err := p.dataSource.Get(ctxGet, path.Join(p.etcdDir, "/leases"), &etcd.GetOptions{Recursive: true})

	if err != nil {
		etcdError, found := err.(etcd.Error)
		if found && etcdError.Code == etcd.ErrorCodeKeyNotFound {
			// handle key not found
			ctxSet, cancelSet := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancelSet()
			_, err := p.dataSource.Set(ctxSet, path.Join(p.etcdDir, "/leases"), "", &etcd.SetOptions{Dir: true})
			if err != nil {
				return nil, err
			}
			return leases, nil
		}
		return nil, err
	}
	for i := range response.Node.Nodes {
		var lease Lease
		err := json.Unmarshal([]byte(response.Node.Nodes[i].Value), &lease)
		if err == nil {
			leases[lease.IP.String()] = lease
		} else {
			return nil, ErrFoundInvalidLease
		}
	}
	return leases, nil
}
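The Lease type itself is defined elsewhere in the project. As a rough sketch of what the example assumes (hypothetical, not taken from the original source), a compatible definition could be:

// Hypothetical Lease type; field names and tags are assumptions.
type Lease struct {
	IP     net.IP    `json:"ip"`     // lease.IP.String() keys the returned map
	MAC    string    `json:"mac"`    // client hardware address
	Expiry time.Time `json:"expiry"` // when the lease lapses
}

net.IP implements encoding.TextMarshaler, so json.Unmarshal fills IP directly from its string form.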
Example #2
File: etcc.go Project: jonahwu/lab
func main() {
	cfg := client.Config{
		Endpoints: []string{"http://172.16.235.128:4001"},
		Transport: client.DefaultTransport,
		// set timeout per request to fail fast when the target endpoint is unavailable
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	// set "/woo" key with "bar" value
	log.Print("Setting '/woo' key with 'bar' value")
	resp, err := kapi.Set(context.Background(), "/woo", "bar", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		// print common key info
		log.Printf("Set is done. Metadata is %q\n", resp)
	}
	// get "/woo" key's value
	log.Print("Getting '/woo' key value")
	resp, err = kapi.Get(context.Background(), "/woo", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		// print common key info
		log.Printf("Get is done. Metadata is %q\n", resp)
		// print value
		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
	}
}
Example #3
File: etcd.go Project: lsc/mgmt
// helper function to store our data in etcd
func (etcdO *EtcdWObject) EtcdPut(hostname, key, res string, obj interface{}) bool {
	kapi := etcdO.GetKAPI()
	output, ok := ObjToB64(obj)
	if !ok {
		log.Printf("Etcd: Could not encode %v key.", key)
		return false
	}

	path := fmt.Sprintf("/exported/%s/resources/%s/res", hostname, key)
	_, err := kapi.Set(etcd_context.Background(), path, res, nil)
	if err != nil {
		log.Printf("Etcd: Could not store %v key.", key)
		return false
	}

	path = fmt.Sprintf("/exported/%s/resources/%s/value", hostname, key)
	resp, err := kapi.Set(etcd_context.Background(), path, output, nil)
	if err != nil {
		if cerr, ok := err.(*etcd.ClusterError); ok {
			// not running or disconnected
			for _, e := range cerr.Errors {
				// e.g. e == etcd.ErrClusterUnavailable, or a dial error
				// such as "getsockopt: connection refused"
				if strings.HasSuffix(e.Error(), "getsockopt: connection refused") {
					log.Printf("Etcd: connection refused: %v", e)
				}
			}
		}
		log.Printf("Etcd: Could not store %v key.", key)
		return false
	}
	log.Print("Etcd: ", resp) // w00t... bonus
	return true
}
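ObjToB64 is a helper from the same project. A plausible sketch, assuming gob encoding wrapped in base64 (the real implementation may differ):

// ObjToB64 gob-encodes obj and returns the base64 of the resulting bytes,
// plus false if encoding fails.
func ObjToB64(obj interface{}) (string, bool) {
	var b bytes.Buffer
	if err := gob.NewEncoder(&b).Encode(obj); err != nil {
		log.Printf("Gob failed to encode: %v", err)
		return "", false
	}
	return base64.StdEncoding.EncodeToString(b.Bytes()), true
}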
Example #4
func TestMemberUpdate(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clientv3.NewCluster(clus.RandClient())
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	urls := []string{"http://127.0.0.1:1234"}
	_, err = capi.MemberUpdate(context.Background(), resp.Members[0].ID, urls)
	if err != nil {
		t.Fatalf("failed to update member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if !reflect.DeepEqual(resp.Members[0].PeerURLs, urls) {
		t.Errorf("urls = %v, want %v", urls, resp.Members[0].PeerURLs)
	}
}
Example #5
func TestMemberRemove(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clientv3.NewCluster(clus.Client(1))
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	_, err = capi.MemberRemove(context.Background(), resp.Members[0].ID)
	if err != nil {
		t.Fatalf("failed to remove member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if len(resp.Members) != 2 {
		t.Errorf("number of members = %d, want %d", len(resp.Members), 2)
	}
}
Example #6
func main() {
	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: client.DefaultTransport,
		// set timeout per request to fail fast when the target endpoint is unavailable
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	resp, err := kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		//		fmt.Println("oops:", err)
		msg := fmt.Sprintf("%s", err)
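		// etcd v2 error strings start with the error code; 100 is "Key not found"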
		if strings.HasPrefix(msg, "100:") {
			fmt.Println("oops:", msg)
		}
		_, err2 := kapi.Set(context.Background(), "foo", "bar", nil)
		if err2 != nil {
			log.Fatal(err2)
		}
	} else {
		fmt.Println(resp.Node.Value)
	}
}
Example #7
// TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
// client to confirm it's visible to the whole cluster.
func TestV3LeaseExists(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create lease
	ctx0, cancel0 := context.WithCancel(context.Background())
	defer cancel0()
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		ctx0,
		&pb.LeaseCreateRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// confirm keepalive
	ctx1, cancel1 := context.WithCancel(context.Background())
	defer cancel1()
	lac, err := clus.RandClient().Lease.LeaseKeepAlive(ctx1)
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()
	if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}
	if _, err = lac.Recv(); err != nil {
		t.Fatal(err)
	}
}
Example #8
func Conn() {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379/", "http://127.0.0.1:4001"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}

	c, err := client.New(cfg)

	if err != nil {
		log.Fatal(err)
	}

	kapi := client.NewKeysAPI(c)

	log.Print("Setting '/foo' key with 'bar' value")
	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Printf("Set is done.Metadata is %q\n", resp)
	}

	log.Print("Getting '/foo' key value")
	resp, err = kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Printf("Get is done,Metadata is %q\n", resp)
		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
	}

}
Example #9
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != v3rpc.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
	}
}
Example #10
func TestLeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp, ok := <-rc
	if !ok {
		t.Errorf("chan is closed, want not closed")
	}

	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok = <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #11
func ExampleKV_compact() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	compRev := resp.Header.Revision // specify compact revision of your choice

	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	err = kvc.Compact(ctx, compRev)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
Example #12
func TestEmptyAddrs(t *testing.T) {
	servers, r := startServers(t, 1, 0, math.MaxUint32)
	cc, err := Dial("foo.bar.com", WithPicker(NewUnicastNamingPicker(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
	if err != nil {
		t.Fatalf("Failed to create ClientConn: %v", err)
	}
	var reply string
	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want <nil>", err)
	}
	// Inject name resolution change to remove the server address so that there is no address
	// available after that.
	var updates []*naming.Update
	updates = append(updates, &naming.Update{
		Op:   naming.Delete,
		Addr: "127.0.0.1:" + servers[0].port,
	})
	r.w.inject(updates)
	// Loop until the above updates apply.
	for {
		time.Sleep(10 * time.Millisecond)
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond)
		if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil {
			break
		}
	}
	cc.Close()
	servers[0].stop()
}
Example #13
func ExampleKV_getSortedPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	for i := range make([]int, 3) {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		_, err = kvc.Put(ctx, fmt.Sprintf("key_%d", i), "value")
		cancel()
		if err != nil {
			log.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output:
	// key_2 : value
	// key_1 : value
	// key_0 : value
}
Example #14
func actionUserPasswd(c *cli.Context) {
	api, user := mustUserAPIAndName(c)
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	currentUser, err := api.GetUser(ctx, user)
	cancel()
	if currentUser == nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	pass, err := speakeasy.Ask("New password: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error reading password:", err)
		os.Exit(1)
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	_, err = api.ChangePassword(ctx, user, pass)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	fmt.Printf("Password updated\n")
}
Example #15
func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
	cancel()
	if err != nil {
		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
			return nil, 0, 0, ErrSizeNotFound
		}
		if err == client.ErrInvalidJSON {
			return nil, 0, 0, ErrBadDiscoveryEndpoint
		}
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster, nil)
	cancel()
	if err != nil {
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	nodes := make([]*client.Node, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if !(path.Base(n.Key) == path.Base(configKey)) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, resp.Index, ErrFullCluster
		}
	}
	return nodes, size, resp.Index, nil
}
Example #16
func (e *etcd) SaveRoute(route models.Route) error {
	key := generateHttpRouteKey(route)

	retries := 0

	for retries <= maxRetries {
		response, err := e.keysAPI.Get(context.Background(), key, readOpts())

		// Update
		if response != nil && err == nil {
			var existingRoute models.Route
			err = json.Unmarshal([]byte(response.Node.Value), &existingRoute)
			if err != nil {
				return err
			}

			route.ModificationTag = existingRoute.ModificationTag
			route.ModificationTag.Increment()

			routeJSON, _ := json.Marshal(route)
			_, err = e.keysAPI.Set(context.Background(), key, string(routeJSON), updateOptsWithTTL(route.TTL, response.Node.ModifiedIndex))
			if err == nil {
				break
			}
		} else if cerr, ok := err.(client.Error); ok && cerr.Code == client.ErrorCodeKeyNotFound { //create
			// Delete came in between a read and an update
			if retries > 0 {
				return ErrorConflict
			}

			var tag models.ModificationTag
			tag, err = models.NewModificationTag()
			if err != nil {
				return err
			}
			route.ModificationTag = tag
			routeJSON, _ := json.Marshal(route)
			_, err = e.keysAPI.Set(ctx(), key, string(routeJSON), createOpts(route.TTL))
			if err == nil {
				break
			}
		}

		// only retry on a compare and swap error
		if cerr, ok := err.(client.Error); ok && cerr.Code == client.ErrorCodeTestFailed {
			retries++
		} else {
			return err
		}
	}

	if retries > maxRetries {
		return ErrorConflict
	}
	return nil
}
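The option helpers are project-local; the retry loop only needs them to turn Set into an atomic create or compare-and-swap. Hypothetical sketches against the etcd v2 client API (the TTL handling here is an assumption):

func readOpts() *client.GetOptions {
	return &client.GetOptions{Quorum: true} // read through the leader for consistency
}

func createOpts(ttl int) *client.SetOptions {
	// PrevNoExist makes the Set an atomic create; it fails if the key exists
	return &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevNoExist}
}

func updateOptsWithTTL(ttl int, prevIndex uint64) *client.SetOptions {
	// PrevIndex makes the Set a compare-and-swap: a concurrent write bumps
	// ModifiedIndex, the Set fails with ErrorCodeTestFailed, and the loop retries
	return &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevIndex: prevIndex}
}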
Example #17
func TestClientWithMisbehavedServer(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, misbehaved)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo",
	}
	conn, ok := ct.(*http2Client)
	if !ok {
		t.Fatalf("Failed to convert %v to *http2Client", ct)
	}
	// Test the logic for the violation of stream flow control window size restriction.
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		t.Fatalf("Failed to open stream: %v", err)
	}
	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
		t.Fatalf("Failed to write: %v", err)
	}
	// Read without window update.
	for {
		p := make([]byte, http2MaxFrameLen)
		if _, err = s.dec.Read(p); err != nil {
			break
		}
	}
	if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 {
		t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0)
	}
	if err != io.EOF || s.statusCode != codes.Internal {
		t.Fatalf("Got err %v and the status code %d, want <EOF> and the code %d", err, s.statusCode, codes.Internal)
	}
	conn.CloseStream(s, err)
	if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize {
		t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize)
	}
	// Test the logic for the violation of the connection flow control window size restriction.
	//
	// Generate enough streams to drain the connection window.
	callHdr = &CallHdr{
		Host:   "localhost",
		Method: "foo.MaxFrame",
	}
	for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ {
		s, err := ct.NewStream(context.Background(), callHdr)
		if err != nil {
			t.Fatalf("Failed to open stream: %v", err)
		}
		if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
			break
		}
	}
	// http2Client.errChan is closed due to connection flow control window size violation.
	<-conn.Error()
	ct.Close()
	server.stop()
}
Example #18
func TestReporter(t *testing.T) {
	fmt.Printf("Test: etcd reporter...\n")
	filePath := "../example/services/tcp_service.json"
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		t.Fatalf("SRegister: service file %s doesn't exists. Error: %v.", filePath, err)
	}

	sc := &configuration.ServiceConf{}
	jsonErr := json.Unmarshal(data, sc)

	if jsonErr != nil {
		t.Fatalf("SRegister: parse service file %s failed. Error: %v.", filePath, jsonErr)
	}

	etcdRep := &EtcdReporter{}

	err = etcdRep.NewReporter(sc)

	if err != nil {
		t.Fatalf("reporter: new reporter failed. %v", err)
	}

	if etcdRep.path == "/" {
		t.Fatalf("reporter: new reporter failed")
	}

	etcdRep.ReportUp()

	response, err := etcdRep.etcdClient.Get(context.Background(), etcdRep.key, &client.GetOptions{})

	if err != nil || response.Node == nil || response.Node.Value == "" {
		t.Fatalf("reporter: reporter up failed, get null value")
	}

	if !etcdRep.Ping() {
		t.Fatalf("reporter: the key doesn't exist after reporting up")
	}

	key := etcdRep.key
	etcdRep.ReportDown()

	_, err = etcdRep.etcdClient.Get(context.Background(), key, &client.GetOptions{})

	realErr, ok := err.(client.Error)
	if !ok || realErr.Code != client.ErrorCodeKeyNotFound {
		t.Fatalf("reporter: reporter down failed")
	}

	fmt.Printf("... PASS\n")

}
Example #19
func (d *discovery) createSelf(contents string) error {
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err := d.c.Create(ctx, d.selfKey(), contents, -1)
	cancel()
	if err != nil {
		return err
	}

	// ensure self appears on the server we connected to
	w := d.c.Watch(d.selfKey(), resp.Node.CreatedIndex)
	_, err = w.Next(context.Background())
	return err
}
Example #20
func runStream(b *testing.B, maxConcurrentCalls int) {
	s := stats.AddStats(b, 38)
	b.StopTimer()
	target, stopper := StartServer("localhost:0")
	defer stopper()
	conn := NewClientConn(target)
	tc := testpb.NewTestServiceClient(conn)

	// Warm up connection.
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	for i := 0; i < 10; i++ {
		streamCaller(tc, stream)
	}

	ch := make(chan int, maxConcurrentCalls*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(maxConcurrentCalls)

	// Distribute the b.N calls over maxConcurrentCalls workers.
	for i := 0; i < maxConcurrentCalls; i++ {
		go func() {
			stream, err := tc.StreamingCall(context.Background())
			if err != nil {
				b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
			}
			for range ch {
				start := time.Now()
				streamCaller(tc, stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		ch <- i
	}
	b.StopTimer()
	close(ch)
	wg.Wait()
	conn.Close()
}
Example #21
func (d *discovery) checkCluster() (client.Nodes, int, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"))
	cancel()
	if err != nil {
		if err == client.ErrKeyNoExist {
			return nil, 0, ErrSizeNotFound
		}
		if err == client.ErrTimeout {
			return d.checkClusterRetry()
		}
		return nil, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster)
	cancel()
	if err != nil {
		if err == client.ErrTimeout {
			return d.checkClusterRetry()
		}
		return nil, 0, err
	}
	nodes := make(client.Nodes, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if !(path.Base(n.Key) == path.Base(configKey)) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, ErrFullCluster
		}
	}
	return nodes, size, nil
}
Example #22
// TODO: add a client that can connect to all the members of cluster via unix sock.
// TODO: test handle more complicated failures.
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
	t.Skip("test it when we have a cluster client")

	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// TODO: change this line to get a cluster client
	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp := <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// restart the connected member.
	clus.Members[0].Stop(t)

	select {
	case <-rc:
		t.Fatalf("unexpected keepalive")
	case <-time.After(10*time.Second/3 + 1):
	}

	// recover the member.
	clus.Members[0].Restart(t)

	kresp = <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok := <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #23
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil watcher channel")
	}
	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
		t.Fatal(err)
	}
	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("took too long to cancel")
	case v, ok := <-wctx.ch:
		if !ok {
			// closed before getting put; OK
			break
		}
		// got the PUT; should close next
		select {
		case <-time.After(time.Second):
			t.Fatalf("took too long to close")
		case v, ok = <-wctx.ch:
			if ok {
				t.Fatalf("expected watcher channel to close, got %v", v)
			}
		}
	}
}
Example #24
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(c *cli.Context) {
	if len(c.Args()) != 0 {
		panic("unexpected args")
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	conn, err := grpc.Dial("127.0.0.1:12379")
	if err != nil {
		panic(err)
	}
	etcd := pb.NewEtcdClient(conn)

	resp, err := etcd.Txn(context.Background(), txn)
	if err != nil {
		fmt.Println(err)
		os.Exit(1) // resp is nil on error; don't fall through to resp.Succeeded
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
Example #25
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(timeout time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		plog.Panicf("json marshal error: %v", err)
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			return
		case ErrStopped:
			plog.Infof("aborting publish because server is stopped")
			return
		default:
			plog.Errorf("publish error: %v", err)
		}
	}
}
Example #26
func TestHTTPKeysAPIDeleteResponse(t *testing.T) {
	client := &staticHTTPClient{
		resp: http.Response{
			StatusCode: http.StatusOK,
			Header:     http.Header{"X-Etcd-Index": []string{"22"}},
		},
		body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`),
	}

	wantResponse := &Response{
		Action:   "delete",
		Node:     &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)},
		PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)},
		Index:    uint64(22),
	}

	kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
	resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil)
	if err != nil {
		t.Errorf("non-nil error: %#v", err)
	}
	if !reflect.DeepEqual(wantResponse, resp) {
		t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
	}
}
Example #27
func (c *cluster) addMember(t *testing.T) {
	m := c.mustNewMember(t)

	scheme := "http"
	if c.cfg.PeerTLS != nil {
		scheme = "https"
	}

	// send add request to the cluster
	cc := mustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
	if _, err := ma.Add(ctx, peerURL); err != nil {
		t.Fatalf("add member on %s error: %v", c.URL(0), err)
	}
	cancel()

	// wait for the add node entry applied in the cluster
	members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
	c.waitMembersMatch(t, members)

	m.InitialPeerURLsMap = types.URLsMap{}
	for _, mm := range c.Members {
		m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
	}
	m.InitialPeerURLsMap[m.Name] = m.PeerURLs
	m.NewCluster = false
	if err := m.Launch(); err != nil {
		t.Fatal(err)
	}
	c.Members = append(c.Members, m)
	// wait cluster to be stable to receive future client requests
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #28
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	// send remove request to the cluster
	cc := mustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
		t.Fatalf("unexpected remove error %v", err)
	}
	cancel()
	newMembers := make([]*member, 0)
	for _, m := range c.Members {
		if uint64(m.s.ID()) != id {
			newMembers = append(newMembers, m)
		} else {
			select {
			case <-m.s.StopNotify():
				m.Terminate(t)
			// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
			// TODO: remove connection write timeout by selecting on http response closeNotifier
			// blocking on https://github.com/golang/go/issues/9524
			case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
				t.Fatalf("failed to remove member %s in time", m.s.ID())
			}
		}
	}
	c.Members = newMembers
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #29
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to cluster %s", s.attributes, s.Cluster.ID())
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}
Example #30
// rangeCommandFunc executes the "range" command.
func rangeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("range command needs arguments."))
	}

	var rangeEnd []byte
	key := []byte(args[0])
	if len(args) > 1 {
		rangeEnd = []byte(args[1])
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.RangeRequest{Key: key, RangeEnd: rangeEnd}

	resp, err := kv.Range(context.Background(), req)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s %s\n", string(kv.Key), string(kv.Value))
	}
}
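RangeEnd is exclusive, so a prefix scan passes the prefix as Key and the prefix with its last byte incremented as RangeEnd. A small helper mirroring what clientv3's WithPrefix computes (a sketch, not part of this command):

// prefixRangeEnd returns the exclusive end of the range covering every key
// that starts with prefix: the prefix with its last non-0xff byte incremented.
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0} // all bytes are 0xff: scan to the end of the keyspace
}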