Example 1
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
		return
	}
	w.Header().Set("X-Etcd-Cluster-ID", h.clusterInfo.ID().String())

	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
	defer cancel()

	rr, err := parseKeyRequest(r, clockwork.NewRealClock())
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := h.server.Do(ctx, rr)
	if err != nil {
		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
		writeError(w, err)
		return
	}
	switch {
	case resp.Event != nil:
		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
			// Should never be reached
			log.Printf("error writing event: %v", err)
		}
	case resp.Watcher != nil:
		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
		defer cancel()
		handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer)
	default:
		writeError(w, errors.New("received response with no Event/Watcher!"))
	}
}
Example 2
// init phase:
// - read the shard info, make sure it has sources
func (sdw *SplitDiffWorker) init(ctx context.Context) error {
	sdw.SetState(WorkerStateInit)

	var err error
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	sdw.keyspaceInfo, err = sdw.wr.TopoServer().GetKeyspace(shortCtx, sdw.keyspace)
	cancel()
	if err != nil {
		return fmt.Errorf("cannot read keyspace %v: %v", sdw.keyspace, err)
	}
	shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
	sdw.shardInfo, err = sdw.wr.TopoServer().GetShard(shortCtx, sdw.keyspace, sdw.shard)
	cancel()
	if err != nil {
		return fmt.Errorf("cannot read shard %v/%v: %v", sdw.keyspace, sdw.shard, err)
	}

	if len(sdw.shardInfo.SourceShards) == 0 {
		return fmt.Errorf("shard %v/%v has no source shard", sdw.keyspace, sdw.shard)
	}
	if !sdw.shardInfo.HasMaster() {
		return fmt.Errorf("shard %v/%v has no master", sdw.keyspace, sdw.shard)
	}

	return nil
}
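This and several of the later Vitess examples repeat the same idiom: derive a short-lived shortCtx from the caller's ctx, issue one remote call, and call cancel() immediately afterwards so the timer is released. Below is a minimal, self-contained sketch of that idiom; the callWithTimeout helper and the simulated slow call are illustrative assumptions, not part of the Vitess API.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithTimeout wraps a single remote call in its own short-lived context,
// mirroring the shortCtx/cancel() pattern used in the example above.
func callWithTimeout(parent context.Context, d time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, d)
	defer cancel() // release the timer even if call returns early
	return call(ctx)
}

func main() {
	err := callWithTimeout(context.Background(), 50*time.Millisecond, func(ctx context.Context) error {
		select {
		case <-time.After(200 * time.Millisecond): // simulated slow topo call
			return nil
		case <-ctx.Done():
			return errors.New("remote call timed out: " + ctx.Err().Error())
		}
	})
	fmt.Println(err) // remote call timed out: context deadline exceeded
}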
Example 3
func ExampleKV_getSortedPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	for i := range make([]int, 3) {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		_, err = cli.Put(ctx, fmt.Sprintf("key_%d", i), "value")
		cancel()
		if err != nil {
			log.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output:
	// key_2 : value
	// key_1 : value
	// key_0 : value
}
Example 4
func actionUserPasswd(c *cli.Context) {
	api, user := mustUserAPIAndName(c)
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	currentUser, err := api.GetUser(ctx, user)
	cancel()
	if currentUser == nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	pass, err := speakeasy.Ask("New password: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error reading password:", err)
		os.Exit(1)
	}
	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	_, err = api.ChangePassword(ctx, user, pass)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	fmt.Printf("Password updated\n")
}
Example 5
func TestLockRelease(t *testing.T) {
	SetUp(t)
	defer TearDown(t)

	node1 := lock.New(api, key, "node1")
	ctx1, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	node2 := lock.New(api, key, "node2")
	ctx2, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	if err := node1.Acquire(ctx1); err != nil {
		t.Fatal(err)
	}

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := node2.Acquire(ctx2); err != nil {
			t.Error(err)
		}
	}()
	time.Sleep(200 * time.Millisecond)
	if err := node1.Release(context.Background()); err != nil {
		t.Error(err)
	}
	wg.Wait()
}
Example 6
// invokeWithRetry calls stub using an exponential backoff retry mechanism
// based on the values provided in retrySettings.
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
	retrySettings := callSettings.RetrySettings
	backoffSettings := callSettings.RetrySettings.BackoffSettings
	delay := backoffSettings.DelayTimeoutSettings.Initial
	timeout := backoffSettings.RPCTimeoutSettings.Initial
	for {
		// If the deadline is exceeded...
		if ctx.Err() != nil {
			return ctx.Err()
		}
		timeoutCtx, _ := context.WithTimeout(ctx, backoffSettings.RPCTimeoutSettings.Max)
		timeoutCtx, _ = context.WithTimeout(timeoutCtx, timeout)
		err := stub(timeoutCtx)
		code := grpc.Code(err)
		if code == codes.OK {
			return nil
		}
		if !retrySettings.RetryCodes[code] {
			return err
		}
		delayCtx, _ := context.WithTimeout(ctx, backoffSettings.DelayTimeoutSettings.Max)
		delayCtx, _ = context.WithTimeout(delayCtx, delay)
		<-delayCtx.Done()

		delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
		timeout = scaleDuration(timeout, backoffSettings.RPCTimeoutSettings.Multiplier)
	}
}
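Note that the loop above discards every CancelFunc returned by context.WithTimeout, so each attempt's timer is only released when the parent ctx ends (go vet's lostcancel check flags exactly this). The sketch below shows the same retry-with-per-attempt-timeout idea with explicit cancels; the fixed timeout/delay and the retry signature are simplifying assumptions, not the original API.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry is a simplified, self-contained version of the loop above: each
// attempt and each backoff delay gets its own sub-context, and the
// corresponding cancel is always called so no timers are leaked.
func retry(ctx context.Context, attempt func(context.Context) error, timeout, delay time.Duration) error {
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		attemptCtx, cancelAttempt := context.WithTimeout(ctx, timeout)
		err := attempt(attemptCtx)
		cancelAttempt() // release the attempt's timer as soon as the call returns
		if err == nil {
			return nil
		}
		delayCtx, cancelDelay := context.WithTimeout(ctx, delay)
		<-delayCtx.Done() // wait out the backoff delay (or the parent's cancellation)
		cancelDelay()
	}
}

func main() {
	calls := 0
	err := retry(context.Background(), func(ctx context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 100*time.Millisecond, 10*time.Millisecond)
	fmt.Println(calls, err) // 3 <nil>
}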
Example 7
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Idle); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Idle, err)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Connecting); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Connecting, err)
	}
	if state, err := cc.State(); err != nil || state != grpc.Ready {
		t.Fatalf("cc.State() = %s, %v, want %s, <nil>", state, err, grpc.Ready)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != context.DeadlineExceeded {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, %v", grpc.Ready, err, context.DeadlineExceeded)
	}
	s.Stop()
	// Set -1 as the timeout to make sure that, if transportMonitor gets the error
	// notification in time, the failure path of the 1st invoke of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ = context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Ready, err)
	}
	if state, err := cc.State(); err != nil || (state != grpc.Connecting && state != grpc.TransientFailure) {
		t.Fatalf("cc.State() = %s, %v, want %s or %s, <nil>", state, err, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
Example 8
File: raft.go Project: Mic92/docker
func (n *Node) handleAddressChange(ctx context.Context, member *membership.Member, reconnectAddr string) error {
	newConn, err := n.ConnectToMember(reconnectAddr, 0)
	if err != nil {
		return errors.Wrapf(err, "could not connect to member ID %x at observed address %s", member.RaftID, reconnectAddr)
	}

	healthCtx, cancelHealth := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelHealth()

	if err := newConn.HealthCheck(healthCtx); err != nil {
		return errors.Wrapf(err, "%x failed health check at observed address %s", member.RaftID, reconnectAddr)
	}

	if err := n.cluster.ReplaceMemberConnection(member.RaftID, member, newConn, reconnectAddr, false); err != nil {
		newConn.Conn.Close()
		return errors.Wrap(err, "failed to replace connection to raft member")
	}

	// If we're the leader, write the address change to raft
	updateCtx, cancelUpdate := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelUpdate()
	if err := n.updateMember(updateCtx, reconnectAddr, member.RaftID, member.NodeID); err != nil {
		return errors.Wrap(err, "failed to update member address in raft")
	}

	return nil
}
Example 9
func TestDropRequestFailedNonFailFast(t *testing.T) {
	// Start a backend.
	beLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen %v", err)
	}
	beAddr := strings.Split(beLis.Addr().String(), ":")
	bePort, err := strconv.Atoi(beAddr[1])
	backends := startBackends(t, besn, beLis)
	defer stopBackends(backends)

	// Start a load balancer.
	lbLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to create the listener for the load balancer %v", err)
	}
	lbCreds := &serverNameCheckCreds{
		sn: lbsn,
	}
	lb := grpc.NewServer(grpc.Creds(lbCreds))
	if err != nil {
		t.Fatalf("Failed to generate the port number %v", err)
	}
	be := &lbpb.Server{
		IpAddress:        []byte(beAddr[0]),
		Port:             int32(bePort),
		LoadBalanceToken: lbToken,
		DropRequest:      true,
	}
	var bes []*lbpb.Server
	bes = append(bes, be)
	sl := &lbpb.ServerList{
		Servers: bes,
	}
	ls := newRemoteBalancer(sl)
	lbpb.RegisterLoadBalancerServer(lb, ls)
	go func() {
		lb.Serve(lbLis)
	}()
	defer func() {
		ls.stop()
		lb.Stop()
	}()
	creds := serverNameCheckCreds{
		expected: besn,
	}
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
	cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(Balancer(&testNameResolver{
		addr: lbLis.Addr().String(),
	})), grpc.WithBlock(), grpc.WithTransportCredentials(&creds))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	helloC := hwpb.NewGreeterClient(cc)
	ctx, _ = context.WithTimeout(context.Background(), 10*time.Millisecond)
	if _, err := helloC.SayHello(ctx, &hwpb.HelloRequest{Name: "grpc"}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("%v.SayHello(_, _) = _, %v, want _, %s", helloC, err, codes.DeadlineExceeded)
	}
	cc.Close()
}
Example 10
func ExampleKV_compact() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	compRev := resp.Header.Revision // specify compact revision of your choice

	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	_, err = cli.Compact(ctx, compRev)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
Example 11
func roleGrantRevoke(c *cli.Context, grant bool) {
	path := c.String("path")
	if path == "" {
		fmt.Fprintln(os.Stderr, "No path specified; please use `-path`")
		os.Exit(1)
	}

	read := c.Bool("read")
	write := c.Bool("write")
	rw := c.Bool("readwrite")
	permcount := 0
	for _, v := range []bool{read, write, rw} {
		if v {
			permcount++
		}
	}
	if permcount != 1 {
		fmt.Fprintln(os.Stderr, "Please specify exactly one of -read, -write or -readwrite")
		os.Exit(1)
	}
	var permType client.PermissionType
	switch {
	case read:
		permType = client.ReadPermission
	case write:
		permType = client.WritePermission
	case rw:
		permType = client.ReadWritePermission
	}

	api, role := mustRoleAPIAndName(c)
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	currentRole, err := api.GetRole(ctx, role)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	var newRole *client.Role
	if grant {
		newRole, err = api.GrantRoleKV(ctx, role, []string{path}, permType)
	} else {
		newRole, err = api.RevokeRoleKV(ctx, role, []string{path}, permType)
	}
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	if reflect.DeepEqual(newRole, currentRole) {
		if grant {
			fmt.Printf("Role unchanged; already granted")
		} else {
			fmt.Printf("Role unchanged; already revoked")
		}
	}

	fmt.Printf("Role %s updated\n", role)
}
Example 12
func main() {
	func() {
		ctx := context.Background()
		ctx = setContextWithAppStartTS(ctx, time.Now().String())
		ctx = setContextWithIP(ctx, "1.2.3.4")
		ctx = setContextWithUserAgent(ctx, "Linux")
		fmt.Println(ctx)
		fmt.Println(getAppStartTSFromContext(ctx))
		fmt.Println(getIPFromContext(ctx))
		fmt.Println(getUserAgentFromContext(ctx))
		fmt.Println("Done 1:", ctx)
	}()
	/*
	   Done 1: context.Background.WithValue(0, "2015-09-02 22:38:00.640981471 -0700 PDT").WithValue(1, "1.2.3.4").WithValue(2, "Linux")
	*/

	fmt.Println()
	func() {
		timeout := 100 * time.Millisecond
		processingTime := time.Nanosecond
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		cancel()
		send(ctx, processingTime)
		fmt.Println("Done 2")
	}()
	/*
		send Timeout: context canceled
		Done 2
	*/

	fmt.Println()
	func() {
		timeout := 100 * time.Millisecond
		processingTime := time.Nanosecond
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		send(ctx, processingTime)
		fmt.Println("Done 3")
	}()
	/*
		send Done!
		Done 3
	*/

	fmt.Println()
	func() {
		timeout := 100 * time.Millisecond
		processingTime := time.Minute
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		send(ctx, processingTime)
		fmt.Println("Done 4")
	}()
	/*
		send Timeout: context deadline exceeded
		Done 4
	*/
}
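send is not shown in this example; a minimal implementation consistent with the commented outputs might look like the following. This is a hypothetical reconstruction that assumes the same context, fmt and time imports as the example.

func send(ctx context.Context, processingTime time.Duration) {
	if ctx.Err() != nil { // context already canceled before any work starts
		fmt.Println("send Timeout:", ctx.Err())
		return
	}
	select {
	case <-time.After(processingTime): // the simulated work finishes in time
		fmt.Println("send Done!")
	case <-ctx.Done(): // the deadline expires (or cancel is called) first
		fmt.Println("send Timeout:", ctx.Err())
	}
}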
Example 13
// synchronizeReplication phase:
// 1 - ask the subset slave to stop replication
// 2 - sleep for 5 seconds
// 3 - ask the superset slave to stop replication
// Note this is not 100% correct, but good enough for now
func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error {
	worker.SetState(WorkerStateSyncReplication)

	// stop replication on subset slave
	worker.wr.Logger().Infof("Stopping replication on subset slave %v", worker.subset.alias)
	subsetTablet, err := worker.wr.TopoServer().GetTablet(ctx, worker.subset.alias)
	if err != nil {
		return err
	}
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	err = worker.wr.TabletManagerClient().StopSlave(shortCtx, subsetTablet)
	cancel()
	if err != nil {
		return fmt.Errorf("Cannot stop slave %v: %v", worker.subset.alias, err)
	}
	if err := checkDone(ctx); err != nil {
		return err
	}

	// change the cleaner actions from ChangeSlaveType(rdonly)
	// to StartSlave() + ChangeSlaveType(spare)
	wrangler.RecordStartSlaveAction(worker.cleaner, subsetTablet)
	action, err := wrangler.FindChangeSlaveTypeActionByTarget(worker.cleaner, worker.subset.alias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", worker.subset.alias, err)
	}
	action.TabletType = topo.TYPE_SPARE

	// sleep for a few seconds
	time.Sleep(5 * time.Second)
	if err := checkDone(ctx); err != nil {
		return err
	}

	// stop replication on superset slave
	worker.wr.Logger().Infof("Stopping replication on superset slave %v", worker.superset.alias)
	supersetTablet, err := worker.wr.TopoServer().GetTablet(ctx, worker.superset.alias)
	if err != nil {
		return err
	}
	shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
	err = worker.wr.TabletManagerClient().StopSlave(shortCtx, supersetTablet)
	cancel()
	if err != nil {
		return fmt.Errorf("Cannot stop slave %v: %v", worker.superset.alias, err)
	}

	// change the cleaner actions from ChangeSlaveType(rdonly)
	// to StartSlave() + ChangeSlaveType(spare)
	wrangler.RecordStartSlaveAction(worker.cleaner, supersetTablet)
	action, err = wrangler.FindChangeSlaveTypeActionByTarget(worker.cleaner, worker.superset.alias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", worker.superset.alias, err)
	}
	action.TabletType = topo.TYPE_SPARE

	return nil
}
Example 14
// shardsWithTablesSources returns all the shards that have exactly one
// SourceShards entry with a non-empty Tables list.
func shardsWithTablesSources(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]string, error) {
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	keyspaces, err := wr.TopoServer().GetKeyspaces(shortCtx)
	cancel()
	if err != nil {
		return nil, err
	}

	wg := sync.WaitGroup{}
	mu := sync.Mutex{} // protects result
	result := make([]map[string]string, 0, len(keyspaces))
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
			shards, err := wr.TopoServer().GetShardNames(shortCtx, keyspace)
			cancel()
			if err != nil {
				rec.RecordError(err)
				return
			}
			for _, shard := range shards {
				wg.Add(1)
				go func(keyspace, shard string) {
					defer wg.Done()
					shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
					si, err := wr.TopoServer().GetShard(shortCtx, keyspace, shard)
					cancel()
					if err != nil {
						rec.RecordError(err)
						return
					}

					if len(si.SourceShards) == 1 && len(si.SourceShards[0].Tables) > 0 {
						mu.Lock()
						result = append(result, map[string]string{
							"Keyspace": keyspace,
							"Shard":    shard,
						})
						mu.Unlock()
					}
				}(keyspace, shard)
			}
		}(keyspace)
	}
	wg.Wait()

	if rec.HasErrors() {
		return nil, rec.Error()
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("There are no shards with SourceShards")
	}
	return result, nil
}
Example 15
func TestSecondElection(t *testing.T) {
	defer TearDown(t)
	name1 := "node1"
	value1 := "10.1.1.10"
	noms1 := []*Nomination{}
	events1 := []*LeaderEvent{}

	name2 := "node2"
	value2 := "10.1.1.11"
	noms2 := []*Nomination{}
	events2 := []*LeaderEvent{}

	wg := &sync.WaitGroup{}
	wg.Add(2)

	// run first election immediately; resign at 10s
	go func() {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		noms1, events1 = RunElection(t, ctx, name1, value1)
	}()

	// run second election at 5s; resign at 15s
	go func() {
		defer wg.Done()
		time.Sleep(5 * time.Second)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		noms2, events2 = RunElection(t, ctx, name2, value2)
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	AssertWait(t, ctx, wg)

	t.Log("checking election 1")
	AssertNoms(t, noms1, []*Nomination{
		{Name: name1, Size: 1, Leaders: map[string]string{}},
	})

	AssertEvents(t, events1, []*LeaderEvent{
		{1, map[string]string{name1: value1}},
		{1, map[string]string{}},
	})

	t.Log("checking election 2")
	AssertNoms(t, noms2, []*Nomination{
		{Name: name2, Size: 1, Leaders: map[string]string{}},
	})

	AssertEvents(t, events2, []*LeaderEvent{
		{1, map[string]string{name1: value1}},
		{1, map[string]string{name2: value2}},
		{1, map[string]string{}},
	})
}
Example 16
func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
	cancel()
	if err != nil {
		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
			return nil, 0, 0, ErrSizeNotFound
		}
		if err == client.ErrInvalidJSON {
			return nil, 0, 0, ErrBadDiscoveryEndpoint
		}
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster, nil)
	cancel()
	if err != nil {
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	nodes := make([]*client.Node, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if !(path.Base(n.Key) == path.Base(configKey)) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, resp.Index, ErrFullCluster
		}
	}
	return nodes, size, resp.Index, nil
}
Example 17
func TestJQEval(t *testing.T) {
	pwd, _ := os.Getwd()
	err := SetPath(filepath.Join(pwd, ".."))
	if err != nil {
		t.Fatalf("can't set JQ path: %s", err)
	}

	jq := &JQ{}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	err = jq.Eval(ctx, ioutil.Discard)
	cancel()

	if err == nil {
		t.Errorf("err should not be nil since it's invalid input")
	}

	jq = &JQ{
		J: `{"dependencies":{"capnp":{"version":"0.1.4","dependencies":{"es6-promise":{"version":"1.0.0","dependencies":{"es6-promise":{"version":"1.0.0"}}}}}}}`,
		Q: `.dependencies | recurse(to_entries | map(.values.dependencies))`,
	}
	ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
	err = jq.Eval(ctx, ioutil.Discard)
	cancel()

	if err == nil {
		t.Errorf("err should not be nil since the executation should timeout")
	}

	if err.Error() != "jq execution timeout" {
		t.Errorf("err message should be jq execution timeout, but it's %s", err)
	}

	// simulate race condition
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancel()

			jq := &JQ{
				J: `{ "foo": { "bar": { "baz": 123 } } }`,
				Q: ".",
			}
			err = jq.Eval(ctx, ioutil.Discard)
			if err != nil {
				t.Errorf("err should be nil: %s", err)
			}
		}()
	}

	wg.Wait()
}
Example 18
// init phase:
// - read the destination keyspace, make sure it has 'servedFrom' values
func (scw *SplitCloneWorker) init(ctx context.Context) error {
	scw.setState(WorkerStateInit)
	var err error

	// read the keyspace and validate it
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	scw.keyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(shortCtx, scw.keyspace)
	cancel()
	if err != nil {
		return fmt.Errorf("cannot read keyspace %v: %v", scw.keyspace, err)
	}

	// find the OverlappingShards in the keyspace
	shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
	osList, err := topotools.FindOverlappingShards(shortCtx, scw.wr.TopoServer(), scw.keyspace)
	cancel()
	if err != nil {
		return fmt.Errorf("cannot FindOverlappingShards in %v: %v", scw.keyspace, err)
	}

	// find the shard we mentioned in there, if any
	os := topotools.OverlappingShardsForShard(osList, scw.shard)
	if os == nil {
		return fmt.Errorf("the specified shard %v/%v is not in any overlapping shard", scw.keyspace, scw.shard)
	}
	scw.wr.Logger().Infof("Found overlapping shards: %+v\n", os)

	// one side should have served types, the other one none,
	// figure out which is which, then double check them all
	if len(os.Left[0].ServedTypes) > 0 {
		scw.sourceShards = os.Left
		scw.destinationShards = os.Right
	} else {
		scw.sourceShards = os.Right
		scw.destinationShards = os.Left
	}

	// validate all serving types
	servingTypes := []pb.TabletType{pb.TabletType_MASTER, pb.TabletType_REPLICA, pb.TabletType_RDONLY}
	for _, st := range servingTypes {
		for _, si := range scw.sourceShards {
			if si.GetServedType(st) == nil {
				return fmt.Errorf("source shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), st)
			}
		}
	}
	for _, si := range scw.destinationShards {
		if len(si.ServedTypes) > 0 {
			return fmt.Errorf("destination shard %v/%v is serving some types", si.Keyspace(), si.ShardName())
		}
	}

	return nil
}
Example 19
func TestCanRemoveMember(t *testing.T) {
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Stop node 2 and node 3 (2 nodes out of 3)
	nodes[2].Server.Stop()
	nodes[2].Shutdown()
	nodes[3].Server.Stop()
	nodes[3].Shutdown()

	// Node 2 and Node 3 should be listed as Unreachable
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		members := nodes[1].GetMemberlist()
		if len(members) != 3 {
			return fmt.Errorf("expected 3 nodes, got %d", len(members))
		}
		if members[nodes[2].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 2 to be unreachable")
		}
		if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
			return fmt.Errorf("expected node 3 to be unreachable")
		}
		return nil
	}))

	// Removing node 3 should fail
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
	err := nodes[1].RemoveMember(ctx, 3)
	assert.Error(t, err)
	assert.Equal(t, err, raft.ErrCannotRemoveMember)
	members := nodes[1].GetMemberlist()
	assert.Equal(t, len(members), 3)

	// Restart node 2 and node 3
	nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false)
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Removing node 3 should succeed
	ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
	err = nodes[1].RemoveMember(ctx, nodes[3].Config.ID)
	assert.NoError(t, err)
	members = nodes[1].GetMemberlist()
	assert.Nil(t, members[nodes[3].Config.ID])
	assert.Equal(t, len(members), 2)

	// Removing node 2 should fail
	ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
	err = nodes[1].RemoveMember(ctx, nodes[2].Config.ID)
	assert.Error(t, err)
	assert.Equal(t, err, raft.ErrCannotRemoveMember)
	assert.Equal(t, len(members), 2)
}
Example 20
func TestProcess(t *testing.T) {
	p := getConn(t)

	// test global time-out
	timedoutCtx, cancel1 := context.WithTimeout(
		context.Background(),
		time.Millisecond,
	)
	defer cancel1() // releases resources if slowOperation completes before timeout elapses

	done1 := make(chan struct{}, 1)
	f := func(sqldb *sql.DB) {
		time.Sleep(time.Millisecond * 100)
		close(done1)
	}

	time.Sleep(time.Millisecond * 2)
	if err := p.process(timedoutCtx, f, done1); err != context.DeadlineExceeded {
		t.Errorf("Expected deadline exceeded, got: %# v", err)
	}

	done2 := make(chan struct{}, 1)
	f = func(sqldb *sql.DB) {
		time.Sleep(time.Millisecond * 120)
		close(done2)
	}

	// test sem acquired timeout
	semtimeoutCtx, cancel2 := context.WithTimeout(
		context.Background(),
		time.Millisecond*100,
	)
	defer cancel2()

	if err := p.process(semtimeoutCtx, f, done2); err != context.DeadlineExceeded {
		t.Errorf("Expected deadline exceeded, got: %# v", err)
	}

	done3 := make(chan struct{}, 1)
	f = func(sqldb *sql.DB) {
		time.Sleep(time.Millisecond * 120)
		close(done3)
	}
	semtimeoutCtx3, cancel3 := context.WithTimeout(
		context.Background(),
		time.Millisecond*100,
	)
	defer cancel3()
	p.conns = nil
	if err := p.process(semtimeoutCtx3, f, done2); err != ErrClosed {
		t.Errorf("Expected ClosedConnection, got: %# v", err)
	}
}
Example 21
func TestSearchTimeout(t *testing.T) {
	defer func() {
		err := os.RemoveAll("testidx")
		if err != nil {
			t.Fatal(err)
		}
	}()

	index, err := New("testidx", NewIndexMapping())
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := index.Close()
		if err != nil {
			t.Fatal(err)
		}
	}()

	// first run a search with an absurdly long timeout (should succeed)
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
	query := NewTermQuery("water")
	req := NewSearchRequest(query)
	_, err = index.SearchInContext(ctx, req)
	if err != nil {
		t.Fatal(err)
	}

	// now run a search again with an absurdly low timeout (should timeout)
	ctx, _ = context.WithTimeout(context.Background(), 1*time.Microsecond)
	sq := &slowQuery{
		actual: query,
		delay:  50 * time.Millisecond, // on Windows timer resolution is 15ms
	}
	req.Query = sq
	_, err = index.SearchInContext(ctx, req)
	if err != context.DeadlineExceeded {
		t.Fatalf("exected %v, got: %v", context.DeadlineExceeded, err)
	}

	// now run a search with a long timeout, but with a long query, and cancel it
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	sq = &slowQuery{
		actual: query,
		delay:  100 * time.Millisecond, // on Windows timer resolution is 15ms
	}
	req = NewSearchRequest(sq)
	cancel()
	_, err = index.SearchInContext(ctx, req)
	if err != context.Canceled {
		t.Fatalf("exected %v, got: %v", context.Canceled, err)
	}
}
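The test above hinges on the two sentinel errors a context created with WithTimeout can report: DeadlineExceeded when the timer fires first, Canceled when cancel() is called first. A minimal standalone program showing the distinction:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Deadline reached before cancel: Err() reports context.DeadlineExceeded.
	expired, cancel1 := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel1()
	<-expired.Done()
	fmt.Println(expired.Err() == context.DeadlineExceeded) // true

	// Cancel called before the deadline: Err() reports context.Canceled.
	canceled, cancel2 := context.WithTimeout(context.Background(), time.Hour)
	cancel2()
	fmt.Println(canceled.Err() == context.Canceled) // true
}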
Example 22
func TestWritableAuth(t *testing.T) {
	s := startServer(7680, true, testCert, testCert)
	defer s.Stop()
	c := buildClient(t, 7680, true)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := c.Retrieve(ctx, &pb.RetrieveRequest{})
	assert.NoError(t, err)
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err = c.Store(ctx, &pb.StoreRequest{})
	assert.NoError(t, err)
}
Example 23
func TestReadonlyAuth(t *testing.T) {
	s := startServer(7679, true, testCert, testCert2)
	defer s.Stop()
	c := buildClient(t, 7679, true)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := c.Retrieve(ctx, &pb.RetrieveRequest{})
	assert.NoError(t, err)
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err = c.Store(ctx, &pb.StoreRequest{})
	assert.Error(t, err, "Fails because the client isn't authenticated")
}
Example 24
// FindHealthyRdonlyEndPoint returns a random healthy endpoint.
// Since we don't want to use them all, we require at least
// minHealthyEndPoints servers to be healthy.
// May block up to -wait_for_healthy_rdonly_endpoints_timeout.
func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string) (*topodatapb.TabletAlias, error) {
	busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *WaitForHealthyEndPointsTimeout)
	defer busywaitCancel()

	var healthyEndpoints []*topodatapb.EndPoint
	for {
		select {
		case <-busywaitCtx.Done():
			return nil, fmt.Errorf("Not enough endpoints to choose from in (%v,%v/%v), have %v healthy ones, need at least %v Context Error: %v", cell, keyspace, shard, len(healthyEndpoints), *minHealthyEndPoints, busywaitCtx.Err())
		default:
		}

		shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
		endPoints, _, err := wr.TopoServer().GetEndPoints(shortCtx, cell, keyspace, shard, topodatapb.TabletType_RDONLY)
		cancel()
		if err != nil {
			if err == topo.ErrNoNode {
				// If the node doesn't exist, count that as 0 available rdonly instances.
				endPoints = &topodatapb.EndPoints{}
			} else {
				return nil, fmt.Errorf("GetEndPoints(%v,%v,%v,rdonly) failed: %v", cell, keyspace, shard, err)
			}
		}
		healthyEndpoints = make([]*topodatapb.EndPoint, 0, len(endPoints.Entries))
		for _, entry := range endPoints.Entries {
			if len(entry.HealthMap) == 0 {
				healthyEndpoints = append(healthyEndpoints, entry)
			}
		}
		if len(healthyEndpoints) < *minHealthyEndPoints {
			deadlineForLog, _ := busywaitCtx.Deadline()
			wr.Logger().Infof("Waiting for enough endpoints to become available. available: %v required: %v Waiting up to %.1f more seconds.", len(healthyEndpoints), *minHealthyEndPoints, deadlineForLog.Sub(time.Now()).Seconds())
			// Block for 1 second because 2 seconds is the -health_check_interval flag value in integration tests.
			timer := time.NewTimer(1 * time.Second)
			select {
			case <-busywaitCtx.Done():
				timer.Stop()
			case <-timer.C:
			}
		} else {
			break
		}
	}

	// random server in the list is what we want
	index := rand.Intn(len(healthyEndpoints))
	return &topodatapb.TabletAlias{
		Cell: cell,
		Uid:  healthyEndpoints[index].Uid,
	}, nil
}
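The busy-wait loop above (poll, then sleep on either a one-second timer or the context) can be distilled into a small reusable helper. The pollUntil function below is illustrative only, not part of the Vitess codebase, and assumes the context and time packages are imported.

// pollUntil calls check roughly once a second until it succeeds, the parent
// context ends, or the overall timeout expires.
func pollUntil(ctx context.Context, timeout time.Duration, check func(context.Context) bool) error {
	waitCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		if check(waitCtx) {
			return nil
		}
		select {
		case <-waitCtx.Done():
			return waitCtx.Err()
		case <-ticker.C:
			// poll again
		}
	}
}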
Example 25
// testChannelBalance creates a new channel between Alice and Bob, then
// checks that the channel balance equals the amount specified at channel creation.
func testChannelBalance(net *networkHarness, t *harnessTest) {
	timeout := time.Duration(time.Second * 5)

	// Open a channel with 0.5 BTC between Alice and Bob, ensuring the
	// channel has been opened properly.
	amount := btcutil.Amount(btcutil.SatoshiPerBitcoin / 2)
	ctx, _ := context.WithTimeout(context.Background(), timeout)

	// Creates a helper closure to be used below which asserts the proper
	// response to a channel balance RPC.
	checkChannelBalance := func(node lnrpc.LightningClient,
		amount btcutil.Amount) {

		response, err := node.ChannelBalance(ctx, &lnrpc.ChannelBalanceRequest{})
		if err != nil {
			t.Fatalf("unable to get channel balance: %v", err)
		}

		balance := btcutil.Amount(response.Balance)
		if balance != amount {
			t.Fatalf("channel balance wrong: %v != %v", balance,
				amount)
		}
	}

	chanPoint := openChannelAndAssert(t, net, ctx, net.Alice, net.Bob,
		amount)

	// As this is a single funder channel, Alice's balance should be
	// exactly 0.5 BTC since no state transitions have taken place yet.
	checkChannelBalance(net.Alice, amount)

	// Since we only explicitly wait for Alice's channel open notification,
	// Bob might not yet have updated his internal state in response to
	// Alice's channel open proof. So we sleep here for a second to let Bob
	// catch up.
	// TODO(roasbeef): Bob should also watch for the channel on-chain after
	// the changes to restrict the number of pending channels are in.
	time.Sleep(time.Second)

	// Ensure Bob currently has no available balance within the channel.
	checkChannelBalance(net.Bob, 0)

	// Finally close the channel between Alice and Bob, asserting that the
	// channel has been properly closed on-chain.
	ctx, _ = context.WithTimeout(context.Background(), timeout)
	closeChannelAndAssert(t, net, ctx, net.Alice, chanPoint)
}
Example 26
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(timeout time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		plog.Panicf("json marshal error: %v", err)
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   membership.MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			close(s.readych)
			plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			return
		case ErrStopped:
			plog.Infof("aborting publish because server is stopped")
			return
		default:
			plog.Errorf("publish error: %v", err)
		}
	}
}
Example 27
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
	c.Lock()
	node := c.node
	if node == nil {
		c.Unlock()
		return
	}

	if c.isActiveManager() {
		active, reachable, unreachable, err := c.managerStats()
		if err == nil {
			singlenode := active && reachable == 1 && unreachable == 0
			if active && !singlenode && reachable-2 <= unreachable {
				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
			}
		}
	}
	c.cancelReconnect()
	c.Unlock()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := node.Stop(ctx); err != nil {
		logrus.Errorf("error cleaning up cluster: %v", err)
	}
	c.Lock()
	c.node = nil
	c.ready = false
	c.conn = nil
	c.Unlock()
}
Example 28
func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
	fakeURL := url.URL{}
	tr := newFakeTransport()
	tr.finishCancel <- struct{}{}
	c := &httpClusterClient{
		clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
		endpoints:     []url.URL{fakeURL},
	}

	errc := make(chan error)
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()
		_, _, err := c.Do(ctx, &fakeAction{})
		errc <- err
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
		}
	case <-time.After(time.Second):
		t.Fatalf("unexpected timeout when waiting for request to deadline exceed")
	}
}
Example 29
// Lookup implements proto.E2EKSLookupServer
func (ks *Keyserver) Lookup(ctx context.Context, req *proto.LookupRequest) (*proto.LookupProof, error) {
	ctx, _ = context.WithTimeout(ctx, ks.clientTimeout)
	var lookupEpoch uint64
	var ratifications []*proto.SignedEpochHead
	if req.Epoch == 0 {
		// use the latest epoch possible
		var err error
		lookupEpoch, ratifications, err = ks.findLatestEpochSignedByQuorum(req.QuorumRequirement)
		if err != nil {
			return nil, err
		}
	} else {
		lookupEpoch = req.Epoch
		var err error
		var haveVerifiers map[uint64]struct{}
		ratifications, haveVerifiers, err = ks.findRatificationsForEpoch(lookupEpoch, coname.ListQuorum(req.QuorumRequirement, nil))
		if err != nil {
			return nil, err
		}
		if !coname.CheckQuorum(req.QuorumRequirement, haveVerifiers) {
			// TODO: return whatever ratification we could find
			return nil, fmt.Errorf("could not find sufficient verification")
		}
	}
	return ks.assembleLookupProof(req, lookupEpoch, ratifications)
}
Example 30
// findTargets phase:
// - find one rdonly in the source shard
// - mark it as 'worker' pointing back to us
// - get the aliases of all the targets
func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error {
	vscw.setState(WorkerStateFindTargets)

	// find an appropriate endpoint in the source shard
	var err error
	vscw.sourceAlias, err = FindWorkerTablet(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0")
	if err != nil {
		return fmt.Errorf("FindWorkerTablet() failed for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err)
	}
	vscw.wr.Logger().Infof("Using tablet %v as the source", topo.TabletAliasString(vscw.sourceAlias))

	// get the tablet info for it
	vscw.sourceTablet, err = vscw.wr.TopoServer().GetTablet(ctx, vscw.sourceAlias)
	if err != nil {
		return fmt.Errorf("cannot read tablet %v: %v", topo.TabletAliasString(vscw.sourceAlias), err)
	}

	// stop replication on it
	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
	err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet)
	cancel()
	if err != nil {
		return fmt.Errorf("cannot stop replication on tablet %v", topo.TabletAliasString(vscw.sourceAlias))
	}

	wrangler.RecordStartSlaveAction(vscw.cleaner, vscw.sourceTablet)
	action, err := wrangler.FindChangeSlaveTypeActionByTarget(vscw.cleaner, vscw.sourceAlias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", topo.TabletAliasString(vscw.sourceAlias), err)
	}
	action.TabletType = pb.TabletType_SPARE

	return vscw.ResolveDestinationMasters(ctx)
}