Example #1
File: insert.go Project: MerlinDMC/machine
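// Run looks up the target VM and its CD-ROM device, resolves the first
// argument to a datastore path, and mounts that ISO via InsertIso/EditDevice.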
func (cmd *insert) Run(f *flag.FlagSet) error {
	vm, err := cmd.VirtualMachine()
	if err != nil {
		return err
	}

	if vm == nil || f.NArg() != 1 {
		return flag.ErrHelp
	}

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}

	c, err := devices.FindCdrom(cmd.device)
	if err != nil {
		return err
	}

	iso, err := cmd.DatastorePath(f.Arg(0))
	if err != nil {
		return err
	}

	return vm.EditDevice(context.TODO(), devices.InsertIso(c, iso))
}
Example #2
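// Finder lazily builds a find.Finder for the flag's client, scopes it to the
// datacenter named by the path flag (or the single default datacenter),
// caches it, and returns the cached finder on subsequent calls.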
func (flag *DatacenterFlag) Finder() (*find.Finder, error) {
	if flag.finder != nil {
		return flag.finder, nil
	}

	c, err := flag.Client()
	if err != nil {
		return nil, err
	}

	finder := find.NewFinder(c, flag.JSON || flag.Dump)

	// Datacenter is not required (ls command for example).
	// Set for relative func if dc flag is given or
	// if there is a single (default) Datacenter
	if flag.path == "" {
		flag.dc, flag.err = finder.DefaultDatacenter(context.TODO())
	} else {
		if flag.dc, err = finder.Datacenter(context.TODO(), flag.path); err != nil {
			return nil, err
		}
	}

	finder.SetDatacenter(flag.dc)

	flag.finder = finder

	return flag.finder, nil
}
Example #3
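// TestWatcherTimeout starts one watcher that never reads and one that does,
// then sends a series of pod updates and verifies the reading watcher still
// receives every event.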
func TestWatcherTimeout(t *testing.T) {
	server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix())
	defer server.Terminate(t)
	cacher := newTestCacher(etcdStorage)
	defer cacher.Stop()

	// initialVersion is used to initiate the watcher at the beginning of the world,
	// which is not defined precisely in etcd.
	initialVersion, err := cacher.LastSyncResourceVersion()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	startVersion := strconv.Itoa(int(initialVersion))

	// Create a watcher that will not be reading any result.
	watcher, err := cacher.WatchList(context.TODO(), "pods/ns", startVersion, storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer watcher.Stop()

	// Create a second watcher that will be reading result.
	readingWatcher, err := cacher.WatchList(context.TODO(), "pods/ns", startVersion, storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer readingWatcher.Stop()

	for i := 1; i <= 22; i++ {
		pod := makeTestPod(strconv.Itoa(i))
		_ = updatePod(t, etcdStorage, pod, nil)
		verifyWatchEvent(t, readingWatcher, watch.Added, pod)
	}
}
Example #4
func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*govmomi.Client)
	finder := find.NewFinder(client.Client, true)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "vsphere_virtual_machine" {
			continue
		}

		dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"])
		if err != nil {
			return fmt.Errorf("error %s", err)
		}

		dcFolders, err := dc.Folders(context.TODO())
		if err != nil {
			return fmt.Errorf("error %s", err)
		}

		_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
		if err == nil {
			return fmt.Errorf("Record still exists")
		}
	}

	return nil
}
Example #5
// createHardDisk creates a new Hard Disk.
func createHardDisk(client *govmomi.Client, size int, diskPath string, diskType string, adapterType string, dc string) error {
	virtualDiskManager := object.NewVirtualDiskManager(client.Client)
	spec := &types.FileBackedVirtualDiskSpec{
		VirtualDiskSpec: types.VirtualDiskSpec{
			AdapterType: adapterType,
			DiskType:    diskType,
		},
		CapacityKb: int64(1024 * 1024 * size),
	}
	datacenter, err := getDatacenter(client, dc)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] Disk spec: %v", spec)

	task, err := virtualDiskManager.CreateVirtualDisk(context.TODO(), diskPath, datacenter, spec)
	if err != nil {
		return err
	}

	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		log.Printf("[INFO] Failed to create disk:  %v", err)
		return err
	}
	log.Printf("[INFO] Created disk.")

	return nil
}
Example #6
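// TestGuaranteedUpdateKeyNotFound verifies that GuaranteedUpdate on a missing
// key fails when ignoreNotFound is false and succeeds when it is true.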
func TestGuaranteedUpdateKeyNotFound(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	fakeClient.TestIndex = true
	helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix())
	key := etcdtest.AddPrefix("/some/key")

	// Create a new node.
	fakeClient.ExpectNotFoundGet(key)
	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}

	f := storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		return obj, nil
	})

	ignoreNotFound := false
	err := helper.GuaranteedUpdate(context.TODO(), "/some/key", &storagetesting.TestResource{}, ignoreNotFound, f)
	if err == nil {
		t.Errorf("Expected error for key not found.")
	}

	ignoreNotFound = true
	err = helper.GuaranteedUpdate(context.TODO(), "/some/key", &storagetesting.TestResource{}, ignoreNotFound, f)
	if err != nil {
		t.Errorf("Unexpected error %v.", err)
	}
}
Example #7
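// TestWatchListFromZeroIndex starts a WatchList from resource version 0 and
// expects an Added event for a pod created under the watched key.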
func TestWatchListFromZeroIndex(t *testing.T) {
	codec := testapi.Default.Codec()
	key := etcdtest.AddPrefix("/some/key")
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	h := newEtcdHelper(server.Client, codec, key)

	watching, err := h.WatchList(context.TODO(), key, 0, storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// creates key/foo which should trigger the WatchList for "key"
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	err = h.Create(context.TODO(), pod.Name, pod, pod, 0)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	event, _ := <-watching.ResultChan()
	if event.Type != watch.Added {
		t.Errorf("Unexpected event %#v", event)
	}

	if e, a := pod, event.Object; !api.Semantic.DeepDerivative(e, a) {
		t.Errorf("%s: expected %v, got %v", e, a)
	}

	watching.Stop()
}
Example #8
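// Run treats each argument as a datacenter path, destroys every matching
// datacenter, and waits for each Destroy task to finish.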
func (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {
	if len(f.Args()) < 1 {
		return flag.ErrHelp
	}
	datacentersToDestroy := f.Args()

	client, err := cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	finder := find.NewFinder(client, false)
	for _, datacenterToDestroy := range datacentersToDestroy {
		foundDatacenters, err := finder.DatacenterList(context.TODO(), datacenterToDestroy)
		if err != nil {
			return err
		}
		for _, foundDatacenter := range foundDatacenters {
			task, err := foundDatacenter.Destroy(context.TODO())
			if err != nil {
				return err
			}

			if err := task.Wait(context.TODO()); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #9
File: stm.go Project: CliffYuan/etcd
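// doSTM drains the requests channel, running each apply function as an STM
// transaction (optionally serialized behind a shared mutex) and reporting the
// duration and error of every run.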
func doSTM(ctx context.Context, client *v3.Client, requests <-chan stmApply) {
	defer wg.Done()

	var m *v3sync.Mutex
	if stmMutex {
		m = v3sync.NewMutex(client, "stmlock")
	}

	for applyf := range requests {
		st := time.Now()
		if m != nil {
			m.Lock(context.TODO())
		}
		_, err := mkSTM(context.TODO(), client, applyf)
		if m != nil {
			m.Unlock(context.TODO())
		}
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st), happened: time.Now()}
		bar.Increment()
	}
}
Example #10
func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		dc, err := getDatacenter(client, datacenter)
		if err != nil {
			log.Printf("[ERROR] %#v", err)
			return nil, "", err
		}
		finder := find.NewFinder(client.Client, true)
		finder = finder.SetDatacenter(dc)

		vm, err := finder.VirtualMachine(context.TODO(), name)
		if err != nil {
			log.Printf("[ERROR] %#v", err)
			return nil, "", err
		}

		var mvm mo.VirtualMachine
		collector := property.DefaultCollector(client.Client)
		if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil {
			log.Printf("[ERROR] %#v", err)
			return nil, "", err
		}

		if mvm.Summary.Guest.IpAddress != "" {
			log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress)
			return mvm.Summary, "active", err
		} else {
			log.Printf("[DEBUG] Waiting for IP address")
			return nil, "pending", err
		}
	}
}
Example #11
File: stm.go Project: pulcy/vault-monkey
func doSTM(client *v3.Client, requests <-chan stmApply, results chan<- report.Result) {
	defer wg.Done()

	var m *v3sync.Mutex
	if stmMutex {
		s, err := v3sync.NewSession(client)
		if err != nil {
			panic(err)
		}
		m = v3sync.NewMutex(s, "stmlock")
	}

	for applyf := range requests {
		st := time.Now()
		if m != nil {
			m.Lock(context.TODO())
		}
		_, err := mkSTM(context.TODO(), client, applyf)
		if m != nil {
			m.Unlock(context.TODO())
		}
		results <- report.Result{Err: err, Start: st, End: time.Now()}
		bar.Increment()
	}
}
Example #12
// This is to emulate the case where another party updates the object when
// etcdHelper.Delete has verified the preconditions, but hasn't carried out the
// deletion yet. Etcd will fail the deletion and report the conflict. etcdHelper
// should retry until there is no conflict.
func TestDeleteWithRetry(t *testing.T) {
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	prefix := path.Join("/", etcdtest.PathPrefix())

	obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}}
	// fakeGet returns a large ModifiedIndex to emulate the case that another
	// party has updated the object.
	fakeGet := func(ctx context.Context, key string, opts *etcd.GetOptions) (*etcd.Response, error) {
		data, _ := runtime.Encode(testapi.Default.Codec(), obj)
		return &etcd.Response{Node: &etcd.Node{Value: string(data), ModifiedIndex: 99}}, nil
	}
	expectedRetries := 3
	helper := newEtcdHelper(server.Client, testapi.Default.Codec(), prefix)
	fake := &fakeDeleteKeysAPI{KeysAPI: helper.etcdKeysAPI, fakeGetCap: expectedRetries, fakeGetFunc: fakeGet}
	helper.etcdKeysAPI = fake

	returnedObj := &api.Pod{}
	err := helper.Create(context.TODO(), "/some/key", obj, returnedObj, 0)
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}

	err = helper.Delete(context.TODO(), "/some/key", obj, storage.NewUIDPreconditions("A"))
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}
	if fake.getCount != expectedRetries {
		t.Errorf("Expect %d retries, got %d", expectedRetries, fake.getCount)
	}
	err = helper.Get(context.TODO(), "/some/key", obj, false)
	if !storage.IsNotFound(err) {
		t.Errorf("Expect an NotFound error, got %v", err)
	}
}
Example #13
func TestGuaranteedUpdateKeyNotFound(t *testing.T) {
	_, codec := testScheme(t)
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	key := etcdtest.AddPrefix("/some/key")
	helper := newEtcdHelper(server.Client, codec, key)

	// Create a new node.
	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}

	f := storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		return obj, nil
	})

	ignoreNotFound := false
	err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, ignoreNotFound, nil, f)
	if err == nil {
		t.Errorf("Expected error for key not found.")
	}

	ignoreNotFound = true
	err = helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, ignoreNotFound, nil, f)
	if err != nil {
		t.Errorf("Unexpected error %v.", err)
	}
}
Example #14
func TestGuaranteedUpdateNoChange(t *testing.T) {
	_, codec := testScheme(t)
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	key := etcdtest.AddPrefix("/some/key")
	helper := newEtcdHelper(server.Client, codec, key)

	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
	err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		return obj, nil
	}))
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}

	// Update an existing node with the same data
	callbackCalled := false
	objUpdate := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
	err = helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		callbackCalled = true
		return objUpdate, nil
	}))
	if err != nil {
		t.Fatalf("Unexpected error %#v", err)
	}
	if !callbackCalled {
		t.Errorf("tryUpdate callback should have been called.")
	}
}
Example #15
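// TestCreate stores a pod with a TTL, reads it back, and checks that both the
// original and returned objects encode cleanly and carry the same name.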
func TestCreate(t *testing.T) {
	obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	server := NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	helper := newEtcdHelper(server.client, testapi.Default.Codec(), etcdtest.PathPrefix())
	returnedObj := &api.Pod{}
	err := helper.Create(context.TODO(), "/some/key", obj, returnedObj, 5)
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}
	_, err = testapi.Default.Codec().Encode(obj)
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}
	err = helper.Get(context.TODO(), "/some/key", returnedObj, false)
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}
	_, err = testapi.Default.Codec().Encode(returnedObj)
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}
	if obj.Name != returnedObj.Name {
		t.Errorf("Wanted %v, got %v", obj.Name, returnedObj.Name)
	}
}
Example #16
// TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
func TestNodePropose(t *testing.T) {
	msgs := []raftpb.Message{}
	appendStep := func(r *raft, m raftpb.Message) {
		msgs = append(msgs, m)
	}

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	n.Campaign(context.TODO())
	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// change the step function to appendStep until this raft becomes leader
		if rd.SoftState.Lead == r.id {
			r.step = appendStep
			n.Advance()
			break
		}
		n.Advance()
	}
	n.Propose(context.TODO(), []byte("somedata"))
	n.Stop()

	if len(msgs) != 1 {
		t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
	}
	if msgs[0].Type != raftpb.MsgProp {
		t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgProp)
	}
	if !bytes.Equal(msgs[0].Entries[0].Data, []byte("somedata")) {
		t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, []byte("somedata"))
	}
}
Example #17
func TestGuaranteedUpdateNoChange(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	fakeClient.TestIndex = true
	helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix())
	key := etcdtest.AddPrefix("/some/key")

	// Create a new node.
	fakeClient.ExpectNotFoundGet(key)
	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
	err := helper.GuaranteedUpdate(context.TODO(), "/some/key", &storagetesting.TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		return obj, nil
	}))
	if err != nil {
		t.Errorf("Unexpected error %#v", err)
	}

	// Update an existing node with the same data
	callbackCalled := false
	objUpdate := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
	err = helper.GuaranteedUpdate(context.TODO(), "/some/key", &storagetesting.TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
		fakeClient.Err = errors.New("should not be called")
		callbackCalled = true
		return objUpdate, nil
	}))
	if err != nil {
		t.Fatalf("Unexpected error %#v", err)
	}
	if !callbackCalled {
		t.Errorf("tryUpdate callback should have been called.")
	}
}
Example #18
// TestBlockProposal ensures that node will block proposal when it does not
// know who the current leader is; node will accept proposal when it knows
// who the current leader is.
func TestBlockProposal(t *testing.T) {
	n := newNode()
	r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
	go n.run(r)
	defer n.Stop()

	errc := make(chan error, 1)
	go func() {
		errc <- n.Propose(context.TODO(), []byte("somedata"))
	}()

	testutil.WaitSchedule()
	select {
	case err := <-errc:
		t.Errorf("err = %v, want blocking", err)
	default:
	}

	n.Campaign(context.TODO())
	select {
	case err := <-errc:
		if err != nil {
			t.Errorf("err = %v, want %v", err, nil)
		}
	case <-time.After(10 * time.Second):
		t.Errorf("blocking proposal, want unblocking")
	}
}
Example #19
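// TestWatch sets a key while a watch is open, expects a matching Added event,
// then stops the watch and expects the result channel to be closed.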
func TestWatch(t *testing.T) {
	codec := testapi.Default.Codec()
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	key := "/some/key"
	h := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix())

	watching, err := h.Watch(context.TODO(), key, 0, storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Test normal case
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	returnObj := &api.Pod{}
	err = h.Set(context.TODO(), key, pod, returnObj, 0)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	event := <-watching.ResultChan()
	if e, a := watch.Added, event.Type; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
	if e, a := pod, event.Object; !api.Semantic.DeepDerivative(e, a) {
		t.Errorf("Expected %v, got %v", e, a)
	}

	watching.Stop()

	if _, open := <-watching.ResultChan(); open {
		t.Errorf("An injected error did not cause a graceful shutdown")
	}
}
Example #20
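// TestEndpoint_Exec feeds three strings through an Endpoint, checks that the
// processing function only sees those values, and waits for Done() with a
// timeout.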
func TestEndpoint_Exec(t *testing.T) {
	in := make(chan interface{})
	go func() {
		in <- "A"
		in <- "B"
		in <- "C"
		close(in)
	}()

	i := &Endpoint{
		Name: "proc",
		Function: func(ctx context.Context, i interface{}) error {
			s := i.(string)
			if s != "A" && s != "B" && s != "C" {
				t.Fatal("Unexpected data from input", s)
			}
			return nil
		},
	}
	i.SetInput(in)
	if err := i.Init(context.TODO()); err != nil {
		t.Fatal("Unable to init()", err)
	}

	if err := i.Exec(context.TODO()); err != nil {
		t.Fatal("Error during execution:", err)
	}

	select {
	case <-i.Done():
	case <-time.After(time.Millisecond * 50):
		t.Fatal("Took too long")
	}

}
Example #21
func TestWatchListIgnoresRootKey(t *testing.T) {
	codec := testapi.Default.Codec()
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	key := etcdtest.AddPrefix("/some/key")
	server := etcdtesting.NewEtcdTestClientServer(t)
	defer server.Terminate(t)
	h := newEtcdHelper(server.Client, codec, key)

	watching, err := h.WatchList(context.TODO(), key, 0, storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// creates key/foo which should trigger the WatchList for "key"
	err = h.Create(context.TODO(), key, pod, pod, 0)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// force context switch to ensure watches would catch and notify.
	rt.Gosched()

	select {
	case event, _ := <-watching.ResultChan():
		t.Fatalf("Unexpected event: %#v", event)
	default:
		// fall through, expected behavior
	}

	watching.Stop()
}
Example #22
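// TestEndpoint_Init exercises Init() with progressively more complete
// configurations: it must fail until both an input channel and a Function
// are set, and must expose the name afterwards.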
func TestEndpoint_Init(t *testing.T) {
	i := &Endpoint{}
	if err := i.Init(context.TODO()); err == nil {
		t.Fatal("Error expected for missing attributes")
	}

	i = &Endpoint{Name: "proc"}
	if err := i.Init(context.TODO()); err == nil {
		t.Fatal("Error expected for missing attributes Input and Function")
	}

	in := make(chan interface{})
	i = &Endpoint{Name: "proc"}
	i.SetInput(in)
	if err := i.Init(context.TODO()); err == nil {
		t.Fatal("Error expected for missing attribute Function")
	}

	i = &Endpoint{
		Name: "proc",
		Function: func(ctx context.Context, i interface{}) error {
			return nil
		},
	}
	i.SetInput(in)
	if err := i.Init(context.TODO()); err != nil {
		t.Fatal("No error expected after Init(): ", err)
	}

	if i.GetName() == "" {
		t.Fatal("Name attribute not set")
	}
}
Example #23
func resourceVSphereVirtualDiskDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)

	vDisk := virtualDisk{}

	if v, ok := d.GetOk("vmdk_path"); ok {
		vDisk.vmdkPath = v.(string)
	}
	if v, ok := d.GetOk("datastore"); ok {
		vDisk.datastore = v.(string)
	}

	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	diskPath := fmt.Sprintf("[%v] %v", vDisk.datastore, vDisk.vmdkPath)

	virtualDiskManager := object.NewVirtualDiskManager(client.Client)

	task, err := virtualDiskManager.DeleteVirtualDisk(context.TODO(), diskPath, dc)
	if err != nil {
		return err
	}

	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		log.Printf("[INFO] Failed to delete disk:  %v", err)
		return err
	}

	log.Printf("[INFO] Deleted disk: %v", diskPath)
	d.SetId("")
	return nil
}
Example #24
// Run is part of the Task interface.
func (t *SplitCloneTask) Run(parameters map[string]string) ([]*automationpb.TaskContainer, string, error) {
	// TODO(mberlin): Add parameters for the following options?
	//                        '--source_reader_count', '1',
	//                        '--destination_writer_count', '1',
	args := []string{"SplitClone"}
	if online := parameters["online"]; online != "" {
		args = append(args, "--online="+online)
	}
	if offline := parameters["offline"]; offline != "" {
		args = append(args, "--offline="+offline)
	}
	if excludeTables := parameters["exclude_tables"]; excludeTables != "" {
		args = append(args, "--exclude_tables="+excludeTables)
	}
	if writeQueryMaxRows := parameters["write_query_max_rows"]; writeQueryMaxRows != "" {
		args = append(args, "--write_query_max_rows="+writeQueryMaxRows)
	}
	if writeQueryMaxSize := parameters["write_query_max_size"]; writeQueryMaxSize != "" {
		args = append(args, "--write_query_max_size="+writeQueryMaxSize)
	}
	if minHealthyRdonlyTablets := parameters["min_healthy_rdonly_tablets"]; minHealthyRdonlyTablets != "" {
		args = append(args, "--min_healthy_rdonly_tablets="+minHealthyRdonlyTablets)
	}
	args = append(args, topoproto.KeyspaceShardString(parameters["keyspace"], parameters["source_shard"]))
	output, err := ExecuteVtworker(context.TODO(), parameters["vtworker_endpoint"], args)

	// TODO(mberlin): Remove explicit reset when vtworker supports it implicitly.
	if err == nil {
		// Ignore output and error of the Reset.
		ExecuteVtworker(context.TODO(), parameters["vtworker_endpoint"], []string{"Reset"})
	}
	return nil, output, err
}
Example #25
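// TestBreaker drives enough failing calls through the hystrix client wrapper
// to trip the circuit, then asserts that the next call fails with the
// open-circuit error.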
func TestBreaker(t *testing.T) {
	// setup
	r := mock.NewRegistry()
	s := selector.NewSelector(selector.Registry(r))

	c := client.NewClient(
		// set the selector
		client.Selector(s),
		// add the breaker wrapper
		client.Wrap(NewClientWrapper()),
	)

	req := c.NewJsonRequest("test.service", "Test.Method", map[string]string{
		"foo": "bar",
	})

	var rsp map[string]interface{}

	// Force to point of trip
	for i := 0; i < (hystrix.DefaultVolumeThreshold * 2); i++ {
		c.Call(context.TODO(), req, rsp)
	}

	err := c.Call(context.TODO(), req, rsp)
	if err == nil {
		t.Error("Expecting tripped breaker, got nil error")
	}

	if err.Error() != "hystrix: circuit open" {
		t.Errorf("Expecting tripped breaker, got %v", err)
	}
}
Example #26
File: change.go Project: kristinn/govmomi
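// Run applies the configured resource allocation settings to every resource
// pool matching the path arguments, dropping Shares when no level is set.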
func (cmd *change) Run(f *flag.FlagSet) error {
	if f.NArg() == 0 {
		return flag.ErrHelp
	}

	finder, err := cmd.Finder()
	if err != nil {
		return err
	}

	cmd.SetAllocation(func(a types.BaseResourceAllocationInfo) {
		ra := a.GetResourceAllocationInfo()
		if ra.Shares.Level == "" {
			ra.Shares = nil
		}
	})

	for _, arg := range f.Args() {
		pools, err := finder.ResourcePoolList(context.TODO(), arg)
		if err != nil {
			return err
		}

		for _, pool := range pools {
			err := pool.UpdateConfig(context.TODO(), cmd.name, &cmd.ResourceConfigSpec)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Example #27
// TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through.
func TestV3AlarmDeactivate(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := toGRPC(clus.RandClient()).KV
	mt := toGRPC(clus.RandClient()).Maintenance

	alarmReq := &pb.AlarmRequest{
		MemberID: 123,
		Action:   pb.AlarmRequest_ACTIVATE,
		Alarm:    pb.AlarmType_NOSPACE,
	}
	if _, err := mt.Alarm(context.TODO(), alarmReq); err != nil {
		t.Fatal(err)
	}

	key := []byte("abc")
	smallbuf := make([]byte, 512)
	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
	if err == nil && err != rpctypes.ErrNoSpace {
		t.Fatalf("put got %v, expected %v", err, rpctypes.ErrNoSpace)
	}

	alarmReq.Action = pb.AlarmRequest_DEACTIVATE
	if _, err = mt.Alarm(context.TODO(), alarmReq); err != nil {
		t.Fatal(err)
	}

	if _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {
		t.Fatal(err)
	}
}
Example #28
func (na *NetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error {
	localNet := na.getNetwork(vip.NetworkID)
	if localNet == nil {
		return fmt.Errorf("networkallocator: could not find local network state")
	}

	ipam, _, err := na.resolveIPAM(localNet.nw)
	if err != nil {
		return fmt.Errorf("failed to resolve IPAM while allocating : %v", err)
	}

	// Retrieve the poolID and immediately nuke
	// out the mapping.
	poolID := localNet.endpoints[vip.Addr]
	delete(localNet.endpoints, vip.Addr)

	ip, _, err := net.ParseCIDR(vip.Addr)
	if err != nil {
		log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr)
		return err
	}

	if err := ipam.ReleaseAddress(poolID, ip); err != nil {
		log.G(context.TODO()).Errorf("IPAM failure while releasing VIP address %s: %v", vip.Addr, err)
		return err
	}

	return nil
}
Example #29
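// bestParent walks the input commit's ancestry, returning the output commit of
// the first job whose output is already readable (COMMIT_TYPE_READ), or the
// root parent commit if none is found.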
func (a *apiServer) bestParent(pipelineInfo *pps.PipelineInfo, inputCommitInfo *pfs.CommitInfo) (*pfs.Commit, error) {
	for {
		jobInfos, err := a.jobAPIClient.ListJob(
			context.Background(),
			&pps.ListJobRequest{
				Pipeline: pipelineInfo.Pipeline,
				Input:    inputCommitInfo.Commit,
			},
		)
		if err != nil {
			return nil, err
		}
		// newest to oldest assumed
		for _, jobInfo := range jobInfos.JobInfo {
			if jobInfo.OutputCommit != nil {
				outputCommitInfo, err := a.pfsAPIClient.InspectCommit(context.TODO(), &pfs.InspectCommitRequest{Commit: jobInfo.OutputCommit})
				if err != nil {
					return nil, err
				}
				if outputCommitInfo.CommitType == pfs.CommitType_COMMIT_TYPE_READ {
					return outputCommitInfo.Commit, nil
				}
			}
		}
		if inputCommitInfo.ParentCommit.Id == "" {
			return inputCommitInfo.ParentCommit, nil
		}
		inputCommitInfo, err = a.pfsAPIClient.InspectCommit(context.TODO(), &pfs.InspectCommitRequest{Commit: inputCommitInfo.ParentCommit})
		if err != nil {
			return nil, err
		}
	}
}
Example #30
File: executor.go Project: jak-atx/vic
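// NewExecutor fetches the host's managed method executer and dynamic type
// manager through the vim25 client and wires them into a new Executor.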
func NewExecutor(c *vim25.Client, host *object.HostSystem) (*Executor, error) {
	e := &Executor{
		c:    c,
		host: host,
		info: make(map[string]*CommandInfo),
	}

	{
		req := types.RetrieveManagedMethodExecuter{
			This: host.Reference(),
		}

		res, err := methods.RetrieveManagedMethodExecuter(context.TODO(), c, &req)
		if err != nil {
			return nil, err
		}

		e.mme = res.Returnval
	}

	{
		req := types.RetrieveDynamicTypeManager{
			This: host.Reference(),
		}

		res, err := methods.RetrieveDynamicTypeManager(context.TODO(), c, &req)
		if err != nil {
			return nil, err
		}

		e.dtm = res.Returnval
	}

	return e, nil
}