Example #1
// CreateVolume creates a volume in the cluster. A request name of the form
// "<node>/<name>" restricts creation to the named node; otherwise the volume
// is created on every engine.
func (c *Cluster) CreateVolume(request *types.VolumeCreateRequest) (*types.Volume, error) {
	var (
		wg     sync.WaitGroup
		volume *types.Volume
		err    error
		parts  = strings.SplitN(request.Name, "/", 2)
		node   = ""
	)

	if request.Name == "" {
		request.Name = stringid.GenerateRandomID()
	} else if len(parts) == 2 {
		node = parts[0]
		request.Name = parts[1]
	}
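	// No node was specified: fan the create out to every engine in the cluster.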
	if node == "" {
		c.RLock()
		for _, e := range c.engines {
			wg.Add(1)

			go func(engine *cluster.Engine) {
				defer wg.Done()

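				// Record the last successful create; keep an error only while no engine has produced a volume.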
				v, er := engine.CreateVolume(request)
				if v != nil {
					volume = v
					err = nil
				}
				if er != nil && volume == nil {
					err = er
				}
			}(e)
		}
		c.RUnlock()

		wg.Wait()
	} else {
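		// A node prefix ("<node>/<name>") was given: resolve that node through the
		// scheduler and create the volume only on its engine.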
		config := cluster.BuildContainerConfig(containertypes.Config{Env: []string{"constraint:node==" + parts[0]}}, containertypes.HostConfig{}, networktypes.NetworkingConfig{})
		// Use a distinct variable for the scheduling error so the outer err,
		// which carries the create result, is not shadowed.
		nodes, errSelect := c.scheduler.SelectNodesForContainer(c.listNodes(), config)
		if errSelect != nil {
			return nil, errSelect
		}
		if nodes != nil {
			v, er := c.engines[nodes[0].ID].CreateVolume(request)
			if v != nil {
				volume = v
				err = nil
			}
			if er != nil && volume == nil {
				err = er
			}
		}
	}

	return volume, err
}
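
To make the "<node>/<name>" convention above concrete, here is a small, self-contained sketch of the same split CreateVolume performs on the requested name (the sample names are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same split CreateVolume performs on request.Name.
	for _, name := range []string{"myvolume", "node-1/myvolume"} {
		parts := strings.SplitN(name, "/", 2)
		if len(parts) == 2 {
			fmt.Printf("%q -> create only on node %q, volume name %q\n", name, parts[0], parts[1])
		} else {
			fmt.Printf("%q -> no node prefix, create on every engine\n", name)
		}
	}
}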

// Sanity test for VMDK volumes:
// - check we can attach/detach the correct volume (we use 'touch' and 'stat' to validate)
// - check volumes are correctly created and deleted
// - check we see the volume properly from another Docker VM (-H2 flag)
func TestSanity(t *testing.T) {

	fmt.Printf("Running tests on  %s (may take a while)...\n", endPoint1)
	clients := []struct {
		endPoint string
		client   *client.Client
	}{
		{endPoint1, new(client.Client)},
		{endPoint2, new(client.Client)},
	}

	for idx, elem := range clients {
		c, err := client.NewClient(elem.endPoint, apiVersion, nil, defaultHeaders)
		if err != nil {
			t.Fatalf("Failed to connect to %s, err: %v", elem.endPoint, err)
		}
		t.Logf("Successfully connected to %s", elem.endPoint)
		clients[idx].client = c
	}

	c := clients[0].client // this is the endpoint we use as master
	t.Logf("Creating vol=%s on client %s.", volumeName, clients[0].endPoint)
	_, err := c.VolumeCreate(context.Background(),
		types.VolumeCreateRequest{
			Name:   volumeName,
			Driver: driverName,
			DriverOpts: map[string]string{
				"size": "1gb",
			},
		})
	if err != nil {
		t.Fatal(err)
	}

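	// Validate attach/detach via the checkTouch helper ('touch' a file on the volume, then verify it).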
	checkTouch(t, c, volumeName, "file_to_touch", clients[0].endPoint)

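	// The new volume must be visible, with the expected driver, from every endpoint.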
	for _, elem := range clients {
		v := volumeVmdkExists(t, elem.client, volumeName)
		if v == nil {
			t.Fatalf("Volume=%s is missing on %s after create",
				volumeName, elem.endPoint)
		}
		if v.Driver != driverName {
			t.Fatalf("wrong driver (%s) for volume %s", v.Driver, v.Name)
		}
	}

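	// Remove the volume through the master endpoint and verify it disappears everywhere.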
	err = c.VolumeRemove(context.Background(), volumeName)
	if err != nil {
		t.Fatalf("Failed to delete volume, err: %v", err)
	}

	for _, elem := range clients {
		if volumeVmdkExists(t, elem.client, volumeName) != nil {
			t.Errorf("Volume=%s is still present on %s after removal",
				volumeName, elem.endPoint)
		}
	}

	fmt.Printf("Running parallel tests on %s and %s (may take a while)...\n", endPoint1, endPoint2)
	// Collect results on a short buffered channel; when it fills, the senders pause,
	// which randomizes how the create/delete operations interleave.
	results := make(chan error, parallelVolumes)
	createRequest := types.VolumeCreateRequest{
		Name:   volumeName,
		Driver: driverName,
		DriverOpts: map[string]string{
			"size": "1gb",
		},
	}
	// Launch one create/delete goroutine per endpoint; each one creates and
	// removes parallelVolumes volumes and reports every result on the channel.
	for idx, elem := range clients {
		go func(idx int, c *client.Client) {
			for i := 0; i < parallelVolumes; i++ {
				volName := "volTestP" + strconv.Itoa(idx) + strconv.Itoa(i)
				// Copy the shared template request so the goroutines do not race on Name.
				request := createRequest
				request.Name = volName
				_, err := c.VolumeCreate(context.Background(), request)
				results <- err
				err = c.VolumeRemove(context.Background(), volName)
				results <- err
			}
		}(idx, elem.client)
	}
	// We need to read #clients * #volumes * 2 operations from the channel
	for i := 0; i < len(clients)*parallelVolumes*2; i++ {
		err := <-results
		if err != nil {
			t.Fatalf("Parallel test failed, err: %v", err)
		}
	}
}