Example #1
func (d *driver) prefixToMountUnmount(
	volumeName,
	volumeID string) ([]*core.Volume, []*core.VolumeAttachment, *core.Instance, error) {
	if volumeName == "" && volumeID == "" {
		return nil, nil, nil, errors.New("Missing volume name or ID")
	}

	var instance *core.Instance
	var err error
	if instance, err = d.getInstance(); err != nil {
		return nil, nil, nil, err
	}

	var vols []*core.Volume
	if vols, err = d.r.Storage.GetVolume(volumeID, volumeName); err != nil {
		return nil, nil, nil, err
	}

	switch {
	case len(vols) == 0:
		return nil, nil, nil, errors.New("No volumes returned by name")
	case len(vols) > 1:
		return nil, nil, nil, errors.New("Multiple volumes returned by name")
	}

	var volAttachments []*core.VolumeAttachment
	if volAttachments, err = d.r.Storage.GetVolumeAttach(
		vols[0].VolumeID, instance.InstanceID); err != nil {
		return nil, nil, nil, err
	}

	return vols, volAttachments, instance, nil
}
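Mount (Example #22) calls this helper to resolve the volume, its attachments, and the local instance in a single step before deciding whether the volume still needs to be attached.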
Example #2
// NetworkName returns the information needed to discover a volume on an OS for the given instance
func (d *driver) NetworkName(volumeName, instanceID string) (string, error) {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"instanceID": instanceID,
		"driverName": d.Name()}).Info("returning network name")

	volumes, err := d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	switch {
	case len(volumes) == 0:
		return "", errors.New("No volumes returned by name")
	case len(volumes) > 1:
		return "", errors.New("Multiple volumes returned by name")
	}

	volumeAttachment, err := d.r.Storage.GetVolumeAttach(
		volumes[0].VolumeID, instanceID)
	if err != nil {
		return "", err
	}

	if len(volumeAttachment) == 0 {
		return "", errors.New("Volume not attached")
	}

	volumes, err = d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	return volumes[0].NetworkName, nil
}
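Example #3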
// Key adds a key to the registration.
//
// The first vararg argument is the yaml name of the key, using a '.' as
// the nested separator. If the second and third vararg arguments are
// omitted they will be generated from the first argument. The second
// argument is the explicit name of the flag bound to this key. The third
// argument is the explicit name of the environment variable bound to this
// key.
func (r *Registration) Key(
	keyType KeyType,
	short string,
	defVal interface{},
	description string,
	keys ...string) {

	if keys == nil {
		panic(errors.New("keys is nil"))
	}

	lk := len(keys)

	if lk == 0 {
		panic(errors.New("len(keys) == 0"))
	}

	kn := keys[0]

	rk := &regKey{
		keyType: keyType,
		short:   short,
		desc:    description,
		defVal:  defVal,
		keyName: keys[0],
	}

	if lk < 2 {
		kp := strings.Split(kn, ".")
		for x, s := range kp {
			if x == 0 {
				var buff []byte
				b := bytes.NewBuffer(buff)
				for y, r := range s {
					if y == 0 {
						b.WriteRune(unicode.ToLower(r))
					} else {
						b.WriteRune(r)
					}
				}
				kp[x] = b.String()
			} else {
				kp[x] = strings.Title(s)
			}
		}
		rk.flagName = strings.Join(kp, "")
	}

	if lk < 3 {
		kp := strings.Split(kn, ".")
		for x, s := range kp {
			kp[x] = strings.ToUpper(s)
		}
		rk.envVarName = strings.Join(kp, "_")
	}

	r.keys = append(r.keys, rk)
}
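For illustration, a hypothetical registration call (the key type constant, description, and key name below are invented for this example, not taken from the original source) shows how the flag and environment-variable names are derived when only the yaml key name is supplied:

	// Hypothetical call: only the yaml key name is given, so both the flag
	// name and the environment-variable name are derived from it.
	r.Key(String, "", "", "The AWS access key", "aws.accessKey")
	// keyName:    "aws.accessKey"
	// flagName:   "awsAccessKey"  (first segment lower-cased, later segments title-cased, joined)
	// envVarName: "AWS_ACCESSKEY" (segments upper-cased, joined with "_")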
Example #4
// Path returns the mounted path of the volume
func (d *driver) Path(volumeName, volumeID string) (string, error) {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"volumeID":   volumeID,
		"driverName": d.Name()}).Info("getting path to volume")
	if volumeName == "" && volumeID == "" {
		return "", errors.New("Missing volume name or ID")
	}

	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return "", err
	}

	switch {
	case len(instances) == 0:
		return "", errors.New("No instances")
	case len(instances) > 1:
		return "", errors.New("Too many instances returned, limit the storagedrivers")
	}

	volumes, err := d.r.Storage.GetVolume(volumeID, volumeName)
	if err != nil {
		return "", err
	}

	switch {
	case len(volumes) == 0:
		return "", errors.New("No volumes returned by name")
	case len(volumes) > 1:
		return "", errors.New("Multiple volumes returned by name")
	}

	volumeAttachment, err := d.r.Storage.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)
	if err != nil {
		return "", err
	}

	if len(volumeAttachment) == 0 {
		return "", nil
	}

	mounts, err := d.r.OS.GetMounts(volumeAttachment[0].DeviceName, "")
	if err != nil {
		return "", err
	}

	if len(mounts) == 0 {
		return "", nil
	}

	return mounts[0].Mountpoint, nil
}
Example #5
func getVolumeMountPath(name string) (string, error) {
	if name == "" {
		return "", errors.New("Missing volume name")
	}

	return fmt.Sprintf("%s/%s", mountDirectoryPath, name), nil
}
Example #6
func (d *driver) CopySnapshot(
	runAsync bool,
	volumeID, snapshotID,
	snapshotName, destinationSnapshotName,
	destinationRegion string) (*core.Snapshot, error) {
	return nil, errors.New("This driver does not implement CopySnapshot")
}
Example #7
func (d *xtremIODriver) getLunMaps(initiatorName, volumeID string) (xtio.Refs, error) {
	if initiatorName == "" {
		return nil, errors.New("Missing initiatorName")
	}

	initiatorGroup, err := d.client.GetInitiatorGroup("", initiatorName)
	if err != nil {
		return nil, err
	}

	lunMaps, err := d.client.GetLunMaps()
	if err != nil {
		return nil, err
	}

	var refs xtio.Refs
	for _, ref := range lunMaps {
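		// Lun map names are expected to encode the volume ID and the
		// initiator group index as the first two underscore-separated
		// fields: "<volumeID>_<initiatorGroupIndex>_...".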

		idents := strings.Split(ref.Name, "_")
		if len(idents) < 3 {
			continue
		} else if strconv.Itoa(initiatorGroup.Index) == idents[1] && volumeID == idents[0] {
			refs = append(refs, ref)
		}
	}

	return refs, nil
}
Example #8
func (d *driver) getInstance() (*core.Instance, error) {
	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return nil, err
	}

	switch {
	case len(instances) == 0:
		return nil, errors.New("No instances")
	case len(instances) > 1:
		return nil,
			errors.New("Too many instances returned, limit the storagedrivers")
	}

	return instances[0], nil
}
Example #9
// Remove will remove a remote volume
func (d *driver) Remove(volumeName string) error {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"driverName": d.Name()}).Info("removing volume")

	if volumeName == "" {
		return errors.New("Missing volume name")
	}

	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return err
	}

	switch {
	case len(instances) == 0:
		return errors.New("No instances")
	case len(instances) > 1:
		return errors.New("Too many instances returned, limit the storagedrivers")
	}

	volumes, err := d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return err
	}

	switch {
	case len(volumes) == 0:
		return errors.New("No volumes returned by name")
	case len(volumes) > 1:
		return errors.New("Multiple volumes returned by name")
	}

	err = d.Unmount("", volumes[0].VolumeID)
	if err != nil {
		return err
	}

	err = d.r.Storage.RemoveVolume(volumes[0].VolumeID)
	if err != nil {
		return err
	}

	return nil
}
Example #10
func (d *driver) createGetInstance() error {
	var err error
	var instances []*core.Instance

	if instances, err = d.r.Storage.GetInstances(); err != nil {
		return err
	}

	switch {
	case len(instances) == 0:
		return errors.New("No instances")
	case len(instances) > 1:
		return errors.New(
			"Too many instances returned, limit the storagedrivers")
	}

	return nil
}
Example #11
// ReadConfig reads a configuration stream into the current config instance
func (c *Config) ReadConfig(in io.Reader) error {

	if in == nil {
		return errors.New("config reader is nil")
	}

	c.v.ReadConfigNoNil(in)

	return nil
}
Example #12
func getIQN() (string, error) {
	data, err := ioutil.ReadFile("/etc/iscsi/initiatorname.iscsi")
	if err != nil {
		return "", err
	}

	result := string(data)
	lines := strings.Split(result, "\n")

	for _, line := range lines {
		split := strings.Split(line, "=")
		if split[0] == "InitiatorName" {
			return split[1], nil
		}
	}
	return "", errors.New("IQN not found")
}
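For reference, /etc/iscsi/initiatorname.iscsi normally contains a single InitiatorName assignment of the form below, which is the line the loop above matches (the IQN value is only illustrative):

InitiatorName=iqn.1993-08.org.debian:01:abcdef123456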
Example #13
// ReadConfig reads a configuration stream into the current config instance
func (c *Config) ReadConfig(in io.Reader) error {

	if in == nil {
		return errors.New("config reader is nil")
	}

	c.Viper.ReadConfigNoNil(in)
	c.Viper.Unmarshal(&c.secureConfig)
	c.Viper.Unmarshal(&c.plainTextConfig)

	for key := range keys {
		c.updateFlag(key, c.GlobalFlags)
		c.updateFlag(key, c.AdditionalFlags)
	}

	return nil
}
Example #14
func (d *driver) GetDeviceNextAvailable() (string, error) {
	letters := []string{
		"a", "b", "c", "d", "e", "f", "g", "h",
		"i", "j", "k", "l", "m", "n", "o", "p"}

	blockDeviceNames := make(map[string]bool)

	blockDeviceMapping, err := d.GetVolumeMapping()
	if err != nil {
		return "", err
	}

	for _, blockDevice := range blockDeviceMapping {
		re := regexp.MustCompile(`^/dev/xvd([a-z])`)
		res := re.FindStringSubmatch(blockDevice.DeviceName)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}

	localDevices, err := getLocalDevices()
	if err != nil {
		return "", err
	}

	for _, localDevice := range localDevices {
		re := regexp.MustCompile(`^xvd([a-z])`)
		res := re.FindStringSubmatch(localDevice)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}

	for _, letter := range letters {
		if !blockDeviceNames[letter] {
			nextDeviceName := "/dev/xvd" + letter
			log.Println("Got next device name: " + nextDeviceName)
			return nextDeviceName, nil
		}
	}
	return "", errors.New("No available device")
}
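For example, if the volume mapping and the local device list already contain xvda and xvdb, the first free letter is "c" and the function returns "/dev/xvdc".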
Example #15
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {
	if instanceID == "" {
		instanceID = d.currentInstanceId
	}
	instance, err := d.GetInstance()
	if err != nil {
		return nil, err
	}
	instanceID = instance.Name
	log.WithField("provider", providerName).Debugf("AttachVolume %s %s", volumeID, instance.Name)
	query := d.client.Disks.List(d.project, d.zone)
	query.Filter(fmt.Sprintf("name eq %s", volumeID))
	disks, err := query.Do()
	if err != nil {
		return nil, err
	}
	if len(disks.Items) != 1 {
		return nil, errors.New("No available device")
	}

	disk := &compute.AttachedDisk{
		AutoDelete: false,
		Boot:       false,
		Source:     disks.Items[0].SelfLink,
	}
	operation, err := d.client.Instances.AttachDisk(d.project, d.zone, instanceID, disk).Do()
	if err != nil {
		return nil, err
	}
	if !runAsync {
		err := d.waitUntilOperationIsFinished(operation)
		if err != nil {
			return nil, err
		}
	}

	return d.GetVolumeAttach(volumeID, instanceID)

}
Example #16
// Format will look for an existing ext4/xfs filesystem and create one if it doesn't exist or if overwriteFs is set
func (d *driver) Format(
	deviceName, newFsType string, overwriteFs bool) error {

	var fsDetected bool

	fsType, err := probeFsType(deviceName)
	if err != nil && err != errors.ErrUnknownFileSystem {
		return err
	}
	if fsType != "" {
		fsDetected = true
	}

	log.WithFields(log.Fields{
		"fsDetected":  fsDetected,
		"fsType":      fsType,
		"deviceName":  deviceName,
		"overwriteFs": overwriteFs,
		"driverName":  d.Name()}).Info("probe information")

	if overwriteFs || !fsDetected {
		switch newFsType {
		case "ext4":
			if err := exec.Command("mkfs.ext4", deviceName).Run(); err != nil {
				return fmt.Errorf(
					"Problem creating filesystem on %s with error %s",
					deviceName, err)
			}
		case "xfs":
			if err := exec.Command("mkfs.xfs", "-f", deviceName).Run(); err != nil {
				return fmt.Errorf(
					"Problem creating filesystem on %s with error %s",
					deviceName, err)
			}
		default:
			return errors.New("Unsupported FS")
		}
	}

	return nil
}
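Mount (Example #22) calls Format with the attachment's device name and the resolved filesystem type before creating and mounting the mount path.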
Example #17
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, blank string) error {
	instance, err := d.GetInstance()
	if err != nil {
		return err
	}
	instanceID := instance.Name
	log.WithField("provider", providerName).Debugf("DetachVolume %s %s", volumeID, instance.Name)
	query := d.client.Disks.List(d.project, d.zone)
	query.Filter(fmt.Sprintf("name eq %s", volumeID))
	disks, err := query.Do()
	if err != nil {
		return err
	}
	if len(disks.Items) != 1 {
		return errors.New("No available device")
	}
	attachments, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return err
	}
	for _, attachment := range attachments {
		targetVolumeId := strings.Replace(attachment.VolumeID,
			fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/", d.project, d.zone),
			"",
			-1)
		if targetVolumeId == volumeID {
			diskName := strings.Replace(attachment.DeviceName, "/dev/disk/by-id/google-", "", -1)
			operation, err := d.client.Instances.DetachDisk(d.project, d.zone, instanceID, diskName).Do()
			if err != nil {
				return err
			}
			if !runAsync {
				err := d.waitUntilOperationIsFinished(operation)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
Example #18
func (d *driver) GetMounts(
	deviceName, mountPoint string) (core.MountInfoArray, error) {

	mounts, err := mount.GetMounts()
	if err != nil {
		return nil, err
	}

	if mountPoint == "" && deviceName == "" {
		return mounts, nil
	} else if mountPoint != "" && deviceName != "" {
		return nil, errors.New("Cannot specify mountPoint and deviceName")
	}

	var matchedMounts []*mount.Info
	for _, mount := range mounts {
		if mount.Mountpoint == mountPoint || mount.Source == deviceName {
			matchedMounts = append(matchedMounts, mount)
		}
	}
	return matchedMounts, nil
}
Example #19
func (m *badMockOSDriver) Init(r *core.RexRay) error {
	return errors.New("init error")
}
Example #20
func (d *driver) CopySnapshot(runAsync bool,
	volumeID, snapshotID, snapshotName, destinationSnapshotName,
	destinationRegion string) (*core.Snapshot, error) {

	if volumeID == "" && snapshotID == "" && snapshotName == "" {
		return nil, errors.New("Missing volumeID, snapshotID, or snapshotName")
	}

	snapshots, err := d.getSnapshot(volumeID, snapshotID, snapshotName)
	if err != nil {
		return nil, err
	}

	if len(snapshots) > 1 {
		return nil, errors.ErrMultipleVolumesReturned
	} else if len(snapshots) == 0 {
		return nil, errors.ErrNoVolumesReturned
	}

	snapshotID = snapshots[0].Id

	options := &ec2.CopySnapshot{
		SourceRegion:      d.ec2Instance.Region.Name,
		DestinationRegion: destinationRegion,
		SourceSnapshotId:  snapshotID,
		Description: fmt.Sprintf("[Copied %s from %s]",
			snapshotID, d.ec2Instance.Region.Name),
	}
	resp := &ec2.CopySnapshotResp{}

	auth := aws.Auth{
		AccessKey: d.r.Config.AwsAccessKey,
		SecretKey: d.r.Config.AwsSecretKey}
	destec2Instance := ec2.New(
		auth,
		aws.Regions[destinationRegion],
	)

	origec2Instance := d.ec2Instance
	d.ec2Instance = destec2Instance
	defer func() { d.ec2Instance = origec2Instance }()

	resp, err = d.ec2Instance.CopySnapshot(options)
	if err != nil {
		return nil, err
	}

	if destinationSnapshotName != "" {
		_, err := d.ec2Instance.CreateTags(
			[]string{resp.SnapshotId},
			[]ec2.Tag{{"Name", destinationSnapshotName}})

		if err != nil {
			return nil, err
		}
	}

	if !runAsync {
		log.Println("Waiting for snapshot copy to complete")
		err = d.waitSnapshotComplete(resp.SnapshotId)
		if err != nil {
			return nil, err
		}
	}

	snapshot, err := d.GetSnapshot("", resp.SnapshotId, "")
	if err != nil {
		return nil, err
	}

	return snapshot[0], nil
}
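Note how the copy is issued against the destination region: d.ec2Instance is temporarily replaced with a client bound to destinationRegion, and the deferred function restores the original client when CopySnapshot returns.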
Example #21
// Create will create a remote volume
func (d *driver) Create(volumeName string, volumeOpts core.VolumeOpts) error {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"volumeOpts": volumeOpts,
		"driverName": d.Name()}).Info("creating volume")

	if volumeName == "" {
		return errors.New("Missing volume name")
	}

	var err error

	if err = d.createGetInstance(); err != nil {
		return err
	}

	for k, v := range volumeOpts {
		volumeOpts[strings.ToLower(k)] = v
	}
	newFsType := volumeOpts["newfstype"]

	var overwriteFs bool
	var volumes []*core.Volume

	volumes, overwriteFs, err = d.createGetVolumes(volumeName, volumeOpts)
	if err != nil {
		return err
	}

	if len(volumes) > 0 {
		return nil
	}

	var volFrom *core.Volume
	var volumeID string
	if volFrom, err = d.createInitVolume(
		volumeName, volumeOpts); err != nil {
		return err
	} else if volFrom != nil {
		volumeID = volFrom.VolumeID
	}

	var snapFrom *core.Snapshot
	var snapshotID string
	if snapFrom, err = d.createGetSnapshot(volumeOpts); err != nil {
		return err
	} else if snapFrom != nil {
		snapshotID = snapFrom.SnapshotID
	}

	volumeType := createInitVolumeType(volumeOpts, volFrom)
	IOPS := createInitIOPS(volumeOpts, volFrom)
	size := createInitSize(volumeOpts, volFrom, snapFrom)
	availabilityZone := createInitAvailabilityZone(volumeOpts)

	if len(volumes) == 0 {
		if _, err = d.r.Storage.CreateVolume(
			false, volumeName, volumeID, snapshotID,
			volumeType, IOPS, size, availabilityZone); err != nil {
			return err
		}
	}

	if newFsType != "" || overwriteFs {
		_, err = d.Mount(volumeName, "", overwriteFs, newFsType)
		if err != nil {
			log.WithFields(log.Fields{
				"volumeName":  volumeName,
				"overwriteFs": overwriteFs,
				"newFsType":   newFsType,
				"driverName":  d.Name()}).Error("Failed to create or mount file system")
		}
		err = d.Unmount(volumeName, "")
		if err != nil {
			return err
		}
	}

	return nil
}
Example #22
// Mount will perform the steps to get an existing volume, with or without a filesystem, mounted to a guest
func (d *driver) Mount(volumeName, volumeID string, overwriteFs bool, newFsType string) (string, error) {
	log.WithFields(log.Fields{
		"volumeName":  volumeName,
		"volumeID":    volumeID,
		"overwriteFs": overwriteFs,
		"newFsType":   newFsType,
		"driverName":  d.Name()}).Info("mounting volume")

	var err error
	var vols []*core.Volume
	var volAttachments []*core.VolumeAttachment
	var instance *core.Instance

	if vols, volAttachments, instance, err = d.prefixToMountUnmount(
		volumeName, volumeID); err != nil {
		return "", err
	}

	if len(volAttachments) == 0 {
		volAttachments, err = d.r.Storage.AttachVolume(
			false, vols[0].VolumeID, instance.InstanceID)
		if err != nil {
			return "", err
		}
	}

	if len(volAttachments) == 0 {
		return "", errors.New("Volume did not attach")
	}

	mounts, err := d.r.OS.GetMounts(volAttachments[0].DeviceName, "")
	if err != nil {
		return "", err
	}

	if len(mounts) > 0 {
		return mounts[0].Mountpoint, nil
	}

	switch {
	case os.Getenv("REXRAY_DOCKER_VOLUMETYPE") != "":
		newFsType = os.Getenv("REXRAY_DOCKER_VOLUMETYPE")
	case newFsType == "":
		newFsType = "ext4"
	}

	if err := d.r.OS.Format(
		volAttachments[0].DeviceName, newFsType, overwriteFs); err != nil {
		return "", err
	}

	mountPath, err := getVolumeMountPath(vols[0].Name)
	if err != nil {
		return "", err
	}

	if err := os.MkdirAll(mountPath, 0755); err != nil {
		return "", err
	}

	if err := d.r.OS.Mount(
		volAttachments[0].DeviceName, mountPath, "", ""); err != nil {
		return "", err
	}

	return mountPath, nil
}
Example #23
// StartModule starts the module with the provided instance ID.
func StartModule(modInstID int32) error {

	modInstancesRwl.RLock()
	defer modInstancesRwl.RUnlock()

	lf := map[string]interface{}{"id": modInstID}

	mod, modExists := modInstances[modInstID]

	if !modExists {
		return errors.WithFields(lf, "unknown module instance")
	}

	lf["id"] = mod.ID
	lf["typeId"] = mod.Type.ID
	lf["typeName"] = mod.Type.Name
	lf["address"] = mod.Config.Address

	started := make(chan bool)
	timeout := make(chan bool)
	startError := make(chan error)

	go func() {

		defer func() {
			r := recover()
			m := "error starting module"

			errMsg := fmt.Sprintf(
				"Error starting module type %d, %d-%s at %s",
				mod.TypeID, mod.ID, mod.Name, mod.Config.Address)

			if r == nil {
				startError <- errors.New(errMsg)
				return
			}

			switch x := r.(type) {
			case string:
				lf["inner"] = x
				startError <- errors.WithFields(lf, m)
			case error:
				startError <- errors.WithFieldsE(lf, m, x)
			default:
				startError <- errors.WithFields(lf, m)
			}
		}()

		sErr := mod.Inst.Start()
		if sErr != nil {
			startError <- sErr
		} else {
			started <- true
		}
	}()

	go func() {
		time.Sleep(3 * time.Second)
		timeout <- true
	}()

	select {
	case <-started:
		mod.IsStarted = true
		log.WithFields(lf).Info("started module")
	case <-timeout:
		log.WithFields(lf).Debug("timed out while monitoring module start")
	case sErr := <-startError:
		return sErr
	}

	return nil
}
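The dedicated timeout goroutine above can also be expressed with time.After; the function below is only a sketch of that alternative (waitForStart and its parameters are invented for this example, not part of the module code):

// waitForStart sketches the same three-way wait using time.After instead of
// a separate goroutine feeding a timeout channel. A timeout is treated as
// non-fatal, mirroring the Debug log in StartModule above.
func waitForStart(started <-chan bool, startError <-chan error, timeout time.Duration) error {
	select {
	case <-started:
		return nil
	case <-time.After(timeout):
		return nil
	case err := <-startError:
		return err
	}
}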
Example #24
func newMod(id int32, cfg *module.Config) (module.Module, error) {
	return &mod{
		id:   id,
		r:    core.New(cfg.Config),
		name: modName,
		desc: modDescription,
		addr: cfg.Address,
	}, nil
}

const driverName = "dockervolumedriver"

var (
	errMissingHost      = errors.New("Missing host parameter")
	errBadHostSpecified = errors.New("Bad host specified, e.g. unix:///run/docker/plugins/rexray.sock or tcp://127.0.0.1:8080")
	errBadProtocol      = errors.New("Bad protocol specified with host, e.g. unix:// or tcp://")
)

type pluginRequest struct {
	Name       string          `json:"Name,omitempty"`
	Opts       core.VolumeOpts `json:"Opts,omitempty"`
	InstanceID string          `json:"InstanceID,omitempty"` // implied by the handlers in Example #25; json tag is an assumption
}

func (m *mod) Start() error {

	proto, addr, parseAddrErr := util.ParseAddress(m.Address())
	if parseAddrErr != nil {
		return parseAddrErr
	}
Example #25
func (m *mod) buildMux() *http.ServeMux {

	mux := http.NewServeMux()

	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{"Implements": ["RemoteVolumeDriver"]}`)
	})

	mux.HandleFunc("/RemoteVolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}
		err := m.r.Volume.Create(pr.Name, pr.Opts)
		if err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/RemoteVolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		err := m.r.Volume.Remove(pr.Name)
		if err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/RemoteVolumeDriver.NetworkName", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		if pr.InstanceID == "" {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", errors.New("Missing InstanceID").Error()), 500)
			return
		}

		networkName, err := m.r.Volume.NetworkName(pr.Name, pr.InstanceID)
		if err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Networkname\": \"%s\"}", networkName))
	})

	mux.HandleFunc("/RemoteVolumeDriver.Attach", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		if pr.InstanceID == "" {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", errors.New("Missing InstanceID").Error()), 500)
			return
		}

		networkName, err := m.r.Volume.Attach(pr.Name, pr.InstanceID)
		if err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Networkname\": \"%s\"}", networkName))
	})

	mux.HandleFunc("/RemoteVolumeDriver.Detach", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		if pr.InstanceID == "" {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", errors.New("Missing InstanceID").Error()), 500)
			return
		}

		err := m.r.Volume.Detach(pr.Name, pr.InstanceID)
		if err != nil {
			http.Error(w, fmt.Sprintf("{\"Error\":\"%s\"}", err.Error()), 500)
			return
		}

		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	return mux
}
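The mux only becomes useful once it is bound to the plugin address; below is a minimal sketch of serving it, assuming the proto and addr values produced by util.ParseAddress in Example #24 (serveMux itself is invented for this example):

// serveMux sketches how the plugin mux might be served: listen on the parsed
// protocol/address pair (e.g. "unix" and "/run/docker/plugins/rexray.sock")
// and hand the listener to net/http.
func serveMux(proto, addr string, mux *http.ServeMux) error {
	l, err := net.Listen(proto, addr)
	if err != nil {
		return err
	}
	return http.Serve(l, mux)
}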