Example #1
func (m *Manager) Attach(op trace.Operation, disk *types.VirtualDisk) error {
	deviceList := object.VirtualDeviceList{}
	deviceList = append(deviceList, disk)

	changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	machineSpec := types.VirtualMachineConfigSpec{}
	machineSpec.DeviceChange = append(machineSpec.DeviceChange, changeSpec...)

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, machineSpec)

		op.Debugf("Attach reconfigure task=%s", t.Reference())

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("vmdk storage driver failed to attach disk: %s", errors.ErrorStack(err))
		return errors.Trace(err)
	}
	return nil
}
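
Attach expects a fully populated *types.VirtualDisk. In these examples that spec comes from the Manager's createDiskSpec helper (used in Example #6 below), whose body isn't shown here. As a rough, hypothetical sketch of what such a spec can look like, assuming govmomi's vim25/types package and placeholder key/controller values:

func exampleDiskSpec(dsURI string, capacityKB int64) *types.VirtualDisk {
	// Hypothetical sketch only: the device key, controller key, and backing
	// mode are placeholders, not the values createDiskSpec actually uses.
	return &types.VirtualDisk{
		CapacityInKB: capacityKB,
		VirtualDevice: types.VirtualDevice{
			Key:           -1,  // negative key lets vSphere assign one
			ControllerKey: 100, // placeholder SCSI controller key
			Backing: &types.VirtualDiskFlatVer2BackingInfo{
				DiskMode:        string(types.VirtualDiskModePersistent),
				ThinProvisioned: types.NewBool(true),
				VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
					FileName: dsURI, // "[datastoreN] /path/to/disk.vmdk"
				},
			},
		},
	}
}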
Example #2
func (m *Manager) Detach(op trace.Operation, d *VirtualDisk) error {
	defer trace.End(trace.Begin(d.DevicePath))
	op.Infof("Detaching disk %s", d.DevicePath)

	d.lock()
	defer d.unlock()

	if !d.Attached() {
		op.Infof("Disk %s is already detached", d.DevicePath)
		return nil
	}

	if err := d.canBeDetached(); err != nil {
		return errors.Trace(err)
	}

	spec := types.VirtualMachineConfigSpec{}

	disk, err := findDisk(op, m.vm, d.DatastoreURI)
	if err != nil {
		return errors.Trace(err)
	}

	config := []types.BaseVirtualDeviceConfigSpec{
		&types.VirtualDeviceConfigSpec{
			Device:    disk,
			Operation: types.VirtualDeviceConfigSpecOperationRemove,
		},
	}

	spec.DeviceChange = config

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, spec)

		op.Debugf("Detach reconfigure task=%s", t.Reference())

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("%s", err)
		log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
		return errors.Trace(err)
	}

	// Release our slot in the attached-disk limiter; the non-blocking select
	// makes this a no-op if no slot is currently held.
	select {
	case <-m.maxAttached:
	default:
	}

	return d.setDetached()
}
Example #3
func (v *ImageStore) scratch(op trace.Operation, storeName string) error {

	// Create the image directory in the store.
	imageDir := v.imageDirPath(storeName, portlayer.Scratch.ID)
	if _, err := v.ds.Mkdir(op, false, imageDir); err != nil {
		return err
	}

	// Write the metadata to the datastore
	metaDataDir := v.imageMetadataDirPath(storeName, portlayer.Scratch.ID)
	if err := writeMetadata(op, v.ds, metaDataDir, nil); err != nil {
		return err
	}

	imageDiskDsURI := v.imageDiskDSPath(storeName, portlayer.Scratch.ID)
	op.Infof("Creating image %s (%s)", portlayer.Scratch.ID, imageDiskDsURI)

	size := int64(defaultDiskSize)
	if portlayer.Config.ScratchSize != 0 {
		size = portlayer.Config.ScratchSize
	}

	// Create the disk
	vmdisk, err := v.dm.CreateAndAttach(op, imageDiskDsURI, "", size, os.O_RDWR)
	if err != nil {
		return err
	}
	defer func() {
		if vmdisk.Attached() {
			v.dm.Detach(op, vmdisk)
		}
	}()
	op.Debugf("Scratch disk created with size %d", portlayer.Config.ScratchSize)

	// Make the filesystem and set its label to defaultDiskLabel
	if err = vmdisk.Mkfs(defaultDiskLabel); err != nil {
		return err
	}

	if err = v.dm.Detach(op, vmdisk); err != nil {
		return err
	}

	if err = v.writeManifest(op, storeName, portlayer.Scratch.ID, nil); err != nil {
		return err
	}

	return nil
}
Example #4
func (v *ImageStore) GetImage(op trace.Operation, store *url.URL, ID string) (*portlayer.Image, error) {

	defer trace.End(trace.Begin(store.String()))
	storeName, err := util.ImageStoreName(store)
	if err != nil {
		return nil, err
	}

	imageURL, err := util.ImageURL(storeName, ID)
	if err != nil {
		return nil, err
	}

	if err = v.verifyImage(op, storeName, ID); err != nil {
		return nil, err
	}

	// get the metadata
	metaDataDir := v.imageMetadataDirPath(storeName, ID)
	meta, err := getMetadata(op, v.ds, metaDataDir)
	if err != nil {
		return nil, err
	}

	var s = *store
	var parentURL *url.URL

	parentID := v.parents.Get(ID)
	if parentID != "" {
		parentURL, _ = util.ImageURL(storeName, parentID)
	}

	newImage := &portlayer.Image{
		ID:       ID,
		SelfLink: imageURL,
		// We're relying on the parent map for this since we don't currently have a
		// way to get the disk's spec.  See VIC #482 for details.  Parent:
		// parent.SelfLink,
		Store:      &s,
		ParentLink: parentURL,
		Metadata:   meta,
	}

	op.Debugf("Returning image from location %s with parent url %s", newImage.SelfLink, newImage.Parent())
	return newImage, nil
}
Example #5
// GetImage gets the specified image from the given store by retrieving it from the cache.
func (c *NameLookupCache) GetImage(op trace.Operation, store *url.URL, ID string) (*Image, error) {

	op.Debugf("Getting image %s from %s", ID, store.String())

	storeName, err := util.ImageStoreName(store)
	if err != nil {
		return nil, err
	}

	// Check the store exists
	if _, err = c.GetImageStore(op, storeName); err != nil {
		return nil, err
	}

	c.storeCacheLock.Lock()
	indx := c.storeCache[*store]
	c.storeCacheLock.Unlock()

	imgURL, err := util.ImageURL(storeName, ID)
	if err != nil {
		return nil, err
	}
	node, err := indx.Get(imgURL.String())

	var img *Image
	if err != nil {
		if err == index.ErrNodeNotFound {
			op.Debugf("Image %s not in cache, retrieving from datastore", ID)
			// Not in the cache.  Try to load it.
			img, err = c.DataStore.GetImage(op, store, ID)
			if err != nil {
				return nil, err
			}

			if err = indx.Insert(img); err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
	} else {
		img, _ = node.(*Image)
	}

	return img, nil
}
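
A hypothetical call site for the cached lookup; op, cache, storeURL, and imageID are all placeholders:

// Hypothetical usage; on a cache miss GetImage falls through to DataStore.GetImage.
img, err := cache.GetImage(op, storeURL, imageID)
if err != nil {
	return err
}
op.Debugf("got image %s (self link %s)", img.ID, img.SelfLink)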
Example #6
// CreateAndAttach creates a new vmdk child disk of the given capacity from the
// given parent and attaches it to the VM. It returns a VirtualDisk
// corresponding to the created and attached disk. newDiskURI and parentURI are
// both datastore URI paths of the form "[datastoreN] /path/to/disk.vmdk".
func (m *Manager) CreateAndAttach(op trace.Operation, newDiskURI, parentURI string,
	capacity int64, flags int) (*VirtualDisk, error) {
	defer trace.End(trace.Begin(newDiskURI))

	// ensure we abide by max attached disks limits
	m.maxAttached <- true

	d, err := NewVirtualDisk(newDiskURI)
	if err != nil {
		return nil, errors.Trace(err)
	}

	spec := m.createDiskSpec(newDiskURI, parentURI, capacity, flags)

	op.Infof("Create/attach vmdk %s from parent %s", newDiskURI, parentURI)

	err = m.Attach(op, spec)
	if err != nil {
		return nil, errors.Trace(err)
	}

	op.Debugf("Mapping vmdk to pci device %s", newDiskURI)
	devicePath, err := m.devicePathByURI(op, newDiskURI)
	if err != nil {
		return nil, errors.Trace(err)
	}

	d.setAttached(devicePath)

	if err := waitForPath(op, devicePath); err != nil {
		op.Infof("waitForPath failed for %s with %s", newDiskURI, errors.ErrorStack(err))
		// ensure that the disk is detached if it's the publish that's failed

		if detachErr := m.Detach(op, d); detachErr != nil {
			op.Debugf("detach(%s) failed with %s", newDiskURI, errors.ErrorStack(detachErr))
		}

		return nil, errors.Trace(err)
	}

	return d, nil
}
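
Examples #3 (scratch) and #7 (writeImage) drive this call; a condensed, hypothetical sketch of the usual attach/mount/detach lifecycle, where dm, mountDir, and both datastore paths are placeholders, looks roughly like:

// Hypothetical sketch; see Example #7 (writeImage) for the full error handling.
vmdisk, err := dm.CreateAndAttach(op, "[datastore1] images/child.vmdk",
	"[datastore1] images/parent.vmdk", 0, os.O_RDWR)
if err != nil {
	return err
}

if err := vmdisk.Mount(mountDir, nil); err != nil {
	dm.Detach(op, vmdisk) // best-effort cleanup
	return err
}

// ... populate the filesystem ...

if err := vmdisk.Unmount(); err != nil {
	return err
}
return dm.Detach(op, vmdisk)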
Example #7
// Create the image directory, create the child vmdk in this directory,
// attach/mount the disk, unpack the tar, and check the checksum.  If the data
// doesn't match the expected checksum, abort by nuking the image directory.
// If everything matches, write the manifest file that marks the image as
// consistent.  The unwind path is a bit convoluted here; we need to clean up
// on the way out in the error case.
func (v *ImageStore) writeImage(op trace.Operation, storeName, parentID, ID string, meta map[string][]byte,
	sum string, r io.Reader) error {

	// Create a temp image directory in the store.
	imageDir := v.imageDirPath(storeName, ID)
	_, err := v.ds.Mkdir(op, true, imageDir)
	if err != nil {
		return err
	}

	// Write the metadata to the datastore
	metaDataDir := v.imageMetadataDirPath(storeName, ID)
	err = writeMetadata(op, v.ds, metaDataDir, meta)
	if err != nil {
		return err
	}

	// datastore path to the parent
	parentDiskDsURI := v.imageDiskDSPath(storeName, parentID)

	// datastore path to the disk we're creating
	diskDsURI := v.imageDiskDSPath(storeName, ID)
	op.Infof("Creating image %s (%s)", ID, diskDsURI)

	var vmdisk *disk.VirtualDisk

	// On error, unmount if mounted, detach if attached, and nuke the image directory
	defer func() {
		if err != nil {
			op.Errorf("Cleaning up failed WriteImage directory %s", imageDir)

			if vmdisk != nil {
				if vmdisk.Mounted() {
					op.Debugf("Unmounting abandoned disk")
					vmdisk.Unmount()
				}

				if vmdisk.Attached() {
					op.Debugf("Detaching abandoned disk")
					v.dm.Detach(op, vmdisk)
				}
			}

			v.ds.Rm(op, imageDir)
		}
	}()

	// Create the disk
	vmdisk, err = v.dm.CreateAndAttach(op, diskDsURI, parentDiskDsURI, 0, os.O_RDWR)
	if err != nil {
		return err
	}
	// tmp dir to mount the disk
	dir, err := ioutil.TempDir("", "mnt-"+ID)
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	if err = vmdisk.Mount(dir, nil); err != nil {
		return err
	}

	h := sha256.New()
	t := io.TeeReader(r, h)

	// Untar the archive
	var n int64
	if n, err = archive.ApplyLayer(dir, t); err != nil {
		return err
	}

	op.Debugf("%s wrote %d bytes", ID, n)

	actualSum := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actualSum != sum {
		err = fmt.Errorf("Failed to validate image checksum. Expected %s, got %s", sum, actualSum)
		return err
	}

	if err = vmdisk.Unmount(); err != nil {
		return err
	}

	if err = v.dm.Detach(op, vmdisk); err != nil {
		return err
	}

	// Write our own bookkeeping manifest file to the image's directory.  We
	// treat the manifest file like a done file.  Its existence means this vmdk
	// is consistent.  Previously we were writing the vmdk to a tmp vmdk file
	// then moving it (using the MoveDatastoreFile or MoveVirtualDisk calls).
	// However(!!) this flattens the vmdk.  Also mkdir foo && ls -l foo fails
	// on VSAN (see
	// https://github.com/vmware/vic/pull/1764#issuecomment-237093424 for
	// detail).  We basically can't trust any of the datastore calls to help us
	// with atomic operations.  Touching an empty file seems to work well
	// enough.
	if err = v.writeManifest(op, storeName, ID, nil); err != nil {
		return err
	}

	return nil
}