// Attach attaches the given disk device to the VM backing this Manager via a
// reconfigure task, serialized against other reconfigure operations.
func (m *Manager) Attach(op trace.Operation, disk *types.VirtualDisk) error {
	deviceList := object.VirtualDeviceList{}
	deviceList = append(deviceList, disk)

	changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	machineSpec := types.VirtualMachineConfigSpec{}
	machineSpec.DeviceChange = append(machineSpec.DeviceChange, changeSpec...)

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, machineSpec)

		// guard against a nil task if Reconfigure fails
		if t != nil {
			op.Debugf("Attach reconfigure task=%s", t.Reference())
		}

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("vmdk storage driver failed to attach disk: %s", errors.ErrorStack(err))
		return errors.Trace(err)
	}

	return nil
}
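// Usage sketch (illustrative, not part of the driver): a caller that already
// knows the datastore path of an existing vmdk and the key of the controller
// it should hang off could attach it roughly as below. The helper name
// attachExisting and its parameters are assumptions for the example, and
// unit-number selection is elided.
func attachExisting(op trace.Operation, m *Manager, dsPath string, controllerKey int32) error {
	disk := &types.VirtualDisk{
		VirtualDevice: types.VirtualDevice{
			Key:           -1,
			ControllerKey: controllerKey,
			Backing: &types.VirtualDiskFlatVer2BackingInfo{
				VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
					FileName: dsPath, // e.g. "[datastore1] images/ID/ID.vmdk"
				},
				DiskMode: string(types.VirtualDiskModePersistent),
			},
		},
	}

	return m.Attach(op, disk)
}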
func (v *ImageStore) deleteImage(op trace.Operation, storeName, ID string) error {
	// Delete in order of manifest (the done file), the vmdk (because VC honors
	// the deletable flag in the vmdk file), then the directory to get
	// everything else.
	paths := []string{
		v.manifestPath(storeName, ID),
		v.imageDiskPath(storeName, ID),
		v.imageDirPath(storeName, ID),
	}

	for _, pth := range paths {
		err := v.ds.Rm(op, pth)

		// not exist is ok
		if err == nil || types.IsFileNotFound(err) {
			continue
		}

		// something isn't right. bail.
		op.Errorf("ImageStore: delete image error: %s", err.Error())
		return err
	}

	return nil
}
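// The manifest acts as a "done file": the create path (writeImage, below)
// touches it only after everything else is written, and the delete path above
// removes it first, so a directory that still has a manifest is always a
// complete image. A minimal local-filesystem analogue of the convention; the
// helper names, paths, and file names here are illustrative only.
func writeThenCommit(dir string, layer []byte) error {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}

	if err := ioutil.WriteFile(filepath.Join(dir, "layer.tar"), layer, 0644); err != nil {
		return err
	}

	// touch the empty manifest last to mark the directory consistent
	return ioutil.WriteFile(filepath.Join(dir, "manifest"), nil, 0644)
}

func deleteCommitted(dir string) error {
	// remove the manifest first so a partially deleted directory is never
	// mistaken for a consistent image
	if err := os.Remove(filepath.Join(dir, "manifest")); err != nil && !os.IsNotExist(err) {
		return err
	}

	return os.RemoveAll(dir)
}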
// Detach removes the disk from the VM backing this Manager via a reconfigure
// task and frees a slot in the attached-disk limit.
func (m *Manager) Detach(op trace.Operation, d *VirtualDisk) error {
	defer trace.End(trace.Begin(d.DevicePath))
	op.Infof("Detaching disk %s", d.DevicePath)

	d.lock()
	defer d.unlock()

	if !d.Attached() {
		op.Infof("Disk %s is already detached", d.DevicePath)
		return nil
	}

	if err := d.canBeDetached(); err != nil {
		return errors.Trace(err)
	}

	spec := types.VirtualMachineConfigSpec{}

	disk, err := findDisk(op, m.vm, d.DatastoreURI)
	if err != nil {
		return errors.Trace(err)
	}

	config := []types.BaseVirtualDeviceConfigSpec{
		&types.VirtualDeviceConfigSpec{
			Device:    disk,
			Operation: types.VirtualDeviceConfigSpecOperationRemove,
		},
	}

	spec.DeviceChange = config

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, spec)

		// guard against a nil task if Reconfigure fails
		if t != nil {
			op.Debugf("Detach reconfigure task=%s", t.Reference())
		}

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf(err.Error())
		log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
		return errors.Trace(err)
	}

	// free a slot in the attached-disk limit without blocking if none is held
	select {
	case <-m.maxAttached:
	default:
	}

	return d.setDetached()
}
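// Detach's non-blocking receive on m.maxAttached suggests the channel is used
// as a counting semaphore bounding how many disks are attached at once, with
// the attach path presumably taking a slot by sending into it. A standalone
// sketch of that pattern; the attachLimiter type and its names are
// illustrative, not part of the Manager.
type attachLimiter struct {
	slots chan struct{} // buffered; capacity == maximum concurrent attachments
}

func newAttachLimiter(limit int) *attachLimiter {
	return &attachLimiter{slots: make(chan struct{}, limit)}
}

// acquire blocks until an attachment slot is free.
func (l *attachLimiter) acquire() {
	l.slots <- struct{}{}
}

// release frees a slot; the non-blocking receive makes it safe to call even
// when no slot is held.
func (l *attachLimiter) release() {
	select {
	case <-l.slots:
	default:
	}
}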
// DeleteImage deletes an image from the image store. If the image is in
// use either by way of inheritance or because it's attached to a
// container, this will return an error.
func (v *ImageStore) DeleteImage(op trace.Operation, image *portlayer.Image) error {
	// check if the image is in use.
	if err := imagesInUse(op, image.ID); err != nil {
		op.Errorf("ImageStore: delete image error: %s", err.Error())
		return err
	}

	storeName, err := util.ImageStoreName(image.Store)
	if err != nil {
		return err
	}

	return v.deleteImage(op, storeName, image.ID)
}
// Create the image directory, create the child vmdk in this directory,
// attach/mount the disk, unpack the tar, and check the checksum. If the data
// doesn't match the expected checksum, abort by nuking the image directory.
// If everything matches, write the manifest (done file) to mark the image
// consistent; the rationale for using a manifest instead of a rename is at
// the end of the function. The unwind path is a bit convoluted here; we need
// to clean up on the way out in the error case.
func (v *ImageStore) writeImage(op trace.Operation, storeName, parentID, ID string, meta map[string][]byte, sum string, r io.Reader) error {
	// Create a temp image directory in the store.
	imageDir := v.imageDirPath(storeName, ID)
	_, err := v.ds.Mkdir(op, true, imageDir)
	if err != nil {
		return err
	}

	// Write the metadata to the datastore
	metaDataDir := v.imageMetadataDirPath(storeName, ID)
	err = writeMetadata(op, v.ds, metaDataDir, meta)
	if err != nil {
		return err
	}

	// datastore path to the parent
	parentDiskDsURI := v.imageDiskDSPath(storeName, parentID)

	// datastore path to the disk we're creating
	diskDsURI := v.imageDiskDSPath(storeName, ID)
	op.Infof("Creating image %s (%s)", ID, diskDsURI)

	var vmdisk *disk.VirtualDisk

	// On error, unmount if mounted, detach if attached, and nuke the image directory.
	defer func() {
		if err != nil {
			op.Errorf("Cleaning up failed WriteImage directory %s", imageDir)

			if vmdisk != nil {
				if vmdisk.Mounted() {
					op.Debugf("Unmounting abandoned disk")
					vmdisk.Unmount()
				}

				if vmdisk.Attached() {
					op.Debugf("Detaching abandoned disk")
					v.dm.Detach(op, vmdisk)
				}
			}

			v.ds.Rm(op, imageDir)
		}
	}()

	// Create the disk
	vmdisk, err = v.dm.CreateAndAttach(op, diskDsURI, parentDiskDsURI, 0, os.O_RDWR)
	if err != nil {
		return err
	}

	// tmp dir to mount the disk
	dir, err := ioutil.TempDir("", "mnt-"+ID)
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	// assign to the outer err so the deferred cleanup fires if the mount fails
	if err = vmdisk.Mount(dir, nil); err != nil {
		return err
	}

	h := sha256.New()
	t := io.TeeReader(r, h)

	// Untar the archive
	var n int64
	if n, err = archive.ApplyLayer(dir, t); err != nil {
		return err
	}

	op.Debugf("%s wrote %d bytes", ID, n)

	actualSum := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actualSum != sum {
		err = fmt.Errorf("Failed to validate image checksum. Expected %s, got %s", sum, actualSum)
		return err
	}

	if err = vmdisk.Unmount(); err != nil {
		return err
	}

	if err = v.dm.Detach(op, vmdisk); err != nil {
		return err
	}

	// Write our own bookkeeping manifest file to the image's directory. We
	// treat the manifest file like a done file. Its existence means this vmdk
	// is consistent. Previously we were writing the vmdk to a tmp vmdk file
	// then moving it (using the MoveDatastoreFile or MoveVirtualDisk calls).
	// However(!!) this flattens the vmdk. Also mkdir foo && ls -l foo fails
	// on VSAN (see
	// https://github.com/vmware/vic/pull/1764#issuecomment-237093424 for
	// detail). We basically can't trust any of the datastore calls to help us
	// with atomic operations. Touching an empty file seems to work well
	// enough.
	if err = v.writeManifest(op, storeName, ID, nil); err != nil {
		return err
	}

	return nil
}
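// writeImage validates the layer by teeing the tar stream through a sha256
// hash while it is unpacked. The same pattern in isolation; verifyAndExtract
// and its extract callback are illustrative, not part of the store.
func verifyAndExtract(r io.Reader, expectedSum string, extract func(io.Reader) error) error {
	h := sha256.New()

	// extract reads through the tee, so every byte it consumes is also hashed
	if err := extract(io.TeeReader(r, h)); err != nil {
		return err
	}

	actual := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actual != expectedSum {
		return fmt.Errorf("checksum mismatch: expected %s, got %s", expectedSum, actual)
	}

	return nil
}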