// waitRemove blocks until either: // a) the device registered at <device_set_prefix>-<hash> is removed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitRemove(devname string) error { log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(devname) if err != nil { // If there is an error we assume the device doesn't exist. // The error might actually be something else, but we can't differentiate. return nil } if i%100 == 0 { log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) } if devinfo.Exists == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) } return nil }
// AddDevice creates a new thin snapshot device based on the device
// registered under baseHash, and registers it under hash.
// It fails if hash is already registered or baseHash is unknown.
func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
	baseInfo, err := devices.lookupDevice(baseHash)
	if err != nil {
		return err
	}

	// Lock ordering used throughout this file: per-device lock first,
	// then the DeviceSet lock.
	baseInfo.lock.Lock()
	defer baseInfo.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	if info, _ := devices.lookupDevice(hash); info != nil {
		return fmt.Errorf("device %s already exists", hash)
	}

	deviceId := devices.nextDeviceId

	// NOTE(review): createSnapDevice takes &deviceId, presumably so it can
	// advance past ids that already exist in the pool (cf. createDevice) —
	// confirm against its implementation.
	if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
		log.Debugf("Error creating snap device: %s", err)
		return err
	}

	// Ids are 24bit, so wrap around
	devices.nextDeviceId = (deviceId + 1) & 0xffffff

	if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
		// Best-effort rollback of the snapshot created above; its error
		// is intentionally ignored in favor of the registration error.
		deleteDevice(devices.getPoolDevName(), deviceId)
		log.Debugf("Error registering device: %s", err)
		return err
	}
	return nil
}
func (devices *DeviceSet) deactivatePool() error { log.Debugf("[devmapper] deactivatePool()") defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() devinfo, err := getInfo(devname) if err != nil { return err } if devinfo.Exists != 0 { return removeDevice(devname) } return nil }
// setupBaseImage ensures the base device (registered under the empty
// hash "") exists, has a filesystem, and is marked Initialized.
// An existing initialized base image is reused as-is; a half-built one
// is deleted and recreated from scratch.
func (devices *DeviceSet) setupBaseImage() error {
	oldInfo, _ := devices.lookupDevice("")
	if oldInfo != nil && oldInfo.Initialized {
		return nil
	}

	// A base image that exists but never finished initializing is
	// useless — remove it and start over.
	if oldInfo != nil && !oldInfo.Initialized {
		log.Debugf("Removing uninitialized base image")
		if err := devices.deleteDevice(oldInfo); err != nil {
			return err
		}
	}

	log.Debugf("Initializing base device-manager snapshot")

	id := devices.nextDeviceId

	// Create initial device. createDevice may advance id past ids that
	// already exist in the pool, hence the pointer.
	if err := createDevice(devices.getPoolDevName(), &id); err != nil {
		return err
	}

	// Ids are 24bit, so wrap around
	devices.nextDeviceId = (id + 1) & 0xffffff

	log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
	info, err := devices.registerDevice(id, "", devices.baseFsSize)
	if err != nil {
		// Roll back the pool device; error deliberately ignored.
		_ = deleteDevice(devices.getPoolDevName(), id)
		return err
	}

	log.Debugf("Creating filesystem on base device-manager snapshot")

	if err = devices.activateDeviceIfNeeded(info); err != nil {
		return err
	}

	if err := devices.createFilesystem(info); err != nil {
		return err
	}

	// Only mark Initialized once the metadata is durably saved; on a save
	// failure the flag is reverted so a retry rebuilds the base image.
	info.Initialized = true
	if err = devices.saveMetadata(info); err != nil {
		info.Initialized = false
		return err
	}

	return nil
}
// CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. // func CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return CopyFileWithTar(src, dst) } // Create dst, copy src's content into it log.Debugf("Creating dest directory: %s", dst) if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { return err } log.Debugf("Calling TarUntar(%s, %s)", src, dst) return TarUntar(src, dst) }
func removeDevice(name string) error { log.Debugf("[devmapper] removeDevice START") defer log.Debugf("[devmapper] removeDevice END") task, err := createTask(DeviceRemove, name) if task == nil { return err } dmSawBusy = false if err = task.Run(); err != nil { if dmSawBusy { return ErrBusy } return fmt.Errorf("Error running removeDevice %s", err) } return nil }
func createDevice(poolName string, deviceId *int) error { log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) for { task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { return fmt.Errorf("Can't set message %s", err) } dmSawExist = false if err := task.Run(); err != nil { if dmSawExist { // Already exists, try next id *deviceId++ continue } return fmt.Errorf("Error running createDevice %s", err) } break } return nil }
func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { log.Debugf("Error dm_set_dev_dir") return ErrSetDevDir } return nil }
func UdevWait(cookie uint) error { if res := DmUdevWait(cookie); res != 1 { log.Debugf("Failed to wait on udev cookie %d", cookie) return ErrUdevWait } return nil }
// registerDevice records a new device in the in-memory Devices table and
// persists its metadata. If the metadata save fails, the in-memory entry
// is removed again and the error is returned.
func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
	log.Debugf("registerDevice(%v, %v)", id, hash)
	info := &DevInfo{
		Hash:          hash,
		DeviceId:      id,
		Size:          size,
		TransactionId: devices.allocateTransactionId(),
		Initialized:   false,
		devices:       devices,
	}

	// devicesLock guards only the Devices map, not the whole DeviceSet.
	devices.devicesLock.Lock()
	devices.Devices[hash] = info
	devices.devicesLock.Unlock()

	if err := devices.saveMetadata(info); err != nil {
		// Try to remove unused device
		devices.devicesLock.Lock()
		delete(devices.Devices, hash)
		devices.devicesLock.Unlock()
		return nil, err
	}

	return info, nil
}
// NewRequest() creates a new *http.Request, // applies all decorators in the HTTPRequestFactory on the request, // then applies decorators provided by d on the request. func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { req, err := http.NewRequest(method, urlStr, body) if err != nil { return nil, err } // By default, a nil factory should work. if h == nil { return req, nil } for _, dec := range h.decorators { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } for _, dec := range d { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } log.Debugf("%v -- HEADERS: %v", req.URL, req.Header) return req, err }
// ensureImage creates a sparse file of <size> bytes at the path // <root>/devicemapper/<name>. // If the file already exists, it does nothing. // Either way it returns the full path. func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name) if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { return "", err } if _, err := os.Stat(filename); err != nil { if !os.IsNotExist(err) { return "", err } log.Debugf("Creating loopback file %s for device-manage use", filename) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err } defer file.Close() if err = file.Truncate(size); err != nil { return "", err } } return filename, nil }
// log forwards a message to the debug log. Its signature
// (level, file, line, dmError, message) and the "libdevmapper" prefix
// indicate it is used as a libdevmapper log callback — confirm at the
// registration site.
func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) {
	if level >= 7 {
		return // Ignore _LOG_DEBUG
	}

	log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}
func createBridgeIface(name string) error { kv, err := kernel.GetKernelVersion() // only set the bridge's mac address if the kernel version is > 3.3 // before that it was not supported setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3) log.Debugf("setting bridge mac address = %v", setBridgeMacAddr) return netlink.CreateBridge(name, setBridgeMacAddr) }
func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) }
// TarUntar is a convenience function which calls Tar and Untar, with // the output of one piped into the other. If either Tar or Untar fails, // TarUntar aborts and returns the error. func TarUntar(src string, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() return Untar(archive, dst, nil) }
// ExportChanges produces an Archive (tar stream) from the given list of
// filesystem changes rooted at dir. Deletions are encoded as zero-size
// ".wh.<basename>" whiteout entries; adds/modifications are stored as
// regular tar entries. The stream is produced by a goroutine writing
// into an io.Pipe, so this function returns immediately.
func ExportChanges(dir string, changes []Change) (Archive, error) {
	reader, writer := io.Pipe()
	tw := tar.NewWriter(writer)

	go func() {
		// Scratch buffer reused across entries; created with a nil
		// destination — presumably addTarFile Resets it onto the right
		// writer before use (confirm in addTarFile).
		twBuf := bufio.NewWriterSize(nil, twBufSize)
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					// [1:] strips the leading "/" from the in-container path.
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := tw.WriteHeader(hdr); err != nil {
					log.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
					log.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			log.Debugf("Can't close layer: %s", err)
		}
		// Closing the pipe writer delivers EOF to the reader side.
		writer.Close()
	}()
	return reader, nil
}
// deleteDevice tears down the thin device described by info: optionally
// discards its blocks, deactivates it, deletes it from the pool, and
// unregisters it from the in-memory table and on-disk metadata.
// If removing the metadata fails, the in-memory entry is restored.
func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
	if devices.doBlkDiscard {
		// This is a workaround for the kernel not discarding blocks on
		// the thin pool when we remove a thinp device, so we do it
		// manually. Discard requires the device to be active; a failed
		// activation just skips the discard (best effort).
		if err := devices.activateDeviceIfNeeded(info); err == nil {
			if err := BlockDeviceDiscard(info.DevName()); err != nil {
				log.Debugf("Error discarding block on device: %s (ignoring)", err)
			}
		}
	}

	// Deactivate the device first if it is currently mapped.
	devinfo, _ := getInfo(info.Name())
	if devinfo != nil && devinfo.Exists != 0 {
		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
			log.Debugf("Error removing device: %s", err)
			return err
		}
	}

	// Delete the backing thin device from the pool.
	if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
		log.Debugf("Error deleting device: %s", err)
		return err
	}

	devices.allocateTransactionId()
	devices.devicesLock.Lock()
	delete(devices.Devices, info.Hash)
	devices.devicesLock.Unlock()

	if err := devices.removeMetadata(info); err != nil {
		// Roll back the in-memory removal so state stays consistent
		// with the metadata that is still on disk.
		devices.devicesLock.Lock()
		devices.Devices[info.Hash] = info
		devices.devicesLock.Unlock()
		log.Debugf("Error removing meta data: %s", err)
		return err
	}

	return nil
}
func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) defer log.Debugf("[devmapper] deactivateDevice END") // Wait for the unmount to be effective, // by watching the value of Info.OpenCount for the device if err := devices.waitClose(info); err != nil { log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) } devinfo, err := getInfo(info.Name()) if err != nil { return err } if devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { return err } } return nil }
func (devices *DeviceSet) UnmountDevice(hash string) error { log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) defer log.Debugf("[devmapper] UnmountDevice END") info, err := devices.lookupDevice(hash) if err != nil { return err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() if info.mountCount == 0 { return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) } info.mountCount-- if info.mountCount > 0 { return nil } log.Debugf("[devmapper] Unmount(%s)", info.mountPath) if err := syscall.Unmount(info.mountPath, 0); err != nil { return err } log.Debugf("[devmapper] Unmount done") if err := devices.deactivateDevice(info); err != nil { return err } info.mountPath = "" return nil }
// Useful helper for cleanup func RemoveDevice(name string) error { task := TaskCreate(DeviceRemove) if task == nil { return ErrCreateRemoveTask } if err := task.SetName(name); err != nil { log.Debugf("Can't set task name %s", name) return err } if err := task.Run(); err != nil { return ErrRunRemoveDevice } return nil }
// CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. // // If `dst` ends with a trailing slash '/', the final destination path // will be `dst/base(src)`. func CopyFileWithTar(src, dst string) (err error) { log.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing / if dst[len(dst)-1] == '/' { dst = path.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { return err } r, w := io.Pipe() errC := utils.Go(func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Name = filepath.Base(dst) tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }) defer func() { if er := <-errC; err != nil { err = er } }() return Untar(r, filepath.Dir(dst), nil) }
// Shutdown unmounts and deactivates every known device, then the base
// image, then the thin pool itself. All errors are logged and ignored so
// that shutdown makes as much progress as possible; it always returns nil.
func (devices *DeviceSet) Shutdown() error {
	log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
	defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)

	var devs []*DevInfo

	// Snapshot the device list so we don't hold devicesLock across the
	// slow, lock-juggling per-device teardown below.
	devices.devicesLock.Lock()
	for _, info := range devices.Devices {
		devs = append(devs, info)
	}
	devices.devicesLock.Unlock()

	for _, info := range devs {
		// Lock ordering: per-device lock first, then the DeviceSet lock.
		info.lock.Lock()
		if info.mountCount > 0 {
			// We use MNT_DETACH here in case it is still busy in some running
			// container. This means it'll go away from the global scope directly,
			// and the device will be released when that container dies.
			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
				log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
			}

			devices.Lock()
			if err := devices.deactivateDevice(info); err != nil {
				log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err)
			}
			devices.Unlock()
		}
		info.lock.Unlock()
	}

	// The base image is registered under the empty hash.
	info, _ := devices.lookupDevice("")
	if info != nil {
		info.lock.Lock()
		devices.Lock()
		if err := devices.deactivateDevice(info); err != nil {
			log.Debugf("Shutdown deactivate base , error: %s", err)
		}
		devices.Unlock()
		info.lock.Unlock()
	}

	devices.Lock()
	if err := devices.deactivatePool(); err != nil {
		log.Debugf("Shutdown deactivate pool , error: %s", err)
	}
	devices.Unlock()

	return nil
}
func getStatus(name string) (uint64, uint64, string, string, error) { task, err := createTask(DeviceStatus, name) if task == nil { log.Debugf("getStatus: Error createTask: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { log.Debugf("getStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { log.Debugf("getStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { log.Debugf("getStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } _, start, length, targetType, params := task.GetNextTarget(0) return start, length, targetType, params, nil }
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { if len(source) < len(m) { log.Debugf("Len too short") continue } if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } return Uncompressed }
// Matches returns true if relFilePath matches any of the patterns func Matches(relFilePath string, patterns []string) (bool, error) { for _, exclude := range patterns { matched, err := filepath.Match(exclude, relFilePath) if err != nil { log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) return false, err } if matched { if filepath.Clean(relFilePath) == "." { log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) continue } log.Debugf("Skipping excluded path: %s", relFilePath) return true, nil } } return false, nil }
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func attachLoopDevice(sparseName string) (loop *os.File, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		log.Debugf("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
	if err != nil {
		log.Errorf("Error openning sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	// NOTE(review): closing sparseFile here is presumably safe because the
	// kernel keeps its own reference once the file is attached to the loop
	// device — confirm against openNextAvailableLoopback.
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		// Auto-clear flag: detach the loop device on last close.
		loFlags: LoFlagsAutoClear,
	}
	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		log.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			log.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
// waitClose blocks until either: // a) the device registered at <device_set_prefix>-<hash> is closed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitClose(info *DevInfo) error { i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(info.Name()) if err != nil { return err } if i%100 == 0 { log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) } return nil }
func DecompressStream(archive io.Reader) (io.ReadCloser, error) { buf := bufio.NewReader(archive) bs, err := buf.Peek(10) if err != nil { return nil, err } log.Debugf("[tar autodetect] n: %v", bs) compression := DetectCompression(bs) switch compression { case Uncompressed: return ioutil.NopCloser(buf), nil case Gzip: return gzip.NewReader(buf) case Bzip2: return ioutil.NopCloser(bzip2.NewReader(buf)), nil case Xz: return xzDecompress(buf) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } }
// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, // and attempts to configure it with an address which doesn't conflict with any other interface on the host. // If it can't find an address which doesn't conflict, it will return an error. func createBridge(bridgeIP string) error { nameservers := []string{} resolvConf, _ := resolvconf.Get() // we don't check for an error here, because we don't really care // if we can't read /etc/resolv.conf. So instead we skip the append // if resolvConf is nil. It either doesn't exist, or we can't read it // for some reason. if resolvConf != nil { nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...) } var ifaceAddr string if len(bridgeIP) != 0 { _, _, err := net.ParseCIDR(bridgeIP) if err != nil { return err } ifaceAddr = bridgeIP } else { for _, addr := range addrs { _, dockerNetwork, err := net.ParseCIDR(addr) if err != nil { return err } if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { ifaceAddr = addr break } else { log.Debugf("%s %s", addr, err) } } } } if ifaceAddr == "" { return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) } log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { return err } iface, err := net.InterfaceByName(bridgeIface) if err != nil { return err } ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) if err != nil { return err } if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { return fmt.Errorf("Unable to add private network: %s", err) } if err := netlink.NetworkLinkUp(iface); err != nil { return fmt.Errorf("Unable to start network bridge: %s", err) } return nil }