// useDirperm checks whether the dirperm1 mount option can be used with the
// current version of aufs.
func useDirperm() bool {
	enableDirpermLock.Do(func() {
		base, err := ioutil.TempDir("", "docker-aufs-base")
		if err != nil {
			glog.Errorf("error checking dirperm1: %s", err.Error())
			return
		}
		defer os.RemoveAll(base)

		union, err := ioutil.TempDir("", "docker-aufs-union")
		if err != nil {
			glog.Errorf("error checking dirperm1: %s", err.Error())
			return
		}
		defer os.RemoveAll(union)

		opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
		if err := syscall.Mount("none", union, "aufs", 0, opts); err != nil {
			return
		}
		enableDirperm = true
		if err := aufsUnmount(union); err != nil {
			glog.Errorf("error checking dirperm1: failed to unmount %s", err.Error())
		}
	})
	return enableDirperm
}
func (d *Driver) VmMountLayer(id string) error {
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return err
		}
	}

	var (
		diffSrc = fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
		volDst  = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	podstring, err := MakeMountPod("mac-vm-disk-mount-layer", "puller:latest", id, diffSrc, volDst)
	if err != nil {
		return err
	}
	podId := fmt.Sprintf("pull-%s", utils.RandStr(10, "alpha"))
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := d.daemon.StartPod(podId, podstring, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		d.daemon.CleanPod(podId)
	} else {
		glog.Errorf("pull vm should not be associated")
	}
	return nil
}
// Run executes the job and blocks until the job completes.
// If the job fails it returns an error.
func (job *Job) Run() (err error) {
	defer func() {
		// Wait for all background tasks to complete
		if job.closeIO {
			if err := job.Stdout.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stderr.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stdin.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
		}
	}()

	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
		return fmt.Errorf("engine is shutdown")
	}
	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
	// every time the daemon is cleanly restarted.
	// The permanent fix is to implement Job.Stop and Job.OnStop so that
	// ServeApi can cooperate and terminate cleanly.
	if job.Name != "serveapi" {
		job.Eng.l.Lock()
		job.Eng.tasks.Add(1)
		job.Eng.l.Unlock()
		defer job.Eng.tasks.Done()
	}
	// FIXME: make this thread-safe
	// FIXME: implement wait
	if !job.end.IsZero() {
		return fmt.Errorf("%s: job has already completed", job.Name)
	}
	// Log beginning and end of the job
	if job.Eng.Logging {
		glog.V(0).Infof("+job %s\n", job.CallString())
		defer func() {
			okerr := "OK"
			if err != nil {
				okerr = fmt.Sprintf("ERR: %s", err)
			}
			glog.V(0).Infof("-job %s %s\n", job.CallString(), okerr)
		}()
	}

	if job.handler == nil {
		return fmt.Errorf("%s: command not found\n", job.Name)
	}

	var errorMessage = bytes.NewBuffer(nil)
	job.Stderr.Add(errorMessage)

	err = job.handler(job)
	job.end = time.Now()
	return
}
func (d *Driver) Setup() (err error) {
	var (
		vm        *hypervisor.Vm
		ids       []string
		parentIds []string
	)
	if d.daemon == nil {
		d.daemon, err = GetDaemon()
		if err != nil {
			return err
		}
	}
	vm, err = d.daemon.StartVm(d.pullVm, 1, 64, false, types.VM_KEEP_AFTER_SHUTDOWN)
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}

	defer func() {
		if err != nil {
			d.daemon.KillVm(vm.Id)
		}
	}()

	if err = d.daemon.WaitVmStart(vm); err != nil {
		glog.Error(err)
		return err
	}

	if err = virtualbox.RegisterDisk(d.pullVm, d.pullVm, d.BaseImage(), 4); err != nil {
		glog.Errorf(err.Error())
		return err
	}

	ids, err = loadIds(path.Join(d.RootPath(), "layers"))
	if err != nil {
		return err
	}

	for _, id := range ids {
		if d.disks[id] == true {
			continue
		}
		parentIds, err = getParentIds(d.RootPath(), id)
		if err != nil {
			glog.Warningf(err.Error())
			continue
		}
		for _, cid := range parentIds {
			if d.disks[cid] == true {
				continue
			}
			d.Exists(cid)
			d.disks[cid] = true
		}
		d.disks[id] = true
	}
	return nil
}
// create and setup network bridge
func configureBridge(bridgeIP, bridgeIface string) error {
	var ifaceAddr string
	if len(bridgeIP) != 0 {
		_, _, err := net.ParseCIDR(bridgeIP)
		if err != nil {
			glog.Errorf("%s parsecidr failed\n", bridgeIP)
			return err
		}
		ifaceAddr = bridgeIP
	}

	if ifaceAddr == "" {
		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually", bridgeIface)
	}

	if err := CreateBridgeIface(bridgeIface); err != nil {
		// The bridge may already exist, therefore we can ignore an "exists" error
		if !os.IsExist(err) {
			glog.Errorf("CreateBridgeIface failed %s %s\n", bridgeIface, ifaceAddr)
			return err
		}
	}

	iface, err := net.InterfaceByName(bridgeIface)
	if err != nil {
		return err
	}

	ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
	if err != nil {
		return err
	}

	if ipAddr.Equal(ipNet.IP) {
		ipAddr, err = IpAllocator.RequestIP(ipNet, nil)
	} else {
		ipAddr, err = IpAllocator.RequestIP(ipNet, ipAddr)
	}
	if err != nil {
		return err
	}

	glog.V(3).Infof("Allocate IP Address %s for bridge %s\n", ipAddr, bridgeIface)

	if err := NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
		return fmt.Errorf("Unable to add private network: %s", err)
	}

	if err := NetworkLinkUp(iface); err != nil {
		return fmt.Errorf("Unable to start network bridge: %s", err)
	}
	return nil
}
func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
	// Start looking for a free /dev/loop
	for {
		target := fmt.Sprintf("/dev/loop%d", index)
		index++

		fi, err := os.Stat(target)
		if err != nil {
			if os.IsNotExist(err) {
				glog.Errorf("There are no more loopback devices available.")
			}
			return nil, ErrAttachLoopbackDevice
		}

		if fi.Mode()&os.ModeDevice != os.ModeDevice {
			glog.Errorf("Loopback device %s is not a block device.", target)
			continue
		}

		// OpenFile adds O_CLOEXEC
		loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
		if err != nil {
			glog.Errorf("Error opening loopback device: %s", err)
			return nil, ErrAttachLoopbackDevice
		}

		// Try to attach to the loop file
		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
			loopFile.Close()

			// If the error is EBUSY, then try the next loopback
			if err != syscall.EBUSY {
				glog.Errorf("Cannot set up loopback device %s: %s", target, err)
				return nil, ErrAttachLoopbackDevice
			}

			// Otherwise, we keep going with the loop
			continue
		}

		// In case of success, we finished. Break the loop.
		break
	}

	// This can't happen, but let's be sure
	if loopFile == nil {
		glog.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
func (devices *DeviceSet) processPendingTransaction() error {
	if err := devices.loadTransactionMetaData(); err != nil {
		return err
	}

	// If there was open transaction but pool transaction Id is same
	// as open transaction Id, nothing to roll back.
	if devices.TransactionId == devices.OpenTransactionId {
		return nil
	}

	// If open transaction Id is less than pool transaction Id, something
	// is wrong. Bail out.
	if devices.OpenTransactionId < devices.TransactionId {
		glog.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId)
		return nil
	}

	// Pool transaction Id is not same as open transaction. There is
	// a transaction which was not completed.
	if err := devices.rollbackTransaction(); err != nil {
		return fmt.Errorf("Rolling back open transaction failed: %s", err)
	}

	devices.OpenTransactionId = devices.TransactionId
	return nil
}
func (vc *VBoxContext) AllocateNetwork(vmId, requestedIP string, maps []pod.UserContainerPort) (*network.Settings, error) {
	ip, err := network.IpAllocator.RequestIP(network.BridgeIPv4Net, net.ParseIP(requestedIP))
	if err != nil {
		return nil, err
	}

	maskSize, _ := network.BridgeIPv4Net.Mask.Size()

	err = SetupPortMaps(vmId, ip.String(), maps)
	if err != nil {
		glog.Errorf("Setup Port Map failed %s", err)
		return nil, err
	}

	return &network.Settings{
		Mac:         "",
		IPAddress:   ip.String(),
		Gateway:     network.BridgeIPv4Net.IP.String(),
		Bridge:      "",
		IPPrefixLen: maskSize,
		Device:      "",
		File:        nil,
	}, nil
}
func (vc *VBoxContext) AddNic(ctx *hypervisor.VmContext, host *hypervisor.HostNicInfo, guest *hypervisor.GuestNicInfo) {
	go func() {
		callback := &hypervisor.NetDevInsertedEvent{
			Index:      guest.Index,
			DeviceName: guest.Device,
			Address:    guest.Busaddr,
		}
		if guest.Index > 7 || guest.Index < 0 {
			glog.Errorf("Hot adding NIC failed, can not add more than 8 NICs")
			ctx.Hub <- &hypervisor.DeviceFailed{
				Session: callback,
			}
			// report the failure and stop; do not fall through to the success event
			return
		}
		/*
			if err := vc.Machine.ModifyNIC(guest.Index+1, virtualbox.NICNetNAT, ""); err != nil {
				glog.Errorf("Hot adding NIC failed, %s", err.Error())
				ctx.Hub <- &hypervisor.DeviceFailed{
					Session: callback,
				}
				return
			}
		*/
		glog.V(1).Infof("nic %s insert succeeded", guest.Device)
		ctx.Hub <- callback
		return
	}()
}
// LoopbackSetCapacity asks the loop driver to re-read the size of the
// backing file for the given loopback device.
func LoopbackSetCapacity(file *os.File) error {
	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
		glog.Errorf("Error loopbackSetCapacity: %s", err)
		return ErrLoopbackSetCapacity
	}
	return nil
}
func httpError(w http.ResponseWriter, err error) {
	statusCode := http.StatusInternalServerError
	// FIXME: this is brittle and should not be necessary.
	// If we need to differentiate between different possible error types, we should
	// create appropriate error types with clearly defined meaning.
	errStr := strings.ToLower(err.Error())
	if strings.Contains(errStr, "no such") {
		statusCode = http.StatusNotFound
	} else if strings.Contains(errStr, "bad parameter") {
		statusCode = http.StatusBadRequest
	} else if strings.Contains(errStr, "conflict") {
		statusCode = http.StatusConflict
	} else if strings.Contains(errStr, "impossible") {
		statusCode = http.StatusNotAcceptable
	} else if strings.Contains(errStr, "wrong login/password") {
		statusCode = http.StatusUnauthorized
	} else if strings.Contains(errStr, "hasn't been activated") {
		statusCode = http.StatusForbidden
	}

	if err != nil {
		glog.Errorf("HTTP Error: statusCode=%d %v", statusCode, err)
		http.Error(w, err.Error(), statusCode)
	}
}
// This function will only be invoked during daemon start
func (vm *Vm) AssociateVm(mypod *Pod, data []byte) error {
	glog.V(1).Infof("Associate the POD(%s) with VM(%s)", mypod.Id, mypod.Vm)
	var (
		PodEvent  = make(chan VmEvent, 128)
		Status    = make(chan *types.VmResponse, 128)
		subStatus = make(chan *types.VmResponse, 128)
	)

	go VmAssociate(mypod.Vm, PodEvent, Status, mypod.Wg, data)

	go vm.handlePodEvent(mypod)

	ass := <-Status
	if ass.Code != types.E_OK {
		glog.Errorf("cannot associate with vm: %s, error status %d (%s)", mypod.Vm, ass.Code, ass.Cause)
		return errors.New("load vm status failed")
	}

	if err := vm.SetVmChan(PodEvent, Status, subStatus); err != nil {
		glog.V(1).Infof("SetVmChan error: %s", err.Error())
		return err
	}

	mypod.Status = types.S_POD_RUNNING
	mypod.SetContainerStatus(types.S_POD_RUNNING)

	vm.Status = types.S_VM_ASSOCIATED
	vm.Pod = mypod

	return nil
}
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
func (daemon *Daemon) PrepareServices(userPod *pod.UserPod, podId string) error {
	err := servicediscovery.PrepareServices(userPod, podId)
	if err != nil {
		glog.Errorf("PrepareServices failed %s", err.Error())
	}
	return err
}
// Put unmounts the device associated with the given id.
func (d *Driver) Put(id string) error {
	err := d.DeviceSet.UnmountDevice(id)
	if err != nil {
		glog.Errorf("Error unmounting device %s: %s", id, err)
	}
	return err
}
func (p *Pod) PrepareServices() error {
	err := servicediscovery.PrepareServices(p.spec, p.id)
	if err != nil {
		glog.Errorf("PrepareServices failed %s", err.Error())
	}
	return err
}
func (daemon *Daemon) StopPod(podId, stopVm string) (int, string, error) {
	glog.V(1).Infof("Prepare to stop the POD: %s", podId)
	// find the VM which runs the POD, and stop it
	pod, ok := daemon.PodList.Get(podId)
	if !ok {
		glog.Errorf("Can not find pod(%s)", podId)
		return -1, "", fmt.Errorf("Can not find pod(%s)", podId)
	}
	// we need to set the 'RestartPolicy' of the pod to 'never' if stop command is invoked
	// for kubernetes
	if pod.status.Type == "kubernetes" {
		pod.status.RestartPolicy = "never"
	}

	if pod.vm == nil {
		return types.E_VM_SHUTDOWN, "", nil
	}

	vmId := pod.vm.Id
	vmResponse := pod.vm.StopPod(pod.status, stopVm)

	// Delete the Vm info for POD
	daemon.DeleteVmByPod(podId)

	if vmResponse.Code == types.E_VM_SHUTDOWN {
		daemon.RemoveVm(vmId)
	}
	if pod.status.Autoremove == true {
		daemon.CleanPod(podId)
	}

	return vmResponse.Code, vmResponse.Cause, nil
}
// loadManifest loads a manifest from a byte array and verifies its content,
// returning the local digest, the manifest itself, and whether or not it was
// verified. If ref is a digest, rather than a tag, this will be treated as
// the local digest. An error will be returned if the signature verification
// fails, local digest verification fails and, if provided, the remote digest
// verification fails. The boolean return will only be false without error on
// the failure of the signatures trust check.
func (s *TagStore) loadManifest(manifestBytes []byte, ref string, remoteDigest digest.Digest) (digest.Digest, *registry.ManifestData, bool, error) {
	payload, keys, err := unpackSignedManifest(manifestBytes)
	if err != nil {
		return "", nil, false, fmt.Errorf("error unpacking manifest: %v", err)
	}

	// TODO(stevvooe): It would be a lot better here to build up a stack of
	// verifiers, then push the bytes one time for signatures and digests, but
	// the manifests are typically small, so this optimization is not worth
	// hacking this code without further refactoring.
	var localDigest digest.Digest

	// Verify the local digest, if present in ref. ParseDigest will validate
	// that the ref is a digest and verify against that if present. Otherwise
	// (on error), we simply compute the localDigest and proceed.
	if dgst, err := digest.ParseDigest(ref); err == nil {
		// verify the manifest against local ref
		if err := verifyDigest(dgst, payload); err != nil {
			return "", nil, false, fmt.Errorf("verifying local digest: %v", err)
		}

		localDigest = dgst
	} else {
		// We don't have a local digest, since we are working from a tag.
		// Compute the digest of the payload and return that.
		glog.V(1).Infof("provided manifest reference %q is not a digest: %v", ref, err)
		localDigest, err = digest.FromBytes(payload)
		if err != nil {
			// near impossible
			glog.Errorf("error calculating local digest during tag pull: %v", err)
			return "", nil, false, err
		}
	}

	// verify against the remote digest, if available
	if remoteDigest != "" {
		if err := verifyDigest(remoteDigest, payload); err != nil {
			return "", nil, false, fmt.Errorf("verifying remote digest: %v", err)
		}
	}

	var manifest registry.ManifestData
	if err := json.Unmarshal(payload, &manifest); err != nil {
		return "", nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
	}

	// validate the contents of the manifest
	if err := validateManifest(&manifest); err != nil {
		return "", nil, false, err
	}

	var verified bool
	verified, err = s.verifyTrustedKeys(manifest.Name, keys)
	if err != nil {
		return "", nil, false, fmt.Errorf("error verifying trusted keys: %v", err)
	}

	return localDigest, &manifest, verified, nil
}
// getLoopbackBackingFile returns the device and inode numbers of the file
// backing the given loopback device.
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
	if err != nil {
		glog.Errorf("Error get loopback backing file: %s", err)
		return 0, 0, ErrGetLoopbackBackingFile
	}
	return loopInfo.loDevice, loopInfo.loInode, nil
}
func (vd *VBoxDriver) InitNetwork(bIface, bIP string) error {
	var i = 0

	if bIP == "" {
		network.BridgeIP = network.DefaultBridgeIP
	} else {
		network.BridgeIP = bIP
	}

	bip, ipnet, err := net.ParseCIDR(network.BridgeIP)
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}

	gateway := bip.Mask(ipnet.Mask)
	inc(gateway, 2)

	if !ipnet.Contains(gateway) {
		// err is nil here, so build the error before logging it
		err = fmt.Errorf("get Gateway from BridgeIP %s failed", network.BridgeIP)
		glog.Errorf(err.Error())
		return err
	}
	prefixSize, _ := ipnet.Mask.Size()
	_, network.BridgeIPv4Net, err = net.ParseCIDR(gateway.String() + fmt.Sprintf("/%d", prefixSize))
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}
	network.BridgeIPv4Net.IP = gateway
	glog.Warningf(network.BridgeIPv4Net.String())
	/*
	 * Filter the IPs which can not be used for VMs
	 */
	bip = bip.Mask(ipnet.Mask)
	for inc(bip, 1); ipnet.Contains(bip) && i < 2; inc(bip, 1) {
		i++
		glog.V(3).Infof("Try %s", bip.String())
		_, err = network.IpAllocator.RequestIP(network.BridgeIPv4Net, bip)
		if err != nil {
			glog.Errorf(err.Error())
			return err
		}
	}

	return nil
}
// GetTotalUsedFds returns the number of file descriptors currently opened by
// this process, or -1 if they cannot be counted.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		glog.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
// GetBlockDeviceSize returns the size of the given block device in bytes.
func GetBlockDeviceSize(file *os.File) (uint64, error) {
	size, err := ioctlBlkGetSize64(file.Fd())
	if err != nil {
		glog.Errorf("Error getblockdevicesize: %s", err)
		return 0, ErrGetBlockSize
	}
	return uint64(size), nil
}
func (d *Driver) Setup() error {
	if d.daemon == nil {
		daemon, err := GetDaemon()
		if err != nil {
			return err
		}
		d.daemon = daemon
	}

	if vm, err := d.daemon.StartVm(d.pullVm, 1, 64, false, types.VM_KEEP_AFTER_SHUTDOWN); err != nil {
		glog.Errorf(err.Error())
		return err
	} else {
		d.daemon.AddVm(vm)
	}

	if err := virtualbox.RegisterDisk(d.pullVm, d.pullVm, d.BaseImage(), 4); err != nil {
		glog.Errorf(err.Error())
		return err
	}

	ids, err := loadIds(path.Join(d.RootPath(), "layers"))
	if err != nil {
		return err
	}

	for _, id := range ids {
		if d.disks[id] == true {
			continue
		}
		parentIds, err := getParentIds(d.RootPath(), id)
		if err != nil {
			glog.Warningf(err.Error())
			continue
		}
		for _, cid := range parentIds {
			if d.disks[cid] == true {
				continue
			}
			d.Exists(cid)
			d.disks[cid] = true
		}
		d.disks[id] = true
	}
	return nil
}
// Unmount runs "auplink flush" on the target to flush pending aufs changes,
// then unmounts it.
func Unmount(target string) error {
	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
		glog.Errorf("Couldn't run auplink before unmount: %s", err)
	}
	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}
	return nil
}
// AttachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard the error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		glog.V(1).Infof("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
	if err != nil {
		glog.Errorf("Error opening sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		loFlags:    LoFlagsAutoClear,
	}

	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		glog.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			glog.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and if it is absent then it sends the image id to the channel to be pushed.
func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *streamformatter.StreamFormatter, images chan imagePushData, imagesToPush chan string) {
	defer wg.Done()
	for image := range images {
		if err := r.LookupRemoteImage(image.id, image.endpoint); err != nil {
			glog.Errorf("Error in LookupRemoteImage: %s", err)
			imagesToPush <- image.id
			continue
		}
		out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
	}
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) {
	var (
		container *Container
		warnings  []string
		img       *image.Image
		imgID     string
		err       error
	)

	if config.Image != "" {
		img, err = daemon.repositories.LookupImage(config.Image)
		if err != nil {
			glog.Errorf(err.Error())
			return nil, nil, err
		}
		if err = img.CheckDepth(); err != nil {
			return nil, nil, err
		}
		imgID = img.ID
	}

	if err := daemon.mergeAndVerifyConfig(config, img); err != nil {
		return nil, nil, err
	}
	if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		warnings = append(warnings, "IPv4 forwarding is disabled.")
	}
	if hostConfig == nil {
		hostConfig = &runconfig.HostConfig{}
	}
	if container, err = daemon.newContainer(name, config, imgID); err != nil {
		return nil, nil, err
	}
	if err := daemon.Register(container); err != nil {
		return nil, nil, err
	}
	if err := daemon.createRootfs(container); err != nil {
		return nil, nil, err
	}
	if err := daemon.setHostConfig(container, hostConfig); err != nil {
		return nil, nil, err
	}
	if err := container.Mount(); err != nil {
		return nil, nil, err
	}
	defer container.Unmount()
	if err := container.ToDisk(); err != nil {
		return nil, nil, err
	}
	container.LogEvent("create")
	return container, warnings, nil
}
func MountVFSVolume(src, sharedDir string) (string, error) {
	var flags uintptr = utils.MS_BIND

	mountSharedDir := utils.RandStr(10, "alpha")
	targetDir := path.Join(sharedDir, mountSharedDir)
	glog.V(1).Infof("trying to bind dir %s to %s", src, targetDir)

	stat, err := os.Stat(src)
	if err != nil {
		glog.Error("Cannot stat volume Source ", err.Error())
		return "", err
	}

	if runtime.GOOS == "linux" {
		base := filepath.Dir(targetDir)
		if err := os.MkdirAll(base, 0755); err != nil && !os.IsExist(err) {
			glog.Errorf("error to create dir %s for volume %s", base, src)
			return "", err
		}

		if stat.IsDir() {
			if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) {
				glog.Errorf("error to create dir %s for volume %s", targetDir, src)
				return "", err
			}
		} else if f, err := os.Create(targetDir); err != nil && !os.IsExist(err) {
			glog.Errorf("error to create file %s for volume %s", targetDir, src)
			return "", err
		} else if err == nil {
			f.Close()
		}
	}

	if err := utils.Mount(src, targetDir, "none", flags, "--bind"); err != nil {
		glog.Errorf("bind dir %s failed: %s", src, err.Error())
		return "", err
	}
	return mountSharedDir, nil
}
// Release an interface for a selected IP
func Release(vmId, releasedIP string, index int, maps []pod.UserContainerPort, file *os.File) error {
	if file != nil {
		file.Close()
	}

	delete(IPAddressList, releasedIP)

	if err := ReleasePortMaps(vmId, index, releasedIP, maps); err != nil {
		glog.Errorf("fail to release port map %s", err)
		return err
	}
	return nil
}
// Release an interface for a selected IP
func (vc *VBoxContext) ReleaseNetwork(vmId, releasedIP string, maps []pod.UserContainerPort, file *os.File) error {
	if err := network.IpAllocator.ReleaseIP(network.BridgeIPv4Net, net.ParseIP(releasedIP)); err != nil {
		return err
	}

	if err := ReleasePortMaps(vmId, releasedIP, maps); err != nil {
		glog.Errorf("fail to release port map %s", err)
		return err
	}
	return nil
}