func (l *Layer0) Remove(id string) error {
	if !l.isLayer0(id) {
		return l.Driver.Remove(l.realID(id))
	}
	l.Lock()
	defer l.Unlock()
	var err error
	v, ok := l.volumes[id]
	if ok {
		// Use the value returned by AddInt32 so the decrement and the
		// zero check happen in one step.
		if atomic.AddInt32(&v.ref, -1) == 0 {
			// Save the upper dir and blow away the rest.
			upperDir := path.Join(path.Join(l.home, l.realID(id)), "upper")
			// Assign to the outer err (no :=) so failures propagate to the caller.
			err = os.Rename(upperDir, path.Join(v.path, "upper"))
			if err != nil {
				dlog.Warnf("Failed in rename(%v): %v", id, err)
			}
			l.Driver.Remove(l.realID(id))
			if err = l.volDriver.Unmount(v.volumeID, v.path); err != nil {
				dlog.Warnf("Failed to unmount %v at %v: %v", v.volumeID, v.path, err)
			}
			if l.volDriver.Type() == api.DriverType_DRIVER_TYPE_BLOCK {
				_ = l.volDriver.Detach(v.volumeID)
			}
			err = os.RemoveAll(v.path)
			delete(l.volumes, v.id)
		}
	} else {
		dlog.Warnf("Failed to find layer0 vol for id %v", id)
	}
	return err
}
func (d *driver) Create(
	locator *api.VolumeLocator,
	source *api.Source,
	spec *api.VolumeSpec) (string, error) {
	volumeID := uuid.New()
	volumeID = strings.TrimSuffix(volumeID, "\n")

	// Create a directory on the NFS server with this UUID.
	volPath := path.Join(nfsMountPath, volumeID)
	err := os.MkdirAll(volPath, 0744)
	if err != nil {
		dlog.Println(err)
		return "", err
	}
	if source != nil {
		if len(source.Seed) != 0 {
			seed, err := seed.New(source.Seed, spec.VolumeLabels)
			if err != nil {
				dlog.Warnf("Failed to initialize seed from %q: %v", source.Seed, err)
				return "", err
			}
			err = seed.Load(path.Join(volPath, config.DataDir))
			if err != nil {
				dlog.Warnf("Failed to seed from %q to %q: %v", source.Seed, nfsMountPath, err)
				return "", err
			}
		}
	}

	f, err := os.Create(path.Join(nfsMountPath, volumeID+nfsBlockFile))
	if err != nil {
		dlog.Println(err)
		return "", err
	}
	defer f.Close()
	if err := f.Truncate(int64(spec.Size)); err != nil {
		dlog.Println(err)
		return "", err
	}

	v := common.NewVolume(
		volumeID,
		api.FSType_FS_TYPE_NFS,
		locator,
		source,
		spec,
	)
	v.DevicePath = path.Join(nfsMountPath, volumeID+nfsBlockFile)

	if err := d.CreateVol(v); err != nil {
		return "", err
	}
	return v.Id, nil
}
// Initialize node and alert listeners that we are joining the cluster.
func (c *ClusterManager) joinCluster(db *Database, self *api.Node, exist bool) error {
	var err error

	// If I am already in the cluster map, don't add me again.
	if exist {
		goto found
	}

	// Alert all listeners that we are a new node joining an existing cluster.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		err = e.Value.(ClusterListener).Init(self, db)
		if err != nil {
			self.Status = api.Status_STATUS_ERROR
			dlog.Warnf("Failed to initialize %s: %v",
				e.Value.(ClusterListener).String(), err)
			c.cleanupInit(db, self)
			goto done
		}
	}

found:
	// Alert all listeners that we are joining the cluster.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		err = e.Value.(ClusterListener).Join(self, db)
		if err != nil {
			self.Status = api.Status_STATUS_ERROR
			dlog.Warnf("Failed to join %s: %v",
				e.Value.(ClusterListener).String(), err)
			if !exist {
				c.cleanupInit(db, self)
			}
			goto done
		}
	}

	for id, n := range db.NodeEntries {
		if id != c.config.NodeId {
			// If the IP matches ours, this is a stale entry.
			if n.MgmtIp == self.MgmtIp {
				dlog.Warnf("Detected node %s with the same IP %s in the database; will not connect to this node.",
					id, n.MgmtIp)
			} else {
				// Gossip with this node.
				dlog.Infof("Connecting to node %s with IP %s.", id, n.MgmtIp)
				c.gossip.AddNode(n.MgmtIp+":9002", types.NodeId(id))
			}
		}
	}

done:
	return err
}
// Unmount device at mountpoint or decrement refcnt. If the device has no
// mountpoints left after this operation, it is removed from the matrix.
// ErrEnoent is returned if the device or mountpoint for the device is not found.
func (m *Mounter) Unmount(device, path string) error {
	m.Lock()
	info, ok := m.mounts[device]
	if !ok {
		m.Unlock()
		return ErrEnoent
	}
	m.Unlock()
	info.Lock()
	defer info.Unlock()
	for i, p := range info.Mountpoint {
		if p.Path != path {
			continue
		}
		p.ref--
		// Unmount only if refcnt is 0.
		if p.ref == 0 {
			err := syscall.Unmount(path, 0)
			if err != nil {
				return err
			}
			// Re-acquire the mounter lock before mutating the shared path map,
			// since Mount writes to it under the same lock.
			m.Lock()
			if _, pathExists := m.paths[path]; pathExists {
				delete(m.paths, path)
			} else {
				dlog.Warnf("Path %q for device %q does not exist in pathMap", path, device)
			}
			m.Unlock()
			// Blow away this mountpoint.
			info.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]
			info.Mountpoint = info.Mountpoint[:len(info.Mountpoint)-1]
			m.maybeRemoveDevice(device)
		}
		return nil
	}
	return ErrEnoent
}
func writeDatabase(db *Database) error {
	kvdb := kvdb.Instance()
	b, err := json.Marshal(db)
	if err != nil {
		dlog.Warnf("Fatal, could not marshal cluster database to JSON: %v", err)
		return err
	}
	if _, err := kvdb.Put(ClusterDBKey, b, 0); err != nil {
		dlog.Warnf("Fatal, could not write cluster database to kvdb: %v", err)
		return err
	}
	dlog.Infoln("Cluster database updated.")
	return nil
}
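// Shutdown (below) reads the cluster database back via readDatabase, the
// counterpart to writeDatabase. A minimal sketch of that counterpart, shown
// for illustration only; it assumes kvdb's Get returns a KVPair whose Value
// holds the marshaled JSON, and is not the repository's actual implementation.
func readDatabaseSketch() (Database, error) {
	db := Database{}
	kvdb := kvdb.Instance()
	kvp, err := kvdb.Get(ClusterDBKey)
	if err != nil {
		dlog.Warnf("Could not read cluster database from kvdb: %v", err)
		return db, err
	}
	if err := json.Unmarshal(kvp.Value, &db); err != nil {
		dlog.Warnf("Could not unmarshal cluster database: %v", err)
		return db, err
	}
	return db, nil
}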
// Mount new mountpoint for specified device.
func (m *Mounter) Mount(minor int, device, path, fs string, flags uintptr, data string) error {
	m.Lock()
	dev, ok := m.paths[path]
	if ok && dev != device {
		dlog.Warnf("cannot mount %q: device %q is already mounted at %q", device, dev, path)
		m.Unlock()
		return ErrExist
	}
	info, ok := m.mounts[device]
	if !ok {
		info = &Info{
			Device:     device,
			Mountpoint: make([]*PathInfo, 0),
			Minor:      minor,
			Fs:         fs,
		}
	}
	m.mounts[device] = info
	m.Unlock()
	info.Lock()
	defer info.Unlock()

	// Validate input params.
	if fs != info.Fs {
		dlog.Warnf("%s: existing mountpoint has fs %q; cannot change to %q", device, info.Fs, fs)
		return ErrEinval
	}

	// Try to find the mountpoint. If it already exists, increment the refcnt.
	for _, p := range info.Mountpoint {
		if p.Path == path {
			p.ref++
			return nil
		}
	}
	// The device is not mounted at path; mount it and add to its mountpoints.
	err := syscall.Mount(device, path, fs, flags, data)
	if err != nil {
		return err
	}
	info.Mountpoint = append(info.Mountpoint, &PathInfo{Path: path, ref: 1})
	// Take the mounter lock to guard the shared path map.
	m.Lock()
	m.paths[path] = device
	m.Unlock()
	return nil
}
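// Mount and Unmount together implement per-path reference counting: mounting
// the same device at the same path again only bumps the refcnt, and only the
// final Unmount performs the actual syscall. An illustrative sketch follows;
// the device and paths are made up, and it assumes mount.New (as used by the
// NFS driver below) returns a manager exposing the Mount/Unmount methods above.
func exampleRefcountedMount() error {
	m, err := mount.New(mount.NFSMount, "")
	if err != nil {
		return err
	}
	// First Mount performs syscall.Mount; the second only increments refcnt.
	if err := m.Mount(0, "server:/export", "/mnt/vol1", "nfs", 0, "nolock"); err != nil {
		return err
	}
	if err := m.Mount(0, "server:/export", "/mnt/vol1", "nfs", 0, "nolock"); err != nil {
		return err
	}
	// refcnt 2 -> 1: no syscall yet.
	if err := m.Unmount("server:/export", "/mnt/vol1"); err != nil {
		return err
	}
	// refcnt 1 -> 0: syscall.Unmount runs and the mountpoint is removed.
	return m.Unmount("server:/export", "/mnt/vol1")
}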
// Shutdown can be called when THIS node is gracefully shutting down.
func (c *ClusterManager) Shutdown() error {
	db, err := readDatabase()
	if err != nil {
		dlog.Warnf("Could not read cluster database (%v).", err)
		return err
	}
	// Alert all listeners that we are shutting this node down.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		dlog.Infof("Shutting down %s", e.Value.(ClusterListener).String())
		if err := e.Value.(ClusterListener).Halt(&c.selfNode, &db); err != nil {
			dlog.Warnf("Failed to shut down %s: %v", e.Value.(ClusterListener).String(), err)
		}
	}
	return nil
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The archive.Reader must be an uncompressed stream.
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
	dir := path.Join(virtPath, id)
	dlog.Infof("Applying diff at path %s", dir)

	if err := chrootarchive.UntarUncompressed(diff, dir, nil); err != nil {
		dlog.Warnf("Error while applying diff to %s: %v", id, err)
		return 0, err
	}

	// Show a warning for invalid whiteouts.
	files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir))
	if err == nil && len(files) > 0 {
		dlog.Warnf("Archive contains aufs hardlink references that are not supported.")
	}
	return d.DiffSize(id, parent)
}
// Remove attempts to remove the filesystem layer with this id.
func (d *Driver) Remove(id string) error {
	dlog.Infof("Removing layer %s", id)
	cID := C.CString(id)
	ret, err := C.remove_layer(cID)
	if int(ret) != 0 {
		dlog.Warnf("Error while removing layer %s: %v", id, err)
		return err
	}
	return nil
}
func (c *ClusterManager) cleanupInit(db *Database, self *api.Node) error {
	var resErr error
	var err error

	dlog.Infof("Cleanup Init services")
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		dlog.Warnf("Cleanup Init for service %s.", e.Value.(ClusterListener).String())
		err = e.Value.(ClusterListener).CleanupInit(self, db)
		if err != nil {
			dlog.Warnf("Failed to Cleanup Init %s: %v",
				e.Value.(ClusterListener).String(), err)
			resErr = err
		}
	}
	return resErr
}
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
func (d *Driver) Get(id, mountLabel string) (string, error) {
	cID := C.CString(id)
	ret, err := C.alloc_chainfs(cID)
	if int(ret) != 0 {
		dlog.Warnf("Error while creating a chain FS for %s: %v", id, err)
		return "", err
	}
	dlog.Debugf("Created a chain FS for %s", id)
	chainPath := path.Join(virtPath, id)
	return chainPath, nil
}
func (d *Driver) volumeState(ec2VolState *string) api.VolumeState {
	if ec2VolState == nil {
		return api.VolumeState_VOLUME_STATE_DETACHED
	}
	switch *ec2VolState {
	case ec2.VolumeAttachmentStateAttached:
		return api.VolumeState_VOLUME_STATE_ATTACHED
	case ec2.VolumeAttachmentStateDetached:
		return api.VolumeState_VOLUME_STATE_DETACHED
	case ec2.VolumeAttachmentStateAttaching, ec2.VolumeAttachmentStateDetaching:
		return api.VolumeState_VOLUME_STATE_PENDING
	default:
		dlog.Warnf("Failed to translate EC2 volume status %v", *ec2VolState)
	}
	return api.VolumeState_VOLUME_STATE_ERROR
}
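// volumeState keeps no receiver state, so its translation table is easy to
// exercise in isolation. An illustrative sketch (not a real test in this
// repository); "busy" stands in for any unrecognized EC2 attachment state.
func exampleVolumeState(d *Driver) {
	attached := ec2.VolumeAttachmentStateAttached
	unknown := "busy"
	dlog.Infof("%v", d.volumeState(nil))       // VOLUME_STATE_DETACHED
	dlog.Infof("%v", d.volumeState(&attached)) // VOLUME_STATE_ATTACHED
	dlog.Infof("%v", d.volumeState(&unknown))  // VOLUME_STATE_ERROR, after a warning
}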
// Create creates a new, empty filesystem layer with the
// specified id, parent, and mountLabel. Parent and mountLabel may be "".
func (d *Driver) Create(id string, parent string, ml string) error {
	if parent != "" {
		dlog.Infof("Creating layer %s with parent %s", id, parent)
	} else {
		dlog.Infof("Creating parent layer %s", id)
	}
	cID := C.CString(id)
	cParent := C.CString(parent)

	ret, err := C.create_layer(cID, cParent)
	if int(ret) != 0 {
		dlog.Warnf("Error while creating layer %s: %v", id, err)
		return err
	}
	return nil
}
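// Create, Remove, and Get above allocate C strings with C.CString and never
// release them, leaking a small C allocation per call. A sketch of the usual
// cgo cleanup pattern, assuming `#include <stdlib.h>` in the cgo preamble and
// an "unsafe" import; this is illustrative, not the repository's current code.
func createLayerSketch(id, parent string) error {
	cID := C.CString(id)
	defer C.free(unsafe.Pointer(cID))
	cParent := C.CString(parent)
	defer C.free(unsafe.Pointer(cParent))
	if ret, err := C.create_layer(cID, cParent); int(ret) != 0 {
		return err
	}
	return nil
}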
// Create an AWS volume from the spec.
func (d *Driver) Create(
	locator *api.VolumeLocator,
	source *api.Source,
	spec *api.VolumeSpec,
) (string, error) {
	var snapID *string
	// Spec size is in bytes; translate to GiB.
	sz := int64(spec.Size / (1024 * 1024 * 1024))
	iops, volType := mapCos(spec.Cos)
	if source != nil && string(source.Parent) != "" {
		id := string(source.Parent)
		snapID = &id
	}
	dryRun := false
	encrypted := false
	req := &ec2.CreateVolumeInput{
		AvailabilityZone: &d.md.zone,
		DryRun:           &dryRun,
		Encrypted:        &encrypted,
		Size:             &sz,
		Iops:             iops,
		VolumeType:       volType,
		SnapshotId:       snapID,
	}
	vol, err := d.ec2.CreateVolume(req)
	if err != nil {
		dlog.Warnf("Failed in CreateVolume request: %v", err)
		return "", err
	}
	volume := common.NewVolume(
		*vol.VolumeId,
		api.FSType_FS_TYPE_NONE,
		locator,
		source,
		spec,
	)
	err = d.UpdateVol(volume)
	if err != nil {
		return "", err
	}
	err = d.waitStatus(volume.Id, ec2.VolumeStateAvailable)
	return volume.Id, err
}
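// A usage sketch for the EC2 Create path. spec.Size is in bytes and Create
// converts it to GiB, so this requests a 10 GiB volume; the locator name is
// made up, mapCos decides IOPS and volume type from spec.Cos, and a nil
// source means no snapshot parent.
func exampleCreateEbsVolume(d *Driver, spec *api.VolumeSpec) (string, error) {
	spec.Size = 10 * 1024 * 1024 * 1024 // 10 GiB in bytes
	locator := &api.VolumeLocator{Name: "pgdata"}
	return d.Create(locator, nil, spec)
}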
func (d *Driver) Format(volumeID string) error {
	v, err := d.GetVol(volumeID)
	if err != nil {
		return fmt.Errorf("Failed to locate volume %q", volumeID)
	}

	// XXX: determine mount state
	devicePath, err := d.devicePath(volumeID)
	if err != nil {
		return err
	}
	cmd := "/sbin/mkfs." + string(v.Spec.Format)
	o, err := exec.Command(cmd, devicePath).Output()
	if err != nil {
		dlog.Warnf("Failed to run command %v %v: %s", cmd, devicePath, o)
		return err
	}
	v.Format = v.Spec.Format
	err = d.UpdateVol(v)

	return err
}
func Init(params map[string]string) (volume.VolumeDriver, error) {
	host, ok := params["url"]
	if !ok {
		return nil, ErrApiUrlRequired
	}
	token, ok := params["token"]
	if !ok {
		return nil, ErrApiAuthTokenRequired
	}

	// Create a coprhd api client instance.
	client := coprhd.NewClient(host, token)

	d := &driver{
		DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()),
		client:            client,
	}

	if projectName, ok := params["project"]; ok {
		project, err := client.Project().Name(projectName).Query()
		if err != nil {
			return nil, err
		}
		d.project = project
	} else {
		dlog.Warnln("Default coprhd 'project' not set")
	}
	if varrayName, ok := params["varray"]; ok {
		varray, err := client.VArray().Name(varrayName).Query()
		if err != nil {
			return nil, err
		}
		d.varray = varray
	} else {
		dlog.Warnln("Default coprhd 'varray' not set")
	}
	if vpoolName, ok := params["vpool"]; ok {
		vpool, err := client.VPool().Name(vpoolName).Query()
		if err != nil {
			return nil, err
		}
		d.vpool = vpool
	} else {
		dlog.Warnln("Default coprhd 'vpool' not set")
	}
	if port, ok := params["port"]; ok {
		initiator, err := client.Initiator().Port(port).Query()
		if err != nil {
			return nil, err
		}
		d.initiator = initiator
	} else {
		return nil, ErrPortRequired
	}

	return d, nil
}
func start(c *cli.Context) {
	if !osdcli.DaemonMode(c) {
		cli.ShowAppHelp(c)
		return
	}

	datastores := []string{mem.Name, etcd.Name, consul.Name}

	// We are in daemon mode.
	file := c.String("file")
	if file == "" {
		dlog.Warnln("OSD configuration file not specified. Visit openstorage.org for an example.")
		return
	}
	cfg, err := config.Parse(file)
	if err != nil {
		dlog.Errorln(err)
		return
	}
	kvdbURL := c.String("kvdb")
	u, err := url.Parse(kvdbURL)
	if err != nil {
		dlog.Warnf("Failed to parse kvdb URL %q: %v", kvdbURL, err)
		return
	}
	scheme := u.Scheme
	u.Scheme = "http"

	kv, err := kvdb.New(scheme, "openstorage", []string{u.String()}, nil)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v (%v)", scheme, err)
		dlog.Warnf("Supported datastores: %v", datastores)
		return
	}
	err = kvdb.SetInstance(kv)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v", err)
		return
	}

	// Start the cluster state machine, if enabled.
	clusterInit := false
	if cfg.Osd.ClusterConfig.NodeId != "" && cfg.Osd.ClusterConfig.ClusterId != "" {
		dlog.Infof("OSD enabling cluster mode.")
		if err := cluster.Init(cfg.Osd.ClusterConfig); err != nil {
			dlog.Errorf("Unable to init cluster server: %v", err)
			return
		}
		clusterInit = true
		if err := server.StartClusterAPI(config.ClusterAPIBase); err != nil {
			dlog.Warnf("Unable to start cluster API server: %v", err)
			return
		}
	}

	// Start the volume drivers.
	for d, v := range cfg.Osd.Drivers {
		dlog.Infof("Starting volume driver: %v", d)
		if _, err := volume.New(d, v); err != nil {
			dlog.Warnf("Unable to start volume driver: %v, %v", d, err)
			return
		}
		if err := server.StartPluginAPI(d, config.DriverAPIBase, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start volume plugin: %v", err)
			return
		}
	}
	if err := server.StartFlexVolumeAPI(config.FlexVolumePort); err != nil {
		dlog.Warnf("Unable to start flexvolume API: %v", err)
		return
	}

	// Start the graph drivers.
	for d := range cfg.Osd.GraphDrivers {
		dlog.Infof("Starting graph driver: %v", d)
		if err := server.StartGraphAPI(d, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start graph plugin: %v", err)
			return
		}
	}

	if clusterInit {
		cm, err := cluster.Inst()
		if err != nil {
			dlog.Warnf("Unable to find cluster instance: %v", err)
			return
		}
		if err := cm.Start(); err != nil {
			dlog.Warnf("Unable to start cluster manager: %v", err)
			return
		}
	}

	// Daemon does not exit.
	select {}
}
func (l *Layer0) create(id, parent string) (string, *Layer0Vol, error) {
	l.Lock()
	defer l.Unlock()

	// If this is the parent of the Layer0, add an entry for it.
	baseID, l0 := l.isLayer0Parent(id)
	if l0 {
		l.volumes[baseID] = &Layer0Vol{id: baseID, parent: parent}
		return id, nil, nil
	}

	// Don't do anything if this is not layer 0.
	if !l.isLayer0(id) {
		return id, nil, nil
	}

	vol, ok := l.volumes[id]
	if !ok {
		dlog.Warnf("Failed to find layer0 volume for id %v", id)
		return id, nil, nil
	}

	// Query the volume driver for the Layer 0 volume.
	vols, err := l.volDriver.Enumerate(&api.VolumeLocator{Name: vol.parent}, nil)
	// If we don't find a volume configured for this image,
	// then don't track layer0.
	if err != nil || vols == nil {
		dlog.Infof("Failed to find configured volume for id %v", vol.parent)
		delete(l.volumes, id)
		return id, nil, nil
	}

	// Find a volume that is available.
	index := -1
	for i, v := range vols {
		if len(v.AttachPath) == 0 {
			index = i
			break
		}
	}
	if index == -1 {
		dlog.Infof("Failed to find free volume for id %v", vol.parent)
		delete(l.volumes, id)
		return id, nil, nil
	}

	mountPath := path.Join(l.home, l.loID(id))
	if err := os.MkdirAll(mountPath, 0755); err != nil {
		dlog.Errorf("Failed to create mount path %v: %v", mountPath, err)
		delete(l.volumes, id)
		return id, nil, nil
	}

	// If this is a block driver, first attach the volume.
	if l.volDriver.Type() == api.DriverType_DRIVER_TYPE_BLOCK {
		_, err := l.volDriver.Attach(vols[index].Id)
		if err != nil {
			dlog.Errorf("Failed to attach volume %v", vols[index].Id)
			delete(l.volumes, id)
			return id, nil, nil
		}
	}
	err = l.volDriver.Mount(vols[index].Id, mountPath)
	if err != nil {
		dlog.Errorf("Failed to mount volume %v at path %v", vols[index].Id, mountPath)
		delete(l.volumes, id)
		return id, nil, nil
	}
	vol.path = mountPath
	vol.volumeID = vols[index].Id
	vol.ref = 1

	return l.realID(id), vol, nil
}
func Init(params volume.DriverParams) (volume.VolumeDriver, error) {
	path, ok := params["path"]
	if !ok {
		return nil, errors.New("No NFS path provided")
	}
	server, ok := params["server"]
	if !ok {
		dlog.Printf("No NFS server provided, will attempt to bind mount %s", path)
	} else {
		dlog.Printf("NFS driver initializing with %s:%s", server, path)
	}
	// Create a mount manager for this NFS server. A blank server is OK.
	mounter, err := mount.New(mount.NFSMount, server)
	if err != nil {
		dlog.Warnf("Failed to create mount manager for server: %v (%v)", server, err)
		return nil, err
	}
	inst := &driver{
		IoNotSupported:    &volume.IoNotSupported{},
		DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()),
		nfsServer:         server,
		nfsPath:           path,
		mounter:           mounter,
	}
	if err := os.MkdirAll(nfsMountPath, 0744); err != nil {
		return nil, err
	}
	src := inst.nfsPath
	if server != "" {
		src = ":" + inst.nfsPath
	}

	// If src is already mounted at dest, leave it be.
	mountExists, _ := mounter.Exists(src, nfsMountPath)
	if !mountExists {
		// Mount the nfs server locally on a unique path.
		syscall.Unmount(nfsMountPath, 0)
		if server != "" {
			err = syscall.Mount(src, nfsMountPath, "nfs", 0, "nolock,addr="+inst.nfsServer)
		} else {
			err = syscall.Mount(src, nfsMountPath, "", syscall.MS_BIND, "")
		}
		if err != nil {
			dlog.Printf("Unable to mount %s:%s at %s (%+v)", inst.nfsServer, inst.nfsPath, nfsMountPath, err)
			return nil, err
		}
	}

	volumeInfo, err := inst.DefaultEnumerator.Enumerate(&api.VolumeLocator{}, nil)
	if err == nil {
		for _, info := range volumeInfo {
			if info.Status == api.VolumeStatus_VOLUME_STATUS_NONE {
				info.Status = api.VolumeStatus_VOLUME_STATUS_UP
				inst.UpdateVol(info)
			}
		}
	} else {
		dlog.Println("Could not enumerate volumes: ", err)
	}

	dlog.Println("NFS initialized and driver mounted at: ", nfsMountPath)
	return inst, nil
}
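// A sketch of wiring the NFS driver up with the DriverParams keys read above;
// the server address and export path are made up, and "server" may be omitted
// to bind mount a local path instead.
func exampleNfsInit() (volume.VolumeDriver, error) {
	return Init(volume.DriverParams{
		"server": "192.168.1.10",
		"path":   "/exports/osd",
	})
}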
func (d *driver) Create(locator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (string, error) {
	volumeID := uuid.New()
	volumeID = strings.TrimSuffix(volumeID, "\n")

	if spec.Size == 0 {
		return "", fmt.Errorf("buse: volume size cannot be zero")
	}
	if spec.Format == api.FSType_FS_TYPE_NONE {
		return "", fmt.Errorf("buse: missing volume format")
	}

	// Create a file on the local buse path with this UUID.
	buseFile := path.Join(BuseMountPath, volumeID)
	f, err := os.Create(buseFile)
	if err != nil {
		dlog.Println(err)
		return "", err
	}
	if err := f.Truncate(int64(spec.Size)); err != nil {
		dlog.Println(err)
		return "", err
	}

	bd := &buseDev{
		file: buseFile,
		f:    f,
	}
	nbd := Create(bd, int64(spec.Size))
	bd.nbd = nbd

	dlog.Infof("Connecting to NBD...")
	dev, err := bd.nbd.Connect()
	if err != nil {
		dlog.Println(err)
		return "", err
	}

	dlog.Infof("Formatting %s with %v", dev, spec.Format)
	cmd := "/sbin/mkfs." + spec.Format.SimpleString()
	o, err := exec.Command(cmd, dev).Output()
	if err != nil {
		dlog.Warnf("Failed to run command %v %v: %s", cmd, dev, o)
		return "", err
	}

	dlog.Infof("BUSE mapped NBD device %s (size=%v) to block file %s", dev, spec.Size, buseFile)

	v := common.NewVolume(
		volumeID,
		spec.Format,
		locator,
		source,
		spec,
	)
	v.DevicePath = dev

	d.buseDevices[dev] = bd

	err = d.CreateVol(v)
	if err != nil {
		return "", err
	}
	return v.Id, nil
}
func notFound(w http.ResponseWriter, r *http.Request) {
	dlog.Warnf("Not found: %+v", r.URL)
	http.NotFound(w, r)
}