Example 1
func (d *driver) Delete(volumeID string) error {
	v, err := d.GetVol(volumeID)
	if err != nil {
		dlog.Println(err)
		return err
	}

	bd, ok := d.buseDevices[v.DevicePath]
	if !ok {
		err = fmt.Errorf("Cannot locate a BUSE device for %s", v.DevicePath)
		dlog.Println(err)
		return err
	}

	// Clean up buse block file and close the NBD connection.
	os.Remove(bd.file)
	bd.f.Close()
	bd.nbd.Disconnect()

	dlog.Infof("BUSE deleted volume %v at NBD device %s", volumeID, v.DevicePath)

	if err := d.DeleteVol(volumeID); err != nil {
		dlog.Println(err)
		return err
	}

	return nil
}
Example 2
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	var volumeDriver string
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		switch key {
		case Layer0VolumeDriver:
			volumeDriver = val
		default:
			return nil, fmt.Errorf("Unknown option %s", key)
		}
	}
	dlog.Infof("Layer0 volume driver: %v", volumeDriver)
	volDriver, err := volume.Get(volumeDriver)
	if err != nil {
		return nil, err
	}
	ov, err := overlay.Init(home, options, uidMaps, gidMaps)
	if err != nil {
		volDriver.Shutdown()
		return nil, err
	}
	d := &Layer0{
		Driver:    ov,
		home:      home,
		volumes:   make(map[string]*Layer0Vol),
		volDriver: volDriver,
	}

	return d, nil
}
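
A caller-side sketch (not from the project): the options slice above is parsed as "key=value" pairs by parsers.ParseKeyValueOpt, so the Layer0 volume driver is selected by passing the Layer0VolumeDriver key. The package name and the volume driver name below are assumptions.

// Hypothetical caller, assuming this Init lives in a package named "layer0"
// and that a volume driver named "nfs" has already been registered.
opts := []string{layer0.Layer0VolumeDriver + "=nfs"}
gd, err := layer0.Init("/var/lib/osd/layer0", opts, nil, nil)
if err != nil {
	dlog.Errorf("Layer0 graph driver init failed: %v", err)
	return
}
defer gd.Cleanup()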
Example 3
func (c *ClusterManager) initNode(db *Database) (*api.Node, bool) {
	c.nodeCache[c.selfNode.Id] = *c.getCurrentState()

	_, exists := db.NodeEntries[c.selfNode.Id]

	// Add us into the database.
	db.NodeEntries[c.config.NodeId] = NodeEntry{Id: c.selfNode.Id,
		MgmtIp: c.selfNode.MgmtIp, DataIp: c.selfNode.DataIp,
		GenNumber: c.selfNode.GenNumber}

	dlog.Infof("Node %s joining cluster...", c.config.NodeId)
	dlog.Infof("Cluster ID: %s", c.config.ClusterId)
	dlog.Infof("Node Mgmt IP: %s", c.selfNode.MgmtIp)
	dlog.Infof("Node Data IP: %s", c.selfNode.DataIp)

	return &c.selfNode, exists
}
Example 4
// Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "".
func (d *Driver) Create(id string, parent string, ml string) error {
	if parent != "" {
		dlog.Infof("Creating layer %s with parent %s", id, parent)
	} else {
		dlog.Infof("Creating parent layer %s", id)
	}

	cID := C.CString(id)
	cParent := C.CString(parent)

	ret, err := C.create_layer(cID, cParent)
	if int(ret) != 0 {
		dlog.Warnf("Error while creating layer %s", id)
		return err
	}

	return nil
}
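
Taken together with ApplyDiff and Remove (Examples 11 and 8 below), a rough per-layer lifecycle looks like the sketch that follows; the layer id and the diff reader are placeholders.

// Hedged sketch: create a base layer, populate it from an uncompressed
// tar stream, and drop it again if the diff cannot be applied.
if err := d.Create("layer-1", "", ""); err != nil {
	return err
}
size, err := d.ApplyDiff("layer-1", "", diffReader) // diffReader is an archive.Reader
if err != nil {
	d.Remove("layer-1")
	return err
}
dlog.Infof("Layer layer-1 populated with %d bytes", size)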
Example 5
func (nbd *NBD) connect() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// NBD_CONNECT does not return until disconnect.
	ioctl(nbd.deviceFile.Fd(), NBD_CONNECT, 0)

	dlog.Infof("Closing device file %s", nbd.devicePath)
}
Example 6
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	dlog.Infof("Initializing libchainfs at home: %s and storage: %v...", home, virtPath)

	cVirtPath := C.CString(virtPath)
	go C.start_chainfs(1, cVirtPath)

	d := &Driver{}

	return d, nil
}
Example 7
// Initialize node and alert listeners that we are joining the cluster.
func (c *ClusterManager) joinCluster(db *Database, self *api.Node, exist bool) error {
	var err error

	// If I am already in the cluster map, don't add me again.
	if exist {
		goto found
	}

	// Alert all listeners that we are a new node joining an existing cluster.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		err = e.Value.(ClusterListener).Init(self, db)
		if err != nil {
			self.Status = api.Status_STATUS_ERROR
			dlog.Warnf("Failed to initialize Init %s: %v",
				e.Value.(ClusterListener).String(), err)
			c.cleanupInit(db, self)
			goto done
		}
	}

found:
	// Alert all listeners that we are joining the cluster.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		err = e.Value.(ClusterListener).Join(self, db)
		if err != nil {
			self.Status = api.Status_STATUS_ERROR
			dlog.Warnf("Failed to initialize Join %s: %v",
				e.Value.(ClusterListener).String(), err)

			if !exist {
				c.cleanupInit(db, self)
			}
			goto done
		}
	}

	for id, n := range db.NodeEntries {
		if id != c.config.NodeId {
			// Check to see if the IP is the same.  If it is, then we have a stale entry.
			if n.MgmtIp == self.MgmtIp {
				dlog.Warnf("Warning, Detected node %s with the same IP %s in the database.  Will not connect to this node.",
					id, n.MgmtIp)
			} else {
				// Gossip with this node.
				dlog.Infof("Connecting to node %s with IP %s.", id, n.MgmtIp)
				c.gossip.AddNode(n.MgmtIp+":9002", types.NodeId(id))
			}
		}
	}

done:
	return err
}
Example 8
// Remove attempts to remove the filesystem layer with this id.
func (d *Driver) Remove(id string) error {
	dlog.Infof("Removing layer %s", id)

	cID := C.CString(id)
	ret, err := C.remove_layer(cID)
	if int(ret) != 0 {
		dlog.Warnf("Error while removing layer %s", id)
		return err
	}

	return nil
}
Example 9
// Init aws volume driver metadata.
func Init(params volume.DriverParams) (volume.VolumeDriver, error) {
	zone, err := metadata("placement/availability-zone")
	if err != nil {
		return nil, err
	}
	instance, err := metadata("instance-id")
	if err != nil {
		return nil, err
	}
	dlog.Infof("AWS instance %v zone %v", instance, zone)
	accessKey, ok := params["AWS_ACCESS_KEY_ID"]
	if !ok {
		if accessKey = os.Getenv("AWS_ACCESS_KEY_ID"); accessKey == "" {
			return nil, fmt.Errorf("AWS_ACCESS_KEY_ID environment variable must be set")
		}
	}
	secretKey, ok := params["AWS_SECRET_ACCESS_KEY"]
	if !ok {
		if secretKey = os.Getenv("AWS_SECRET_ACCESS_KEY"); secretKey == "" {
			return nil, fmt.Errorf("AWS_SECRET_ACCESS_KEY environment variable must be set")
		}
	}
	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	region := zone[:len(zone)-1]
	d := &Driver{
		ec2: ec2.New(
			session.New(
				&aws.Config{
					Region:      &region,
					Credentials: creds,
				},
			),
		),
		md: &Metadata{
			zone:     zone,
			instance: instance,
		},
		IoNotSupported:    &volume.IoNotSupported{},
		DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()),
	}
	devPrefix, letters, err := d.freeDevices()
	if err != nil {
		return nil, err
	}
	d.SingleLetter, err = device.NewSingleLetter(devPrefix, letters)
	if err != nil {
		return nil, err
	}
	return d, nil
}
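
A hedged caller sketch (package name assumed, and DriverParams treated as a plain string map): the credentials this Init looks up can be supplied directly in the params map instead of the environment.

// Placeholder credentials; alternatively export AWS_ACCESS_KEY_ID and
// AWS_SECRET_ACCESS_KEY and pass an empty params map.
params := volume.DriverParams{
	"AWS_ACCESS_KEY_ID":     "AKIA...",
	"AWS_SECRET_ACCESS_KEY": "...",
}
d, err := aws.Init(params)
if err != nil {
	dlog.Errorf("AWS volume driver init failed: %v", err)
	return
}
_ = d // the returned volume.VolumeDriver is ready for Create/Attach/Mount calls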
Example 10
// Shutdown can be called when THIS node is gracefully shutting down.
func (c *ClusterManager) Shutdown() error {
	db, err := readDatabase()
	if err != nil {
		dlog.Warnf("Could not read cluster database (%v).", err)
		return err
	}

	// Alert all listeners that we are shutting this node down.
	for e := c.listeners.Front(); e != nil; e = e.Next() {
		dlog.Infof("Shutting down %s", e.Value.(ClusterListener).String())
		if err := e.Value.(ClusterListener).Halt(&c.selfNode, &db); err != nil {
			dlog.Warnf("Failed to shutdown %s",
				e.Value.(ClusterListener).String())
		}
	}
	return nil
}
Example 11
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The archive.Reader must be an uncompressed stream.
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
	dir := path.Join(virtPath, id)
	// dir := path.Join("/tmp/chainfs/", id)

	dlog.Infof("Applying diff at path %s\n", dir)

	if err := chrootarchive.UntarUncompressed(diff, dir, nil); err != nil {
		dlog.Warnf("Error while applying diff to %s: %v", id, err)
		return 0, err
	}

	// show invalid whiteouts warning.
	files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir))
	if err == nil && len(files) > 0 {
		dlog.Warnf("Archive contains aufs hardlink references that are not supported.")
	}

	return d.DiffSize(id, parent)
}
Example 12
func (d *driver) Mount(volumeID string, mountpath string) error {
	v, err := d.GetVol(volumeID)
	if err != nil {
		return fmt.Errorf("Failed to locate volume %q", volumeID)
	}
	if err := syscall.Mount(v.DevicePath, mountpath, v.Spec.Format.SimpleString(), 0, ""); err != nil {
		// TODO(pedge): same string for log message and error?
		dlog.Errorf("Mounting %s on %s failed because of %v", v.DevicePath, mountpath, err)
		return fmt.Errorf("Failed to mount %v at %v: %v", v.DevicePath, mountpath, err)
	}

	dlog.Infof("BUSE mounted NBD device %s at %s", v.DevicePath, mountpath)

	v.AttachPath = mountpath
	// TODO(pedge): why ignoring the error?
	err = d.UpdateVol(v)

	return nil
}
Example 13
// Connect the network block device.
func (nbd *NBD) Connect() (dev string, err error) {
	pair, err := syscall.Socketpair(syscall.SOCK_STREAM, syscall.AF_UNIX, 0)
	if err != nil {
		return "", err
	}

	// Find free NBD device.
	for i := 0; ; i++ {
		dev = fmt.Sprintf("/dev/nbd%d", i)
		if _, err = os.Stat(dev); os.IsNotExist(err) {
			dev = ""
			return "", errors.New("No more NBD devices left.")
		}
		if _, err = os.Stat(fmt.Sprintf("/sys/block/nbd%d/pid", i)); !os.IsNotExist(err) {
			continue // Busy.
		}

		dlog.Infof("Attempting to open device %v", dev)
		if nbd.deviceFile, err = os.Open(dev); err == nil {
			// Possible candidate.
			ioctl(nbd.deviceFile.Fd(), BLKROSET, 0)
			if err := ioctl(nbd.deviceFile.Fd(), NBD_SET_SOCK, uintptr(pair[0])); err == nil {
				nbd.socket = pair[1]
				break // Success.
			}
		}
	}

	// Setup.
	if err = nbd.Size(nbd.size); err != nil {
		// Already set by nbd.Size().
	} else if err = ioctl(nbd.deviceFile.Fd(), NBD_SET_FLAGS, 1); err != nil {
		err = &os.PathError{nbd.deviceFile.Name(), "ioctl NBD_SET_FLAGS", err}
	} else {
		go nbd.connect()
		go nbd.handle()
	}

	nbd.devicePath = dev

	return dev, err
}
Example 14
func (c *ClusterManager) cleanupInit(db *Database, self *api.Node) error {
	var resErr error
	var err error

	dlog.Infof("Cleanup Init services")

	for e := c.listeners.Front(); e != nil; e = e.Next() {
		dlog.Warnf("Cleanup Init for service %s.",
			e.Value.(ClusterListener).String())

		err = e.Value.(ClusterListener).CleanupInit(self, db)
		if err != nil {
			dlog.Warnf("Failed to Cleanup Init %s: %v",
				e.Value.(ClusterListener).String(), err)
			resErr = err
		}

	}

	return resErr
}
Example 15
func start(c *cli.Context) {

	if !osdcli.DaemonMode(c) {
		cli.ShowAppHelp(c)
		return
	}

	datastores := []string{mem.Name, etcd.Name, consul.Name}

	// We are in daemon mode.
	file := c.String("file")
	if file == "" {
		dlog.Warnln("OSD configuration file not specified.  Visit openstorage.org for an example.")
		return
	}

	cfg, err := config.Parse(file)
	if err != nil {
		dlog.Errorln(err)
		return
	}
	kvdbURL := c.String("kvdb")
	u, err := url.Parse(kvdbURL)
	if err != nil {
		dlog.Errorf("Failed to parse KVDB URL %s: %v", kvdbURL, err)
		return
	}
	scheme := u.Scheme
	u.Scheme = "http"

	kv, err := kvdb.New(scheme, "openstorage", []string{u.String()}, nil)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v (%v)", scheme, err)
		dlog.Warnf("Supported datastores: %v", datastores)
		return
	}
	err = kvdb.SetInstance(kv)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v", err)
		return
	}

	// Start the cluster state machine, if enabled.
	clusterInit := false
	if cfg.Osd.ClusterConfig.NodeId != "" && cfg.Osd.ClusterConfig.ClusterId != "" {
		dlog.Infof("OSD enabling cluster mode.")

		if err := cluster.Init(cfg.Osd.ClusterConfig); err != nil {
			dlog.Errorln("Unable to init cluster server: %v", err)
			return
		}
		clusterInit = true

		if err := server.StartClusterAPI(config.ClusterAPIBase); err != nil {
			dlog.Warnf("Unable to start cluster API server: %v", err)
			return
		}
	}

	// Start the volume drivers.
	for d, v := range cfg.Osd.Drivers {
		dlog.Infof("Starting volume driver: %v", d)
		if _, err := volume.New(d, v); err != nil {
			dlog.Warnf("Unable to start volume driver: %v, %v", d, err)
			return
		}

		if err := server.StartPluginAPI(d, config.DriverAPIBase, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start volume plugin: %v", err)
			return
		}
	}

	if err := server.StartFlexVolumeAPI(config.FlexVolumePort); err != nil {
		dlog.Warnf("Unable to start flexvolume API: %v", err)
		return
	}

	// Start the graph drivers.
	for d := range cfg.Osd.GraphDrivers {
		dlog.Infof("Starting graph driver: %v", d)
		if err := server.StartGraphAPI(d, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start graph plugin: %v", err)
			return
		}
	}

	if clusterInit {
		cm, err := cluster.Inst()
		if err != nil {
			dlog.Warnf("Unable to find cluster instance: %v", err)
			return
		}
		if err := cm.Start(); err != nil {
			dlog.Warnf("Unable to start cluster manager: %v", err)
			return
		}
	}

	// Daemon does not exit.
	select {}
}
Example 16
func (l *Layer0) create(id, parent string) (string, *Layer0Vol, error) {
	l.Lock()
	defer l.Unlock()

	// If this is the parent of the Layer0, add an entry for it.
	baseID, l0 := l.isLayer0Parent(id)
	if l0 {
		l.volumes[baseID] = &Layer0Vol{id: baseID, parent: parent}
		return id, nil, nil
	}

	// Don't do anything if this is not layer 0
	if !l.isLayer0(id) {
		return id, nil, nil
	}

	vol, ok := l.volumes[id]
	if !ok {
		dlog.Warnf("Failed to find layer0 volume for id %v", id)
		return id, nil, nil
	}

	// Query volume for Layer 0
	vols, err := l.volDriver.Enumerate(&api.VolumeLocator{Name: vol.parent}, nil)

	// If we don't find a volume configured for this image,
	// then don't track layer0
	if err != nil || vols == nil {
		dlog.Infof("Failed to find configured volume for id %v", vol.parent)
		delete(l.volumes, id)
		return id, nil, nil
	}

	// Find a volume that is available.
	index := -1
	for i, v := range vols {
		if len(v.AttachPath) == 0 {
			index = i
			break
		}
	}
	if index == -1 {
		dlog.Infof("Failed to find free volume for id %v", vol.parent)
		delete(l.volumes, id)
		return id, nil, nil
	}

	mountPath := path.Join(l.home, l.loID(id))
	os.MkdirAll(mountPath, 0755)

	// If this is a block driver, first attach the volume.
	if l.volDriver.Type() == api.DriverType_DRIVER_TYPE_BLOCK {
		_, err := l.volDriver.Attach(vols[index].Id)
		if err != nil {
			dlog.Errorf("Failed to attach volume %v", vols[index].Id)
			delete(l.volumes, id)
			return id, nil, nil
		}
	}
	err = l.volDriver.Mount(vols[index].Id, mountPath)
	if err != nil {
		dlog.Errorf("Failed to mount volume %v at path %v",
			vols[index].Id, mountPath)
		delete(l.volumes, id)
		return id, nil, nil
	}
	vol.path = mountPath
	vol.volumeID = vols[index].Id
	vol.ref = 1

	return l.realID(id), vol, nil
}
Example 17
func (d *driver) Create(locator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (string, error) {
	volumeID := uuid.New()
	volumeID = strings.TrimSuffix(volumeID, "\n")
	if spec.Size == 0 {
		return "", fmt.Errorf("buse: volume size cannot be zero")
	}
	if spec.Format == api.FSType_FS_TYPE_NONE {
		return "", fmt.Errorf("buse: missing volume format")
	}
	// Create a file on the local buse path with this UUID.
	buseFile := path.Join(BuseMountPath, volumeID)
	f, err := os.Create(buseFile)
	if err != nil {
		dlog.Println(err)
		return "", err
	}

	if err := f.Truncate(int64(spec.Size)); err != nil {
		dlog.Println(err)
		return "", err
	}

	bd := &buseDev{
		file: buseFile,
		f:    f,
	}
	nbd := Create(bd, int64(spec.Size))
	bd.nbd = nbd

	dlog.Infof("Connecting to NBD...")
	dev, err := bd.nbd.Connect()
	if err != nil {
		dlog.Println(err)
		return "", err
	}

	dlog.Infof("Formatting %s with %v", dev, spec.Format)
	cmd := "/sbin/mkfs." + spec.Format.SimpleString()
	o, err := exec.Command(cmd, dev).Output()
	if err != nil {
		dlog.Warnf("Failed to run command %v %v: %v", cmd, dev, o)
		return "", err
	}

	dlog.Infof("BUSE mapped NBD device %s (size=%v) to block file %s", dev, spec.Size, buseFile)

	v := common.NewVolume(
		volumeID,
		spec.Format,
		locator,
		source,
		spec,
	)
	v.DevicePath = dev

	d.buseDevices[dev] = bd

	err = d.CreateVol(v)
	if err != nil {
		return "", err
	}
	return v.Id, err
}
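
A minimal caller sketch, using only the locator and spec fields this Create actually reads; the ext4 format constant is borrowed from the coprhd example further down, and the volume name and size are placeholders.

locator := &api.VolumeLocator{Name: "myvol"}
spec := &api.VolumeSpec{
	Size:   1 * 1024 * 1024 * 1024, // 1 GiB placeholder
	Format: api.FSType_FS_TYPE_EXT4,
}
volumeID, err := d.Create(locator, nil, spec)
if err != nil {
	dlog.Errorf("BUSE create failed: %v", err)
	return
}
dlog.Infof("Created BUSE volume %s", volumeID)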
Example 18
// Handle block requests.
func (nbd *NBD) handle() {
	buf := make([]byte, 2<<19)
	var x request

	for {
		bytes, err := syscall.Read(nbd.socket, buf[0:28])
		if nbd.deviceFile == nil {
			dlog.Infof("Disconnecting device %s", nbd.devicePath)
			return
		}

		if bytes < 0 || err != nil {
			dlog.Errorf("Error reading from device %s", nbd.devicePath)
			nbd.Disconnect()
			return
		}

		x.magic = binary.BigEndian.Uint32(buf)
		x.typus = binary.BigEndian.Uint32(buf[4:8])
		x.handle = binary.BigEndian.Uint64(buf[8:16])
		x.from = binary.BigEndian.Uint64(buf[16:24])
		x.len = binary.BigEndian.Uint32(buf[24:28])

		switch x.magic {
		case NBD_REPLY_MAGIC:
			fallthrough
		case NBD_REQUEST_MAGIC:
			switch x.typus {
			case NBD_CMD_READ:
				nbd.device.ReadAt(buf[16:16+x.len], int64(x.from))
				binary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)
				binary.BigEndian.PutUint32(buf[4:8], 0)
				syscall.Write(nbd.socket, buf[0:16+x.len])
			case NBD_CMD_WRITE:
				n, _ := syscall.Read(nbd.socket, buf[28:28+x.len])
				for uint32(n) < x.len {
					m, _ := syscall.Read(nbd.socket, buf[28+n:28+x.len])
					n += m
				}
				nbd.device.WriteAt(buf[28:28+x.len], int64(x.from))
				binary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)
				binary.BigEndian.PutUint32(buf[4:8], 0)
				syscall.Write(nbd.socket, buf[0:16])
			case NBD_CMD_DISC:
				dlog.Infof("Disconnecting device %s", nbd.devicePath)
				nbd.Disconnect()
				return
			case NBD_CMD_FLUSH:
				fallthrough
			case NBD_CMD_TRIM:
				binary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)
				binary.BigEndian.PutUint32(buf[4:8], 1)
				syscall.Write(nbd.socket, buf[0:16])
			default:
				dlog.Errorf("Unknown command recieved on device %s", nbd.devicePath)
				nbd.Disconnect()
				return
			}
		default:
			dlog.Errorf("Invalid packet command recieved on device %s", nbd.devicePath)
			nbd.Disconnect()
			return
		}
	}
}
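
For reference, the loop above always reads a fixed 28-byte, big-endian request header before dispatching on the command type. The same layout, sketched from the encoding side (a hypothetical helper, not part of the driver):

// Request header layout mirrored from the binary.BigEndian reads in handle():
//   [0:4)   magic   uint32
//   [4:8)   type    uint32
//   [8:16)  handle  uint64
//   [16:24) from    uint64 (offset into the device)
//   [24:28) len     uint32 (payload length)
func encodeRequest(buf []byte, magic, typus uint32, handle, from uint64, length uint32) {
	binary.BigEndian.PutUint32(buf[0:4], magic)
	binary.BigEndian.PutUint32(buf[4:8], typus)
	binary.BigEndian.PutUint64(buf[8:16], handle)
	binary.BigEndian.PutUint64(buf[16:24], from)
	binary.BigEndian.PutUint32(buf[24:28], length)
}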
Example 19
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
func (d *Driver) Cleanup() error {
	dlog.Infof("Stopping libchainfs at %s", virtPath)
	C.stop_chainfs()
	return nil
}
Example 20
func (d *driver) Create(
	locator *api.VolumeLocator,
	source *api.Source,
	spec *api.VolumeSpec) (string, error) {

	var err error

	project := d.project
	varray := d.varray
	vpool := d.vpool
	initiator := d.initiator

	if name, ok := locator.VolumeLabels["project"]; ok {
		project, err = d.client.Project().
			Name(name).
			Query()
		if err != nil {
			return "", err
		}
	}
	if project == nil {
		return "", ErrProjectRequired
	}

	if name, ok := locator.VolumeLabels["varray"]; ok {
		varray, err = d.client.VArray().
			Name(name).
			Query()
		if err != nil {
			return "", err
		}
	}
	if varray == nil {
		return "", ErrArrayRequired
	}

	if name, ok := locator.VolumeLabels["vpool"]; ok {
		vpool, err = d.client.VPool().
			Name(name).
			Query()
		if err != nil {
			return "", err
		}
	}
	if vpool == nil {
		return "", ErrPoolRequired
	}

	if !vpool.IsBlock() {
		return "", ErrInvalidPool
	}

	// make sure this pool supports the initiator protocol
	if !vpool.HasProtocol(initiator.Protocol) {
		return "", ErrInvalidPort
	}

	sz := spec.Size
	if sz < minVolumeSize {
		sz = minVolumeSize
	}

	vol, err := d.client.Volume().
		Name(locator.Name).
		Project(project.Id).
		Array(varray.Id).
		Pool(vpool.Id).
		Create(sz)
	if err != nil {
		return "", err
	}

	volumeID := strings.ToLower(vol.WWN)

	dlog.Infof("coprhd volume %s created", volumeID)

	volume := common.NewVolume(
		volumeID,
		api.FSType_FS_TYPE_EXT4,
		locator,
		source,
		spec)

	if err := d.UpdateVol(volume); err != nil {
		return "", err
	}

	if _, err := d.Attach(volumeID); err != nil {
		return "", err
	}

	dlog.Infof("coprhd preparing volume %s...", volumeID)

	if err := d.Format(volumeID); err != nil {
		return "", err
	}

	if err := d.Detach(volumeID); err != nil {
		return "", err
	}

	return volumeID, nil
}
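
A hedged locator sketch for the lookups above: the project, varray and vpool defaults can each be overridden per volume through VolumeLabels. All names and the size below are placeholders.

locator := &api.VolumeLocator{
	Name: "db-data",
	VolumeLabels: map[string]string{
		"project": "myproject",
		"varray":  "myvarray",
		"vpool":   "block-pool",
	},
}
volumeID, err := d.Create(locator, nil, &api.VolumeSpec{Size: 8 * 1024 * 1024 * 1024})
if err != nil {
	dlog.Errorf("coprhd create failed: %v", err)
	return
}
dlog.Infof("Created coprhd volume %s", volumeID)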
Example 21
func (d *driver) Shutdown() {
	dlog.Infof("%s Shutting down", Name)
}