func (d *Driver) CreateVolume(id string, opts map[string]string) error {
	d.mutex.Lock()
	defer d.mutex.Unlock()

	volumeName := opts[convoydriver.OPT_VOLUME_NAME]
	if volumeName == "" {
		volumeName = "volume-" + id[:8]
	}

	volume := d.blankVolume(id)
	exists, err := util.ObjectExists(volume)
	if err != nil {
		return err
	}
	if exists {
		return fmt.Errorf("volume %v already exists", id)
	}

	gVolume := d.gVolumes[d.DefaultVolumePool]
	volumePath := filepath.Join(gVolume.MountPoint, volumeName)
	if util.VolumeMountPointDirectoryExists(gVolume, volumeName) {
		log.Debugf("Found existing volume named %v, reuse it", volumeName)
	} else if err := util.VolumeMountPointDirectoryCreate(gVolume, volumeName); err != nil {
		return err
	}

	volume.Name = volumeName
	volume.Path = volumePath
	volume.VolumePool = gVolume.UUID

	return util.ObjectSave(volume)
}
func (d *Driver) CreateVolume(id string, opts map[string]string) error {
	d.mutex.Lock()
	defer d.mutex.Unlock()

	backupURL := opts[convoydriver.OPT_BACKUP_URL]
	if backupURL != "" {
		objVolume, err := objectstore.LoadVolume(backupURL)
		if err != nil {
			return err
		}
		if objVolume.Driver != d.Name() {
			return fmt.Errorf("Cannot restore backup of %v to %v", objVolume.Driver, d.Name())
		}
	}

	volumeName := opts[convoydriver.OPT_VOLUME_NAME]
	if volumeName == "" {
		volumeName = "volume-" + id[:8]
	}

	volume := d.blankVolume(id)
	exists, err := util.ObjectExists(volume)
	if err != nil {
		return err
	}
	if exists {
		return fmt.Errorf("volume %v already exists", id)
	}

	volume.PrepareForVM, err = strconv.ParseBool(opts[convoydriver.OPT_PREPARE_FOR_VM])
	if err != nil {
		return err
	}
	if volume.PrepareForVM {
		volume.Size, err = d.getSize(opts, d.DefaultVolumeSize)
		if err != nil {
			return err
		}
	}

	volumePath := filepath.Join(d.Path, volumeName)
	if err := util.MkdirIfNotExists(volumePath); err != nil {
		return err
	}
	volume.Path = volumePath
	volume.Snapshots = make(map[string]Snapshot)

	if backupURL != "" {
		file, err := objectstore.RestoreSingleFileBackup(backupURL, volumePath)
		if err != nil {
			return err
		}
		// file would be removed after this because it's under volumePath
		if err := util.DecompressDir(file, volumePath); err != nil {
			return err
		}
	}
	return util.ObjectSave(volume)
}
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) {
	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
	} else {
		if err := util.MkdirIfNotExists(root); err != nil {
			return nil, err
		}

		path := config[VFS_PATH]
		if path == "" {
			return nil, fmt.Errorf("VFS driver base path unspecified")
		}
		if err := util.MkdirIfNotExists(path); err != nil {
			return nil, err
		}

		dev = &Device{
			Root: root,
			Path: path,
		}

		if _, exists := config[VFS_DEFAULT_VOLUME_SIZE]; !exists {
			config[VFS_DEFAULT_VOLUME_SIZE] = DEFAULT_VOLUME_SIZE
		}
		volumeSize, err := util.ParseSize(config[VFS_DEFAULT_VOLUME_SIZE])
		if err != nil || volumeSize == 0 {
			return nil, fmt.Errorf("Illegal default volume size specified")
		}
		dev.DefaultVolumeSize = volumeSize
	}

	// For upgrade case
	if dev.DefaultVolumeSize == 0 {
		dev.DefaultVolumeSize, err = util.ParseSize(DEFAULT_VOLUME_SIZE)
		if err != nil || dev.DefaultVolumeSize == 0 {
			return nil, fmt.Errorf("Illegal default volume size specified")
		}
	}

	if err := util.ObjectSave(dev); err != nil {
		return nil, err
	}
	d := &Driver{
		mutex:  &sync.RWMutex{},
		Device: *dev,
	}
	return d, nil
}
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) {
	ebsService, err := NewEBSService()
	if err != nil {
		return nil, err
	}
	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
	} else {
		if err := util.MkdirIfNotExists(root); err != nil {
			return nil, err
		}

		if config[EBS_DEFAULT_VOLUME_SIZE] == "" {
			config[EBS_DEFAULT_VOLUME_SIZE] = DEFAULT_VOLUME_SIZE
		}
		size, err := util.ParseSize(config[EBS_DEFAULT_VOLUME_SIZE])
		if err != nil {
			return nil, err
		}
		if config[EBS_DEFAULT_VOLUME_TYPE] == "" {
			config[EBS_DEFAULT_VOLUME_TYPE] = DEFAULT_VOLUME_TYPE
		}
		volumeType := config[EBS_DEFAULT_VOLUME_TYPE]
		if err := checkVolumeType(volumeType); err != nil {
			return nil, err
		}

		dev = &Device{
			Root:              root,
			DefaultVolumeSize: size,
			DefaultVolumeType: volumeType,
		}
		if err := util.ObjectSave(dev); err != nil {
			return nil, err
		}
	}
	d := &Driver{
		mutex:      &sync.RWMutex{},
		ebsService: ebsService,
		Device:     *dev,
	}
	if err := d.remountVolumes(); err != nil {
		return nil, err
	}
	return d, nil
}
func (d *Driver) CreateVolume(id string, opts map[string]string) error { d.mutex.Lock() defer d.mutex.Unlock() volumeName := opts[convoydriver.OPT_VOLUME_NAME] if volumeName == "" { volumeName = "volume-" + id[:8] } volume := d.blankVolume(id) exists, err := util.ObjectExists(volume) if err != nil { return err } if exists { return fmt.Errorf("volume %v already exists", id) } volume.PrepareForVM, err = strconv.ParseBool(opts[convoydriver.OPT_PREPARE_FOR_VM]) if err != nil { return err } if volume.PrepareForVM { volume.Size, err = d.getSize(opts, d.DefaultVolumeSize) if err != nil { return err } } gVolume := d.gVolumes[d.DefaultVolumePool] volumePath := filepath.Join(gVolume.MountPoint, volumeName) if util.VolumeMountPointFileExists(gVolume, volumeName, util.FILE_TYPE_DIRECTORY) { log.Debugf("Found existing volume named %v, reuse it", volumeName) } else if err := util.VolumeMountPointDirectoryCreate(gVolume, volumeName); err != nil { return err } volume.Name = volumeName volume.Path = volumePath volume.VolumePool = gVolume.UUID return util.ObjectSave(volume) }
func (d *Driver) CreateVolume(req Request) error {
	d.mutex.Lock()
	defer d.mutex.Unlock()

	id := req.Name
	opts := req.Options

	volume := d.blankVolume(id)
	exists, err := util.ObjectExists(volume)
	if err != nil {
		return err
	}
	if exists {
		return fmt.Errorf("volume %v already exists", id)
	}

	volume.PrepareForVM, err = strconv.ParseBool(opts[OPT_PREPARE_FOR_VM])
	if err != nil {
		return err
	}
	if volume.PrepareForVM {
		volume.Size, err = d.getSize(opts, d.DefaultVolumeSize)
		if err != nil {
			return err
		}
	}

	gVolume := d.gVolumes[d.DefaultVolumePool]
	volumePath := filepath.Join(gVolume.MountPoint, id)
	if util.VolumeMountPointFileExists(gVolume, id, util.FILE_TYPE_DIRECTORY) {
		log.Debugf("Found existing volume named %v, reuse it", id)
	} else if err := util.VolumeMountPointDirectoryCreate(gVolume, id); err != nil {
		return err
	}

	volume.Name = id
	volume.Path = volumePath
	volume.VolumePool = gVolume.UUID
	volume.CreatedTime = util.Now()

	return util.ObjectSave(volume)
}
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) { dev := &Device{ Root: root, } exists, err := util.ObjectExists(dev) if err != nil { return nil, err } if exists { if err := util.ObjectLoad(dev); err != nil { return nil, err } } else { if err := util.MkdirIfNotExists(root); err != nil { return nil, err } path := config[VFS_PATH] if path == "" { return nil, fmt.Errorf("VFS driver base path unspecified") } if err := util.MkdirIfNotExists(path); err != nil { return nil, err } dev = &Device{ Root: root, Path: path, } if err := util.ObjectSave(dev); err != nil { return nil, err } } d := &Driver{ mutex: &sync.RWMutex{}, Device: *dev, } return d, nil }
func Init(root string, config map[string]string) (ConvoyDriver, error) {
	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
	} else {
		if err := util.MkdirIfNotExists(root); err != nil {
			return nil, err
		}

		serverList := config[GLUSTERFS_SERVERS]
		if serverList == "" {
			return nil, fmt.Errorf("Missing required parameter: %v", GLUSTERFS_SERVERS)
		}
		servers := strings.Split(serverList, ",")
		for _, server := range servers {
			if !util.ValidNetworkAddr(server) {
				return nil, fmt.Errorf("Invalid or unresolvable address: %v", server)
			}
		}

		defaultVolumePool := config[GLUSTERFS_DEFAULT_VOLUME_POOL]
		if defaultVolumePool == "" {
			return nil, fmt.Errorf("Missing required parameter: %v", GLUSTERFS_DEFAULT_VOLUME_POOL)
		}

		if _, exists := config[GLUSTERFS_DEFAULT_VOLUME_SIZE]; !exists {
			config[GLUSTERFS_DEFAULT_VOLUME_SIZE] = DEFAULT_VOLUME_SIZE
		}
		volumeSize, err := util.ParseSize(config[GLUSTERFS_DEFAULT_VOLUME_SIZE])
		if err != nil || volumeSize == 0 {
			return nil, fmt.Errorf("Illegal default volume size specified")
		}

		// Set DefaultVolumeSize in the struct literal so the parsed value
		// isn't lost when dev is reassigned.
		dev = &Device{
			Root:              root,
			Servers:           servers,
			DefaultVolumePool: defaultVolumePool,
			DefaultVolumeSize: volumeSize,
		}
	}

	// For upgrade case
	if dev.DefaultVolumeSize == 0 {
		dev.DefaultVolumeSize, err = util.ParseSize(DEFAULT_VOLUME_SIZE)
		if err != nil || dev.DefaultVolumeSize == 0 {
			return nil, fmt.Errorf("Illegal default volume size specified")
		}
	}

	d := &Driver{
		mutex:    &sync.RWMutex{},
		gVolumes: map[string]*GlusterFSVolume{},
		Device:   *dev,
	}
	gVolume := &GlusterFSVolume{
		UUID:       dev.DefaultVolumePool,
		Servers:    dev.Servers,
		configPath: d.Root,
	}
	// We would always mount the default volume pool
	// TODO: Also need to mount any existing volume's pool
	if _, err := util.VolumeMount(gVolume, "", true); err != nil {
		return nil, err
	}
	d.gVolumes[d.DefaultVolumePool] = gVolume

	if err := util.ObjectSave(dev); err != nil {
		return nil, err
	}
	return d, nil
}
func (d *Driver) CreateVolume(id string, opts map[string]string) error {
	var (
		size int64
		err  error
	)

	backupURL := opts[convoydriver.OPT_BACKUP_URL]
	if backupURL != "" {
		objVolume, err := objectstore.LoadVolume(backupURL)
		if err != nil {
			return err
		}
		if objVolume.Driver != d.Name() {
			return fmt.Errorf("Cannot restore backup of %v to %v", objVolume.Driver, d.Name())
		}
		size, err = d.getSize(opts, objVolume.Size)
		if err != nil {
			return err
		}
		if size != objVolume.Size {
			return fmt.Errorf("Volume size must match with backup's size")
		}
	} else {
		size, err = d.getSize(opts, d.DefaultVolumeSize)
		if err != nil {
			return err
		}
	}
	if size%(d.ThinpoolBlockSize*SECTOR_SIZE) != 0 {
		return fmt.Errorf("Size must be multiple of block size")
	}

	volume := d.blankVolume(id)
	exists, err := util.ObjectExists(volume)
	if err != nil {
		return err
	}
	if exists {
		return generateError(logrus.Fields{
			LOG_FIELD_VOLUME: id,
		}, "Already has volume with specific uuid")
	}

	devID, err := d.allocateDevID()
	if err != nil {
		return err
	}

	log.WithFields(logrus.Fields{
		LOG_FIELD_REASON:          LOG_REASON_START,
		LOG_FIELD_EVENT:           LOG_EVENT_CREATE,
		LOG_FIELD_OBJECT:          LOG_OBJECT_VOLUME,
		LOG_FIELD_VOLUME:          id,
		DM_LOG_FIELD_VOLUME_DEVID: devID,
	}).Debugf("Creating volume")
	err = devicemapper.CreateDevice(d.ThinpoolDevice, devID)
	if err != nil {
		return err
	}

	log.WithFields(logrus.Fields{
		LOG_FIELD_REASON:          LOG_REASON_START,
		LOG_FIELD_EVENT:           LOG_EVENT_ACTIVATE,
		LOG_FIELD_OBJECT:          LOG_OBJECT_VOLUME,
		LOG_FIELD_VOLUME:          id,
		DM_LOG_FIELD_VOLUME_DEVID: devID,
	}).Debugf("Activating device for volume")
	err = devicemapper.ActivateDevice(d.ThinpoolDevice, id, devID, uint64(size))
	if err != nil {
		log.WithFields(logrus.Fields{
			LOG_FIELD_REASON:          LOG_REASON_ROLLBACK,
			LOG_FIELD_EVENT:           LOG_EVENT_REMOVE,
			LOG_FIELD_OBJECT:          LOG_OBJECT_VOLUME,
			LOG_FIELD_VOLUME:          id,
			DM_LOG_FIELD_VOLUME_DEVID: devID,
		}).Debugf("Removing device for volume due to fail to activate")
		if err := devicemapper.DeleteDevice(d.ThinpoolDevice, devID); err != nil {
			log.WithFields(logrus.Fields{
				LOG_FIELD_REASON:          LOG_FIELD_REASON,
				LOG_FIELD_EVENT:           LOG_EVENT_REMOVE,
				LOG_FIELD_OBJECT:          LOG_OBJECT_VOLUME,
				LOG_FIELD_VOLUME:          id,
				DM_LOG_FIELD_VOLUME_DEVID: devID,
			}).Debugf("Failed to remove device")
		}
		return err
	}

	volume.DevID = devID
	volume.Size = size
	volume.Snapshots = make(map[string]Snapshot)
	if err := util.ObjectSave(volume); err != nil {
		return err
	}

	dev, err := d.GetVolumeDevice(id)
	if err != nil {
		return err
	}
	if backupURL == "" {
		// format the device
		if _, err := util.Execute("mkfs", []string{"-t", "ext4", dev}); err != nil {
			return err
		}
	} else {
		if err := objectstore.RestoreDeltaBlockBackup(backupURL, dev); err != nil {
			return err
		}
	}
	return nil
}
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) {
	devicemapper.LogInitVerbose(1)
	devicemapper.LogInit(&DMLogger{})

	if err := checkEnvironment(); err != nil {
		return nil, err
	}
	if err := util.MkdirIfNotExists(root); err != nil {
		return nil, err
	}

	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
		d := &Driver{
			Mutex:  &sync.Mutex{},
			Device: *dev,
		}
		if err := d.activatePool(); err != nil {
			return nil, err
		}
		if err := d.remountVolumes(); err != nil {
			return nil, err
		}
		return d, nil
	}

	dev, err = verifyConfig(config)
	if err != nil {
		return nil, err
	}
	dev.Root = root

	dataDev, err := os.Open(dev.DataDevice)
	if err != nil {
		return nil, err
	}
	defer dataDev.Close()

	metadataDev, err := os.Open(dev.MetadataDevice)
	if err != nil {
		return nil, err
	}
	defer metadataDev.Close()

	thinpSize, err := devicemapper.GetBlockDeviceSize(dataDev)
	if err != nil {
		return nil, err
	}
	dev.ThinpoolSize = int64(thinpSize)
	dev.LastDevID = 0

	if err = createPool(filepath.Base(dev.ThinpoolDevice), dataDev, metadataDev, uint32(dev.ThinpoolBlockSize)); err != nil {
		return nil, err
	}

	if err = util.ObjectSave(dev); err != nil {
		return nil, err
	}
	d := &Driver{
		Mutex:  &sync.Mutex{},
		Device: *dev,
	}
	return d, nil
}
func (d *Driver) CreateVolume(id string, opts map[string]string) error {
	var (
		err        error
		volumeSize int64
		format     bool
		snapshot   *Snapshot
	)

	d.mutex.Lock()
	defer d.mutex.Unlock()

	volume := d.blankVolume(id)
	exists, err := util.ObjectExists(volume)
	if err != nil {
		return err
	}
	if exists {
		return fmt.Errorf("Volume %v already exists", id)
	}

	//EBS volume ID
	volumeID := opts[convoydriver.OPT_VOLUME_ID]
	backupURL := opts[convoydriver.OPT_BACKUP_URL]
	if backupURL != "" && volumeID != "" {
		return fmt.Errorf("Cannot specify both backup and EBS volume ID")
	}

	newTags := map[string]string{
		"Name":             opts[convoydriver.OPT_VOLUME_NAME],
		"ConvoyVolumeUUID": id,
	}
	if volumeID != "" {
		ebsVolume, err := d.ebsService.GetVolume(volumeID)
		if err != nil {
			return err
		}
		volumeSize = *ebsVolume.Size * GB
		log.Debugf("Found EBS volume %v for volume %v, update tags", volumeID, id)
		if err := d.ebsService.AddTags(volumeID, newTags); err != nil {
			log.Debugf("Failed to update tags for volume %v, but continue", volumeID)
		}
	} else if backupURL != "" {
		region, ebsSnapshotID, err := decodeURL(backupURL)
		if err != nil {
			return err
		}
		if region != d.ebsService.Region {
			// We don't want to automatically copy snapshot here
			// because it's way too time consuming.
			return fmt.Errorf("Snapshot %v is at %v rather than current region %v. Copy snapshot is needed", ebsSnapshotID, region, d.ebsService.Region)
		}
		if err := d.ebsService.WaitForSnapshotComplete(ebsSnapshotID); err != nil {
			return err
		}
		log.Debugf("Snapshot %v is ready", ebsSnapshotID)
		ebsSnapshot, err := d.ebsService.GetSnapshot(ebsSnapshotID)
		if err != nil {
			return err
		}

		snapshot = &Snapshot{
			UUID:       uuid.New(),
			VolumeUUID: id,
			EBSID:      ebsSnapshotID,
		}

		snapshotVolumeSize := *ebsSnapshot.VolumeSize * GB
		volumeSize, err = d.getSize(opts, snapshotVolumeSize)
		if err != nil {
			return err
		}
		if volumeSize < snapshotVolumeSize {
			return fmt.Errorf("Volume size cannot be less than snapshot size %v", snapshotVolumeSize)
		}
		volumeType, iops, err := d.getTypeAndIOPS(opts)
		if err != nil {
			return err
		}
		r := &CreateEBSVolumeRequest{
			Size:       volumeSize,
			SnapshotID: ebsSnapshotID,
			VolumeType: volumeType,
			IOPS:       iops,
			Tags:       newTags,
		}
		volumeID, err = d.ebsService.CreateVolume(r)
		if err != nil {
			return err
		}
		log.Debugf("Created volume %v from EBS snapshot %v", id, ebsSnapshotID)
	} else {
		// Create a new EBS volume
		volumeSize, err = d.getSize(opts, d.DefaultVolumeSize)
		if err != nil {
			return err
		}
		volumeType, iops, err := d.getTypeAndIOPS(opts)
		if err != nil {
			return err
		}
		r := &CreateEBSVolumeRequest{
			Size:       volumeSize,
			VolumeType: volumeType,
			IOPS:       iops,
			Tags:       newTags,
		}
		volumeID, err = d.ebsService.CreateVolume(r)
		if err != nil {
			return err
		}
		log.Debugf("Created volume %v from EBS volume %v", id, volumeID)
		format = true
	}

	dev, err := d.ebsService.AttachVolume(volumeID, volumeSize)
	if err != nil {
		return err
	}
	log.Debugf("Attached EBS volume %v to %v", volumeID, dev)

	volume.EBSID = volumeID
	volume.Device = dev
	volume.Snapshots = make(map[string]Snapshot)
	if snapshot != nil {
		volume.Snapshots[snapshot.UUID] = *snapshot
	}

	// We don't format existing or snapshot restored volume
	if format {
		if _, err := util.Execute("mkfs", []string{"-t", "ext4", dev}); err != nil {
			return err
		}
	}

	return util.ObjectSave(volume)
}
// Start the daemon func Start(sockFile string, c *cli.Context) error { var err error if err = daemonEnvironmentSetup(c); err != nil { return err } defer environmentCleanup() root := c.String("root") s := &daemon{ ConvoyDrivers: make(map[string]convoydriver.ConvoyDriver), } config := &daemonConfig{ Root: root, } exists, err := util.ObjectExists(config) if err != nil { return err } if exists { log.Debug("Found existing config. Ignoring command line opts, loading config from ", root) if err := util.ObjectLoad(config); err != nil { return err } } else { fd := c.String("mnt-ns") if fd != "" { if _, err := os.Stat(fd); err != nil { return fmt.Errorf("Cannot find mount namespace fd %v", fd) } config.MountNamespaceFD = fd } driverList := c.StringSlice("drivers") if len(driverList) == 0 { return fmt.Errorf("Missing or invalid parameters") } log.Debug("Creating config at ", root) config.DriverList = driverList config.DefaultDriver = driverList[0] } s.daemonConfig = *config if err := util.InitMountNamespace(s.MountNamespaceFD); err != nil { return err } // driverOpts would be ignored by Convoy Drivers if config already exists driverOpts := util.SliceToMap(c.StringSlice("driver-opts")) if err := s.initDrivers(driverOpts); err != nil { return err } if err := s.finializeInitialization(); err != nil { return err } if err := util.ObjectSave(config); err != nil { return err } s.Router = createRouter(s) if err := util.MkdirIfNotExists(filepath.Dir(sockFile)); err != nil { return err } // This should be safe because lock file prevent starting daemon twice if _, err := os.Stat(sockFile); err == nil { log.Warnf("Remove previous sockfile at %v", sockFile) if err := os.Remove(sockFile); err != nil { return err } } l, err := net.Listen("unix", sockFile) if err != nil { fmt.Println("listen err", err) return err } defer l.Close() sigs := make(chan os.Signal, 1) done := make(chan bool, 1) signal.Notify(sigs, os.Interrupt, os.Kill, syscall.SIGTERM) go func() { sig := <-sigs fmt.Printf("Caught signal %s: shutting down.\n", sig) done <- true }() go func() { err = http.Serve(l, s.Router) if err != nil { log.Error("http server error", err.Error()) } done <- true }() <-done return nil }
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) {
	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
		dev.RancherURL = override(dev.RancherURL, config[LH_RANCHER_URL])
		dev.RancherAccessKey = override(dev.RancherAccessKey, config[LH_RANCHER_ACCESS_KEY])
		dev.RancherSecretKey = override(dev.RancherSecretKey, config[LH_RANCHER_SECRET_KEY])
	} else {
		if err := util.MkdirIfNotExists(root); err != nil {
			return nil, err
		}

		url := config[LH_RANCHER_URL]
		accessKey := config[LH_RANCHER_ACCESS_KEY]
		secretKey := config[LH_RANCHER_SECRET_KEY]
		if url == "" || accessKey == "" || secretKey == "" {
			return nil, fmt.Errorf("Missing required parameter. lh.rancherurl or lh.rancheraccesskey or lh.ranchersecretkey")
		}

		if _, exists := config[LH_DEFAULT_VOLUME_SIZE]; !exists {
			config[LH_DEFAULT_VOLUME_SIZE] = DEFAULT_VOLUME_SIZE
		}
		volumeSize, err := util.ParseSize(config[LH_DEFAULT_VOLUME_SIZE])
		if err != nil || volumeSize == 0 {
			return nil, fmt.Errorf("Illegal default volume size specified")
		}

		dev = &Device{
			Root:              root,
			RancherURL:        url,
			RancherAccessKey:  accessKey,
			RancherSecretKey:  secretKey,
			DefaultVolumeSize: volumeSize,
		}
	}

	containerName := config[LH_CONTAINER_NAME]
	if containerName == "" {
		handler := metadata.NewClient(RANCHER_METADATA_URL)
		container, err := handler.GetSelfContainer()
		if err != nil {
			return nil, err
		}
		containerName = container.UUID
	}

	log.Debugf("Try to connect to Rancher server at %s [%s:%s]", dev.RancherURL, dev.RancherAccessKey, dev.RancherSecretKey)
	client, err := rancherClient.NewRancherClient(&rancherClient.ClientOpts{
		Url:       dev.RancherURL,
		AccessKey: dev.RancherAccessKey,
		SecretKey: dev.RancherSecretKey,
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to establish connection to Rancher server")
	}

	if err := util.ObjectSave(dev); err != nil {
		return nil, err
	}
	d := &Driver{
		client:        client,
		containerName: containerName,
		Device:        *dev,
	}
	return d, nil
}
// Start the daemon func Start(sockFile string, c *cli.Context) error { var err error if err = daemonEnvironmentSetup(c); err != nil { return err } defer environmentCleanup() root := c.String("root") s := &daemon{ ConvoyDrivers: make(map[string]convoydriver.ConvoyDriver), } config := &daemonConfig{ Root: root, } exists, err := util.ObjectExists(config) if err != nil { return err } driverOpts := util.SliceToMap(c.StringSlice("driver-opts")) if exists { log.Debug("Found existing config. Ignoring command line opts, loading config from ", root) if err := util.ObjectLoad(config); err != nil { return err } } else { driverList := c.StringSlice("drivers") if len(driverList) == 0 || driverOpts == nil { return fmt.Errorf("Missing or invalid parameters") } log.Debug("Creating config at ", root) config.DriverList = driverList config.DefaultDriver = driverList[0] } s.daemonConfig = *config if err := s.initDrivers(driverOpts); err != nil { return err } if err := s.finializeInitialization(); err != nil { return err } if err := util.ObjectSave(config); err != nil { return err } s.Router = createRouter(s) if err := util.MkdirIfNotExists(filepath.Dir(sockFile)); err != nil { return err } l, err := net.Listen("unix", sockFile) if err != nil { fmt.Println("listen err", err) return err } defer l.Close() sigs := make(chan os.Signal, 1) done := make(chan bool, 1) signal.Notify(sigs, os.Interrupt, os.Kill, syscall.SIGTERM) go func() { sig := <-sigs fmt.Printf("Caught signal %s: shutting down.\n", sig) done <- true }() go func() { err = http.Serve(l, s.Router) if err != nil { log.Error("http server error", err.Error()) } done <- true }() <-done return nil }
func Init(root string, config map[string]string) (convoydriver.ConvoyDriver, error) {
	dev := &Device{
		Root: root,
	}
	exists, err := util.ObjectExists(dev)
	if err != nil {
		return nil, err
	}
	if exists {
		if err := util.ObjectLoad(dev); err != nil {
			return nil, err
		}
	} else {
		if err := util.MkdirIfNotExists(root); err != nil {
			return nil, err
		}

		stack := config[GLUSTERFS_RANCHER_STACK]
		if stack == "" {
			return nil, fmt.Errorf("Missing required parameter: %v", GLUSTERFS_RANCHER_STACK)
		}
		service := config[GLUSTERFS_RANCHER_GLUSTER_SERVICE]
		if service == "" {
			return nil, fmt.Errorf("Missing required parameter: %v", GLUSTERFS_RANCHER_GLUSTER_SERVICE)
		}
		defaultVolumePool := config[GLUSTERFS_DEFAULT_VOLUME_POOL]
		if defaultVolumePool == "" {
			return nil, fmt.Errorf("Missing required parameter: %v", GLUSTERFS_DEFAULT_VOLUME_POOL)
		}

		dev = &Device{
			Root:              root,
			RancherStack:      stack,
			RancherService:    service,
			DefaultVolumePool: defaultVolumePool,
		}
	}

	serverIPs, err := rancher.GetIPsForServiceInStack(dev.RancherService, dev.RancherStack)
	if err != nil {
		return nil, err
	}

	d := &Driver{
		mutex:    &sync.RWMutex{},
		gVolumes: map[string]*GlusterFSVolume{},
		Device:   *dev,
	}
	gVolume := &GlusterFSVolume{
		UUID:       dev.DefaultVolumePool,
		ServerIPs:  serverIPs,
		configPath: d.Root,
	}
	// We would always mount the default volume pool
	// TODO: Also need to mount any existing volume's pool
	if _, err := util.VolumeMount(gVolume, "", true); err != nil {
		return nil, err
	}
	d.gVolumes[d.DefaultVolumePool] = gVolume

	if err := util.ObjectSave(dev); err != nil {
		return nil, err
	}
	return d, nil
}