func NewVolumeDeleteCommand(options *Options) *VolumeDeleteCommand {
	godbc.Require(options != nil)

	cmd := &VolumeDeleteCommand{}
	cmd.name = "delete"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Deletes the volume

USAGE
  heketi-cli [options] volume delete [id]

Where "id" is the id of the volume to be deleted

EXAMPLE
  $ heketi-cli volume delete 886a86a868711bef83001
`)
	}

	godbc.Ensure(cmd.flags != nil)
	godbc.Ensure(cmd.name == "delete")

	return cmd
}
// ASU1 + ASU2 + ASU3 = X
// ASU1 is 45% of X
// ASU2 is 45% of X
// ASU3 is 10% of X
// Call this function after all ASUs are opened
func (s *SpcInfo) adjustAsuSizes() error {
	godbc.Require(s.asus[ASU1].len != 0)
	godbc.Require(s.asus[ASU2].len != 0)
	godbc.Require(s.asus[ASU3].len != 0)

	// Start by making sure ASU1 and ASU2 are equal
	if s.asus[ASU1].len > s.asus[ASU2].len {
		s.asus[ASU1].len = s.asus[ASU2].len
	} else {
		s.asus[ASU2].len = s.asus[ASU1].len
	}

	// Now we need to adjust ASU3
	asu3_correct_size := uint32(float64(2*s.asus[ASU1].len) / 9)
	if asu3_correct_size > s.asus[ASU3].len {
		return fmt.Errorf("\nASU3 size is too small: %v KB.\n"+
			"It must be bigger than 1/9 of 2*ASU1,\n"+
			"or %v KB for this configuration\n",
			s.asus[ASU3].len*4, asu3_correct_size*4)
	} else {
		s.asus[ASU3].len = asu3_correct_size
	}

	godbc.Ensure(s.asus[ASU1].len != 0)
	godbc.Ensure(s.asus[ASU2].len != 0)
	godbc.Ensure(s.asus[ASU3].len != 0, asu3_correct_size)

	return nil
}
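// Worked example (illustrative, not part of the original source): lengths are
// counted in 4 KB blocks. If ASU1 and ASU2 are equalized to 900 blocks each,
// then asu3_correct_size = 2*900/9 = 200 blocks, and the total is
// 900 + 900 + 200 = 2000 blocks, i.e. the intended 45% / 45% / 10% split.
// If the file backing ASU3 holds fewer than 200 blocks, adjustAsuSizes
// returns the "ASU3 size is too small" error above.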
func NewKubeExecutor(config *KubeConfig) (*KubeExecutor, error) {
	// Override configuration
	setWithEnvVariables(config)

	// Initialize
	k := &KubeExecutor{}
	k.config = config
	k.Throttlemap = make(map[string]chan bool)
	k.RemoteExecutor = k

	if k.config.Fstab == "" {
		k.Fstab = "/etc/fstab"
	} else {
		k.Fstab = config.Fstab
	}

	// Check required values
	if k.config.Namespace == "" {
		return nil, fmt.Errorf("Namespace must be provided in configuration")
	}

	godbc.Ensure(k != nil)
	godbc.Ensure(k.Fstab != "")

	return k, nil
}
func NewClusterInfoCommand(options *Options) *ClusterInfoCommand {
	godbc.Require(options != nil)

	cmd := &ClusterInfoCommand{}
	cmd.name = "info"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Retrieves information about the cluster

USAGE
  heketi-cli [options] cluster info [id]

Where "id" is the id of the cluster

EXAMPLE
  $ heketi-cli cluster info 886a86a868711bef83001
`)
	}

	godbc.Ensure(cmd.flags != nil)
	godbc.Ensure(cmd.name == "info")

	return cmd
}
func NewLogger(prefix string, level LogLevel) *Logger {
	godbc.Require(level >= 0, level)
	godbc.Require(level <= LEVEL_DEBUG, level)

	l := &Logger{}

	if level == LEVEL_NOLOG {
		l.level = LEVEL_DEBUG
	} else {
		l.level = level
	}

	l.critlog = log.New(stderr, prefix+" CRITICAL ", log.LstdFlags)
	l.errorlog = log.New(stderr, prefix+" ERROR ", log.LstdFlags)
	l.warninglog = log.New(stdout, prefix+" WARNING ", log.LstdFlags)
	l.infolog = log.New(stdout, prefix+" INFO ", log.LstdFlags)
	l.debuglog = log.New(stdout, prefix+" DEBUG ", log.LstdFlags)

	godbc.Ensure(l.critlog != nil)
	godbc.Ensure(l.errorlog != nil)
	godbc.Ensure(l.warninglog != nil)
	godbc.Ensure(l.infolog != nil)
	godbc.Ensure(l.debuglog != nil)

	return l
}
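// Minimal usage sketch (assumed, not part of the original source): constructs
// a logger at debug level and writes through one of its underlying log.Logger
// fields. The prefix is illustrative, and because infolog is unexported this
// only works from inside the same package; normal callers would use the
// Logger's own helper methods instead.
func exampleLoggerUsage() {
	l := NewLogger("[example]", LEVEL_DEBUG)
	l.infolog.Println("logger initialized") // prints "[example] INFO ..." to stdout
}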
func (c *ClusterEntry) StorageAdd(amount uint64) {
	c.Info.Storage.Free += amount
	c.Info.Storage.Total += amount

	godbc.Ensure(c.Info.Storage.Free >= 0)
	godbc.Ensure(c.Info.Storage.Used >= 0)
	godbc.Ensure(c.Info.Storage.Total >= 0)
}
func (c *ClusterEntry) StorageAllocate(amount uint64) {
	c.Info.Storage.Free -= amount
	c.Info.Storage.Used += amount
	c.Info.Storage.Total -= amount

	godbc.Ensure(c.Info.Storage.Free >= 0)
	godbc.Ensure(c.Info.Storage.Used >= 0)
	godbc.Ensure(c.Info.Storage.Total >= 0)
}
// Registers that the handler has completed and no data needs to be returned
func (h *AsyncHttpHandler) Completed() {
	h.manager.lock.RLock()
	defer h.manager.lock.RUnlock()

	godbc.Require(h.completed == false)

	h.completed = true

	godbc.Ensure(h.completed == true)
	godbc.Ensure(h.location == "")
	godbc.Ensure(h.err == nil)
}
func NewDeviceAddCommand(options *Options) *DeviceAddCommand {
	godbc.Require(options != nil)

	cmd := &DeviceAddCommand{}
	cmd.name = "add"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.StringVar(&cmd.device, "name", "", "Name of device to add")
	cmd.flags.StringVar(&cmd.nodeId, "node", "", "Id of the node which has this device")

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Add new device to node to be managed by Heketi

USAGE
  heketi-cli device add [options]

OPTIONS`)

		// Print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  $ heketi-cli device add \
      -name=/dev/sdb -node=3e098cb4407d7109806bb196d9e8f095
`)
	}

	godbc.Ensure(cmd.name == "add")

	return cmd
}
func NewClusterCreateCommand(options *Options) *ClusterCreateCommand {
	godbc.Require(options != nil)

	cmd := &ClusterCreateCommand{}
	cmd.name = "create"
	cmd.options = options

	// Set flags
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Create a cluster

A cluster is used to group a collection of nodes.  It also provides
the caller with the choice to specify clusters where volumes should
be created.

USAGE
  heketi-cli [options] cluster create

EXAMPLE
  $ heketi-cli cluster create
`)
	}

	godbc.Ensure(cmd.name == "create")

	return cmd
}
func NewVolumeExpandCommand(options *Options) *VolumeExpandCommand {
	godbc.Require(options != nil)

	cmd := &VolumeExpandCommand{}
	cmd.name = "expand"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.IntVar(&cmd.expand_size, "expand-size", -1,
		"\n\tAmount in GB to add to the volume")
	cmd.flags.StringVar(&cmd.id, "volume", "",
		"\n\tId of volume to expand")

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Expand a volume

USAGE
  heketi-cli volume expand [options]

OPTIONS`)

		// Print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  * Add 10GB to a volume
    $ heketi-cli volume expand -volume=60d46d518074b13a04ce1022c8c7193c -expand-size=10
`)
	}

	godbc.Ensure(cmd.name == "expand")

	return cmd
}
func NewNodeDestroyCommand(options *Options) *NodeDestroyCommand {
	godbc.Require(options != nil)

	cmd := &NodeDestroyCommand{}
	cmd.name = "delete"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Deletes a node from Heketi management

USAGE
  heketi-cli [options] node delete [id]

Where "id" is the id of the node

EXAMPLE
  $ heketi-cli node delete 886a86a868711bef83001
`)
	}

	godbc.Ensure(cmd.name == "delete")

	return cmd
}
// Function to create new node command
func NewNodeCommand(options *Options) *NodeCommand {
	godbc.Require(options != nil)

	cmd := &NodeCommand{}
	cmd.name = "node"
	cmd.options = options
	cmd.cmds = Commands{
		NewNodeAddCommand(options),
		NewNodeInfoCommand(options),
		NewNodeDestroyCommand(options),
	}
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Heketi node management

USAGE
  heketi-cli [options] node [commands]

COMMANDS
  add     Adds a node for Heketi to manage.
  info    Returns information about a specific node.
  delete  Delete node with specified id.

Use "heketi-cli node [command] -help" for more information about a command
`)
	}

	godbc.Ensure(cmd.name == "node")

	return cmd
}
func NewVolumeListCommand(options *Options) *VolumeListCommand {
	godbc.Require(options != nil)

	cmd := &VolumeListCommand{}
	cmd.name = "list"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Lists the volumes managed by Heketi

USAGE
  heketi-cli [options] volume list

EXAMPLE
  $ heketi-cli volume list
`)
	}

	godbc.Ensure(cmd.name == "list")

	return cmd
}
func (m *Message) Add(child *Message) {
	godbc.Require(child.parent == nil, child)

	m.wg.Add(1)
	child.parent = m

	godbc.Ensure(child.parent == m)
}
// Function to create new cluster command
func NewClusterCommand(options *Options) *ClusterCommand {
	// Require before we do any work
	godbc.Require(options != nil)

	// Create ClusterCommand object
	cmd := &ClusterCommand{}
	cmd.name = "cluster"
	cmd.options = options

	// Set up subcommands
	cmd.cmds = Commands{
		NewClusterCreateCommand(options),
		NewClusterInfoCommand(options),
		NewClusterListCommand(options),
		NewClusterDestroyCommand(options),
	}

	// Create flags
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Heketi cluster management

USAGE
  heketi-cli [options] cluster [commands]

COMMANDS
  create   Creates a new cluster for Heketi to manage.
  list     Returns a list of all clusters
  info     Returns information about a specific cluster.
  delete   Delete a cluster

Use "heketi-cli cluster [command] -help" for more information about a command
`)
	}

	// Ensure before we return
	godbc.Ensure(cmd.flags != nil)
	godbc.Ensure(cmd.name == "cluster")

	return cmd
}
func (b *BrickEntry) Create(db *bolt.DB, executor executors.Executor) error {
	godbc.Require(db != nil)
	godbc.Require(b.TpSize > 0)
	godbc.Require(b.Info.Size > 0)

	// Get node hostname
	var host string
	err := db.View(func(tx *bolt.Tx) error {
		node, err := NewNodeEntryFromId(tx, b.Info.NodeId)
		if err != nil {
			return err
		}

		host = node.ManageHostName()
		godbc.Check(host != "")
		return nil
	})
	if err != nil {
		return err
	}

	// Create request
	req := &executors.BrickRequest{}
	req.Name = b.Info.Id
	req.Size = b.Info.Size
	req.TpSize = b.TpSize
	req.VgId = b.Info.DeviceId
	req.PoolMetadataSize = b.PoolMetadataSize

	// Create brick on node
	logger.Info("Creating brick %v", b.Info.Id)
	info, err := executor.BrickCreate(host, req)
	if err != nil {
		return err
	}
	b.Info.Path = info.Path
	b.State = BRICK_STATE_ONLINE

	godbc.Ensure(b.Info.Path != "")
	godbc.Ensure(b.State == BRICK_STATE_ONLINE)

	return nil
}
func (a *Asu) Open(filename string) error {
	godbc.Require(filename != "")

	// Set the appropriate flags
	flags := os.O_RDWR | os.O_EXCL
	if a.usedirectio {
		flags |= cache.OSSYNC
	}

	// Open the file
	//fp, err := os.OpenFile(filename, flags, os.ModePerm)
	fp, err := openFile(filename, flags, os.ModePerm)
	if err != nil {
		return err
	}

	// Get storage size
	var size int64
	size, err = fp.Seek(0, os.SEEK_END)
	if err != nil {
		return err
	}
	if size == 0 {
		return fmt.Errorf("Size of %s cannot be zero", filename)
	}

	// Check max size for all fps in this asu
	if a.fpsize == 0 || a.fpsize > size {
		a.fpsize = size
	}

	// Append to ASU
	a.fps = append(a.fps, fp)
	a.len = uint32(a.fpsize/(4*KB)) * uint32(len(a.fps))

	godbc.Ensure(a.len > 0, a.len)
	godbc.Ensure(len(a.fps) > 0)
	godbc.Ensure(a.fpsize > 0)

	return nil
}
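// Sizing note (illustrative, not part of the original source): a.len counts
// 4 KB blocks across all files opened into this ASU, using the smallest file
// size seen so far. For example, opening a 1 GiB file and then a 2 GiB file
// leaves a.fpsize = 1 GiB, so a.len = (1 GiB / 4 KB) * 2 = 524288 blocks.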
// Registers that the handler has completed with an error
func (h *AsyncHttpHandler) CompletedWithError(err error) {
	h.manager.lock.RLock()
	defer h.manager.lock.RUnlock()

	godbc.Require(h.completed == false)

	h.err = err
	h.completed = true

	godbc.Ensure(h.completed == true)
}
func NewCacheMap(blocks, blocksize uint32, pipeline chan *message.Message) *CacheMap {
	godbc.Require(blocks > 0)
	godbc.Require(pipeline != nil)

	cache := &CacheMap{}
	cache.blocks = blocks
	cache.pipeline = pipeline
	cache.blocksize = blocksize
	cache.stats = &cachestats{}
	cache.bda = NewBlockDescriptorArray(cache.blocks)
	cache.addressmap = make(map[uint64]uint32)

	godbc.Ensure(cache.blocks > 0)
	godbc.Ensure(cache.bda != nil)
	godbc.Ensure(cache.addressmap != nil)
	godbc.Ensure(cache.stats != nil)

	return cache
}
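// Minimal usage sketch (assumed, not part of the original source): the block
// count, block size, and channel capacity below are arbitrary example values.
func exampleCacheMapUsage() {
	pipeline := make(chan *message.Message, 32)
	c := NewCacheMap(1024 /* blocks */, 4096 /* blocksize */, pipeline)
	_ = c // ready to track up to 1024 cache blocks of 4096 bytes each
}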
func NewSshExecutor(config *SshConfig) *SshExecutor {
	godbc.Require(config != nil)
	godbc.Require(DEFAULT_MAX_CONNECTIONS > 1)

	s := &SshExecutor{}
	s.throttlemap = make(map[string]chan bool)

	// Set configuration
	if config.PrivateKeyFile == "" {
		s.private_keyfile = os.Getenv("HOME") + "/.ssh/id_rsa"
	} else {
		s.private_keyfile = config.PrivateKeyFile
	}

	if config.User == "" {
		s.user = "******"
	} else {
		s.user = config.User
	}

	s.config = config

	// Show experimental settings
	if s.config.RebalanceOnExpansion {
		logger.Warning("Rebalance on volume expansion has been enabled. This is an EXPERIMENTAL feature")
	}

	// Setup key
	s.exec = ssh.NewSshExecWithKeyFile(logger, s.user, s.private_keyfile)
	if s.exec == nil {
		logger.LogError("Unable to load ssh user and private keyfile")
		return nil
	}

	godbc.Ensure(s != nil)
	godbc.Ensure(s.config == config)
	godbc.Ensure(s.user != "")
	godbc.Ensure(s.private_keyfile != "")

	return s
}
func NewBrickEntry(size, tpsize, poolMetadataSize uint64, deviceid, nodeid string) *BrickEntry {
	godbc.Require(size > 0)
	godbc.Require(tpsize > 0)
	godbc.Require(deviceid != "")
	godbc.Require(nodeid != "")

	entry := &BrickEntry{}
	entry.TpSize = tpsize
	entry.PoolMetadataSize = poolMetadataSize
	entry.Info.Id = utils.GenUUID()
	entry.Info.Size = size
	entry.Info.NodeId = nodeid
	entry.Info.DeviceId = deviceid

	godbc.Ensure(entry.Info.Id != "")
	godbc.Ensure(entry.TpSize == tpsize)
	godbc.Ensure(entry.Info.Size == size)
	godbc.Ensure(entry.Info.NodeId == nodeid)
	godbc.Ensure(entry.Info.DeviceId == deviceid)

	return entry
}
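// Minimal usage sketch (assumed, not part of the original source): the sizes
// and ids below are placeholders, in whatever units the caller already uses
// for bricks and thin pools.
func exampleBrickEntryUsage() {
	brick := NewBrickEntry(
		1048576, // size
		1048576, // tpsize
		16384,   // poolMetadataSize
		"example-device-id",
		"example-node-id")
	_ = brick.Info.Id // a fresh id generated by utils.GenUUID()
}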
func NewSshExecutor(config *SshConfig) (*SshExecutor, error) {
	godbc.Require(config != nil)

	s := &SshExecutor{}
	s.RemoteExecutor = s
	s.Throttlemap = make(map[string]chan bool)

	// Set configuration
	if config.PrivateKeyFile == "" {
		return nil, fmt.Errorf("Missing ssh private key file in configuration")
	}
	s.private_keyfile = config.PrivateKeyFile

	if config.User == "" {
		s.user = "******"
	} else {
		s.user = config.User
	}

	if config.Port == "" {
		s.port = "22"
	} else {
		s.port = config.Port
	}

	if config.Fstab == "" {
		s.Fstab = "/etc/fstab"
	} else {
		s.Fstab = config.Fstab
	}

	// Save the configuration
	s.config = config

	// Show experimental settings
	if s.config.RebalanceOnExpansion {
		logger.Warning("Rebalance on volume expansion has been enabled. This is an EXPERIMENTAL feature")
	}

	// Setup key
	var err error
	s.exec, err = sshNew(logger, s.user, s.private_keyfile)
	if err != nil {
		logger.Err(err)
		return nil, err
	}

	godbc.Ensure(s != nil)
	godbc.Ensure(s.config == config)
	godbc.Ensure(s.user != "")
	godbc.Ensure(s.private_keyfile != "")
	godbc.Ensure(s.port != "")
	godbc.Ensure(s.Fstab != "")

	return s, nil
}
func (b *BrickEntry) Destroy(db *bolt.DB, executor executors.Executor) error {
	godbc.Require(db != nil)
	godbc.Require(b.TpSize > 0)
	godbc.Require(b.Info.Size > 0)

	if b.State != BRICK_STATE_ONLINE {
		return nil
	}

	// Get node hostname
	var host string
	err := db.View(func(tx *bolt.Tx) error {
		node, err := NewNodeEntryFromId(tx, b.Info.NodeId)
		if err != nil {
			return err
		}

		host = node.ManageHostName()
		godbc.Check(host != "")
		return nil
	})
	if err != nil {
		return err
	}

	// Create request
	req := &executors.BrickRequest{}
	req.Name = b.Info.Id
	req.Size = b.Info.Size
	req.TpSize = b.TpSize
	req.VgId = b.Info.DeviceId

	// Delete brick on node
	logger.Info("Deleting brick %v", b.Info.Id)
	err = executor.BrickDestroy(host, req)
	if err != nil {
		b.State = BRICK_STATE_FAILED
		return err
	}

	b.State = BRICK_STATE_DELETED

	godbc.Ensure(b.State == BRICK_STATE_DELETED)

	return nil
}
func (v *VolumeEntry) createVolume(db *bolt.DB,
	executor executors.Executor,
	brick_entries []*BrickEntry) error {

	godbc.Require(db != nil)
	godbc.Require(brick_entries != nil)

	// Create a volume request for executor with
	// the bricks allocated
	vr, host, err := v.createVolumeRequest(db, brick_entries)
	if err != nil {
		return err
	}

	// Create the volume
	_, err = executor.VolumeCreate(host, vr)
	if err != nil {
		return err
	}

	// Get all brick hosts
	stringset := utils.NewStringSet()
	for _, brick := range vr.Bricks {
		stringset.Add(brick.Host)
	}
	hosts := stringset.Strings()
	v.Info.Mount.GlusterFS.Hosts = hosts

	// Save volume information
	v.Info.Mount.GlusterFS.MountPoint = fmt.Sprintf("%v:%v", hosts[0], vr.Name)

	// Set glusterfs mount volfile-servers options
	v.Info.Mount.GlusterFS.Options = make(map[string]string)
	v.Info.Mount.GlusterFS.Options["backup-volfile-servers"] = strings.Join(hosts[1:], ",")

	godbc.Ensure(v.Info.Mount.GlusterFS.MountPoint != "")

	return nil
}
// Function to create new volume command
func NewVolumeCommand(options *Options) *VolumeCommand {
	godbc.Require(options != nil)

	cmd := &VolumeCommand{}
	cmd.name = "volume"
	cmd.options = options
	cmd.cmds = Commands{
		NewVolumeCreateCommand(options),
		NewVolumeInfoCommand(options),
		NewVolumeListCommand(options),
		NewVolumeDeleteCommand(options),
		NewVolumeExpandCommand(options),
	}
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Heketi volume management

USAGE
  heketi-cli [options] volume [commands]

COMMANDS
  create  Creates a new volume
  info    Returns information about a specific volume.
  list    List all volumes
  delete  Delete volume
  expand  Expand volume

Use "heketi-cli volume [command] -help" for more information about a command
`)
	}

	godbc.Ensure(cmd.name == "volume")

	return cmd
}
func NewNodeAddCommand(options *Options) *NodeAddCommand {
	godbc.Require(options != nil)

	cmd := &NodeAddCommand{}
	cmd.name = "add"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.IntVar(&cmd.zone, "zone", -1, "The zone in which the node should reside")
	cmd.flags.StringVar(&cmd.clusterId, "cluster", "", "The cluster in which the node should reside")
	cmd.flags.StringVar(&cmd.managmentHostNames, "management-host-name", "", "Management host name")
	cmd.flags.StringVar(&cmd.storageHostNames, "storage-host-name", "", "Storage host name")

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Add new node to be managed by Heketi

USAGE
  heketi-cli node add [options]

OPTIONS`)

		// Print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  $ heketi-cli node add \
      -zone=3 \
      -cluster=3e098cb4407d7109806bb196d9e8f095 \
      -management-host-name=node1-manage.gluster.lab.com \
      -storage-host-name=node1-storage.gluster.lab.com
`)
	}

	godbc.Ensure(cmd.name == "add")

	return cmd
}
func NewLoadCommand(options *Options) *LoadCommand {
	// Require before we do any work
	godbc.Require(options != nil)

	// Create LoadCommand object
	cmd := &LoadCommand{}
	cmd.name = "load"
	cmd.options = options

	// Create flags
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.StringVar(&cmd.jsonConfigFile, "json", "",
		"\n\tConfiguration containing devices, nodes, and clusters, in"+
			"\n\tJSON format.")

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Add devices to Heketi from a configuration file

USAGE
  heketi-cli load [options]

OPTIONS`)

		// Print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  $ heketi-cli load -json=topology.json
`)
	}

	godbc.Ensure(cmd.name == "load")

	return cmd
}
func NewVolumeCreateCommand(options *Options) *VolumeCreateCommand {
	godbc.Require(options != nil)

	cmd := &VolumeCreateCommand{}
	cmd.name = "create"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.IntVar(&cmd.size, "size", -1,
		"\n\tSize of volume in GB")
	cmd.flags.StringVar(&cmd.volname, "name", "",
		"\n\tOptional: Name of volume. Only set if really necessary")
	cmd.flags.StringVar(&cmd.durability, "durability", "replicate",
		"\n\tOptional: Durability type. Values are:"+
			"\n\t\tnone: No durability. Distributed volume only."+
			"\n\t\treplicate: (Default) Distributed-Replica volume."+
			"\n\t\tdisperse: Distributed-Erasure Coded volume.")
	cmd.flags.IntVar(&cmd.replica, "replica", 3,
		"\n\tReplica value for durability type 'replicate'."+
			"\n\tDefault is 3")
	cmd.flags.IntVar(&cmd.disperse_data, "disperse-data", 4,
		"\n\tOptional: Dispersion value for durability type 'disperse'."+
			"\n\tDefault is 4")
	cmd.flags.IntVar(&cmd.redundancy, "redundancy", 2,
		"\n\tOptional: Redundancy value for durability type 'disperse'."+
			"\n\tDefault is 2")
	cmd.flags.Float64Var(&cmd.snapshot_factor, "snapshot-factor", 1.0,
		"\n\tOptional: Amount of storage to allocate for snapshot support."+
			"\n\tMust be greater than 1.0. For example if a 10TiB volume requires 5TiB of"+
			"\n\tsnapshot storage, then snapshot-factor would be set to 1.5. If the"+
			"\n\tvalue is set to 1, then snapshots will not be enabled for this volume")
	cmd.flags.StringVar(&cmd.clusters, "clusters", "",
		"\n\tOptional: Comma separated list of cluster ids where this volume"+
			"\n\tmust be allocated. If omitted, Heketi will allocate the volume"+
			"\n\ton any of the configured clusters which have the available space."+
			"\n\tProviding a set of clusters will ensure Heketi allocates storage"+
			"\n\tfor this volume only in the clusters specified.")

	// Usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Create a GlusterFS volume

USAGE
  heketi-cli volume create [options]

OPTIONS`)

		// Print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  * Create a 100GB replica 3 volume:
      $ heketi-cli volume create -size=100

  * Create a 100GB replica 3 volume specifying two specific clusters:
      $ heketi-cli volume create -size=100 \
        -clusters=0995098e1284ddccb46c7752d142c832,60d46d518074b13a04ce1022c8c7193c

  * Create a 100GB replica 2 volume with 50GB of snapshot storage:
      $ heketi-cli volume create -size=100 -snapshot-factor=1.5 -replica=2

  * Create a 100GB distributed volume
      $ heketi-cli volume create -size=100 -durability=none

  * Create a 100GB erasure coded 4+2 volume with 25GB snapshot storage:
      $ heketi-cli volume create -size=100 -durability=disperse -snapshot-factor=1.25

  * Create a 100GB erasure coded 8+3 volume with 25GB snapshot storage:
      $ heketi-cli volume create -size=100 -durability=disperse -snapshot-factor=1.25 \
        -disperse-data=8 -redundancy=3
`)
	}

	godbc.Ensure(cmd.name == "create")

	return cmd
}
func NewLog(logfile string, blocksize, blocks_per_segment, bcsize uint32,
	usedirectio bool) (*Log, uint32, error) {

	var err error

	// Initialize Log
	log := &Log{}
	log.stats = &logstats{}
	log.blocksize = blocksize
	log.blocks_per_segment = blocks_per_segment
	log.segmentsize = log.blocks_per_segment * log.blocksize

	// For DirectIO
	if usedirectio {
		log.fp, err = openFile(logfile, OSSYNC|os.O_RDWR|os.O_EXCL, os.ModePerm)
	} else {
		log.fp, err = openFile(logfile, os.O_RDWR|os.O_EXCL, os.ModePerm)
	}
	if err != nil {
		return nil, 0, err
	}

	// Determine cache size
	var size int64
	size, err = log.fp.Seek(0, os.SEEK_END)
	if err != nil {
		return nil, 0, err
	}
	if size == 0 {
		return nil, 0, ErrLogTooSmall
	}
	blocks := size / int64(blocksize)
	if logMaxBlocks <= blocks {
		return nil, 0, ErrLogTooLarge
	}

	// We have to make sure that the number of blocks requested
	// fit into the segments tracked by the log
	log.numsegments = uint32(blocks) / log.blocks_per_segment
	log.size = uint64(log.numsegments) * uint64(log.segmentsize)

	// Maximum number of aligned blocks to segments
	log.blocks = log.numsegments * log.blocks_per_segment

	// Adjust the number of segment buffers
	if log.numsegments < NumberSegmentBuffers {
		log.segmentbuffers = int(log.numsegments)
	} else {
		log.segmentbuffers = NumberSegmentBuffers
	}

	godbc.Check(log.numsegments != 0,
		fmt.Sprintf("bs:%v ssize:%v sbuffers:%v blocks:%v max:%v ns:%v size:%v\n",
			log.blocksize, log.segmentsize, log.segmentbuffers, log.blocks,
			log.blocks_per_segment, log.numsegments, log.size))

	// Incoming message channel
	log.Msgchan = make(chan *message.Message, 32)
	log.quitchan = make(chan struct{})
	log.logreaders = make(chan *message.Message, 32)

	// Segment channel state machine:
	// -> Client writes available segment
	// -> Segment written to storage
	// -> Segment read from storage
	// -> Segment available
	log.chwriting = make(chan *IoSegment, log.segmentbuffers)
	log.chavailable = make(chan *IoSegment, log.segmentbuffers)
	log.chreader = make(chan *IoSegment, log.segmentbuffers)

	// Set up each of the segments
	log.segments = make([]IoSegment, log.segmentbuffers)
	for i := 0; i < log.segmentbuffers; i++ {
		log.segments[i].segmentbuf = make([]byte, log.segmentsize)
		log.segments[i].data = bufferio.NewBufferIO(log.segments[i].segmentbuf)

		// Fill ch available with all the available buffers
		log.chreader <- &log.segments[i]
	}

	godbc.Ensure(log.size != 0)
	godbc.Ensure(log.blocksize == blocksize)
	godbc.Ensure(log.Msgchan != nil)
	godbc.Ensure(log.chwriting != nil)
	godbc.Ensure(log.chavailable != nil)
	godbc.Ensure(log.chreader != nil)
	godbc.Ensure(log.segmentbuffers == len(log.segments))
	godbc.Ensure(log.segmentbuffers == len(log.chreader))
	godbc.Ensure(0 == len(log.chavailable))
	godbc.Ensure(0 == len(log.chwriting))

	// Return the log object to the caller.
	// Also return the maximum number of blocks, which may
	// be different from what the caller asked. The log
	// will make sure that the maximum number of blocks
	// are contained per segment
	return log, log.blocks, nil
}
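// Minimal usage sketch (assumed, not part of the original source): the file
// name and geometry below are example values only. blocksize is in bytes,
// blocks_per_segment groups blocks into segments, and the returned block
// count may be smaller than the file could hold because it is rounded down
// to whole segments.
func exampleLogUsage() {
	cachelog, blocks, err := NewLog("/tmp/cache.log", 4096, 32, 0, false)
	if err != nil {
		panic(err)
	}
	_ = blocks   // number of 4 KB blocks actually tracked by the log
	_ = cachelog // ready to receive messages on cachelog.Msgchan
}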