Example #1
func (d *DeviceEntry) Save(tx *bolt.Tx) error {
	godbc.Require(tx != nil)
	godbc.Require(len(d.Info.Id) > 0)

	return EntrySave(tx, d, d.Info.Id)

}
Example #2
File: log.go Project: Zandrr/heketi
func NewLogger(prefix string, level LogLevel) *Logger {
	godbc.Require(level >= 0, level)
	godbc.Require(level <= LEVEL_DEBUG, level)

	l := &Logger{}

	if level == LEVEL_NOLOG {
		l.level = LEVEL_DEBUG
	} else {
		l.level = level
	}

	l.critlog = log.New(stderr, prefix+" CRITICAL ", log.LstdFlags)
	l.errorlog = log.New(stderr, prefix+" ERROR ", log.LstdFlags)
	l.warninglog = log.New(stdout, prefix+" WARNING ", log.LstdFlags)
	l.infolog = log.New(stdout, prefix+" INFO ", log.LstdFlags)
	l.debuglog = log.New(stdout, prefix+" DEBUG ", log.LstdFlags)

	godbc.Ensure(l.critlog != nil)
	godbc.Ensure(l.errorlog != nil)
	godbc.Ensure(l.warninglog != nil)
	godbc.Ensure(l.infolog != nil)
	godbc.Ensure(l.debuglog != nil)

	return l
}
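A quick usage sketch with a minimal, hypothetical caller, assuming a LEVEL_INFO constant exists alongside the LEVEL_NOLOG and LEVEL_DEBUG seen above, and using the Info/LogError methods that appear in other examples on this page:

// Hypothetical caller (LEVEL_INFO is an assumed constant)
logger := NewLogger("[heketi]", LEVEL_INFO)
logger.Info("server starting on port %v", 8080)
logger.LogError("unable to reach node %v", "node-1")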
Example #3
func (b *Brick) Destroy() error {
	godbc.Require(b.NodeId != "")
	godbc.Require(b.Path != "")
	godbc.Require(b.db != nil)

	// Just for now, it will work with https://github.com/lpabon/vagrant-gfsm
	sshexec := ssh.NewSshExecWithKeyFile("vagrant", "insecure_private_key")
	godbc.Check(sshexec != nil)

	// Get node name
	var nodename string
	err := b.db.Reader(func() error {
		nodename = b.db.nodes[b.NodeId].Info.Name
		return nil
	})
	if err != nil {
		return err
	}

	// Delete brick storage
	commands := []string{
		fmt.Sprintf("sudo umount /gluster/brick_%v", b.Id),
		fmt.Sprintf("sudo lvremove -f vg_%v/tp_%v", b.DeviceId, b.Id),
		fmt.Sprintf("sudo rmdir /gluster/brick_%v", b.Id),
	}

	_, err = sshexec.ConnectAndExec(nodename+":22", commands, nil)
	if err != nil {
		return err
	}

	err = b.FreeStorage()

	return err
}
Example #4
// Checks if the key already exists in the database.  If it does not exist,
// then it will save the key-value pair in the database bucket.
func EntryRegister(tx *bolt.Tx, entry DbEntry, key string, value []byte) ([]byte, error) {
	godbc.Require(tx != nil)
	godbc.Require(len(key) > 0)

	// Access bucket
	b := tx.Bucket([]byte(entry.BucketName()))
	if b == nil {
		err := ErrDbAccess
		logger.Err(err)
		return nil, err
	}

	// Check if key exists already
	val := b.Get([]byte(key))
	if val != nil {
		return val, ErrKeyExists
	}

	// Key does not exist.  We can save it
	err := b.Put([]byte(key), value)
	if err != nil {
		logger.Err(err)
		return nil, err
	}

	return nil, nil
}
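A sketch of the register-or-reuse pattern this enables, assuming the caller already holds a write transaction and that tx, entry, and buffer are in scope; useExisting is a hypothetical helper:

// Claim a cluster-wide key, or detect that another entry owns it already.
val, err := EntryRegister(tx, entry, "my-key", buffer)
if err == ErrKeyExists {
	// Another entry registered the key first; val holds its value.
	return useExisting(val) // hypothetical helper
} else if err != nil {
	return err
}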
Example #5
func (s *SshExecutor) VolumeDestroy(host string, volume string) error {
	godbc.Require(host != "")
	godbc.Require(volume != "")

	// Shutdown volume
	commands := []string{
		// stop gluster volume
		fmt.Sprintf("sudo gluster --mode=script volume stop %v force", volume),
	}

	// Execute command
	_, err := s.RemoteExecutor.RemoteCommandExecute(host, commands, 10)
	if err != nil {
		logger.LogError("Unable to stop volume %v: %v", volume, err)
	}

	// Delete volume
	commands = []string{
		// delete gluster volume
		fmt.Sprintf("sudo gluster --mode=script volume delete %v", volume),
	}

	// Execute command
	_, err = s.RemoteExecutor.RemoteCommandExecute(host, commands, 10)
	if err != nil {
		logger.LogError("Unable to delete volume %v: %v", volume, err)
	}

	return nil
}
Example #6
func EntrySave(tx *bolt.Tx, entry DbEntry, key string) error {
	godbc.Require(tx != nil)
	godbc.Require(len(key) > 0)

	// Access bucket
	b := tx.Bucket([]byte(entry.BucketName()))
	if b == nil {
		err := errors.New("Unable to access db")
		logger.Err(err)
		return err
	}

	// Save device entry to db
	buffer, err := entry.Marshal()
	if err != nil {
		logger.Err(err)
		return err
	}

	// Save data using the id as the key
	err = b.Put([]byte(key), buffer)
	if err != nil {
		logger.Err(err)
		return err
	}

	return nil
}
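For context, a minimal sketch of how a Save method built on EntrySave (as in Example #1) is typically driven; db.Update is the standard bolt read-write transaction, while db and device here are assumptions:

// Persist a device entry inside a single read-write transaction.
err := db.Update(func(tx *bolt.Tx) error {
	return device.Save(tx) // calls EntrySave(tx, device, device.Info.Id)
})
if err != nil {
	logger.Err(err)
}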
Example #7
// ASU1 + ASU2 + ASU3 = X
// ASU1 is 45% of X
// ASU2 is 45% of X
// ASU3 is 10% of X
// Call this function after all ASUs are opened
func (s *SpcInfo) adjustAsuSizes() error {
	godbc.Require(s.asus[ASU1].len != 0)
	godbc.Require(s.asus[ASU2].len != 0)
	godbc.Require(s.asus[ASU3].len != 0)

	// Let's start by making sure ASU1 and ASU2 are equal
	if s.asus[ASU1].len > s.asus[ASU2].len {
		s.asus[ASU1].len = s.asus[ASU2].len
	} else {
		s.asus[ASU2].len = s.asus[ASU1].len
	}

	// Now we need to adjust ASU3
	asu3_correct_size := uint32(float64(2*s.asus[ASU1].len) / 9)
	if asu3_correct_size > s.asus[ASU3].len {
		return fmt.Errorf("\nASU3 size is too small: %v KB.\n"+
			"It must be bigger than 1/9 of 2*ASU1,\n"+
			"or %v KB for this configuration\n",
			s.asus[ASU3].len*4, asu3_correct_size*4)
	} else {
		s.asus[ASU3].len = asu3_correct_size
	}

	godbc.Ensure(s.asus[ASU1].len != 0)
	godbc.Ensure(s.asus[ASU2].len != 0)
	godbc.Ensure(s.asus[ASU3].len != 0, asu3_correct_size)

	return nil
}
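To make the 45/45/10 split concrete: once ASU1 and ASU2 are truncated to the same length, ASU3 must hold the remaining 10% of the total, which works out to 2*ASU1/9. A worked instance of the computation above:

// With ASU1 = ASU2 = 9000 blocks, ASU3 must be at least
// 2*9000/9 = 2000 blocks; 9000 + 9000 + 2000 = 20000 blocks
// gives exactly a 45% / 45% / 10% division.
asu3_correct_size := uint32(float64(2*9000) / 9) // == 2000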
Example #8
func (b *BrickEntry) DestroyCheck(db *bolt.DB, executor executors.Executor) error {
	godbc.Require(db != nil)
	godbc.Require(b.TpSize > 0)
	godbc.Require(b.Info.Size > 0)

	// Get node hostname
	var host string
	err := db.View(func(tx *bolt.Tx) error {
		node, err := NewNodeEntryFromId(tx, b.Info.NodeId)
		if err != nil {
			return err
		}

		host = node.ManageHostName()
		godbc.Check(host != "")
		return nil
	})
	if err != nil {
		return err
	}

	// Create request
	req := &executors.BrickRequest{}
	req.Name = b.Info.Id
	req.Size = b.Info.Size
	req.TpSize = b.TpSize
	req.VgId = b.Info.DeviceId

	// Check brick on node
	return executor.BrickDestroyCheck(host, req)
}
Example #9
func (n *NodeEntry) Save(tx *bolt.Tx) error {
	godbc.Require(tx != nil)
	godbc.Require(len(n.Info.Id) > 0)

	return EntrySave(tx, n, n.Info.Id)

}
Example #10
func (s *SshExecutor) VolumeCreate(host string,
	volume *executors.VolumeRequest) (*executors.VolumeInfo, error) {

	godbc.Require(volume != nil)
	godbc.Require(host != "")
	godbc.Require(len(volume.Bricks) > 0)
	godbc.Require(volume.Name != "")

	// Create volume command
	cmd := fmt.Sprintf("sudo gluster --mode=script volume create %v ", volume.Name)

	// Add durability settings to the volume command
	var (
		inSet     int
		maxPerSet int
	)
	switch volume.Type {
	case executors.DurabilityNone:
		logger.Info("Creating volume %v with no durability", volume.Name)
		inSet = 1
		maxPerSet = 15
	case executors.DurabilityReplica:
		logger.Info("Creating volume %v replica %v", volume.Name, volume.Replica)
		cmd += fmt.Sprintf("replica %v ", volume.Replica)
		inSet = volume.Replica
		maxPerSet = 5
	case executors.DurabilityDispersion:
		logger.Info("Creating volume %v dispersion %v+%v",
			volume.Name, volume.Data, volume.Redundancy)
		cmd += fmt.Sprintf("disperse-data %v redundancy %v ", volume.Data, volume.Redundancy)
		inSet = volume.Data + volume.Redundancy
		maxPerSet = 1
	}

	// Setup volume create command
	// There could be many, many bricks, which could make the command line
	// too long.  Instead, create the volume first, then add each brick set.
	for _, brick := range volume.Bricks[:inSet] {
		cmd += fmt.Sprintf("%v:%v ", brick.Host, brick.Path)
	}

	// Initialize the commands with the create command
	commands := []string{cmd}

	// Now add all the commands to add the bricks
	commands = append(commands, s.createAddBrickCommands(volume, inSet, inSet, maxPerSet)...)

	// Add command to start the volume
	commands = append(commands, fmt.Sprintf("sudo gluster volume start %v", volume.Name))

	// Execute command
	_, err := s.RemoteExecutor.RemoteCommandExecute(host, commands, 10)
	if err != nil {
		s.VolumeDestroy(host, volume.Name)
		return nil, err
	}

	return &executors.VolumeInfo{}, nil
}
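For orientation, a sketch of the request this executor consumes, limited to the fields referenced above; anything beyond those is not shown in the snippet, so the brick setup is left abstract:

// Hypothetical caller: request a replica-3 volume.
req := &executors.VolumeRequest{}
req.Name = "vol_demo"
req.Type = executors.DurabilityReplica
req.Replica = 3
// req.Bricks must also be populated (the contract above requires
// len > 0); each brick supplies the Host and Path used in the
// create command. Its element type is omitted here.
info, err := s.VolumeCreate("node0.example.com", req)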
Example #11
func (l *Log) Start() {
	godbc.Require(l.size != 0)
	godbc.Require(l.Msgchan != nil)
	godbc.Require(l.chwriting != nil)
	godbc.Require(l.chavailable != nil)
	godbc.Require(l.chreader != nil)
	godbc.Require(l.segmentbuffers == len(l.segments))
	godbc.Require(l.segmentbuffers == len(l.chreader))
	godbc.Require(0 == len(l.chavailable))
	godbc.Require(0 == len(l.chwriting))

	// Set up the first available segment
	l.segment = <-l.chreader
	l.segment.offset = int64(l.current) * int64(l.segmentsize)
	if l.wrapped {
		n, err := l.fp.ReadAt(l.segment.segmentbuf, l.segment.offset)
		godbc.Check(n == len(l.segment.segmentbuf), n)
		godbc.Check(err == nil)
	}

	// Now that we are sure everything is clean,
	// we can start the goroutines
	for i := 0; i < 32; i++ {
		l.wg.Add(1)
		go l.logread()
	}
	l.wg.Add(3) // register with the WaitGroup before launching
	go l.server()
	go l.writer()
	go l.reader()
}
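One detail worth calling out in Start: a sync.WaitGroup counter must be incremented before the goroutine it tracks is launched, otherwise a concurrent Wait can return before the work is registered. A minimal illustration of the safe ordering:

var wg sync.WaitGroup
wg.Add(1) // increment first...
go func() {
	defer wg.Done()
	// ...then do the tracked work
}()
wg.Wait()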
Example #12
func (s *SshExecutor) VolumeDestroyCheck(host, volume string) error {
	godbc.Require(host != "")
	godbc.Require(volume != "")

	// Determine if the volume is able to be deleted
	err := s.checkForSnapshots(host, volume)
	if err != nil {
		return err
	}

	return nil
}
Example #13
func (s *SshExecutor) BrickDestroyCheck(host string,
	brick *executors.BrickRequest) error {
	godbc.Require(brick != nil)
	godbc.Require(host != "")
	godbc.Require(brick.Name != "")
	godbc.Require(brick.VgId != "")

	err := s.checkThinPoolUsage(host, brick)
	if err != nil {
		return err
	}

	return nil
}
Example #14
File: peer.go Project: pkoro/heketi
func (s *SshExecutor) PeerDetach(host, detachnode string) error {
	godbc.Require(host != "")
	godbc.Require(detachnode != "")

	// create the commands
	logger.Info("Detaching node %v", detachnode)
	commands := []string{
		fmt.Sprintf("sudo gluster peer detach %v", detachnode),
	}
	_, err := s.sshExec(host, commands, 10)
	if err != nil {
		logger.Err(err)
	}
	return nil
}
Example #15
func (s *SshExecutor) BrickDestroy(host string,
	brick *executors.BrickRequest) error {

	godbc.Require(brick != nil)
	godbc.Require(host != "")
	godbc.Require(brick.Name != "")
	godbc.Require(brick.VgId != "")

	// Try to unmount first
	commands := []string{
		fmt.Sprintf("sudo umount %v", s.brickMountPoint(brick)),
	}
	_, err := s.RemoteExecutor.RemoteCommandExecute(host, commands, 5)
	if err != nil {
		logger.Err(err)
	}

	// Now try to remove the LV
	commands = []string{
		fmt.Sprintf("sudo lvremove -f %v/%v", s.vgName(brick.VgId), s.tpName(brick.Name)),
	}
	_, err = s.RemoteExecutor.RemoteCommandExecute(host, commands, 5)
	if err != nil {
		logger.Err(err)
	}

	// Now cleanup the mount point
	commands = []string{
		fmt.Sprintf("sudo rmdir %v", s.brickMountPoint(brick)),
	}
	_, err = s.RemoteExecutor.RemoteCommandExecute(host, commands, 5)
	if err != nil {
		logger.Err(err)
	}

	// Remove from fstab
	commands = []string{
		fmt.Sprintf("sudo sed -i.save '/%v/d' %v",
			s.brickName(brick.Name),
			s.Fstab),
	}
	_, err = s.RemoteExecutor.RemoteCommandExecute(host, commands, 5)
	if err != nil {
		logger.Err(err)
	}

	return nil
}
Example #16
func NewDeviceAddCommand(options *Options) *DeviceAddCommand {
	godbc.Require(options != nil)

	cmd := &DeviceAddCommand{}
	cmd.name = "add"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.StringVar(&cmd.device, "name", "", "Name of device to add")
	cmd.flags.StringVar(&cmd.nodeId, "node", "", "Id of the node which has this device")

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Add new device to node to be managed by Heketi

USAGE
  heketi-cli device add [options]

OPTIONS`)

		//print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  $ heketi-cli device add \
      -name=/dev/sdb \
      -node=3e098cb4407d7109806bb196d9e8f095
`)
	}

	godbc.Ensure(cmd.name == "add")

	return cmd
}
Example #17
func NewClusterCreateCommand(options *Options) *ClusterCreateCommand {

	godbc.Require(options != nil)

	cmd := &ClusterCreateCommand{}
	cmd.name = "create"
	cmd.options = options

	// Set flags
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Create a cluster

A cluster is used to group a collection of nodes.  It also provides
the caller with the choice to specify clusters where volumes should
be created.

USAGE
  heketi-cli [options] cluster create

EXAMPLE
  $ heketi-cli cluster create

`)
	}

	godbc.Ensure(cmd.name == "create")

	return cmd
}
Example #18
func NewDeviceInfoCommand(options *Options) *DeviceInfoCommand {
	godbc.Require(options != nil)

	cmd := &DeviceInfoCommand{}
	cmd.name = "info"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Retrieves information about the device

USAGE
  heketi-cli [options] device info [id]

  Where "id" is the id of the device

EXAMPLE
  $ heketi-cli device info 886a86a868711bef83001
`)
	}

	godbc.Ensure(cmd.name == "info")

	return cmd
}
Example #19
func NewVolumeExpandCommand(options *Options) *VolumeExpandCommand {

	godbc.Require(options != nil)

	cmd := &VolumeExpandCommand{}
	cmd.name = "expand"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)
	cmd.flags.IntVar(&cmd.expand_size, "expand-size", -1,
		"\n\tAmount in GB to add to the volume")
	cmd.flags.StringVar(&cmd.id, "volume", "",
		"\n\tId of volume to expand")

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Expand a volume

USAGE
  heketi-cli volume expand [options]

OPTIONS`)

		//print flags
		cmd.flags.PrintDefaults()
		fmt.Println(`
EXAMPLES
  * Add 10GB to a volume
      $ heketi-cli volume expand -volume=60d46d518074b13a04ce1022c8c7193c -expand-size=10
`)
	}
	godbc.Ensure(cmd.name == "expand")

	return cmd
}
Example #20
func NewNodeDestroyCommand(options *Options) *NodeDestroyCommand {

	godbc.Require(options != nil)

	cmd := &NodeDestroyCommand{}
	cmd.name = "delete"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Deletes a node from Heketi management

USAGE
  heketi-cli [options] node delete [id]

  Where "id" is the id of the cluster

EXAMPLE
  $ heketi-cli node delete 886a86a868711bef83001
`)
	}

	godbc.Ensure(cmd.name == "delete")

	return cmd
}
Example #21
func (v *VolumeEntry) NewInfoResponse(tx *bolt.Tx) (*VolumeInfoResponse, error) {
	godbc.Require(tx != nil)

	info := NewVolumeInfoResponse()
	info.Id = v.Info.Id
	info.Cluster = v.Info.Cluster
	info.Mount = v.Info.Mount
	info.Snapshot = v.Info.Snapshot
	info.Size = v.Info.Size
	info.Durability = v.Info.Durability
	info.Name = v.Info.Name

	for _, brickid := range v.BricksIds() {
		brick, err := NewBrickEntryFromId(tx, brickid)
		if err != nil {
			return nil, err
		}
		brickinfo, err := brick.NewInfoResponse(tx)
		if err != nil {
			return nil, err
		}

		info.Bricks = append(info.Bricks, *brickinfo)
	}

	return info, nil
}
Example #22
//function to create new node command
func NewNodeCommand(options *Options) *NodeCommand {
	godbc.Require(options != nil)

	cmd := &NodeCommand{}
	cmd.name = "node"
	cmd.options = options
	cmd.cmds = Commands{
		NewNodeAddCommand(options),
		NewNodeInfoCommand(options),
		NewNodeDestroyCommand(options),
	}

	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Heketi node management

USAGE
  heketi-cli [options] node [commands]

COMMANDS
  add     Adds a node for Heketi to manage.
  info    Returns information about a specific node.
  delete  Deletes the node with the specified id.

Use "heketi-cli node [command] -help" for more information about a command
`)
	}

	godbc.Ensure(cmd.name == "node")
	return cmd
}
Example #23
func NewVolumeListCommand(options *Options) *VolumeListCommand {

	godbc.Require(options != nil)

	cmd := &VolumeListCommand{}
	cmd.name = "list"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Lists the volumes managed by Heketi

USAGE
  heketi-cli [options] volume list

EXAMPLE
  $ heketi-cli volume list
`)
	}

	godbc.Ensure(cmd.name == "list")

	return cmd
}
Example #24
func (c *Log) put(msg *message.Message) error {

	iopkt := msg.IoPkt()
	godbc.Require(iopkt.LogBlock < c.blocks)

	// Make sure the block number corresponds to the
	// current segment.  If not, c.sync() will place
	// the next available segment into c.segment
	for !c.inRange(iopkt.LogBlock, c.segment) {
		c.sync()
	}

	// get log offset
	offset := c.offset(iopkt.LogBlock)

	// Write to current buffer
	n, err := c.segment.data.WriteAt(iopkt.Buffer, offset-c.segment.offset)
	godbc.Check(n == len(iopkt.Buffer))
	godbc.Check(err == nil)

	c.segment.written = true

	// We have written the data, and we are done with the message
	msg.Done()

	return err
}
Example #25
func (n *NodeEntry) NewInfoReponse(tx *bolt.Tx) (*NodeInfoResponse, error) {

	godbc.Require(tx != nil)

	info := &NodeInfoResponse{}
	info.ClusterId = n.Info.ClusterId
	info.Hostnames = n.Info.Hostnames
	info.Id = n.Info.Id
	info.Zone = n.Info.Zone
	info.DevicesInfo = make([]DeviceInfoResponse, 0)

	// Add each drive information
	for _, deviceid := range n.Devices {
		device, err := NewDeviceEntryFromId(tx, deviceid)
		if err != nil {
			return nil, err
		}

		driveinfo, err := device.NewInfoResponse(tx)
		if err != nil {
			return nil, err
		}
		info.DevicesInfo = append(info.DevicesInfo, *driveinfo)
	}

	return info, nil
}
Example #26
func NewVolumeDeleteCommand(options *Options) *VolumeDeleteCommand {

	godbc.Require(options != nil)

	cmd := &VolumeDeleteCommand{}
	cmd.name = "delete"
	cmd.options = options
	cmd.flags = flag.NewFlagSet(cmd.name, flag.ExitOnError)

	//usage on -help
	cmd.flags.Usage = func() {
		fmt.Println(`
Deletes the volume

USAGE
  heketi-cli [options] volume delete [id]

  Where "id" is the id of the volume to be deleted

EXAMPLE
  $ heketi-cli volume delete 886a86a868711bef83001

`)
	}

	godbc.Ensure(cmd.flags != nil)
	godbc.Ensure(cmd.name == "delete")

	return cmd
}
Example #27
func (d *DeviceEntry) NewInfoResponse(tx *bolt.Tx) (*api.DeviceInfoResponse, error) {

	godbc.Require(tx != nil)

	info := &api.DeviceInfoResponse{}
	info.Id = d.Info.Id
	info.Name = d.Info.Name
	info.Storage = d.Info.Storage
	info.State = d.State
	info.Bricks = make([]api.BrickInfo, 0)

	// Add each drive information
	for _, id := range d.Bricks {
		brick, err := NewBrickEntryFromId(tx, id)
		if err != nil {
			return nil, err
		}

		brickinfo, err := brick.NewInfoResponse(tx)
		if err != nil {
			return nil, err
		}
		info.Bricks = append(info.Bricks, *brickinfo)
	}

	return info, nil
}
Example #28
func NewINodeRef(vol VolumeID, i INodeID) INodeRef {
	godbc.Require(vol < VolumeMax, vol)
	return INodeRef{
		volume: vol & VolumeMax,
		INode:  i,
	}
}
Example #29
func (b *Brick) Create() error {
	godbc.Require(b.db != nil)
	godbc.Require(b.DeviceId != "")

	// Just for now, it will work with https://github.com/lpabon/vagrant-gfsm
	sshexec := ssh.NewSshExecWithKeyFile("vagrant", "insecure_private_key")
	godbc.Check(sshexec != nil)

	var nodename string
	err := b.db.Reader(func() error {
		nodename = b.db.nodes[b.NodeId].Info.Name
		return nil
	})
	if err != nil {
		return err
	}

	commands := []string{
		fmt.Sprintf("sudo lvcreate -L %vKiB -T vg_%v/tp_%v -V %vKiB -n brick_%v",
			//Thin Pool Size
			uint64(float64(b.Size)*THINP_SNAPSHOT_FACTOR),

			// volume group
			b.DeviceId,

			// ThinP name
			b.Id,

			// Volume size
			b.Size,

			// Logical Vol name
			b.Id),
		fmt.Sprintf("sudo mkfs.xfs -i size=512 /dev/vg_%v/brick_%v", b.DeviceId, b.Id),
		fmt.Sprintf("sudo mkdir /gluster/brick_%v", b.Id),
		fmt.Sprintf("sudo mount /dev/vg_%v/brick_%v /gluster/brick_%v",
			b.DeviceId, b.Id, b.Id),
		fmt.Sprintf("sudo mkdir /gluster/brick_%v/brick", b.Id),
	}

	_, err = sshexec.ConnectAndExec(nodename+":22", commands, nil)
	if err != nil {
		return err
	}

	// Record the brick's mount point on the node
	b.Path = fmt.Sprintf("/gluster/brick_%v", b.Id)
	return nil
}
Example #30
func (b *BrickEntry) Destroy(db *bolt.DB, executor executors.Executor) error {

	godbc.Require(db != nil)
	godbc.Require(b.TpSize > 0)
	godbc.Require(b.Info.Size > 0)

	if b.State != BRICK_STATE_ONLINE {
		return nil
	}

	// Get node hostname
	var host string
	err := db.View(func(tx *bolt.Tx) error {
		node, err := NewNodeEntryFromId(tx, b.Info.NodeId)
		if err != nil {
			return err
		}

		host = node.ManageHostName()
		godbc.Check(host != "")
		return nil
	})
	if err != nil {
		return err
	}

	// Create request
	req := &executors.BrickRequest{}
	req.Name = b.Info.Id
	req.Size = b.Info.Size
	req.TpSize = b.TpSize
	req.VgId = b.Info.DeviceId

	// Delete brick on node
	logger.Info("Deleting brick %v", b.Info.Id)
	err = executor.BrickDestroy(host, req)
	if err != nil {
		b.State = BRICK_STATE_FAILED
		return err
	}

	b.State = BRICK_STATE_DELETED

	godbc.Ensure(b.State == BRICK_STATE_DELETED)
	return nil
}