Example #1
// AddQueue adds a queue by name.
func (m *Metadata) AddQueue(queue string) error {

	mutex := zk.NewLock(m.zkClient.Conn, m.operationPath, zk.WorldACL(zk.PermAll))
	if err := mutex.Lock(); err != nil {
		return errors.Trace(err)
	}
	defer mutex.Unlock()

	if err := m.RefreshMetadata(); err != nil {
		return errors.Trace(err)
	}

	if exist := m.ExistQueue(queue); exist {
		return errors.AlreadyExistsf("CreateQueue queue:%s", queue)
	}

	if err := m.manager.CreateTopic(queue, int32(m.config.KafkaReplications),
		int32(m.config.KafkaPartitions)); err != nil {
		return errors.Trace(err)
	}

	path := m.buildQueuePath(queue)
	data := ""
	log.Debugf("add queue, zk path:%s, data:%s", path, data)

	if err := m.zkClient.CreateRec(path, data, 0); err != nil {
		return errors.Trace(err)
	}
	return nil
}
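Examples #2, #4, and #16 below repeat the same prologue as AddQueue: take the global operation lock, refresh the metadata, then mutate. A minimal refactoring sketch of that shared pattern, assuming only the *Metadata fields already used above (the helper name withOperationLock is hypothetical):

func (m *Metadata) withOperationLock(fn func() error) error {
	// Serialize all metadata operations behind a single ZooKeeper lock.
	mutex := zk.NewLock(m.zkClient.Conn, m.operationPath, zk.WorldACL(zk.PermAll))
	if err := mutex.Lock(); err != nil {
		return errors.Trace(err)
	}
	defer mutex.Unlock()

	// Re-read state under the lock so the mutation sees current metadata.
	if err := m.RefreshMetadata(); err != nil {
		return errors.Trace(err)
	}
	return fn()
}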
Example #2
// DelQueue deletes a queue by name.
func (m *Metadata) DelQueue(queue string) error {

	mutex := zk.NewLock(m.zkClient.Conn, m.operationPath, zk.WorldACL(zk.PermAll))
	if err := mutex.Lock(); err != nil {
		return errors.Trace(err)
	}
	defer mutex.Unlock()

	if err := m.RefreshMetadata(); err != nil {
		return errors.Trace(err)
	}

	can, err := m.canDeleteQueue(queue)
	if err != nil {
		return errors.Trace(err)
	}
	if !can {
		return errors.NotValidf("DeleteQueue queue:%s has one or more groups", queue)
	}

	path := m.buildQueuePath(queue)
	log.Debugf("del queue, zk path:%s", path)
	if err := m.zkClient.DeleteRec(path); err != nil {
		return errors.Trace(err)
	}
	delete(m.queueConfigs, queue)
	if err := m.manager.DeleteTopic(queue); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example #3
func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
	// Wait to acquire the lock in ZK
	lock := zk.NewLock(i.in.client, lockpath, i.in.acl)
	err := lock.Lock()
	if err != nil {
		failLock <- err
		return
	}
	// Set node value
	data := []byte(i.value)
	err = i.in.ensurePath(lockpath, data)
	if err != nil {
		failLock <- err
		lock.Unlock()
		return
	}
	i.zkLock = lock

	// Signal that lock is held
	close(didLock)

	// Handle an early abort
	release := <-releaseCh
	if release {
		lock.Unlock()
	}
}
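The three-channel contract above (didLock closes on success, failLock reports errors, releaseCh triggers an early abort) lets a caller bound the wait. A hedged caller sketch; lockWithTimeout is hypothetical and assumes only that same contract, plus the standard errors and time packages:

func lockWithTimeout(attempt func(didLock chan struct{}, failLock chan error, releaseCh chan bool), timeout time.Duration) error {
	didLock := make(chan struct{})
	failLock := make(chan error, 1)
	releaseCh := make(chan bool, 1) // buffered: an abort must never block

	go attempt(didLock, failLock, releaseCh)

	select {
	case <-didLock:
		return nil // lock is held; the caller proceeds as leader
	case err := <-failLock:
		return err
	case <-time.After(timeout):
		releaseCh <- true // if acquisition completes later, release immediately
		return errors.New("timed out waiting for ZooKeeper lock")
	}
}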
Example #4
func (m *Metadata) UpdateGroupConfig(group string, queue string,
	write bool, read bool, url string, ips []string) error {

	mutex := zk.NewLock(m.zkClient.Conn, m.operationPath, zk.WorldACL(zk.PermAll))
	if err := mutex.Lock(); err != nil {
		return errors.Trace(err)
	}
	defer mutex.Unlock()

	if err := m.RefreshMetadata(); err != nil {
		return errors.Trace(err)
	}

	if exist := m.ExistGroup(queue, group); !exist {
		return errors.NotFoundf("queue : %q, group: %q", queue, group)
	}

	path := m.buildConfigPath(group, queue)
	groupConfig := GroupConfig{
		Group: group,
		Queue: queue,
		Write: write,
		Read:  read,
		Url:   url,
		Ips:   ips,
	}
	data := groupConfig.String()
	log.Debugf("update group config, zk path:%s, data:%s", path, data)
	if err := m.zkClient.Set(path, data); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example #5
func (c *Candidate) campaign() {
	defer close(c.electedChn)
	defer close(c.errChn)

	for {

		c.setLeader(false)

		if c.zkLock == nil {
			c.zkLock = zk.NewLock(c.store.conn, c.key, zk.WorldACL(zk.PermAll))
		}

		err := c.zkLock.Lock()
		if err != nil {
			log.Info("Get Lock error :", err)
			c.errChn <- err
			return
		}

		// Good: we are now the leader.
		log.Info("acquired the leader lock")
		c.setLeader(true)

		select {
		case <-c.resignChn:
			c.zkLock.Unlock()
		case <-c.stopChn:
			if c.IsLeader() {
				// Release the lock on shutdown; calling Lock again here
				// while already holding it would error out.
				c.zkLock.Unlock()
			}
			return
		}
	}

}
Example #6
func NewStore(s *Server) *Storage {
	conn, connEvent, err := zk.Connect([]string{"127.0.0.1"}, defaultLeaderTime)

	if err != nil {
		log.Error("failed to create ZooKeeper connection:", err)
		panic(err)
	}

	log.Info("ZooKeeper connection established")

	execIdLock := zk.NewLock(conn, "/swiss/ids/execid", WorldACLPermAll)
	jobIdLock := zk.NewLock(conn, "/swiss/ids/jobid", WorldACLPermAll)
	store := &Storage{conn: conn, connEvent: connEvent, execIdLock: execIdLock, jobIdLock: jobIdLock, server: s}
	store.initBasePath()
	return store
}
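WorldACLPermAll is not defined in this example; presumably it is a package-level alias for the open ACL the other examples build inline. A sketch on that assumption:

// Assumed definition of the WorldACLPermAll variable used above.
var WorldACLPermAll = zk.WorldACL(zk.PermAll)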
Example #7
func unbinding(portUuid, hostUuid string) error {
	log.Println("unbinding port " + portUuid)

	conn, _, err := connect()
	if err != nil {
		return err
	}
	defer conn.Close()

	lock := zk.NewLock(conn, GetLockPath(lockKey), zk.WorldACL(zk.PermAll))
	if err = lock.Lock(); err != nil {
		return err
	}
	defer lock.Unlock()

	portPath := GetPortPath(portUuid)
	var data []byte
	if data, _, err = conn.Get(portPath); err != nil {
		fmt.Fprintf(os.Stderr, "Error on getting  port: %s\n", err.Error())
		return err
	}
	port := &WrappedPort{}
	if err = json.Unmarshal(data, port); err != nil {
		fmt.Fprintf(os.Stderr, "Error on deserializing port: %s\n", err.Error())
		return err
	}

	if port.Data.HostId != hostUuid {
		return errors.New("The given host ID didn't match with one in NSDB")
	}
	port.Data.InterfaceName = ""

	updatedPort, err := json.Marshal(port)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error on serializing port: %s\n", err.Error())
		return err
	}

	if _, err = conn.Set(portPath, updatedPort, -1); err != nil {
		return err
	}

	vrnMappingPath := GetVrnMappingPath(hostUuid, portUuid)
	var exists bool
	if exists, _, err = conn.Exists(vrnMappingPath); err != nil {
		fmt.Fprintf(os.Stderr, "Error on examining vrnMapping %s\n", err.Error())
		return err
	}
	if exists {
		if err = conn.Delete(vrnMappingPath, -1); err != nil {
			fmt.Fprintf(os.Stderr, "Error on deleging vrnMapping %s\n", err.Error())
			return err
		}
	}
	log.Println("Succeded to unbind the port")

	return nil
}
Example #8
func acquireLock(zookeepers []string, sessionTimeout time.Duration, lockPath string) (*lock, error) {
	conn, _, err := zk.Connect(zookeepers, sessionTimeout)
	if err != nil {
		return nil, err
	}

	zkLock := zk.NewLock(conn, lockPath, zk.WorldACL(zk.PermAll))
	if err = zkLock.Lock(); err != nil {
		conn.Close() // avoid leaking the ZooKeeper connection on failure
		return nil, err
	}
	return &lock{zkLock}, nil
}
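The returned value wraps the *zk.Lock in a one-field struct, so the caller needs some way to release it. A hedged sketch (the struct layout matches the composite literal above; the release method is hypothetical):

type lock struct {
	zkLock *zk.Lock
}

// release hands the mutex back. Note that acquireLock leaves the connection
// open after a successful lock; a fuller wrapper would also keep the *zk.Conn
// and close it here.
func (l *lock) release() error {
	return l.zkLock.Unlock()
}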
Example #9
// NewLock returns a handle to a lock struct which can be used to acquire and
// release the mutex.
func (s *Zookeeper) NewLock(key string, options *LockOptions) (Locker, error) {
	value := []byte("")

	// Apply options
	if options != nil {
		if options.Value != nil {
			value = options.Value
		}
	}

	return &zookeeperLock{
		client: s.client,
		key:    normalize(key),
		value:  value,
		lock:   zk.NewLock(s.client, normalize(key), zk.WorldACL(zk.PermAll)),
	}, nil
}
Example #10
func (c *Client) Select(queue, msgID string) (string, error) {
	if err := c.assertConnected(); err != nil {
		return "", err
	}

	lock := zk.NewLock(c.zk, zRoot+zLock, acl)
	if err := lock.Lock(); err != nil {
		return "", err
	}
	defer lock.Unlock()

	txnZNode := fmt.Sprintf("%s%s/%s", zRoot, zTxn, msgID)
	if _, err := c.zk.Sync(txnZNode); err != nil {
		return "", err
	}
	exists, _, err := c.zk.Exists(txnZNode)
	if err != nil {
		return "", err
	}

	defer c.mark(msgID)
	if exists {
		return c.getSelected(msgID)
	}

	if err := c.createTxn(queue, msgID); err != nil {
		return "", err
	}

	selected, err := c.selectCandidate(queue)
	if err != nil {
		return "", err
	}

	if _, err := c.zk.Create(fmt.Sprintf("%s%s/%s", txnZNode, zSelected, selected), nil, 0, acl); err != nil {
		return "", err
	}

	clientAndID := strings.Split(selected, "-")
	if clientAndID[0] != c.clientID {
		return "", ErrNoSelection
	}
	return clientAndID[1], nil
}
Example #11
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
	value := []byte("")

	// Apply options
	if options != nil {
		if options.Value != nil {
			value = options.Value
		}
	}

	lock = &zookeeperLock{
		client: s.client,
		key:    store.Normalize(key),
		value:  value,
		lock:   zk.NewLock(s.client, store.Normalize(key), zk.WorldACL(zk.PermAll)),
	}

	return lock, err
}
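A usage sketch for the returned store.Locker, assuming libkv's Locker contract (Lock takes an optional stop channel and returns a channel that signals loss of the lock); withZkLock and the key are hypothetical:

func withZkLock(s *Zookeeper, fn func() error) error {
	locker, err := s.NewLock("path/to/key", &store.LockOptions{Value: []byte("owner-1")})
	if err != nil {
		return err
	}
	lostCh, err := locker.Lock(nil) // blocks until the lock is held
	if err != nil {
		return err
	}
	defer locker.Unlock()

	select {
	case <-lostCh: // closed if the session expires and the lock is lost
		return errors.New("lost ZooKeeper lock")
	default:
		return fn() // critical section
	}
}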
Example #12
func NewMetadataManager(frameworkID string, zookeepers []string) *MetadataManager {
	conn, _, err := zk.Connect(zookeepers, time.Second)
	if err != nil {
		log.Panic(err)
	}
	bns := baseNamespace{}
	ns := makeSubSpace(makeSubSpace(makeSubSpace(bns, "riak"), "frameworks"), frameworkID)
	lockPath := makeSubSpace(ns, "lock")
	zkLock := zk.NewLock(conn, lockPath.GetZKPath(), zk.WorldACL(zk.PermAll))

	manager := &MetadataManager{
		lock:        &sync.Mutex{},
		frameworkID: frameworkID,
		zkConn:      conn,
		namespace:   ns,
		zkLock:      *zkLock,
	}

	manager.setup()
	return manager
}
Example #13
func removeId(appId, taskId string) {
	zkPath := filepath.Join(zkBase, appId)
	conn := connect()
	defer conn.Close()
	exists, _, err := conn.Exists(zkPath)
	check(err)
	if !exists {
		return
	}
	lock := zk.NewLock(conn, zkPath, acl)
	check(lock.Lock())
	defer lock.Unlock()
	contents, stat, err := conn.Get(zkPath)
	check(err)
	if len(contents) == 0 {
		return
	}
	newContents := strings.Replace(string(contents), taskId, "NA", 1)
	_, err = conn.Set(zkPath, []byte(newContents), stat.Version)
	check(err)
	return
}
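check is not shown in these two helpers; presumably it is a fail-fast wrapper along these lines:

// Assumed definition of the check helper used by removeId and updateId.
func check(err error) {
	if err != nil {
		log.Fatal(err)
	}
}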
Example #14
func (c *Client) Subscribe(id, queue string) error {
	if err := c.assertConnected(); err != nil {
		return err
	}

	if _, err := c.createIfNotExists(fmt.Sprintf("%s%s/%s", zRoot, zQueue, queue), nil, 0, acl); err != nil {
		return err
	}

	lock := zk.NewLock(c.zk, zRoot+zLock, acl)
	if err := lock.Lock(); err != nil {
		return err
	}
	defer lock.Unlock()

	zNode := fmt.Sprintf("%s%s/%s/%s-%s", zRoot, zQueue, queue, c.clientID, id)
	if _, err := c.createIfNotExists(zNode, nil, zk.FlagEphemeral, acl); err != nil {
		return err
	}

	return nil
}
Example #15
func main() {
	zks := flag.String("zk", "127.0.0.1:2181,127.0.0.1:2182", "zookeeper servers")
	root := flag.String("root", "mycluster", "zookeeper root path")
	numOfNodes := flag.Uint("nsize", 128, "cluster virtual node size, default 128")
	flag.Uint64Var(&nodeDowntime, "downtime", 30, "erase cluster node after downtime(secs)")
	flag.Uint64Var(&nodeUptime, "uptime", 10, "add cluster node after uptime(secs)")
	flag.Parse()
	defer glog.Flush()

	zkAcls = zk.WorldACL(zk.PermAll)

	zkServers := strings.Split(*zks, ",")
	c, _, err := zk.Connect(zkServers, time.Second*10)
	if nil != err {
		glog.Errorf("Connect %v failed with reason:%v", zkServers, err)
		return
	}
	rootZkPath = "/" + *root
	serversZkPath = rootZkPath + "/servers"
	topoNodesPath = rootZkPath + "/topo/nodes"
	topoPartitionsPath = rootZkPath + "/topo/partitions"
	c.Create(rootZkPath, nil, 0, zkAcls)

	lock := zk.NewLock(c, rootZkPath+"/lock", zkAcls)
	err = lock.Lock()
	if nil != err {
		glog.Errorf("Lock failed with reason:%v", err)
		return
	}
	defer lock.Unlock()
	c.Create(serversZkPath, nil, 0, zkAcls)
	c.Create(rootZkPath+"/topo", nil, 0, zkAcls)
	c.Create(topoNodesPath, nil, 0, zkAcls)
	c.Create(topoPartitionsPath, nil, 0, zkAcls)
	zconn = c
	consitent = ssf.NewConsistent(int(*numOfNodes))
	watchdogProcess()
}
Example #16
func (m *Metadata) DeleteGroupConfig(group string, queue string) error {

	mutex := zk.NewLock(m.zkClient.Conn, m.operationPath, zk.WorldACL(zk.PermAll))
	if err := mutex.Lock(); err != nil {
		return errors.Trace(err)
	}
	defer mutex.Unlock()

	if err := m.RefreshMetadata(); err != nil {
		return errors.Trace(err)
	}

	if exist := m.ExistGroup(queue, group); !exist {
		return errors.NotFoundf("queue : %q, group : %q", queue, group)
	}

	path := m.buildConfigPath(group, queue)
	log.Debugf("delete group config, zk path:%s", path)
	if err := m.zkClient.DeleteRec(path); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example #17
func updateId(appId, taskId string) {
	replaced := false
	newContents := ""
	zkPath := filepath.Join(zkBase, appId)
	conn := connect()
	defer conn.Close()
	checkPath(conn, zkPath)
	lock := zk.NewLock(conn, zkPath, acl)
	check(lock.Lock())
	defer lock.Unlock()
	contents, stat, err := conn.Get(zkPath)
	check(err)
	if len(contents) == 0 {
		_, err := conn.Set(zkPath, []byte(taskId), stat.Version)
		check(err)
		return
	}
	log.Print("Original contents: ", string(contents))
	sContents := removeInactiveIds(string(contents), appId)
	ids := strings.Split(sContents, ",")
	for i, id := range ids {
		if id == "NA" {
			ids[i] = taskId
			replaced = true
			break
		}
	}
	if replaced {
		newContents = strings.Join(ids, ",")
	} else {
		newContents = strings.Join([]string{sContents, taskId}, ",")
	}
	log.Print("Contents: ", sContents)
	log.Print("New Contents: ", newContents)
	stat, err = conn.Set(zkPath, []byte(newContents), stat.Version)
	check(err)
	return
}
Example #18
func (node *ZkNode) GetLock() *zk.Lock {
	zkLock := zk.NewLock(node.mgr.zkConn, node.ns.GetZKPath(), zk.WorldACL(zk.PermAll))
	return zkLock
}
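A usage sketch for GetLock; node stands in for an existing *ZkNode, and Lock/Unlock are the standard *zk.Lock calls:

// Hypothetical caller guarding an update to the node's subtree.
lock := node.GetLock()
if err := lock.Lock(); err != nil {
	return err
}
defer lock.Unlock()
// ... read-modify-write under the node's namespace ...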
Example #19
// Why two mains? Go's main() cannot return an exit code, and calling os.Exit
// skips deferred functions. So we do everything in a separate function that
// can return an exit code to main while still running its defers.
func burrowMain() int {
	// The only command line arg is the config file
	var cfgfile = flag.String("config", "burrow.cfg", "Full path to the configuration file")
	flag.Parse()

	// Load and validate the configuration
	fmt.Fprintln(os.Stderr, "Reading configuration from", *cfgfile)
	appContext := &ApplicationContext{Config: ReadConfig(*cfgfile)}
	if err := ValidateConfig(appContext); err != nil {
		log.Criticalf("Cannot validate configuration: %v", err)
		return 1
	}

	// Create the PID file to lock out other processes. Defer removal so it's the last thing to go
	createPidFile(appContext.Config.General.LogDir + "/" + appContext.Config.General.PIDFile)
	defer removePidFile(appContext.Config.General.LogDir + "/" + appContext.Config.General.PIDFile)

	// Set up stderr/stdout to go to a separate log file
	openOutLog(appContext.Config.General.LogDir + "/burrow.out")
	fmt.Println("Started Burrow at", time.Now().Format("January 2, 2006 at 3:04pm (MST)"))

	// If a logging config is specified, replace the existing loggers
	if appContext.Config.General.LogConfig != "" {
		NewLogger(appContext.Config.General.LogConfig)
	}

	// Start a local Zookeeper client (used for application locks)
	log.Info("Starting Zookeeper client")
	zkconn, _, err := zk.Connect(appContext.Config.Zookeeper.Hosts, time.Duration(appContext.Config.Zookeeper.Timeout)*time.Second)
	if err != nil {
		log.Criticalf("Cannot start Zookeeper client: %v", err)
		return 1
	}
	defer zkconn.Close()

	// Start an offsets storage module
	log.Info("Starting Offsets Storage module")
	appContext.Storage, err = NewOffsetStorage(appContext)
	if err != nil {
		log.Criticalf("Cannot configure offsets storage module: %v", err)
		return 1
	}
	defer appContext.Storage.Stop()

	// Start an HTTP server
	log.Info("Starting HTTP server")
	appContext.Server, err = NewHttpServer(appContext)
	if err != nil {
		log.Criticalf("Cannot start HTTP server: %v", err)
		return 1
	}
	defer appContext.Server.Stop()

	// Start Kafka clients and Zookeepers for each cluster
	appContext.Clusters = make(map[string]*KafkaCluster, len(appContext.Config.Kafka))
	for cluster := range appContext.Config.Kafka {
		log.Infof("Starting Zookeeper client for cluster %s", cluster)
		zkconn, err := NewZookeeperClient(appContext, cluster)
		if err != nil {
			log.Criticalf("Cannot start Zookeeper client for cluster %s: %v", cluster, err)
			return 1
		}
		defer zkconn.Stop()

		log.Infof("Starting Kafka client for cluster %s", cluster)
		client, err := NewKafkaClient(appContext, cluster)
		if err != nil {
			log.Criticalf("Cannot start Kafka client for cluster %s: %v", cluster, err)
			return 1
		}
		defer client.Stop()

		appContext.Clusters[cluster] = &KafkaCluster{Client: client, Zookeeper: zkconn}
	}

	// Set up the Zookeeper lock for notification
	appContext.NotifierLock = zk.NewLock(zkconn, appContext.Config.Zookeeper.LockPath, zk.WorldACL(zk.PermAll))

	// Load the notifiers, but do not start them
	err = loadNotifiers(appContext)
	if err != nil {
		// Error was already logged
		return 1
	}

	// Notifiers are started in a goroutine if we get the ZK lock
	go startNotifiers(appContext)
	defer stopNotifiers(appContext)

	// Register signal handlers for exiting
	exitChannel := make(chan os.Signal, 1)
	signal.Notify(exitChannel, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGSTOP, syscall.SIGTERM)

	// Wait until we're told to exit
	<-exitChannel
	log.Info("Shutdown triggered")
	return 0
}
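The comment above notes that notifiers start only once the ZK lock is held; a hedged sketch of that goroutine's shape (Burrow's actual startNotifiers is not shown here):

func startNotifiers(app *ApplicationContext) {
	// Hold the shared NotifierLock so only one Burrow instance at a time
	// emits notifications.
	if err := app.NotifierLock.Lock(); err != nil {
		log.Criticalf("Cannot acquire ZK notifier lock: %v", err)
		return
	}
	// ... start the loaded notifiers now that this instance holds the lock ...
}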
Example #20
func binding(portUuid, hostUuid, interfaceName string) error {
	log.Println("binding port " + portUuid + " to " + interfaceName)

	conn, _, err := connect()
	if err != nil {
		return err
	}
	defer conn.Close()

	lock := zk.NewLock(conn, GetLockPath(lockKey), zk.WorldACL(zk.PermAll))
	if err = lock.Lock(); err != nil {
		return err
	}
	defer lock.Unlock()

	portPath := GetPortPath(portUuid)
	var data []byte
	if data, _, err = conn.Get(portPath); err != nil {
		fmt.Fprintf(os.Stderr, "Error on getting port: %s\n", err.Error())
		return err
	}
	port := &WrappedPort{}
	if err = json.Unmarshal(data, port); err != nil {
		fmt.Fprintf(os.Stderr, "Error on deserializing port: %s\n", err.Error())
		return err
	}

	port.Data.HostId = hostUuid
	port.Data.InterfaceName = interfaceName

	updatedPort, err := json.Marshal(port)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error on serializing port: %s\n", err.Error())
		return err
	}

	if _, err = conn.Set(portPath, updatedPort, -1); err != nil {
		fmt.Fprintf(os.Stderr, "Error on setting port: %s\n", err.Error())
		return err
	}

	vrnMappingPath := GetVrnMappingPath(hostUuid, portUuid)
	var exists bool
	if exists, _, err = conn.Exists(vrnMappingPath); err != nil {
		fmt.Fprintf(os.Stderr, "Error on examining vrnMapping %s\n", err.Error())
		return err
	}
	var vrnMappingData []byte
	vrnMapping := &WrappedVrnMapping{}
	if exists {
		if vrnMappingData, _, err = conn.Get(vrnMappingPath); err != nil {
			fmt.Fprintf(os.Stderr, "Error on getting vrnMapping %s\n", err.Error())
			return err
		}
		log.Println(fmt.Sprintf("Got vrnMapping data: %s", vrnMappingData))
		if err = json.Unmarshal(vrnMappingData, vrnMapping); err != nil {
			fmt.Fprintf(os.Stderr, "Error on deserializing vrnMapping:  %s\n", err.Error())
			return err
		}
	} else {
		data := &VrnMapping{}
		data.VirtualPortId = portUuid
		data.LocalDeviceName = interfaceName
		vrnMapping.Data = data
		vrnMapping.Version = port.Version
	}
	updatedVrnMapping, err := json.Marshal(vrnMapping)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error on deserializing vrnMapping: %s\n", err.Error())
		return err
	}
	if exists {
		if _, err = conn.Set(vrnMappingPath, updatedVrnMapping, -1); err != nil {
			fmt.Fprintf(os.Stderr, "Error on setting vrnMapping: %s\n", err.Error())
			return err
		}
	} else {
		if _, err = conn.Create(vrnMappingPath, updatedVrnMapping, 0, zk.WorldACL(zk.PermAll)); err != nil {
			fmt.Fprintf(os.Stderr, "Error on creating a new vrnMapping:  %s\n", err.Error())
			return err
		}
	}

	log.Println("Succeded to bind the port")

	return nil
}
Example #21
// GetLocker returns a distributed lock for the given path.
func (self *ZkHelper) GetLocker(path string) *zk.Lock {
	return zk.NewLock(self.GetZkConn(), path, zk.WorldACL(zk.PermAll))
}
Example #22
func (zkClient *ZookeeperClient) NewLock(path string) *zk.Lock {
	// Pass through to the connection NewLock, without ACLs
	return zk.NewLock(zkClient.conn, path, make([]zk.ACL, 0))
}
Example #23
func (mgr *MetadataManager) CreateConnection() {
	if mgr.zkConn != nil {
		// Return if the connection is good already
		if mgr.zkConn.State() == zk.StateConnected ||
			mgr.zkConn.State() == zk.StateHasSession ||
			mgr.zkConn.State() == zk.StateConnecting {
			return
		}
		// Close the connection because it probably expired
		mgr.zkConn.Close()
	}

	conn, _, err := zk.Connect(mgr.zookeepers, time.Second)
	if err != nil {
		// conn is nil when Connect fails, so there is nothing to close.
		log.Panic(err)
	}
	bns := baseNamespace{}
	ns := makeSubSpace(makeSubSpace(makeSubSpace(bns, "riak"), "frameworks"), mgr.frameworkID)
	lockPath := makeSubSpace(ns, "lock")
	zkLock := zk.NewLock(conn, lockPath.GetZKPath(), zk.WorldACL(zk.PermAll))

	mgr.zkConn = conn
	mgr.namespace = ns
	mgr.zkLock = *zkLock
}