Code example #1
File: util.go Project: prataprc/indexing
func ValidateBucket(cluster, bucket string, uuids []string) bool {

	var cinfo *common.ClusterInfoCache
	url, err := common.ClusterAuthUrl(cluster)
	if err == nil {
		cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		logging.Fatalf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	cinfo.Lock()
	defer cinfo.Unlock()

	if err := cinfo.Fetch(); err != nil {
		logging.Errorf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	if nids, err := cinfo.GetNodesByBucket(bucket); err == nil && len(nids) != 0 {
		// verify UUID
		currentUUID := cinfo.GetBucketUUID(bucket)
		for _, uuid := range uuids {
			if uuid != currentUUID {
				return false
			}
		}
		return true
	} else {
		logging.Fatalf("Indexer::Error Fetching Bucket Info: %v Nids: %v", err, nids)
		return false
	}

}
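The UUID check is the point of this helper: the caller passes the bucket UUID(s) recorded when its indexes were created, and ValidateBucket returns false if any of them differs from the bucket's current UUID, i.e. the bucket has since been dropped and recreated under the same name. A minimal caller sketch follows; clusterAddr and bucketUUIDs are hypothetical names used only for illustration.

// Hypothetical caller (clusterAddr and bucketUUIDs are illustrative, not
// part of the original code): skip index maintenance when the bucket no
// longer matches the UUIDs the indexes were built against.
if !ValidateBucket(clusterAddr, "default", bucketUUIDs) {
	logging.Errorf("bucket %v was deleted or recreated; skipping", "default")
	return
}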
Code example #2
File: cbq_client.go Project: jchris/indexing
// newCbqClient creates a cbq-cluster client.
func newCbqClient(cluster string) (*cbqClient, error) {
	clusterUrl, err := common.ClusterAuthUrl(cluster)
	if err != nil {
		return nil, err
	}
	cinfo, err := common.NewClusterInfoCache(clusterUrl, "default" /*pooln*/)
	if err != nil {
		return nil, err
	}
	if err = cinfo.Fetch(); err != nil {
		return nil, err
	}
	nodes := cinfo.GetNodesByServiceType("indexAdmin")
	if l := len(nodes); l < 1 {
		err := fmt.Errorf("cinfo.GetNodesByServiceType() returns %d nodes", l)
		return nil, err
	}
	adminport, err := cinfo.GetServiceAddress(nodes[0], "indexAdmin")
	if err != nil {
		return nil, err
	}
	queryport, err := cinfo.GetServiceAddress(nodes[0], "indexScan")
	if err != nil {
		return nil, err
	}

	b := &cbqClient{
		adminport: "http://" + adminport,
		queryport: queryport,
		httpc:     http.DefaultClient,
	}
	b.logPrefix = fmt.Sprintf("[cbqClient %v]", b.adminport)
	return b, nil
}
Code example #3
File: kv_sender.go Project: jchris/indexing
func NewKVSender(supvCmdch MsgChannel, supvRespch MsgChannel,
	config c.Config) (KVSender, Message) {

	var cinfo *c.ClusterInfoCache
	url, err := c.ClusterAuthUrl(config["clusterAddr"].String())
	if err == nil {
		cinfo, err = c.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		panic("Unable to initialize cluster_info - " + err.Error())
	}
	//Init the kvSender struct
	k := &kvSender{
		supvCmdch:  supvCmdch,
		supvRespch: supvRespch,
		cInfoCache: cinfo,
		config:     config,
	}

	k.cInfoCache.SetMaxRetries(MAX_CLUSTER_FETCH_RETRY)
	k.cInfoCache.SetLogPrefix("KVSender: ")
	//start kvsender loop which listens to commands from its supervisor
	go k.run()

	return k, &MsgSuccess{}

}
Code example #4
File: datapath.go Project: jchris/indexing
func getProjectorAdminport(cluster, pooln string) string {
	cinfo, err := c.NewClusterInfoCache(c.ClusterUrl(cluster), pooln)
	if err != nil {
		log.Fatal(err)
	}
	if err := cinfo.Fetch(); err != nil {
		log.Fatal(err)
	}
	nodeID := cinfo.GetCurrentNode()
	adminport, err := cinfo.GetServiceAddress(nodeID, "projector")
	if err != nil {
		log.Fatal(err)
	}
	return adminport
}
Code example #5
File: secondary_index.go Project: prataprc/indexing
// get cluster info and refresh ns-server data.
func getClusterInfo(
	cluster string, pooln string) (*c.ClusterInfoCache, errors.Error) {

	clusterURL, err := c.ClusterAuthUrl(cluster)
	if err != nil {
		return nil, errors.NewError(err, fmt.Sprintf("ClusterAuthUrl() failed"))
	}
	cinfo, err := c.NewClusterInfoCache(clusterURL, pooln)
	if err != nil {
		return nil, errors.NewError(err, fmt.Sprintf("ClusterInfo() failed"))
	}
	if err := cinfo.Fetch(); err != nil {
		return nil, errors.NewError(err, "Fetch ClusterInfo() failed")
	}
	return cinfo, nil
}
Code example #6
func GetIndexerNodesHttpAddresses(hostaddress string) ([]string, error) {
	clusterURL, err := c.ClusterAuthUrl(hostaddress)
	if err != nil {
		return nil, err
	}

	cinfo, err := c.NewClusterInfoCache(clusterURL, "default")
	if err != nil {
		return nil, err
	}

	if err := cinfo.Fetch(); err != nil {
		return nil, err
	}

	node_ids := cinfo.GetNodesByServiceType(c.INDEX_HTTP_SERVICE)
	indexNodes := []string{}
	for _, node_id := range node_ids {
		addr, _ := cinfo.GetServiceAddress(node_id, c.INDEX_HTTP_SERVICE)
		indexNodes = append(indexNodes, addr)
	}

	return indexNodes, nil
}
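All of the examples above follow the same sequence: build an authenticated cluster URL with ClusterAuthUrl, create a ClusterInfoCache for a pool, call Fetch to refresh the ns-server data, and then resolve node IDs and service addresses. The sketch below strings that sequence together in one self-contained program; the import path and the localhost address are assumptions for illustration, while every call mirrors the examples above.

package main

import (
	"fmt"
	"log"

	common "github.com/couchbase/indexing/secondary/common" // assumed import path
)

// printIndexAdminAddrs is an illustrative helper, not taken from the
// examples above: it resolves the "indexAdmin" address of every index node
// using the ClusterAuthUrl -> NewClusterInfoCache -> Fetch ->
// GetNodesByServiceType -> GetServiceAddress sequence shown earlier.
func printIndexAdminAddrs(cluster, pooln string) error {
	url, err := common.ClusterAuthUrl(cluster)
	if err != nil {
		return err
	}
	cinfo, err := common.NewClusterInfoCache(url, pooln)
	if err != nil {
		return err
	}
	if err := cinfo.Fetch(); err != nil {
		return err
	}
	for _, nid := range cinfo.GetNodesByServiceType("indexAdmin") {
		addr, err := cinfo.GetServiceAddress(nid, "indexAdmin")
		if err != nil {
			return err
		}
		fmt.Println(addr)
	}
	return nil
}

func main() {
	// "localhost:9000" is an illustrative cluster address.
	if err := printIndexAdminAddrs("localhost:9000", "default"); err != nil {
		log.Fatal(err)
	}
}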
Code example #7
File: meta_client.go Project: jchris/indexing
// updateIndexerList updates the 2i cluster information.
// IMPORTANT: make sure to call Refresh() after calling updateIndexerList().
func (b *metadataClient) updateIndexerList(discardExisting bool) error {
	clusterURL, err := common.ClusterAuthUrl(b.cluster)
	if err != nil {
		return err
	}
	cinfo, err := common.NewClusterInfoCache(clusterURL, "default")
	if err != nil {
		return err
	}
	if err := cinfo.Fetch(); err != nil {
		return err
	}
	// populate indexers' adminport and queryport
	adminports, err := getIndexerAdminports(cinfo)
	if err != nil {
		return err
	}

	fmsg := "Refreshing indexer list due to cluster changes or auto-refresh."
	logging.Infof(fmsg)
	logging.Infof("Refreshed Indexer List: %v", adminports)

	b.rw.Lock()
	defer b.rw.Unlock()

	if discardExisting {
		for _, indexerID := range b.adminports {
			b.mdClient.UnwatchMetadata(indexerID)
		}
		b.adminports = nil
	}

	// watch all indexers
	m := make(map[string]common.IndexerId)
	for _, adminport := range adminports { // add new indexer-nodes if any
		if indexerID, ok := b.adminports[adminport]; !ok {
			// This adminport is provided by the cluster manager, so the meta
			// client treats it as a healthy node.  If the indexer is
			// unavailable during initialization, WatchMetadata() will return
			// after a timeout, and a background watcher will keep retrying,
			// since the failure can be a transient partitioning error.  If a
			// retry eventually succeeds, this callback is invoked to update
			// meta_client.  The metadata client relies on the cluster manager
			// to send a notification if this node is detected to be down, so
			// that the metadata client can stop the background watcher.
			fn := func(ad string, n_id common.IndexerId, o_id common.IndexerId) {
				b.updateIndexer(ad, n_id, o_id)
			}

			// WatchMetadata will "unwatch" an old metadata watcher which
			// shares the same indexer Id (but the adminport may be different).
			indexerID = b.mdClient.WatchMetadata(adminport, fn)
			m[adminport] = indexerID
		} else {
			err = b.mdClient.UpdateServiceAddrForIndexer(indexerID, adminport)
			m[adminport] = indexerID
			delete(b.adminports, adminport)
		}
	}
	// delete indexer-nodes that got removed from cluster.
	for _, indexerID := range b.adminports {
		// Check whether the indexerID still exists in "m".  If the adminport
		// changed for the same indexer node, two adminports map to the same
		// indexerID: one in b.adminports (old) and one in "m" (new).  Make
		// sure not to accidentally unwatch that indexer.
		found := false
		for _, id := range m {
			if indexerID == id {
				found = true
			}
		}
		if !found {
			b.mdClient.UnwatchMetadata(indexerID)
		}
	}
	b.adminports = m
	return err
}
Code example #8
File: testclient.go Project: jchris/indexing
//
// main function
//
func main() {
	var curl string
	var cmd string
	var meta string
	var host string
	var defnId uint64

	flag.StringVar(&curl, "curl", "", "cluster url")
	flag.StringVar(&cmd, "cmd", "", "command=getIndexStatus/getIndexMetadata/restoreIndexMetadata/dropIndex")
	flag.StringVar(&meta, "meta", "", "metadata to restore")
	flag.StringVar(&host, "host", "", "host for index defnition id")
	flag.Uint64Var(&defnId, "id", 0, "index definition id")
	flag.Parse()

	cinfo, err := common.NewClusterInfoCache(curl, "default")
	if err != nil {
		log.Printf("%v", err)
		return
	}

	if err := cinfo.Fetch(); err != nil {
		log.Printf("%v", err)
		return
	}

	nodes := cinfo.GetNodesByServiceType("indexHttp")
	if len(nodes) == 0 {
		log.Printf("There is no couchbase server running with indexer service")
		return
	}

	indexHttp, err := cinfo.GetServiceAddress(nodes[0], "indexHttp")
	if err != nil {
		log.Printf("%v", err)
		return
	}

	if cmd == "getIndexStatus" || cmd == "getIndexMetadata" {

		resp, err := http.Get("http://" + indexHttp + "/" + cmd)
		if err != nil {
			log.Printf("%v", err)
			return
		}

		buf := new(bytes.Buffer)
		if _, err := buf.ReadFrom(resp.Body); err != nil {
			log.Printf("%v", err)
			return
		}

		log.Printf("%v", string(buf.Bytes()))

	} else if cmd == "restoreIndexMetadata" {

		bodybuf := bytes.NewBuffer([]byte(meta))
		resp, err := http.Post("http://"+indexHttp+"/"+cmd, "application/json", bodybuf)
		if err != nil {
			log.Printf("%v", err)
			return
		}

		buf := new(bytes.Buffer)
		if _, err := buf.ReadFrom(resp.Body); err != nil {
			log.Printf("%v", err)
			return
		}

		log.Printf("%v", string(buf.Bytes()))

	} else if cmd == "dropIndex" {

		for _, id := range nodes {
			indexHttp, err := cinfo.GetServiceAddress(id, "indexHttp")
			if err != nil {
				log.Printf("%v", err)
				return
			}

			if strings.HasPrefix(indexHttp, host) {

				defn := common.IndexDefn{DefnId: common.IndexDefnId(defnId)}
				request := &IndexRequest{Type: "drop", Index: defn}

				body, err := json.Marshal(&request)
				if err != nil {
					log.Printf("%v", err)
					return
				}

				log.Printf("dialing http://" + indexHttp + "/" + cmd)

				bodybuf := bytes.NewBuffer(body)
				resp, err := http.Post("http://"+indexHttp+"/"+cmd, "application/json", bodybuf)
				if err != nil {
					log.Printf("%v", err)
					return
				}

				buf := new(bytes.Buffer)
				if _, err := buf.ReadFrom(resp.Body); err != nil {
					log.Printf("%v", err)
					return
				}

				log.Printf("%v", string(buf.Bytes()))
				return
			}
		}

		log.Printf("Cannot find matching host %d", host)
	}
}
Code example #9
func NewClustMgrAgent(supvCmdch MsgChannel, supvRespch MsgChannel, cfg common.Config) (
	ClustMgrAgent, Message) {

	//Init the clustMgrAgent struct
	c := &clustMgrAgent{
		supvCmdch:  supvCmdch,
		supvRespch: supvRespch,
		config:     cfg,
	}

	var cinfo *common.ClusterInfoCache
	url, err := common.ClusterAuthUrl(cfg["clusterAddr"].String())
	if err == nil {
		cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}
	}

	cinfo.Lock()
	defer cinfo.Unlock()

	if err := cinfo.Fetch(); err != nil {
		logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}
	}

	mgr, err := manager.NewIndexManager(cinfo, cfg)
	if err != nil {
		logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}

	}

	c.mgr = mgr

	metaNotifier := NewMetaNotifier(supvRespch, cfg)
	if metaNotifier == nil {
		logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR}}

	}

	mgr.RegisterNotifier(metaNotifier)

	c.metaNotifier = metaNotifier

	//start clustMgrAgent loop which listens to commands from its supervisor
	go c.run()

	//register with Index Manager for notification of metadata updates

	return c, &MsgSuccess{}

}