Example #1
File: util.go Project: prataprc/indexing
func ValidateBucket(cluster, bucket string, uuids []string) bool {

	var cinfo *common.ClusterInfoCache
	url, err := common.ClusterAuthUrl(cluster)
	if err == nil {
		cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		logging.Fatalf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	cinfo.Lock()
	defer cinfo.Unlock()

	if err := cinfo.Fetch(); err != nil {
		logging.Errorf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	if nids, err := cinfo.GetNodesByBucket(bucket); err == nil && len(nids) != 0 {
		// verify UUID
		currentUUID := cinfo.GetBucketUUID(bucket)
		for _, uuid := range uuids {
			if uuid != currentUUID {
				return false
			}
		}
		return true
	} else {
		logging.Fatalf("Indexer::Error Fetching Bucket Info: %v Nids: %v", err, nids)
		return false
	}

}
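ValidateBucket returns false as soon as any supplied UUID differs from the bucket's current UUID, so callers can detect a bucket that was dropped and recreated under the same name. A minimal caller sketch; the cluster address, bucket name, and UUID below are hypothetical placeholders, not values from the project:

knownUUIDs := []string{"6ff9f35f1c6f48f9ab4adc4130d00b86"}
// Placeholder address and bucket; in the project these come from config.
if !ValidateBucket("localhost:9000", "default", knownUUIDs) {
	logging.Errorf("bucket default no longer matches the known UUID")
}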
Example #2
func addPartnInfoToProtoInst(cfg c.Config, cinfo *c.ClusterInfoCache,
	indexInst c.IndexInst, streamId c.StreamId, protoInst *protobuf.IndexInst) {

	switch partn := indexInst.Pc.(type) {
	case *c.KeyPartitionContainer:

		//Right now fill the SinglePartition as that is the only
		//partition structure supported
		partnDefn := partn.GetAllPartitions()

		//TODO move this to indexer init. These addresses cannot change.
		//Better to get these once and store.
		cinfo.Lock()
		defer cinfo.Unlock()

		err := cinfo.Fetch()
		c.CrashOnError(err)

		nid := cinfo.GetCurrentNode()
		streamMaintAddr, err := cinfo.GetServiceAddress(nid, "indexStreamMaint")
		c.CrashOnError(err)
		streamInitAddr, err := cinfo.GetServiceAddress(nid, "indexStreamInit")
		c.CrashOnError(err)
		streamCatchupAddr, err := cinfo.GetServiceAddress(nid, "indexStreamCatchup")
		c.CrashOnError(err)

		var endpoints []string
		for _, p := range partnDefn {
			for _, e := range p.Endpoints() {
				//Set the right endpoint based on streamId
				switch streamId {
				case c.MAINT_STREAM:
					e = c.Endpoint(streamMaintAddr)
				case c.CATCHUP_STREAM:
					e = c.Endpoint(streamCatchupAddr)
				case c.INIT_STREAM:
					e = c.Endpoint(streamInitAddr)
				}
				endpoints = append(endpoints, string(e))
			}
		}
		protoInst.SinglePartn = &protobuf.SinglePartition{
			Endpoints: endpoints,
		}
	}
}
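The switch in the inner loop overwrites every partition endpoint with the address of the stream being built, so all partitions of an instance point at the same per-stream service. A sketch of that mapping pulled out as a standalone helper; the helper name is ours, not part of the project:

// endpointForStream mirrors the switch above: each stream id selects
// the matching service address fetched from the ClusterInfoCache.
func endpointForStream(streamId c.StreamId, maintAddr, initAddr, catchupAddr string) c.Endpoint {
	switch streamId {
	case c.MAINT_STREAM:
		return c.Endpoint(maintAddr)
	case c.CATCHUP_STREAM:
		return c.Endpoint(catchupAddr)
	case c.INIT_STREAM:
		return c.Endpoint(initAddr)
	}
	return ""
}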
Example #3
func (m *requestHandlerContext) getIndexMetadata(cinfo *common.ClusterInfoCache,
	indexerHostMap map[common.IndexerId]string) (*ClusterIndexMetadata, error) {

	if err := cinfo.Fetch(); err != nil {
		return nil, err
	}

	// find all nodes that have an index http service
	nids := cinfo.GetNodesByServiceType(common.INDEX_HTTP_SERVICE)

	clusterMeta := &ClusterIndexMetadata{Metadata: make([]LocalIndexMetadata, len(nids))}

	for i, nid := range nids {

		addr, err := cinfo.GetServiceAddress(nid, common.INDEX_HTTP_SERVICE)
		if err == nil {

			resp, err := getWithAuth(addr + "/getLocalIndexMetadata")
			if err != nil {
				return nil, fmt.Errorf("Fail to retrieve index definition from url %s", addr)
			}

			localMeta := new(LocalIndexMetadata)
			status := convertResponse(resp, localMeta)
			if status == RESP_ERROR {
				return nil, fmt.Errorf("Fail to retrieve local metadata from url %s.", addr)
			}

			indexerHostMap[common.IndexerId(localMeta.IndexerId)] = addr
			clusterMeta.Metadata[i] = *localMeta

		} else {
			return nil, errors.New("Fail to retrieve http endpoint for index node")
		}
	}

	return clusterMeta, nil
}
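A caller sketch, assuming cinfo is held under its lock as in the other examples (getIndexMetadata calls Fetch itself); indexerHostMap is filled in as a side effect, mapping each indexer id to the address it was scraped from:

hostMap := make(map[common.IndexerId]string)
meta, err := m.getIndexMetadata(cinfo, hostMap)
if err != nil {
	logging.Errorf("getIndexMetadata: %v", err)
	return
}
// One LocalIndexMetadata entry per index node that was queried.
logging.Infof("collected metadata from %v index nodes", len(meta.Metadata))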
Example #4
func (m *requestHandlerContext) getIndexStatus(cinfo *common.ClusterInfoCache, bucket string) ([]IndexStatus, []string, error) {

	if err := cinfo.Fetch(); err != nil {
		return nil, nil, err
	}

	// find all nodes that have an index http service
	nids := cinfo.GetNodesByServiceType(common.INDEX_HTTP_SERVICE)

	list := make([]IndexStatus, 0)
	failedNodes := make([]string, 0)

	for _, nid := range nids {

		addr, err := cinfo.GetServiceAddress(nid, common.INDEX_HTTP_SERVICE)
		if err == nil {

			resp, err := getWithAuth(addr + "/getLocalIndexMetadata")
			if err != nil {
				failedNodes = append(failedNodes, addr)
				continue
			}

			localMeta := new(LocalIndexMetadata)
			status := convertResponse(resp, localMeta)
			if status == RESP_ERROR {
				failedNodes = append(failedNodes, addr)
				continue
			}

			curl, err := cinfo.GetServiceAddress(nid, "mgmt")
			if err != nil {
				failedNodes = append(failedNodes, addr)
				continue
			}

			resp, err = getWithAuth(addr + "/stats?async=true")
			if err != nil {
				failedNodes = append(failedNodes, addr)
				continue
			}

			stats := new(common.Statistics)
			status = convertResponse(resp, stats)
			if status == RESP_ERROR {
				failedNodes = append(failedNodes, addr)
				continue
			}

			for _, defn := range localMeta.IndexDefinitions {

				if len(bucket) != 0 && bucket != defn.Bucket {
					continue
				}

				if topology := m.findTopologyByBucket(localMeta.IndexTopologies, defn.Bucket); topology != nil {
					state, errStr := topology.GetStatusByDefn(defn.DefnId)

					if state != common.INDEX_STATE_DELETED && state != common.INDEX_STATE_NIL {

						stateStr := "Not Available"
						switch state {
						case common.INDEX_STATE_CREATED:
							stateStr = "Created"
						case common.INDEX_STATE_READY:
							stateStr = "Created"
						case common.INDEX_STATE_INITIAL:
							stateStr = "Building"
						case common.INDEX_STATE_CATCHUP:
							stateStr = "Building"
						case common.INDEX_STATE_ACTIVE:
							stateStr = "Ready"
						}

						if len(errStr) != 0 {
							stateStr = "Error"
						}

						completion := 0
						key := fmt.Sprintf("%v:%v:build_progress", defn.Bucket, defn.Name)
						if progress, ok := stats.ToMap()[key]; ok {
							completion = int(progress.(float64))
						}

						status := IndexStatus{
							DefnId:     defn.DefnId,
							Name:       defn.Name,
							Bucket:     defn.Bucket,
							IsPrimary:  defn.IsPrimary,
							SecExprs:   defn.SecExprs,
							WhereExpr:  defn.WhereExpr,
							Status:     stateStr,
							Error:      errStr,
							Hosts:      []string{curl},
							Definition: common.IndexStatement(defn),
							Completion: completion,
						}

						list = append(list, status)
					}
				}
			}
		} else {
			failedNodes = append(failedNodes, addr)
			continue
		}
	}

	return list, failedNodes, nil
}
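A caller sketch: an empty bucket name matches every bucket (mirroring the len(bucket) != 0 check above), and unreachable nodes are reported through failedNodes instead of aborting the scan:

statuses, failedNodes, err := m.getIndexStatus(cinfo, "")
if err != nil {
	logging.Errorf("getIndexStatus: %v", err)
	return
}
if len(failedNodes) != 0 {
	// Results are partial; these index nodes could not be queried.
	logging.Errorf("unreachable index nodes: %v", failedNodes)
}
for _, st := range statuses {
	logging.Infof("%v/%v: %v (%v%% built)", st.Bucket, st.Name, st.Status, st.Completion)
}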
Example #5
func NewClustMgrAgent(supvCmdch MsgChannel, supvRespch MsgChannel, cfg common.Config) (
	ClustMgrAgent, Message) {

	//Init the clustMgrAgent struct
	c := &clustMgrAgent{
		supvCmdch:  supvCmdch,
		supvRespch: supvRespch,
		config:     cfg,
	}

	var cinfo *common.ClusterInfoCache
	url, err := common.ClusterAuthUrl(cfg["clusterAddr"].String())
	if err == nil {
		cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}
	}

	cinfo.Lock()
	defer cinfo.Unlock()

	if err := cinfo.Fetch(); err != nil {
		logging.Errorf("ClustMgrAgent::Fail to init ClusterInfoCache : %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}
	}

	mgr, err := manager.NewIndexManager(cinfo, cfg)
	if err != nil {
		logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}

	}

	c.mgr = mgr

	metaNotifier := NewMetaNotifier(supvRespch, cfg)
	if metaNotifier == nil {
		logging.Errorf("ClustMgrAgent::NewClustMgrAgent Error In Init %v", err)
		return nil, &MsgError{
			err: Error{code: ERROR_CLUSTER_MGR_AGENT_INIT,
				severity: FATAL,
				category: CLUSTER_MGR}}

	}

	mgr.RegisterNotifier(metaNotifier)

	c.metaNotifier = metaNotifier

	//start clustMgrAgent loop which listens to commands from its supervisor
	go c.run()

	//register with Index Manager for notification of metadata updates

	return c, &MsgSuccess{}

}
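A supervisor-side sketch of bringing the agent up; the channel capacity is an arbitrary assumption, and cfg is a common.Config prepared by the caller:

supvCmdch := make(MsgChannel, 1000)
supvRespch := make(MsgChannel, 1000)
agent, msg := NewClustMgrAgent(supvCmdch, supvRespch, cfg)
if _, ok := msg.(*MsgSuccess); !ok {
	// msg is a *MsgError whose Error carries code, severity, and cause.
	logging.Fatalf("ClustMgrAgent init failed: %v", msg)
}
// On success the agent's run() loop is already consuming supvCmdch.
_ = agent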