Example #1
// GetClientForNode returns a projector stream client for the projector
// running on the given KV node ("host:port").
func (p *ProjectorStreamClientFactoryImpl) GetClientForNode(server string) ProjectorStreamClient {

	var projAddr string

	if host, port, err := net.SplitHostPort(server); err == nil {
		if common.IsIPLocal(host) {

			if port == KV_DCP_PORT {
				projAddr = LOCALHOST + ":" + PROJECTOR_PORT

			} else {
				iportProj, _ := strconv.Atoi(PROJECTOR_PORT)
				iportKV, _ := strconv.Atoi(port)
				iportKV0, _ := strconv.Atoi(KV_DCP_PORT_CLUSTER_RUN)

				// In cluster_run, KV DCP port numbers increment by 2 per
				// node, so the offset from the first node's port gives the
				// node number; that node's projector port is the base
				// projector port plus the node number.
				nodeNum := (iportKV - iportKV0) / 2
				p := iportProj + nodeNum
				projAddr = LOCALHOST + ":" + strconv.Itoa(p)
			}
			logging.Debugf("StreamAdmin::GetClientForNode(): Local Projector Addr: %v", projAddr)

		} else {
			projAddr = host + ":" + PROJECTOR_PORT
			logging.Debugf("StreamAdmin::GetClientForNode(): Remote Projector Addr: %v", projAddr)
		}
	} else {
		// A bad server address leaves projAddr empty; log it so the
		// malformed client URL below can be traced back here.
		logging.Errorf("StreamAdmin::GetClientForNode(): Error parsing server addr %v: %v", server, err)
	}

	// create a client for the node's projector admin port
	config := common.SystemConfig.SectionConfig("manager.projectorclient.", true)
	maxvbs := common.SystemConfig["maxVbuckets"].Int()
	ap := projectorC.NewClient(HTTP_PREFIX+projAddr+"/adminport/", maxvbs, config)
	return ap
}
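
To make the cluster_run port arithmetic in Example #1 concrete, here is a self-contained sketch of the same derivation. The base ports (12000 for the first cluster_run KV node, 9999 for the first projector) are assumptions for illustration, standing in for the KV_DCP_PORT_CLUSTER_RUN and PROJECTOR_PORT constants.

package main

import (
	"fmt"
	"strconv"
)

// Assumed cluster_run defaults, for illustration only.
const (
	kvDCPPortClusterRun = "12000" // first node's KV DCP port (assumption)
	projectorBasePort   = "9999"  // first node's projector port (assumption)
)

// projectorAddrForKVPort mirrors the arithmetic in GetClientForNode:
// KV DCP ports step by 2 per cluster_run node, projector ports by 1.
func projectorAddrForKVPort(kvPort string) string {
	iportProj, _ := strconv.Atoi(projectorBasePort)
	iportKV, _ := strconv.Atoi(kvPort)
	iportKV0, _ := strconv.Atoi(kvDCPPortClusterRun)

	nodeNum := (iportKV - iportKV0) / 2
	return "127.0.0.1:" + strconv.Itoa(iportProj+nodeNum)
}

func main() {
	fmt.Println(projectorAddrForKVPort("12000")) // node 0 -> 127.0.0.1:9999
	fmt.Println(projectorAddrForKVPort("12004")) // node 2 -> 127.0.0.1:10001
}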
Example #2
// newProjClient creates a client for the node's projector admin port,
// with retries disabled.
func newProjClient(addr string) *projClient.Client {

	config := c.SystemConfig.SectionConfig("indexer.projectorclient.", true)
	config.SetValue("retryInterval", 0) // no retry; fail immediately
	maxvbs := c.SystemConfig["maxVbuckets"].Int()
	return projClient.NewClient(addr, maxvbs, config)
}
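
A minimal sketch of a caller, assuming the admin-port URL is shaped the way Example #1 builds it (HTTP_PREFIX + addr + "/adminport/"); the host and port here are illustrative, not taken from this listing.

func exampleNewProjClient() {
	// Admin-port URL shaped as in Example #1; the address is an assumption.
	client := newProjClient("http://127.0.0.1:9999/adminport/")

	// Because retryInterval is 0, requests made through this client fail
	// fast instead of being retried, e.g. the topic requests in Example #3.
	_ = client
}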
Example #3
func main() {
	clusters := argParse()

	// setup cbauth
	up := strings.Split(options.auth, ":")
	if len(up) != 2 {
		log.Fatalf("Invalid auth %q, expected user:password", options.auth)
	}
	_, err := cbauth.InternalRetryDefaultInit(clusters[0], up[0], up[1])
	if err != nil {
		log.Fatalf("Failed to initialize cbauth: %s", err)
	}

	maxvbs := c.SystemConfig["maxVbuckets"].Int()
	dconf := c.SystemConfig.SectionConfig("indexer.dataport.", true)

	// start dataport servers.
	for _, endpoint := range options.endpoints {
		go dataport.Application(
			endpoint, options.stat, options.timeout, maxvbs, dconf,
			func(addr string, msg interface{}) bool { return true })
	}
	go dataport.Application(options.coordEndpoint, 0, 0, maxvbs, dconf, nil)

	for _, cluster := range clusters {
		adminport := getProjectorAdminport(cluster, "default")
		if options.projector {
			config := c.SystemConfig.Clone()
			config.SetValue("projector.clusterAddr", cluster)
			config.SetValue("projector.adminport.listenAddr", adminport)
			econf := c.SystemConfig.SectionConfig("projector.dataport.", true)
			epfactory := NewEndpointFactory(cluster, maxvbs, econf)
			config.SetValue("projector.routerEndpointFactory", epfactory)
			projector.NewProjector(maxvbs, config) // start projector daemon
		}

		// projector-client
		cconfig := c.SystemConfig.SectionConfig("indexer.projectorclient.", true)
		projectors[cluster] = projc.NewClient(adminport, maxvbs, cconfig)
	}

	// index instances for specified buckets.
	instances := protobuf.ExampleIndexInstances(
		options.buckets, options.endpoints, options.coordEndpoint)

	// start backfill stream on each projector
	for _, client := range projectors {
		_, err := client.InitialTopicRequest(
			"backfill" /*topic*/, "default", /*pooln*/
			"dataport" /*endpointType*/, instances)
		if err != nil {
			log.Fatal(err)
		}
	}

	time.Sleep(1000 * time.Second)
	//<-make(chan bool) // alternatively, wait forever
}
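
Example #3 calls NewEndpointFactory without showing it. A plausible sketch follows, assuming the usual imports and a dataport.NewRouterEndpoint constructor of this shape; both are assumptions, not code from this listing.

// NewEndpointFactory returns a c.RouterEndpointFactory that builds dataport
// endpoints for the projector to stream into. Sketch only: the
// dataport.NewRouterEndpoint signature is assumed here.
func NewEndpointFactory(cluster string, maxvbs int, econf c.Config) c.RouterEndpointFactory {
	return func(topic, endpointType, addr string) (c.RouterEndpoint, error) {
		switch endpointType {
		case "dataport":
			return dataport.NewRouterEndpoint(cluster, topic, addr, maxvbs, econf)
		default:
			return nil, fmt.Errorf("unknown endpoint type %q", endpointType)
		}
	}
}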