Example #1
// Decr decrements stat value(s) by `vals`.
func (s Statistics) Decr(path string, vals ...int) {
	l := len(vals)
	if l == 0 {
		logging.Warnf("Decr called without value")
		return
	}

	switch vs := s[path].(type) {
	case float64:
		s[path] = vs - float64(vals[0])

	case []interface{}:
		if l != len(vs) {
			logging.Warnf("Decr expected %v values, got %v", len(vs), l)
			return
		}
		for i, v := range vs {
			vs[i] = v.(float64) - float64(vals[i])
		}

	case []float64:
		if l != len(vs) {
			logging.Warnf("Incr expected %v values, got %v", len(vs), l)
			return
		}
		for i, v := range vs {
			vs[i] = v - float64(vals[i])
		}
	}
}
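For reference, a minimal usage sketch, assuming Statistics is a map[string]interface{} keyed by stat path (which is what the type switch above implies); the stat keys are illustrative only:

// Hypothetical usage of Decr; the keys "requests" and "latency" are made up.
s := Statistics{
	"requests": float64(10),
	"latency":  []float64{1.5, 2.5, 3.5},
}
s.Decr("requests", 2)      // "requests" becomes 8
s.Decr("latency", 1, 1, 1) // each element is decremented by 1
s.Decr("latency", 1)       // length mismatch: logs a warning, values unchanged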
Example #2
func makeTapEvent(req transport.MCRequest) *TapEvent {
	event := TapEvent{
		VBucket: req.VBucket,
	}
	switch req.Opcode {
	case transport.TAP_MUTATION:
		event.Opcode = TapMutation
		event.Key = req.Key
		event.Value = req.Body
		event.Cas = req.Cas
	case transport.TAP_DELETE:
		event.Opcode = TapDeletion
		event.Key = req.Key
		event.Cas = req.Cas
	case transport.TAP_CHECKPOINT_START:
		event.Opcode = TapCheckpointStart
	case transport.TAP_CHECKPOINT_END:
		event.Opcode = TapCheckpointEnd
	case transport.TAP_OPAQUE:
		if len(req.Extras) < 8+4 {
			return nil
		}
		switch op := int(binary.BigEndian.Uint32(req.Extras[8:])); op {
		case transport.TAP_OPAQUE_INITIAL_VBUCKET_STREAM:
			event.Opcode = TapBeginBackfill
		case transport.TAP_OPAQUE_CLOSE_BACKFILL:
			event.Opcode = TapEndBackfill
		case transport.TAP_OPAQUE_CLOSE_TAP_STREAM:
			event.Opcode = tapEndStream
		case transport.TAP_OPAQUE_ENABLE_AUTO_NACK:
			return nil
		case transport.TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC:
			return nil
		default:
			logging.Warnf("TapFeed: Ignoring TAP_OPAQUE/%d", op)
			return nil // unknown opaque event
		}
	case transport.NOOP:
		return nil // ignore
	default:
		logging.Warnf("TapFeed: Ignoring %s", req.Opcode)
		return nil // unknown event
	}

	if len(req.Extras) >= tapMutationExtraLen &&
		(event.Opcode == TapMutation || event.Opcode == TapDeletion) {

		event.Flags = binary.BigEndian.Uint32(req.Extras[8:])
		event.Expiry = binary.BigEndian.Uint32(req.Extras[12:])
	}

	return &event
}
Example #3
//
// Run the server until it stops.  Will not attempt to re-run.
//
func (c *Coordinator) runOnce(config string) int {

	logging.Debugf("Coordinator.runOnce() : Start Running Coordinator")

	pauseTime := 0

	defer func() {
		if r := recover(); r != nil {
			logging.Warnf("panic in Coordinator.runOnce() : %s\n", r)
		}

		common.SafeRun("Coordinator.cleanupState()",
			func() {
				c.cleanupState()
			})
	}()

	err := c.bootstrap(config)
	if err != nil {
		pauseTime = 200
	}

	// Check if the server has been terminated explicitly. If so, don't run.
	if !c.IsDone() {

		// runElection() finishes when there is an error, the election result is
		// known, or it is terminated. Unless killed explicitly, a goroutine will
		// continue running to respond to election requests from other peers.
		leader, err := c.runElection()
		if err != nil {
			logging.Warnf("Coordinator.runOnce() : Error Encountered During Election : %s", err.Error())
			pauseTime = 100
		} else {

			// Check if the server has been terminated explicitly. If so, don't run.
			if !c.IsDone() {
				// runProtocol() is done when there is an error or it is terminated explicitly (killch)
				err := c.runProtocol(leader)
				if err != nil {
					logging.Warnf("Coordinator.RunOnce() : Error Encountered From Coordinator : %s", err.Error())
				}
			}
		}
	} else {
		logging.Infof("Coordinator.RunOnce(): Coordinator has been terminated explicitly. Terminate.")
	}

	return pauseTime
}
Example #4
// Goroutine that runs the feed
func (feed *TapFeed) run() {
	retryInterval := initialRetryInterval
	bucketOK := true
	for {
		// Connect to the TAP feed of each server node:
		if bucketOK {
			killSwitch, err := feed.connectToNodes()
			if err == nil {
				// Run until one of the sub-feeds fails:
				select {
				case <-killSwitch:
				case <-feed.quit:
					return
				}
				feed.closeNodeFeeds()
				retryInterval = initialRetryInterval
			}
		}

		// On error, try to refresh the bucket in case the list of nodes changed:
		logging.Warnf("dcp-client: TAP connection lost; reconnecting to bucket %q in %v",
			feed.bucket.Name, retryInterval)
		err := feed.bucket.Refresh()
		bucketOK = err == nil

		select {
		case <-time.After(retryInterval):
		case <-feed.quit:
			return
		}
		if retryInterval *= 2; retryInterval > maximumRetryInterval {
			retryInterval = maximumRetryInterval
		}
	}
}
Example #5
//
// Terminate the Coordinator
//
func (s *Coordinator) Terminate() {

	defer func() {
		if r := recover(); r != nil {
			logging.Warnf("panic in Coordinator.Terminate() : %s.  Ignored.\n", r)
		}
	}()

	s.state.mutex.Lock()
	defer s.state.mutex.Unlock()

	if s.state.done {
		return
	}

	s.state.done = true

	if s.site != nil {
		s.site.Close()
		s.site = nil
	}

	if s.configRepo != nil {
		s.configRepo.Close()
		s.configRepo = nil
	}

	if s.skillch != nil {
		s.skillch <- true // kill leader/follower server
	}
}
Example #6
func (s *Server) monitorClient(
	conn net.Conn,
	rcvch <-chan interface{},
	quitch chan<- interface{},
	finch chan bool) {

	raddr := conn.RemoteAddr()

	select {
	case req, ok := <-rcvch:
		if ok {
			if _, yes := req.(*protobuf.EndStreamRequest); yes {
				format := "%v connection %s client requested quit"
				logging.Debugf(format, s.logPrefix, raddr)
			} else {
				format := "%v connection %s unknown request %v"
				logging.Errorf(format, s.logPrefix, raddr, req)
			}
		} else {
			format := "%v connection %s client closed connection"
			logging.Warnf(format, s.logPrefix, raddr)
		}
	case <-s.killch:
	case <-finch:
		close(finch)
		return
	}
	close(quitch)

	<-finch
	close(finch)
}
Example #7
func (p *Pool) refresh() (err error) {
	p.BucketMap = make(map[string]Bucket)

loop:
	buckets := []Bucket{}
	err = p.client.parseURLResponse(p.BucketURL["uri"], &buckets)
	if err != nil {
		return err
	}
	for _, b := range buckets {
		nb := &Bucket{}
		err = p.client.parseURLResponse(p.BucketURL["terseBucketsBase"]+b.Name, nb)
		if err != nil {
			// bucket list is out of sync with the cluster's bucket list;
			// the bucket might have been deleted.
			if strings.Contains(err.Error(), "HTTP error 404") {
				logging.Warnf("cluster_info: Out of sync for bucket %s. Retrying..", b.Name)
				goto loop
			}
			return err
		}
		b.pool = p
		b.init(nb)
		p.BucketMap[b.Name] = b
	}
	return nil
}
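Note that the goto loop above retries without bound while a bucket keeps returning 404. Below is a bounded variant as a sketch only: refreshBounded is a hypothetical name, the retry limit is an assumption rather than the library's behaviour, and it assumes the surrounding file imports "fmt" in addition to "strings".

// Hypothetical bounded-retry variant of refresh(); reuses the same helpers
// as the function above but gives up after maxRetries attempts.
func (p *Pool) refreshBounded(maxRetries int) error {
	for attempt := 0; attempt < maxRetries; attempt++ {
		p.BucketMap = make(map[string]Bucket)
		buckets := []Bucket{}
		if err := p.client.parseURLResponse(p.BucketURL["uri"], &buckets); err != nil {
			return err
		}
		outOfSync := false
		for _, b := range buckets {
			nb := &Bucket{}
			err := p.client.parseURLResponse(p.BucketURL["terseBucketsBase"]+b.Name, nb)
			if err != nil {
				if strings.Contains(err.Error(), "HTTP error 404") {
					// bucket list is out of sync; refetch the whole list
					logging.Warnf("cluster_info: Out of sync for bucket %s. Retrying..", b.Name)
					outOfSync = true
					break
				}
				return err
			}
			b.pool = p
			b.init(nb)
			p.BucketMap[b.Name] = b
		}
		if !outOfSync {
			return nil
		}
	}
	return fmt.Errorf("refresh: bucket list still out of sync after %d attempts", maxRetries)
}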
Example #8
func (instance *serviceNotifierInstance) getNotifyCallback(t NotificationType) func(interface{}) error {
	fn := func(msg interface{}) error {
		instance.Lock()
		defer instance.Unlock()

		if !instance.valid {
			return ErrNotifierInvalid
		}

		notifMsg := Notification{
			Type: t,
			Msg:  msg,
		}

		logging.Infof("serviceChangeNotifier: received %s", notifMsg)

		for id, w := range instance.waiters {
			select {
			case w <- notifMsg:
			case <-time.After(notifyWaitTimeout):
				logging.Warnf("servicesChangeNotifier: Consumer for %v took too long to read notification, making the consumer invalid", instance.clusterUrl)
				close(w)
				delete(instance.waiters, id)
			}
		}
		return nil
	}

	return fn
}
Example #9
// Update updates the config object with data, which can be a Config, a
// map[string]interface{}, or JSON-encoded []byte.
func (config Config) Update(data interface{}) error {
	fmsg := "CONF[] skipping setting key %q value '%v': %v"
	switch v := data.(type) {
	case Config: // Clone
		for key, value := range v {
			config.Set(key, value)
		}

	case []byte: // parse JSON
		m := make(map[string]interface{})
		if err := json.Unmarshal(v, &m); err != nil {
			return err
		}
		return config.Update(m)

	case map[string]interface{}: // transform
		for key, value := range v {
			if cv, ok := SystemConfig[key]; ok { // valid config.
				if _, ok := config[key]; !ok {
					config[key] = cv // copy by value
				}
				if err := config.SetValue(key, value); err != nil {
					logging.Warnf(fmsg, key, value, err)
				}

			} else {
				logging.Errorf("invalid config param %q", key)
			}
		}

	default:
		return nil
	}
	return nil
}
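A usage sketch for the []byte path; the key name is borrowed from Example #21 and assumed to be a valid SystemConfig entry:

// Hypothetical usage; Update parses the JSON and applies known keys only.
cfg := make(Config)
data := []byte(`{"projector.settings.log_level": "Info"}`)
if err := cfg.Update(data); err != nil {
	logging.Warnf("config update failed: %v", err)
}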
Example #10
func waitForConnections(ls net.Listener) {
	reqChannel := make(chan chanReq)

	go RunServer(reqChannel)
	handler := &reqHandler{reqChannel}

	logging.Warnf("Listening on port %d", *port)
	for {
		s, e := ls.Accept()
		if e == nil {
			logging.Warnf("Got a connection from %v", s.RemoteAddr())
			go connectionHandler(s, handler)
		} else {
			logging.Warnf("Error accepting from %s", ls)
		}
	}
}
Example #11
//GetPartitionById returns the partition for the given partitionId
//or nil if partitionId is not found
func (pc *KeyPartitionContainer) GetPartitionById(id PartitionId) PartitionDefn {
	if p, ok := pc.PartitionMap[id]; ok {
		return p
	} else {
		logging.Warnf("KeyPartitionContainer: Invalid Partition Id %v", id)
		return nil
	}
}
Example #12
func (instance *serviceNotifierInstance) RunServicesObserver() {
	servicesCallback := instance.getNotifyCallback(ServiceChangeNotification)
	err := instance.client.RunObserveNodeServices(instance.pool, servicesCallback, nil)
	if err != nil {
		logging.Warnf("servicesChangeNotifier: Connection terminated for services notifier instance of %s, %s (%v)", instance.clusterUrl, instance.pool, err)
	}
	instance.cleanup()
}
Example #13
// Internal goroutine that reads from the socket and writes events to
// the channel
func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
	defer close(ch)
	var headerBuf [transport.HDR_LEN]byte
loop:
	for {
		// Read the next request from the server.
		//
		//  (Can't call mc.Receive() because it reads a
		//  _response_ not a request.)
		var pkt transport.MCRequest
		n, err := pkt.Receive(mc.conn, headerBuf[:])
		if TapRecvHook != nil {
			TapRecvHook(&pkt, n, err)
		}

		if err != nil {
			if err != io.EOF {
				feed.Error = err
			}
			break loop
		}

		//logging.Warnf("** TapFeed received %#v : %q", pkt, pkt.Body)

		if pkt.Opcode == transport.TAP_CONNECT {
			// This is not an event from the server; it's
			// an error response to my connect request.
			feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
			break loop
		}

		event := makeTapEvent(pkt)
		if event != nil {
			if event.Opcode == tapEndStream {
				break loop
			}

			select {
			case ch <- *event:
			case <-feed.closer:
				break loop
			}
		}

		if len(pkt.Extras) >= 4 {
			reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
			if reqFlags&transport.TAP_ACK != 0 {
				if _, err := mc.sendAck(&pkt); err != nil {
					feed.Error = err
					break loop
				}
			}
		}
	}
	if err := mc.Close(); err != nil {
		logging.Warnf("Error closing memcached client:  %v", err)
	}
}
Example #14
func handleFlush(req *transport.MCRequest, s *storage) (ret *transport.MCResponse) {
	ret = &transport.MCResponse{}
	delay := binary.BigEndian.Uint32(req.Extras)
	if delay > 0 {
		logging.Warnf("Delay not supported (got %d)", delay)
	}
	s.data = make(map[string]transport.MCItem)
	return
}
Example #15
// RunServer runs the cache server.
func RunServer(input chan chanReq) {
	var s storage
	s.data = make(map[string]transport.MCItem)
	for {
		req := <-input
		logging.Warnf("Got a request: %s", req.req)
		req.res <- dispatch(req.req, &s)
	}
}
Example #16
//GetEndpointsByPartitionId returns the list of Endpoints hosting the given partitionId
//or nil if partitionId is not found
func (pc *KeyPartitionContainer) GetEndpointsByPartitionId(id PartitionId) []Endpoint {

	if p, ok := pc.PartitionMap[id]; ok {
		return p.Endpoints()
	} else {
		logging.Warnf("KeyPartitionContainer: Invalid Partition Id %v", id)
		return nil
	}
}
Example #17
//GetSliceById returns Slice for the given SliceId
func (sc *HashedSliceContainer) GetSliceById(id SliceId) Slice {

	if s, ok := sc.SliceMap[id]; ok {
		return s
	} else {
		logging.Warnf("HashedSliceContainer: Invalid Slice Id %v", id)
		return nil
	}
}
Example #18
func (p *Pipeline) runIt(o *pipelineObject) {
	p.wg.Add(1)
	go func() {
		err := o.r.Routine()
		if err != nil {
			logging.Warnf("%v exited with error %v", o.n, err)
		}
		p.wg.Done()
	}()
}
Example #19
// LogConfig will check whether a configuration parameter is
// mutable and log that information.
func (config Config) LogConfig(prefix string) {
	for key, cv := range config {
		if cv.Immutable {
			fmsg := "%v immutable settings %v cannot be update to `%v`\n"
			logging.Warnf(fmsg, prefix, key, cv.Value)
		} else {
			fmsg := "%v settings %v will updated to `%v`\n"
			logging.Infof(fmsg, prefix, key, cv.Value)
		}
	}
}
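A short sketch combining this with Example #9's Update; the key is again assumed to be present in SystemConfig and the log prefix is arbitrary:

// Hypothetical: apply incoming settings, then log which ones are mutable.
settings := make(Config)
if err := settings.Update(map[string]interface{}{
	"projector.settings.log_level": "Debug",
}); err == nil {
	settings.LogConfig("SettingsMgr: ")
}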
Example #20
func (c *clustMgrAgent) handleGetGlobalTopology(cmd Message) {

	logging.Debugf("ClustMgr:handleGetGlobalTopology %v", cmd)

	//get the latest topology from manager
	metaIter, err := c.mgr.NewIndexDefnIterator()
	if err != nil {
		common.CrashOnError(err)
	}
	defer metaIter.Close()

	indexInstMap := make(common.IndexInstMap)

	for _, defn, err := metaIter.Next(); err == nil; _, defn, err = metaIter.Next() {

		idxDefn := *defn

		t, e := c.mgr.GetTopologyByBucket(idxDefn.Bucket)
		if e != nil {
			common.CrashOnError(e)
		}

		inst := t.GetIndexInstByDefn(idxDefn.DefnId)

		if inst == nil {
			logging.Warnf("ClustMgr:handleGetGlobalTopology Index Instance Not "+
				"Found For Index Definition %v. Ignored.", idxDefn)
			continue
		}

		//for the indexer, the Ready state doesn't matter. Until the index is
		//built, the index stays in the Created state.
		var state common.IndexState
		instState := common.IndexState(inst.State)
		if instState == common.INDEX_STATE_READY {
			state = common.INDEX_STATE_CREATED
		} else {
			state = instState
		}

		idxInst := common.IndexInst{InstId: common.IndexInstId(inst.InstId),
			Defn:   idxDefn,
			State:  state,
			Stream: common.StreamId(inst.StreamId),
		}

		indexInstMap[idxInst.InstId] = idxInst

	}

	c.supvCmdch <- &MsgClustMgrTopology{indexInstMap: indexInstMap}
}
Example #21
// ResetConfig accepts a full-set or subset of global configuration
// and updates projector related fields.
func (p *Projector) ResetConfig(config c.Config) {
	p.rw.Lock()
	defer p.rw.Unlock()
	defer logging.Infof("%v\n", c.LogRuntime())

	// reset configuration.
	if cv, ok := config["projector.settings.log_level"]; ok {
		logging.SetLogLevel(logging.Level(cv.String()))
	}
	if cv, ok := config["projector.maxCpuPercent"]; ok {
		c.SetNumCPUs(cv.Int())
	}
	p.config = p.config.Override(config)

	// CPU-profiling
	cpuProfile, ok := config["projector.cpuProfile"]
	if ok && cpuProfile.Bool() && p.cpuProfFd == nil {
		cpuProfFname, ok := config["projector.cpuProfFname"]
		if ok {
			fname := cpuProfFname.String()
			logging.Infof("%v cpu profiling => %q\n", p.logPrefix, fname)
			p.cpuProfFd = p.startCPUProfile(fname)

		} else {
			logging.Errorf("Missing cpu-profile o/p filename\n")
		}

	} else if ok && !cpuProfile.Bool() {
		if p.cpuProfFd != nil {
			pprof.StopCPUProfile()
			logging.Infof("%v cpu profiling stopped\n", p.logPrefix)
		}
		p.cpuProfFd = nil

	} else if ok {
		logging.Warnf("%v cpu profiling already active !!\n", p.logPrefix)
	}

	// MEM-profiling
	memProfile, ok := config["projector.memProfile"]
	if ok && memProfile.Bool() {
		memProfFname, ok := config["projector.memProfFname"]
		if ok {
			fname := memProfFname.String()
			if p.takeMEMProfile(fname) {
				logging.Infof("%v mem profile => %q\n", p.logPrefix, fname)
			}
		} else {
			logging.Errorf("Missing mem-profile o/p filename\n")
		}
	}
}
Example #22
//
// Goroutine to bootstrap projectors for the shared stream, as well as continuous
// maintenance of the shared stream.  It listens for topology updates and updates
// the projectors in response.
//
func (s *StreamManager) run() {

	// register to index manager for receiving topology change
	changeCh, err := s.indexMgr.StartListenTopologyUpdate("Stream Manager")
	if err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Failed to listen to topology changes from the repository.  Error = %v", err))
	}

	// load topology
	if err := s.loadTopology(); err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Failed to load topology from the repository.  Error = %v", err))
	}

	// initialize stream
	if err := s.initializeMaintenanceStream(); err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Failed to initialize maintenance stream.  Error = %v", err))
	}

	for {
		select {
		case data, ok := <-changeCh:
			if !ok {
				logging.Debugf("StreamManager.run(): topology change channel is closed.  Terminates.")
				return
			}

			func() {
				defer func() {
					if r := recover(); r != nil {
						logging.Warnf("panic in StreamManager.run() : %s.  Ignored.", r)
					}
				}()

				topology, err := unmarshallIndexTopology(data.([]byte))
				if err != nil {
					logging.Errorf("StreamManager.run(): unable to unmarshall topology.  Topology change is ignored by stream manager.")
				} else {
					err := s.handleTopologyChange(topology)
					if err != nil {
						logging.Errorf("StreamManager.run(): receive error from handleTopologyChange.  Error = %v.  Ignore", err)
					}
				}
			}()

		case <-s.stopch:
			return
		}
	}
}
Example #23
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	common.SeedProcess()

	logging.Infof("Indexer started with command line: %v\n", os.Args)
	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	forestdb.Log = &logging.SystemLogger

	// setup cbauth
	if *auth != "" {
		up := strings.Split(*auth, ":")
		logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
		if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	go platform.DumpOnSignal()
	go common.ExitOnStdinClose()

	config := common.SystemConfig
	config.SetValue("indexer.clusterAddr", *cluster)
	config.SetValue("indexer.numVbuckets", *numVbuckets)
	config.SetValue("indexer.enableManager", *enableManager)
	config.SetValue("indexer.adminPort", *adminPort)
	config.SetValue("indexer.scanPort", *scanPort)
	config.SetValue("indexer.httpPort", *httpPort)
	config.SetValue("indexer.streamInitPort", *streamInitPort)
	config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
	config.SetValue("indexer.streamMaintPort", *streamMaintPort)
	config.SetValue("indexer.storage_dir", *storageDir)

	storage_dir := config["indexer.storage_dir"].String()
	if err := os.MkdirAll(storage_dir, 0755); err != nil {
		common.CrashOnError(err)
	}

	_, msg := indexer.NewIndexer(config)

	if msg.GetMsgType() != indexer.MSG_SUCCESS {
		logging.Warnf("Indexer Failure to Init %v", msg)
	}

	logging.Infof("Indexer exiting normally\n")
}
Example #24
func (c *GsiClient) doScan(
	defnID uint64,
	callb func(*GsiScanClient, *common.IndexDefn) (error, bool)) (err error) {

	var qc *GsiScanClient
	var ok1, ok2, partial bool
	var queryport string
	var targetDefnID uint64

	wait := c.config["retryIntervalScanport"].Int()
	retry := c.config["retryScanPort"].Int()
	for i := 0; true; {
		if queryport, targetDefnID, ok1 = c.bridge.GetScanport(defnID, i); ok1 {
			index := c.bridge.GetIndexDefn(targetDefnID)
			if qc, ok2 = c.queryClients[queryport]; ok2 {
				begin := time.Now()
				err, partial = callb(qc, index)
				if c.isTimeit(err) {
					c.bridge.Timeit(targetDefnID, float64(time.Since(begin)))
					return err
				}
				if err != nil && partial {
					// partially succeeded scans, we don't reset-hash and we
					// don't retry
					return err
				} else { // TODO: make this error message precise
					// reset the hash so that we do a full STATS for next
					// query.
					c.setBucketHash(index.Bucket, 0)
				}
			}
		}
		if i = i + 1; i < retry {
			logging.Warnf(
				"Trying scan again for index %v (%v %v): %v ...\n",
				targetDefnID, ok1, ok2, err)
			c.updateScanClients()
			time.Sleep(time.Duration(wait) * time.Millisecond)
			continue
		}
		break
	}
	if err != nil {
		return err
	}
	return ErrorNoHost
}
Example #25
//allocNode tries to get a node from the freelist; otherwise it allocates and returns a new node
func (q *atomicMutationQueue) allocNode(vbucket Vbucket, appch StopChannel) *node {

	//get node from freelist
	n := q.popFreeList(vbucket)
	if n != nil {
		return n
	} else {
		currLen := platform.LoadInt64(&q.size[vbucket])
		if currLen < q.maxLen {
			//allocate new node and return
			return &node{}
		}
	}

	//every ALLOC_POLL_INTERVAL milliseconds, check for free nodes
	ticker := time.NewTicker(time.Millisecond * ALLOC_POLL_INTERVAL)
	defer ticker.Stop()

	var totalWait int
	for {
		select {
		case <-ticker.C:
			totalWait += ALLOC_POLL_INTERVAL
			n = q.popFreeList(vbucket)
			if n != nil {
				return n
			}
			if totalWait > 5000 {
				logging.Warnf("Indexer::MutationQueue Waiting for Node "+
					"Alloc for %v Milliseconds Vbucket %v", totalWait, vbucket)
			}

		case <-q.stopch[vbucket]:
			return nil

		case <-appch:
			//caller no longer wants to wait
			//allocate new node and return
			return &node{}

		}
	}
}
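A sketch of how a caller might abandon the wait through the app channel; it assumes StopChannel is a channel type the caller can close (e.g. chan bool) and that q and vbucket are in scope:

// Hypothetical caller; closing appch stops the wait for a free node.
appch := make(StopChannel)
go func() {
	time.Sleep(50 * time.Millisecond)
	close(appch) // give up waiting; allocNode then returns a freshly allocated node
}()
n := q.allocNode(vbucket, appch)
if n == nil {
	// nil means the queue was stopped for this vbucket (stopch closed)
	return
}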
Example #26
// return adminports for all known indexers.
func getIndexerAdminports(cinfo *common.ClusterInfoCache) ([]string, error) {
	iAdminports := make([]string, 0)
	for _, node := range cinfo.GetNodesByServiceType("indexAdmin") {
		status, err := cinfo.GetNodeStatus(node)
		if err != nil {
			return nil, err
		}
		logging.Verbosef("node %v status: %q", node, status)
		if status == "healthy" || status == "active" || status == "warmup" {
			adminport, err := cinfo.GetServiceAddress(node, "indexAdmin")
			if err != nil {
				return nil, err
			}
			iAdminports = append(iAdminports, adminport)
		} else {
			logging.Warnf("node %v status: %q", node, status)
		}
	}
	return iAdminports, nil
}
Example #27
func registerRequestHandler(mgr *IndexManager, clusterUrl string) {

	handlerContext.initializer.Do(func() {
		defer func() {
			if r := recover(); r != nil {
				logging.Warnf("error encountered when registering http createIndex handler : %v.  Ignored.\n", r)
			}
		}()

		http.HandleFunc("/createIndex", handlerContext.createIndexRequest)
		http.HandleFunc("/dropIndex", handlerContext.dropIndexRequest)
		http.HandleFunc("/getLocalIndexMetadata", handlerContext.handleLocalIndexMetadataRequest)
		http.HandleFunc("/getIndexMetadata", handlerContext.handleIndexMetadataRequest)
		http.HandleFunc("/restoreIndexMetadata", handlerContext.handleRestoreIndexMetadataRequest)
		http.HandleFunc("/getIndexStatus", handlerContext.handleIndexStatusRequest)
	})

	handlerContext.mgr = mgr
	handlerContext.clusterUrl = clusterUrl
}
Example #28
// send mutations for a set of vbuckets, update vbucket channels based on
// StreamBegin and StreamEnd.
func (c *Client) sendKeyVersions(
	vbs []*common.VbKeyVersions,
	vbChans map[string]chan interface{},
	quitch chan []string) []string {

	var idx int

	for _, vb := range vbs {
		if len(vb.Kvs) == 0 {
			logging.Warnf("%v empty mutations\n", c.logPrefix)
			continue
		}

		fin, l := false, len(vb.Kvs)

		if vb.Kvs[0].Commands[0] == common.StreamBegin { // first mutation
			vbChans[vb.Uuid], idx = c.addVbucket(vb.Uuid)
			logging.Tracef(
				"%v mapped vbucket {%v,%v}\n",
				c.logPrefixes[idx], vb.Bucket, vb.Vbucket)
		}

		if vb.Kvs[l-1].Commands[0] == common.StreamEnd { // last mutation
			fin = true
		}

		select {
		case vbChans[vb.Uuid] <- vb:
			if fin {
				logging.Tracef(
					"%v {%v,%v} ended\n", c.logPrefix, vb.Bucket, vb.Vbucket)
				c.delVbucket(vb.Uuid)
				delete(vbChans, vb.Uuid)
			}

		case msg := <-quitch:
			return msg
		}
	}
	return nil
}
Example #29
func (feed *DcpFeed) doDcpCloseStream(vbno, opaqueMSB uint16) error {
	prefix := feed.logPrefix
	stream, ok := feed.vbstreams[vbno]
	if !ok || stream == nil {
		fmsg := "%v ##%x stream for vb %d is not active"
		logging.Warnf(fmsg, prefix, opaqueMSB, vbno)
		return nil // TODO: should we return error here ?
	}
	stream.CloseOpaque = opaqueMSB
	rq := &transport.MCRequest{
		Opcode:  transport.DCP_CLOSESTREAM,
		VBucket: vbno,
		Opaque:  composeOpaque(vbno, opaqueMSB),
	}
	if err := feed.conn.Transmit(rq); err != nil {
		fmsg := "%v ##%x (##%x) doDcpCloseStream.Transmit(): %v"
		logging.Errorf(fmsg, prefix, opaqueMSB, stream.AppOpaque, err)
		return err
	}
	return nil
}
Example #30
func (feed *DcpFeed) handleStreamRequest(
	res *transport.MCResponse, vb uint16, stream *DcpStream, event *DcpEvent) {

	prefix := feed.logPrefix
	switch {
	case res.Status == transport.ROLLBACK && len(res.Body) != 8:
		event.Status, event.Seqno = res.Status, 0
		fmsg := "%v ##%x STREAMREQ(%v) invalid rollback: %v\n"
		logging.Errorf(fmsg, prefix, stream.AppOpaque, vb, res.Body)
		delete(feed.vbstreams, vb)

	case res.Status == transport.ROLLBACK:
		rollback := binary.BigEndian.Uint64(res.Body)
		event.Status, event.Seqno = res.Status, rollback
		fmsg := "%v ##%x STREAMREQ(%v) with rollback %d\n"
		logging.Warnf(fmsg, prefix, stream.AppOpaque, vb, rollback)
		delete(feed.vbstreams, vb)

	case res.Status == transport.SUCCESS:
		event.Status, event.Seqno = res.Status, stream.StartSeq
		flog, err := parseFailoverLog(res.Body[:])
		if err != nil {
			fmsg := "%v ##%x STREAMREQ(%v) parseFailoverLog: %v\n"
			logging.Errorf(fmsg, prefix, stream.AppOpaque, vb, err)
		}
		event.FailoverLog = flog
		stream.connected = true
		fmsg := "%v ##%x STREAMREQ(%d) successful\n"
		logging.Debugf(fmsg, prefix, stream.AppOpaque, vb)

	default:
		event.Status = res.Status
		event.VBucket = vb
		fmsg := "%v ##%x STREAMREQ(%v) unexpected status: %v\n"
		logging.Errorf(fmsg, prefix, stream.AppOpaque, vb, res.Status)
		delete(feed.vbstreams, vb)
	}
	return
}