Example 1
// Walk the data dir and register pindexes for a Manager instance.
func (mgr *Manager) LoadDataDir() error {
	log.Printf("manager: loading dataDir...")

	dirEntries, err := ioutil.ReadDir(mgr.dataDir)
	if err != nil {
		return fmt.Errorf("manager: could not read dataDir: %s, err: %v",
			mgr.dataDir, err)
	}

	for _, dirInfo := range dirEntries {
		path := mgr.dataDir + string(os.PathSeparator) + dirInfo.Name()
		_, ok := mgr.ParsePIndexPath(path)
		if !ok {
			continue // Skip entries that don't match the naming pattern.
		}

		log.Printf("manager: opening pindex: %s", path)
		pindex, err := OpenPIndex(mgr, path)
		if err != nil {
			log.Printf("manager: could not open pindex: %s, err: %v",
				path, err)
			continue
		}

		mgr.registerPIndex(pindex)
	}

	log.Printf("manager: loading dataDir... done")
	return nil
}
Example 2
// MainUUID is a helper function for cmd-line tool developers that
// reuses a previous "baseName.uuid" file from the dataDir if it
// exists, or generates a brand new UUID (and persists it).
func MainUUID(baseName, dataDir string) (string, error) {
	uuid := cbgt.NewUUID()
	uuidPath := dataDir + string(os.PathSeparator) + baseName + ".uuid"
	uuidBuf, err := ioutil.ReadFile(uuidPath)
	if err == nil {
		uuid = strings.TrimSpace(string(uuidBuf))
		if uuid == "" {
			return "", fmt.Errorf("error: could not parse uuidPath: %s",
				uuidPath)
		}
		log.Printf("main: manager uuid: %s", uuid)
		log.Printf("main: manager uuid was reloaded")
	} else {
		log.Printf("main: manager uuid: %s", uuid)
		log.Printf("main: manager uuid was generated")
	}
	err = ioutil.WriteFile(uuidPath, []byte(uuid), 0600)
	if err != nil {
		return "", fmt.Errorf("error: could not write uuidPath: %s\n"+
			"  Please check that your -data/-dataDir parameter (%q)\n"+
			"  is to a writable directory where %s can store\n"+
			"  index data.",
			uuidPath, dataDir, baseName)
	}
	return uuid, nil
}
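A minimal usage sketch for MainUUID; the base name and data directory below are placeholders chosen for illustration, not taken from any particular tool.

func exampleMainUUID() {
	uuid, err := MainUUID("mytool", "/tmp/mytool-data")
	if err != nil {
		log.Fatalf("%v", err) // Could not read or persist the UUID file.
	}
	log.Printf("node uuid: %s", uuid)
}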
Example 3
func (t *TAPFeed) Start() error {
	if t.disable {
		log.Printf("feed_tap: disable, name: %s", t.Name())
		return nil
	}

	log.Printf("feed_tap: start, name: %s", t.Name())

	backoffFactor := t.params.BackoffFactor
	if backoffFactor <= 0.0 {
		backoffFactor = FEED_BACKOFF_FACTOR
	}
	sleepInitMS := t.params.SleepInitMS
	if sleepInitMS <= 0 {
		sleepInitMS = FEED_SLEEP_INIT_MS
	}
	sleepMaxMS := t.params.SleepMaxMS
	if sleepMaxMS <= 0 {
		sleepMaxMS = FEED_SLEEP_MAX_MS
	}

	go ExponentialBackoffLoop(t.Name(),
		func() int {
			progress, err := t.feed()
			if err != nil {
				log.Printf("feed_tap: name: %s, progress: %d, err: %v",
					t.Name(), progress, err)
			}
			return progress
		},
		sleepInitMS, backoffFactor, sleepMaxMS)

	return nil
}
Example 4
func (m *CtlMgr) GetCurrentTopology(haveTopologyRev service.Revision,
	cancelCh service.Cancel) (*service.Topology, error) {
	ctlTopology, err :=
		m.ctl.WaitGetTopology(string(haveTopologyRev), cancelCh)
	if err != nil {
		if err != service.ErrCanceled {
			log.Printf("ctl/manager, GetCurrenTopology, haveTopologyRev: %s,"+
				" err: %v", haveTopologyRev, err)
		}

		return nil, err
	}

	rv := &service.Topology{
		Rev:   service.Revision([]byte(ctlTopology.Rev)),
		Nodes: []service.NodeID{},
	}

	for _, ctlNode := range ctlTopology.MemberNodes {
		rv.Nodes = append(rv.Nodes, service.NodeID(ctlNode.UUID))
	}

	// TODO: Need a proper IsBalanced computation.
	rv.IsBalanced =
		len(ctlTopology.PrevWarnings) <= 0 && len(ctlTopology.PrevErrs) <= 0

	for resourceName, resourceWarnings := range ctlTopology.PrevWarnings {
		aggregate := map[string]bool{}
		for _, resourceWarning := range resourceWarnings {
			if strings.HasPrefix(resourceWarning, "could not meet constraints") {
				aggregate["could not meet replication constraints"] = true
			} else {
				aggregate[resourceWarning] = true
			}
		}

		for resourceWarning := range aggregate {
			rv.Messages = append(rv.Messages,
				fmt.Sprintf("warning: resource: %q -- %s",
					resourceName, resourceWarning))
		}
	}

	for _, err := range ctlTopology.PrevErrs {
		rv.Messages = append(rv.Messages, fmt.Sprintf("error: %v", err))
	}

	m.mu.Lock()
	m.lastTopology.Rev = rv.Rev
	same := reflect.DeepEqual(&m.lastTopology, rv)
	m.lastTopology = *rv
	m.mu.Unlock()

	if !same {
		log.Printf("ctl/manager, GetCurrenTopology, haveTopologyRev: %s,"+
			" changed, rv: %+v", haveTopologyRev, rv)
	}

	return rv, nil
}
Example 5
// To start a CPU profile...
//    curl -X POST http://127.0.0.1:9090/api/runtime/profile/cpu -d secs=5
// To analyze the captured profile...
//    go tool pprof [program-binary] run-cpu.pprof
func RESTProfileCPU(w http.ResponseWriter, r *http.Request) {
	secs, err := strconv.Atoi(r.FormValue("secs"))
	if err != nil || secs <= 0 {
		http.Error(w, "incorrect or missing secs parameter", 400)
		return
	}
	fname := "./run-cpu.pprof"
	os.Remove(fname)
	f, err := os.Create(fname)
	if err != nil {
		http.Error(w, fmt.Sprintf("profileCPU:"+
			" couldn't create file: %s, err: %v",
			fname, err), 500)
		return
	}
	log.Printf("profileCPU: start, file: %s", fname)
	err = pprof.StartCPUProfile(f)
	if err != nil {
		http.Error(w, fmt.Sprintf("profileCPU:"+
			" couldn't start CPU profile, file: %s, err: %v",
			fname, err), 500)
		return
	}
	go func() {
		time.Sleep(time.Duration(secs) * time.Second)
		pprof.StopCPUProfile()
		f.Close()
		log.Printf("profileCPU: end, file: %s", fname)
	}()
	w.WriteHeader(204)
}
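A minimal sketch of wiring the handler above into a server; the address and path simply mirror the curl example in the comment, and the real project is assumed to use its own router instead of this bare ServeMux.

func serveProfileEndpoint() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/runtime/profile/cpu", RESTProfileCPU)
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", mux))
}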
Example 6
// Split logical indexes into PIndexes and assign PIndexes to nodes.
func CalcPlan(mode string, indexDefs *IndexDefs, nodeDefs *NodeDefs,
	planPIndexesPrev *PlanPIndexes, version, server string) (
	*PlanPIndexes, error) {
	// This simple planner assigns at most MaxPartitionsPerPIndex
	// partitions onto a PIndex, and then uses blance to assign each
	// PIndex to 1 or more nodes (based on NumReplicas).
	if indexDefs == nil || nodeDefs == nil {
		return nil, nil
	}

	nodeUUIDsAll, nodeUUIDsToAdd, nodeUUIDsToRemove,
		nodeWeights, nodeHierarchy :=
		CalcNodesLayout(indexDefs, nodeDefs, planPIndexesPrev)

	planPIndexes := NewPlanPIndexes(version)

	// Examine every indexDef...
	for _, indexDef := range indexDefs.IndexDefs {
		// If the plan is frozen, CasePlanFrozen clones the previous
		// plan for this index.
		if CasePlanFrozen(indexDef, planPIndexesPrev, planPIndexes) {
			continue
		}

		// Skip indexDefs with no instantiable pindexImplType, such
		// as index aliases.
		pindexImplType, exists := PIndexImplTypes[indexDef.Type]
		if !exists ||
			pindexImplType == nil ||
			pindexImplType.New == nil ||
			pindexImplType.Open == nil {
			continue
		}

		// Split each indexDef into 1 or more PlanPIndexes.
		planPIndexesForIndex, err :=
			SplitIndexDefIntoPlanPIndexes(indexDef, server, planPIndexes)
		if err != nil {
			log.Printf("planner: could not SplitIndexDefIntoPlanPIndexes,"+
				" indexDef.Name: %s, server: %s, err: %v",
				indexDef.Name, server, err)
			continue // Keep planning the other IndexDefs.
		}

		// Once we have one or more PlanPIndexes for an IndexDef, use
		// blance to assign the PlanPIndexes to nodes.
		warnings := BlancePlanPIndexes(mode, indexDef,
			planPIndexesForIndex, planPIndexesPrev,
			nodeUUIDsAll, nodeUUIDsToAdd, nodeUUIDsToRemove,
			nodeWeights, nodeHierarchy)
		planPIndexes.Warnings[indexDef.Name] = warnings

		for _, warning := range warnings {
			log.Printf("planner: indexDef.Name: %s,"+
				" PlanNextMap warning: %s", indexDef.Name, warning)
		}
	}

	return planPIndexes, nil
}
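As a rough illustration of the partition math described in the comments above (the numbers are made up, not cbgt defaults): with 1024 source partitions and MaxPartitionsPerPIndex = 171, the planner would create ceil(1024/171) = 6 pindexes, and blance would then place each pindex on 1 + NumReplicas nodes.

func examplePIndexCount(sourcePartitions, maxPartitionsPerPIndex int) int {
	// Integer ceiling division: how many pindexes are needed so that
	// no pindex holds more than maxPartitionsPerPIndex partitions.
	return (sourcePartitions + maxPartitionsPerPIndex - 1) /
		maxPartitionsPerPIndex
}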
Example 7
func (meh *MainHandlers) OnFeedError(srcType string, r cbgt.Feed, err error) {
	log.Printf("main: meh.OnFeedError, srcType: %s, err: %v", srcType, err)

	if _, ok := err.(*couchbase.BucketNotFoundError); !ok ||
		srcType != "couchbase" || r == nil {
		return
	}

	dcpFeed, ok := r.(*cbgt.DCPFeed)
	if !ok {
		return
	}

	gone, err := dcpFeed.VerifyBucketNotExists()
	log.Printf("main: meh.OnFeedError, VerifyBucketNotExists,"+
		" srcType: %s, gone: %t, err: %v", srcType, gone, err)
	if !gone {
		return
	}

	bucketName, bucketUUID := dcpFeed.GetBucketDetails()
	if bucketName == "" {
		return
	}

	log.Printf("main: meh.OnFeedError, DeleteAllIndexFromSource,"+
		" srcType: %s, bucketName: %s, bucketUUID: %s",
		srcType, bucketName, bucketUUID)

	meh.mgr.DeleteAllIndexFromSource(srcType, bucketName, bucketUUID)
}
Example 8
func main() {
	flag.Parse()

	if flags.Help {
		flag.Usage()
		os.Exit(2)
	}

	if flags.Version {
		fmt.Printf("%s main: %s, data: %s\n",
			path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION)
		os.Exit(0)
	}

	cmd.MainCommon(cbgt.VERSION, flagAliases)

	cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	if flags.IndexTypes != "" {
		cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ","))
	}

	nodesToRemove := []string(nil)
	if len(flags.RemoveNodes) > 0 {
		nodesToRemove = strings.Split(flags.RemoveNodes, ",")
	}

	var steps map[string]bool
	if flags.Steps != "" {
		steps = cbgt.StringsToMap(strings.Split(flags.Steps, ","))
	}

	// ------------------------------------------------

	if steps == nil || steps["rebalance"] {
		log.Printf("main: step rebalance")

		err := runRebalance(cfg, flags.Server, nodesToRemove,
			flags.FavorMinNodes, flags.DryRun, flags.Verbose)
		if err != nil {
			log.Fatalf("%v", err)
			return
		}
	}

	// ------------------------------------------------

	err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION,
		flags.Server, nodesToRemove, flags.DryRun)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	log.Printf("main: done")
}
Example 9
func LogFlags(flagAliases map[string][]string) {
	flag.VisitAll(func(f *flag.Flag) {
		if flagAliases[f.Name] != nil {
			log.Printf("  -%s=%q\n", f.Name, f.Value)
		}
	})
	log.Printf("  GOMAXPROCS=%d", runtime.GOMAXPROCS(-1))
}
Example 10
func (m *CtlMgr) StartTopologyChange(change service.TopologyChange) error {
	log.Printf("ctl/manager, StartTopologyChange, change: %v", change)

	m.mu.Lock()
	defer m.mu.Unlock()

	// The caller might not care about the current topology and may
	// just want to impose or force a topology change.
	if len(change.CurrentTopologyRev) > 0 &&
		string(change.CurrentTopologyRev) != m.ctl.GetTopology().Rev {
		log.Printf("ctl/manager, StartTopologyChange, rev check, err: %v",
			service.ErrConflict)

		return service.ErrConflict
	}

	var err error

	started := false

	var taskHandlesNext []*taskHandle

	for _, th := range m.tasks.taskHandles {
		if th.task.Type == service.TaskTypeRebalance {
			log.Printf("ctl/manager, StartTopologyChange,"+
				" task rebalance check, err: %v",
				service.ErrConflict)

			return service.ErrConflict
		}

		if th.task.Type == service.TaskTypePrepared {
			th, err = m.startTopologyChangeTaskHandleLOCKED(change)
			if err != nil {
				log.Printf("ctl/manager, StartTopologyChange,"+
					" prepared, err: %v", err)

				return err
			}

			started = true
		}

		taskHandlesNext = append(taskHandlesNext, th)
	}

	if !started {
		return service.ErrNotFound
	}

	m.updateTasksLOCKED(func(s *tasks) {
		s.taskHandles = taskHandlesNext
	})

	log.Printf("ctl/manager, StartTopologyChange, started")

	return nil
}
Example 11
// JanitorLoop is the main loop for the janitor.
func (mgr *Manager) JanitorLoop() {
	if mgr.cfg != nil { // Might be nil for testing.
		go func() {
			ec := make(chan CfgEvent)
			mgr.cfg.Subscribe(PLAN_PINDEXES_KEY, ec)
			mgr.cfg.Subscribe(CfgNodeDefsKey(NODE_DEFS_WANTED), ec)
			for {
				select {
				case <-mgr.stopCh:
					return
				case e := <-ec:
					atomic.AddUint64(&mgr.stats.TotJanitorSubscriptionEvent, 1)
					mgr.JanitorKick("cfg changed, key: " + e.Key)
				}
			}
		}()
	}

	for {
		select {
		case <-mgr.stopCh:
			return
		case m := <-mgr.janitorCh:
			log.Printf("janitor: awakes, reason: %s", m.msg)

			var err error
			if m.op == WORK_KICK {
				atomic.AddUint64(&mgr.stats.TotJanitorKickStart, 1)
				err = mgr.JanitorOnce(m.msg)
				if err != nil {
					// Keep looping as perhaps it's a transient issue.
					// TODO: Perhaps need a rescheduled janitor kick.
					log.Printf("janitor: JanitorOnce, err: %v", err)
					atomic.AddUint64(&mgr.stats.TotJanitorKickErr, 1)
				} else {
					atomic.AddUint64(&mgr.stats.TotJanitorKickOk, 1)
				}
			} else if m.op == WORK_NOOP {
				atomic.AddUint64(&mgr.stats.TotJanitorNOOPOk, 1)
			} else if m.op == JANITOR_CLOSE_PINDEX {
				mgr.stopPIndex(m.obj.(*PIndex), false)
				atomic.AddUint64(&mgr.stats.TotJanitorClosePIndex, 1)
			} else if m.op == JANITOR_REMOVE_PINDEX {
				mgr.stopPIndex(m.obj.(*PIndex), true)
				atomic.AddUint64(&mgr.stats.TotJanitorRemovePIndex, 1)
			} else {
				err = fmt.Errorf("janitor: unknown op: %s, m: %#v", m.op, m)
				atomic.AddUint64(&mgr.stats.TotJanitorUnknownErr, 1)
			}
			if m.resCh != nil {
				if err != nil {
					m.resCh <- err
				}
				close(m.resCh)
			}
		}
	}
}
Example 12
func (t *DCPFeed) Start() error {
	if t.disable {
		log.Printf("feed_dcp: disable, name: %s", t.Name())
		return nil
	}

	log.Printf("feed_dcp: start, name: %s", t.Name())
	return t.bds.Start()
}
Example 13
func DumpOnSignal(signals ...os.Signal) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, signals...)
	for range c {
		log.Printf("dump: goroutine...")
		pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
		log.Printf("dump: heap...")
		pprof.Lookup("heap").WriteTo(os.Stderr, 1)
	}
}
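A minimal usage sketch; the signal choice is an assumption (SIGUSR2 on a Unix-like platform), not something prescribed by the function itself.

func exampleDumpOnSignal() {
	// Run in the background so dumps are produced on demand
	// without blocking the caller.
	go DumpOnSignal(syscall.SIGUSR2)
}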
Example 14
func (mgr *Manager) startPIndex(planPIndex *PlanPIndex) error {
	var pindex *PIndex
	var err error

	path := mgr.PIndexPath(planPIndex.Name)

	// First, try reading the path with OpenPIndex().  The path might
	// already exist, for example, after a rollback.
	_, err = os.Stat(path)
	if err == nil {
		pindex, err = OpenPIndex(mgr, path)
		if err != nil {
			log.Printf("janitor: startPIndex, OpenPIndex error,"+
				" cleaning up and trying NewPIndex,"+
				" path: %s, err: %v", path, err)
			os.RemoveAll(path)
		} else {
			if !PIndexMatchesPlan(pindex, planPIndex) {
				log.Printf("janitor: startPIndex, pindex does not match plan,"+
					" cleaning up and trying NewPIndex, path: %s, err: %v",
					path, err)
				pindex.Close(true)
				pindex = nil
			}
		}
	}

	if pindex == nil {
		pindex, err = NewPIndex(mgr, planPIndex.Name, NewUUID(),
			planPIndex.IndexType,
			planPIndex.IndexName,
			planPIndex.IndexUUID,
			planPIndex.IndexParams,
			planPIndex.SourceType,
			planPIndex.SourceName,
			planPIndex.SourceUUID,
			planPIndex.SourceParams,
			planPIndex.SourcePartitions,
			path)
		if err != nil {
			return fmt.Errorf("janitor: NewPIndex, name: %s, err: %v",
				planPIndex.Name, err)
		}
	}

	err = mgr.registerPIndex(pindex)
	if err != nil {
		pindex.Close(true)
		return err
	}

	return nil
}
Example 15
func MainWelcome(flagAliases map[string][]string) {
	cmd.LogFlags(flagAliases)

	log.Printf("main: registered bleve stores")
	types, instances := bleveRegistry.KVStoreTypesAndInstances()
	for _, s := range types {
		log.Printf("  %s", s)
	}
	for _, s := range instances {
		log.Printf("  %s", s)
	}
}
Example 16
// InitStaticRouterEx is like InitStaticRouter, but with optional
// manager parameter for more options.
func InitStaticRouterEx(r *mux.Router, staticDir, staticETag string,
	pages []string, pagesHandler http.Handler,
	mgr *cbgt.Manager) *mux.Router {
	prefix := ""
	if mgr != nil {
		prefix = mgr.Options()["urlPrefix"]
	}

	PIndexTypesInitRouter(r, "static.before", mgr)

	var s http.FileSystem
	if staticDir != "" {
		if _, err := os.Stat(staticDir); err == nil {
			log.Printf("http: serving assets from staticDir: %s", staticDir)
			s = http.Dir(staticDir)
		}
	}
	if s == nil {
		log.Printf("http: serving assets from embedded data")
		s = AssetFS()
	}

	r.PathPrefix(prefix + "/static/").Handler(
		http.StripPrefix(prefix+"/static/",
			ETagFileHandler{http.FileServer(s), staticETag}))

	// Bootstrap UI insists on loading templates from this path.
	r.PathPrefix(prefix + "/template/").Handler(
		http.StripPrefix(prefix+"/template/",
			ETagFileHandler{http.FileServer(s), staticETag}))

	// If the client asks for any of the pages, serve them via the
	// pages handler, or rewrite the URL to the root of the file server.
	for _, p := range pages {
		if pagesHandler != nil {
			r.PathPrefix(p).Handler(pagesHandler)
		} else {
			r.PathPrefix(p).Handler(RewriteURL("/", http.FileServer(s)))
		}
	}

	r.Handle(prefix+"/index.html",
		http.RedirectHandler(prefix+"/static/index.html", 302))
	r.Handle(prefix+"/",
		http.RedirectHandler(prefix+"/static/index.html", 302))

	PIndexTypesInitRouter(r, "static.after", mgr)

	return r
}
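A minimal wiring sketch for InitStaticRouterEx; the empty staticDir and staticETag and the single page path are placeholders chosen for illustration.

func exampleStaticRouter(mgr *cbgt.Manager) *mux.Router {
	r := mux.NewRouter()
	// An empty staticDir falls back to the embedded assets.
	return InitStaticRouterEx(r, "", "",
		[]string{"/index.html"}, nil, mgr)
}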
Example 17
func (m *CtlMgr) GetTaskList(haveTasksRev service.Revision,
	cancelCh service.Cancel) (*service.TaskList, error) {
	m.mu.Lock()

	if len(haveTasksRev) > 0 {
		haveTasksRevNum, err := DecodeRev(haveTasksRev)
		if err != nil {
			m.mu.Unlock()

			log.Printf("ctl/manager, GetTaskList, DecodeRev, haveTasksRev: %s,"+
				" err: %v", haveTasksRev, err)

			return nil, err
		}

		for haveTasksRevNum == m.tasks.revNum {
			if m.tasksWaitCh == nil {
				m.tasksWaitCh = make(chan struct{})
			}
			tasksWaitCh := m.tasksWaitCh

			m.mu.Unlock()
			select {
			case <-cancelCh:
				return nil, service.ErrCanceled

			case <-tasksWaitCh:
				// FALLTHRU
			}
			m.mu.Lock()
		}
	}

	rv := m.getTaskListLOCKED()

	m.lastTaskList.Rev = rv.Rev
	same := reflect.DeepEqual(&m.lastTaskList, rv)
	m.lastTaskList = *rv

	m.mu.Unlock()

	if !same {
		log.Printf("ctl/manager, GetTaskList, haveTasksRev: %s,"+
			" changed, rv: %+v", haveTasksRev, rv)
	}

	return rv, nil
}
Example 18
func (service *AuditSvc) writeOnClient(client *mcc.Client, eventId uint32,
	event interface{}) error {
	req, err := composeAuditRequest(eventId, event)
	if err != nil {
		return err
	}
	conn := client.Hijack()
	conn.(*net.TCPConn).SetWriteDeadline(time.Now().Add(WriteTimeout))

	if err := client.Transmit(req); err != nil {
		return err
	}

	conn.(*net.TCPConn).SetReadDeadline(time.Now().Add(ReadTimeout))
	res, err := client.Receive()
	if err != nil {
		return err
	}

	log.Printf("audit: response=%v, opcode=%v, opaque=%v, status=%v",
		res, res.Opcode, res.Opaque, res.Status)

	if res.Opcode != AuditPutCommandCode {
		return fmt.Errorf("unexpected opcode %v", res.Opcode)
	} else if req.Opaque != res.Opaque {
		return fmt.Errorf("opaque mismatch, %v over %v",
			req.Opaque, res.Opaque)
	} else if res.Status != mc.SUCCESS {
		return fmt.Errorf("unsuccessful status = %v", res.Status)
	}

	return nil
}
Example 19
func (c *CfgMetaKv) metaKVCallback(path string,
	value []byte, rev interface{}) error {
	c.m.Lock()
	defer c.m.Unlock()

	key := c.pathToKey(path)

	log.Printf("cfg_metakv: metaKVCallback, path: %v, key: %v", path, key)

	for splitKey := range cfgMetaKvSplitKeys {
		if strings.HasPrefix(key, splitKey) {
			return c.cfgMem.Refresh()
		}
	}

	if value == nil { // Deletion.
		return c.delUnlocked(key, 0)
	}

	cas, err := c.cfgMem.Set(key, value, CFG_CAS_FORCE)
	if err == nil {
		c.cfgMem.SetRev(key, cas, rev)
	}

	return err
}
Example 20
func (t *BleveDest) Rollback(partition string, rollbackSeq uint64) error {
	t.AddError("dest rollback", partition, nil, rollbackSeq, nil, nil)

	// NOTE: A rollback of any partition means a rollback of all the
	// partitions in the bindex, so lock the entire BleveDest.
	t.m.Lock()
	defer t.m.Unlock()

	wasClosed, wasPartial, err := t.partialRollbackLOCKED(partition, rollbackSeq)

	log.Printf("pindex_bleve_rollback: path: %s,"+
		" wasClosed: %t, wasPartial: %t, err: %v",
		t.path, wasClosed, wasPartial, err)

	if !wasClosed {
		t.closeLOCKED()
	}

	if !wasPartial {
		os.RemoveAll(t.path) // Full rollback to zero.
	}

	// Whether partial or full rollback, restart the BleveDest so that
	// feeds are restarted.
	t.restart()

	return nil
}
Example 21
func (r *CfgCB) OnError(err error) {
	log.Printf("cfg_cb: OnError, err: %v", err)

	go func() {
		r.FireEvent("", 0, err)
	}()
}
Example 22
func (t *BleveDest) AddError(op, partition string,
	key []byte, seq uint64, val []byte, err error) {
	log.Printf("bleve: %s, partition: %s, key: %q, seq: %d,"+
		" val: %q, err: %v", op, partition, key, seq, val, err)

	e := struct {
		Time      string
		Op        string
		Partition string
		Key       string
		Seq       uint64
		Val       string
		Err       string
	}{
		Time:      time.Now().Format(time.RFC3339Nano),
		Op:        op,
		Partition: partition,
		Key:       string(key),
		Seq:       seq,
		Val:       string(val),
		Err:       fmt.Sprintf("%v", err),
	}

	buf, err := json.Marshal(&e)
	if err == nil {
		t.m.Lock()
		for t.stats.Errors.Len() >= cbgt.PINDEX_STORE_MAX_ERRORS {
			t.stats.Errors.Remove(t.stats.Errors.Front())
		}
		t.stats.Errors.PushBack(string(buf))
		t.m.Unlock()
	}
}
Example 23
// NewCfgMetaKv returns a CfgMetaKv that reads and stores its single
// configuration file in the metakv.
func NewCfgMetaKv(nodeUUID string) (*CfgMetaKv, error) {
	cfg := &CfgMetaKv{
		prefix:       CfgMetaKvPrefix,
		nodeUUID:     nodeUUID,
		cfgMem:       NewCfgMem(),
		cancelCh:     make(chan struct{}),
		splitEntries: map[string]CfgMetaKvEntry{},
	}

	backoffStartSleepMS := 200
	backoffFactor := float32(1.5)
	backoffMaxSleepMS := 5000

	go ExponentialBackoffLoop("cfg_metakv.RunObserveChildren",
		func() int {
			err := metakv.RunObserveChildren(cfg.prefix, cfg.metaKVCallback,
				cfg.cancelCh)
			if err == nil {
				return -1 // Success, so stop the loop.
			}

			log.Printf("cfg_metakv: RunObserveChildren, err: %v", err)

			return 0 // No progress, so exponential backoff.
		},
		backoffStartSleepMS, backoffFactor, backoffMaxSleepMS)

	return cfg, nil
}
Example 24
// ParseOptions parses an options string and environment variable.
// The optionKVs string is a comma-separated string formatted as
// "key=val,key=val,...".
func ParseOptions(optionKVs string, envName string,
	options map[string]string) map[string]string {
	if options == nil {
		options = map[string]string{}
	}

	if optionKVs != "" {
		for _, kv := range strings.Split(optionKVs, ",") {
			a := strings.Split(kv, "=")
			if len(a) >= 2 {
				options[a[0]] = a[1]
			}
		}
	}

	optionsFile, exists := options["optionsFile"]
	if exists {
		log.Printf("main_flags: loading optionsFile: %q", optionsFile)

		b, err := ioutil.ReadFile(optionsFile)
		if err != nil {
			log.Printf("main_flags: reading options file: %s, err: %v",
				optionsFile, err)
		} else {
			err = json.Unmarshal(b, &options)
			if err != nil {
				log.Printf("main_flags: JSON parse option file: %s, err: %v",
					optionsFile, err)
			}
		}
	}

	optionsEnv := os.Getenv(envName)
	if optionsEnv != "" {
		log.Printf("main_flags: loading options from env: %s", envName)
		for _, kv := range strings.Split(optionsEnv, ",") {
			a := strings.Split(kv, "=")
			if len(a) >= 2 {
				log.Printf("  %s", a[0])
				options[a[0]] = a[1]
			}
		}
	}

	return options
}
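A minimal usage sketch for ParseOptions; the option keys and environment variable name are made up for illustration.

func exampleParseOptions() {
	options := ParseOptions("logLevel=debug,dataDir=/tmp/data",
		"MYTOOL_OPTIONS", nil)
	for k, v := range options {
		log.Printf("option: %s=%s", k, v)
	}
}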
Example 25
func MainWelcome(flagAliases map[string][]string) {
	flag.VisitAll(func(f *flag.Flag) {
		if flagAliases[f.Name] != nil {
			log.Printf("  -%s=%q\n", f.Name, f.Value)
		}
	})
	log.Printf("  GOMAXPROCS=%d", runtime.GOMAXPROCS(-1))

	log.Printf("main: registered bleve stores")
	types, instances := bleveRegistry.KVStoreTypesAndInstances()
	for _, s := range types {
		log.Printf("  %s", s)
	}
	for _, s := range instances {
		log.Printf("  %s", s)
	}
}
Example 26
// PlannerOnce is the main body of a PlannerLoop.
func (mgr *Manager) PlannerOnce(reason string) (bool, error) {
	log.Printf("planner: awakes, reason: %s", reason)

	if mgr.cfg == nil { // Can occur during testing.
		return false, fmt.Errorf("planner: skipped due to nil cfg")
	}

	return Plan(mgr.cfg, mgr.version, mgr.uuid, mgr.server)
}
Example 27
func (m *CtlMgr) handleTaskProgress(taskProgress taskProgress) {
	m.mu.Lock()
	defer m.mu.Unlock()

	updated := false

	var taskHandlesNext []*taskHandle

	for _, th := range m.tasks.taskHandles {
		if th.task.ID == taskProgress.taskId {
			if taskProgress.progressExists || len(taskProgress.errs) > 0 {
				revNum := m.allocRevNumLOCKED(0)

				taskNext := *th.task // Copy.
				taskNext.Rev = EncodeRev(revNum)
				taskNext.Progress = taskProgress.progress

				log.Printf("ctl/manager, revNum: %d, progress: %f",
					revNum, taskProgress.progress)

				// TODO: DetailedProgress.

				taskNext.ErrorMessage = ""
				for _, err := range taskProgress.errs {
					if len(taskNext.ErrorMessage) > 0 {
						taskNext.ErrorMessage = taskNext.ErrorMessage + "\n"
					}
					taskNext.ErrorMessage = taskNext.ErrorMessage + err.Error()
				}

				if !taskProgress.progressExists || len(taskProgress.errs) > 0 {
					taskNext.Status = service.TaskStatusFailed
				}

				taskHandlesNext = append(taskHandlesNext, &taskHandle{
					startTime: th.startTime,
					task:      &taskNext,
					stop:      th.stop,
				})
			}

			updated = true
		} else {
			taskHandlesNext = append(taskHandlesNext, th)
		}
	}

	if !updated {
		return
	}

	m.updateTasksLOCKED(func(s *tasks) {
		s.taskHandles = taskHandlesNext
	})
}
Example 28
func (service *AuditSvc) init() (err error) {
	if !service.initialized {
		client, err := couchbase.ConnectWithAuthCreds(service.uri,
			service.u, service.p)
		if err != nil {
			log.Printf("audit :error in connecting to url %s err %v",
				service.uri, err)
			return err
		}
		pool, err := client.GetPool("default")
		if err != nil {
			log.Printf("audit :error in connecting to default pool %v", err)
			return err
		}
		for _, p := range pool.Nodes {
			if p.ThisNode {
				port, ok := p.Ports["direct"]
				if !ok {
					return fmt.Errorf("Error in getting memcached port")
				}
				h := strings.Split(p.Hostname, ":")
				if len(h) <= 1 {
					return fmt.Errorf("Invalid host string")
				}
				service.kvaddr = h[0] + ":" + strconv.Itoa(port)
				break
			}
		}
		if service.kvaddr == "" {
			return fmt.Errorf("Error in getting port")
		}
		for i := 0; i < PoolClients; i++ {
			c, err := GetNewConnection(service.kvaddr)
			if err != nil {
				return fmt.Errorf("audit: Error %v", err)
			}
			service.client <- c
		}
		service.initialized = true
	}
	return err
}
Example 29
func (mh *MossHerder) OnClose(c moss.Collection) {
	mh.m.Lock()

	if mh.waiting > 0 {
		log.Printf("moss_herder: close progess, waiting: %d", mh.waiting)
	}

	delete(mh.collections, c)

	mh.m.Unlock()
}
Example 30
func (c *CfgMetaKv) Set(key string, val []byte, cas uint64) (
	uint64, error) {
	log.Printf("cfg_metakv: Set, key: %v, cas: %x, split: %t, nodeUUID: %s",
		key, cas, cfgMetaKvAdvancedKeys[key] != nil, c.nodeUUID)

	c.m.Lock()
	casResult, err := c.setLOCKED(key, val, cas)
	c.m.Unlock()

	return casResult, err
}