Example no. 1
func (s *session) WritingLoop() {
	s.lastUnsentResponseSeq = 1
	for {
		select {
		case resp, ok := <-s.backQ:
			if !ok {
				s.Close()
				s.closeSignal.Done()
				return
			}

			flush, err := s.handleResponse(resp)
			if err != nil {
				log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
				s.Close() //notify reader to exit
				continue
			}

			if flush && len(s.backQ) == 0 {
				err := s.w.Flush()
				if err != nil {
					s.Close() //notify reader to exit
					log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
					continue
				}
			}
		}
	}
}
Example no. 2
func apiPromoteServer(server models.Server, param martini.Params) (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	lock := utils.GetCoordLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("promote server %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	group, err := models.GetGroup(conn, globalEnv.ProductName(), server.GroupId)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	err = group.Promote(conn, server.Addr, globalEnv.StoreAuth())
	if err != nil {
		log.Warning(errors.ErrorStack(err))
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example no. 3
// create new server group
func apiAddServerGroup(newGroup models.ServerGroup) (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	lock := utils.GetCoordLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("add group %+v", newGroup))

	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	newGroup.ProductName = globalEnv.ProductName()

	exists, err := newGroup.Exists(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	if exists {
		return 500, "group already exists"
	}

	err = newGroup.Create(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	return jsonRetSucc()
}
Example no. 4
func apiSlotRangeSet(task RangeSetTask) (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	lock := utils.GetCoordLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("set slot range, %+v", task))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	// default set online
	if len(task.Status) == 0 {
		task.Status = string(models.SLOT_STATUS_ONLINE)
	}

	err := models.SetSlotRange(conn, globalEnv.ProductName(), task.FromSlot, task.ToSlot, task.NewGroupId, models.SlotStatus(task.Status))
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example no. 5
// experimental simple auto rebalance :)
func Rebalance(coordConn zkhelper.Conn, delay int) error {
	targetQuota, err := getQuotaMap(coordConn)
	if err != nil {
		return errors.Trace(err)
	}
	livingNodes, err := getLivingNodeInfos(coordConn)
	if err != nil {
		return errors.Trace(err)
	}
	log.Info("start rebalance")
	for _, node := range livingNodes {
		for len(node.CurSlots) > targetQuota[node.GroupId] {
			for _, dest := range livingNodes {
				if dest.GroupId != node.GroupId && len(dest.CurSlots) < targetQuota[dest.GroupId] {
					slot := node.CurSlots[len(node.CurSlots)-1]
					// create a migration task
					t := NewMigrateTask(MigrateTaskInfo{
						Delay:      delay,
						FromSlot:   slot,
						ToSlot:     slot,
						NewGroupId: dest.GroupId,
						Status:     MIGRATE_TASK_MIGRATING,
						CreateAt:   strconv.FormatInt(time.Now().Unix(), 10),
					})
					u, err := uuid.NewV4()
					if err != nil {
						return errors.Trace(err)
					}
					t.Id = u.String()

					if ok, err := preMigrateCheck(t); ok {
						// do migrate
						err := t.run()
						if err != nil {
							log.Warning(err)
							return errors.Trace(err)
						}
					} else {
						// err may be nil when the check simply returned false;
						// synthesize one instead of silently returning success
						if err == nil {
							err = errors.Errorf("pre-migrate check failed for task %+v", t)
						}
						log.Warning(err)
						return errors.Trace(err)
					}
					node.CurSlots = node.CurSlots[0 : len(node.CurSlots)-1]
					dest.CurSlots = append(dest.CurSlots, slot)
				}
			}
		}
	}
	log.Info("rebalance finish")
	return nil
}
Example no. 6
func apiGetRedisSlotInfoFromGroupId(param martini.Params) (int, string) {
	groupId, err := strconv.Atoi(param["group_id"])
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	slotId, err := strconv.Atoi(param["slot_id"])
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	conn := CreateCoordConn()
	defer conn.Close()

	g, err := models.GetGroup(conn, globalEnv.ProductName(), groupId)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	s, err := g.Master(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	if s == nil {
		return 500, "master not found"
	}

	slotInfo, err := utils.SlotsInfo(s.Addr, slotId, slotId, globalEnv.StoreAuth())
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	out, _ := json.MarshalIndent(map[string]interface{}{
		"keys":     slotInfo[slotId],
		"slot_id":  slotId,
		"group_id": groupId,
		"addr":     s.Addr,
	}, " ", "  ")

	return 200, string(out)
}
Example no. 7
func (tr *taskRunner) writeloop() {
	var err error
	tick := time.Tick(2 * time.Second)
	for {
		if tr.closed && tr.tasks.Len() == 0 {
			log.Warning("exit taskrunner", tr.redisAddr)
			tr.wgClose.Done()
			tr.c.Close()
			return
		}

		if err != nil { //clean up
			err = tr.tryRecover(err)
			if err != nil {
				continue
			}
		}

		select {
		case t := <-tr.in:
			tr.processTask(t)
		case resp := <-tr.out:
			err = tr.handleResponse(resp)
		case <-tick:
			if tr.tasks.Len() > 0 && int(time.Since(tr.latest).Seconds()) > tr.netTimeout {
				tr.c.Close()
			}
		}
	}
}
Example no. 8
File: main.go Project: disksing/tso
func main() {
	tso := &TimestampOracle{
		ticker: time.NewTicker(10 * time.Millisecond),
	}
	current := &atomicObject{
		physical: time.Now(),
	}
	tso.ts.Store(current)
	go tso.updateTicker()
	go http.ListenAndServe(":5555", nil)

	ln, err := net.Listen("tcp", ":1234")
	if err != nil {
		log.Fatal(err)
	}

	for {
		conn, err := ln.Accept()
		if err != nil {
			// log the accept error and keep serving
			log.Warning(err)
			continue
		}
		s := &session{
			r:    bufio.NewReaderSize(conn, 8192),
			w:    bufio.NewWriterSize(conn, 8192),
			conn: conn,
		}
		go tso.handleConnection(s)
	}
}
Example no. 9
func apiInitSlots(r *http.Request) (int, string) {
	r.ParseForm()

	isForce := false
	val := r.FormValue("is_force")
	if len(val) > 0 && (val == "1" || val == "true") {
		isForce = true
	}

	conn := CreateCoordConn()
	defer conn.Close()

	if !isForce {
		s, _ := models.Slots(conn, globalEnv.ProductName())
		if len(s) > 0 {
			return 500, "slots already initialized, you may use 'is_force' flag and try again."
		}
	}

	if err := models.InitSlotSet(conn, globalEnv.ProductName(), models.DEFAULT_SLOT_NUM); err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	return jsonRetSucc()
}
Example no. 10
func jsonRet(output map[string]interface{}) (int, string) {
	b, err := json.Marshal(output)
	if err != nil {
		log.Warning(err)
	}
	return 200, string(b)
}
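jsonRetSucc, called throughout these handlers, is not among the examples; below is a minimal sketch, assuming it simply wraps jsonRet with a conventional success payload (the exact fields are an assumption, not taken from the project).

// hypothetical sketch of jsonRetSucc: a success helper built on jsonRet above;
// the payload fields are assumed for illustration
func jsonRetSucc() (int, string) {
	return jsonRet(map[string]interface{}{
		"msg": "OK",
		"ret": 0,
	})
}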
Example no. 11
// actions
func apiActionGC(r *http.Request) (int, string) {
	r.ParseForm()
	keep, _ := strconv.Atoi(r.FormValue("keep"))
	secs, _ := strconv.Atoi(r.FormValue("secs"))

	conn := CreateCoordConn()
	defer conn.Close()
	lock := utils.GetCoordLock(conn, globalEnv.ProductName())
	lock.Lock("action gc")
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	var err error
	if keep >= 0 {
		err = models.ActionGC(conn, globalEnv.ProductName(), models.GC_TYPE_N, keep)
	} else if secs > 0 {
		err = models.ActionGC(conn, globalEnv.ProductName(), models.GC_TYPE_SEC, secs)
	}

	if err != nil {
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example no. 12
func (s *Server) Serve() error {
	log.Debug("this is ddbatman v4")
	s.running = true
	var sessionId int64 = 0
	for s.running {
		select {
		case sessionChan <- sessionId:
			//do nothing
		default:
			// warning: sessionChan is full
			log.Warnf("TASK_CHANNEL is full!")
		}

		conn, err := s.Accept()
		if err != nil {
			log.Warning("accept error %s", err.Error())
			continue
		}
		//allocate a sessionId for a session
		go s.onConn(conn)
		sessionId += 1
	}
	if s.restart {
		log.Debug("Begin to restart graceful")
		listenerFile, err := s.listener.(*net.TCPListener).File()
		if err != nil {
			log.Fatal("Fail to get socket file descriptor:", err)
		}
		listenerFd := listenerFile.Fd()

		os.Setenv("_GRACEFUL_RESTART", "true")
		execSpec := &syscall.ProcAttr{
			Env:   os.Environ(),
			Files: []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd(), listenerFd},
		}
		fork, err := syscall.ForkExec(os.Args[0], os.Args, execSpec)
		if err != nil {
			return fmt.Errorf("failed to forkexec: %v", err)
		}

		log.Infof("start new process success, pid %d.", fork)
	}
	timeout := time.NewTimer(time.Minute)
	wait := make(chan struct{})
	go func() {
		s.wg.Wait()
		wait <- struct{}{}
	}()

	select {
	case <-timeout.C:
		log.Error("server: wait timeout while closing the service")
		return nil
	case <-wait:
		log.Info("server: all goroutines have finished")
		return nil
	}
}
Example no. 13
func (tr *taskRunner) tryRecover(err error) error {
	log.Warning("try recover from ", err)
	tr.cleanupOutgoingTasks(err)
	//try to recover
	c, err := newRedisConn(tr.redisAddr, tr.netTimeout, PipelineBufSize, PipelineBufSize, tr.auth)
	if err != nil {
		tr.cleanupQueueTasks() //do not block dispatcher
		log.Warning(err)
		time.Sleep(1 * time.Second)
		return errors.Trace(err)
	}

	tr.c = c
	go tr.readloop()

	return nil
}
Example no. 14
func (c *Cache) Put(x interface{}) {
	c.mu.Lock()
	if len(c.saved) < cap(c.saved) {
		c.saved = append(c.saved, x)
	} else {
		log.Warning(c.name, "is full, you may need to increase pool size")
	}
	c.mu.Unlock()
}
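For context, a Get counterpart to Put might look like the sketch below; the mu, saved, and name fields come from Put above, while the pop-from-the-end and empty-pool behavior are assumptions.

// hypothetical counterpart to Put: pop the most recently saved object,
// or log and return nil when the pool is empty (behavior assumed)
func (c *Cache) Get() interface{} {
	c.mu.Lock()
	defer c.mu.Unlock()
	if n := len(c.saved); n > 0 {
		x := c.saved[n-1]
		c.saved = c.saved[:n-1]
		return x
	}
	log.Warning(c.name, "is empty, caller should allocate a new object")
	return nil
}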
Example no. 15
func (s *Server) OnGroupChange(groupId int) {
	log.Warning("group changed", groupId)

	for i, slot := range s.slots {
		if slot.slotInfo.GroupId == groupId {
			s.fillSlot(i, true)
		}
	}
}
Example no. 16
func cmdAction(argv []string) (err error) {
	usage := `usage: reborn-config action (gc [-n <num> | -s <seconds>] | remove-lock | remove-fence)

options:
	gc:
	gc -n N		keep last N actions;
	gc -s Sec	keep last Sec seconds actions;

	remove-lock	force remove zookeeper lock;
	remove-fence	force remove dead fence;
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return errors.Trace(err)
	}

	if args["remove-lock"].(bool) {
		return errors.Trace(runRemoveLock())
	}

	if args["remove-fence"].(bool) {
		return errors.Trace(runRemoveFence())
	}

	if args["gc"].(bool) {
		if args["-n"].(bool) {
			n, err := strconv.Atoi(args["<num>"].(string))
			if err != nil {
				log.Warning(err)
				return errors.Trace(err)
			}
			return runGCKeepN(n)
		} else if args["-s"].(bool) {
			sec, err := strconv.Atoi(args["<seconds>"].(string))
			if err != nil {
				log.Warning(err)
				return errors.Trace(err)
			}
			return runGCKeepNSec(sec)
		}
	}

	return nil
}
Example no. 17
func filterErr(err error, ignoreErr bool) error {
	if err == nil {
		return nil
	}
	if !ignoreErr {
		return errors.Trace(err)
	}
	log.Warning("ignore err:%v", errors.ErrorStack(err))
	return nil
}
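A typical call site might look like the sketch below; removeTempNodes and commitState are hypothetical helpers, used only to show how the ignoreErr flag separates best-effort steps from required ones.

// hypothetical usage: cleanup failures are only logged, required steps propagate
_ = filterErr(removeTempNodes(conn), true)
if err := filterErr(commitState(conn), false); err != nil {
	return errors.Trace(err)
}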
Example no. 18
func (txn *themisTxn) commitSecondarySync() {
	for _, r := range txn.secondaryRows {
		err := txn.rpc.commitSecondaryRow(r.tbl, r.row, r.mutationList(false), txn.startTs, txn.commitTs)
		if err != nil {
			// fail of secondary commit will not stop the commits of next
			// secondaries
			log.Warning(err)
		}
	}
}
Example no. 19
func getAllProxyOps() int64 {
	conn := CreateCoordConn()
	defer conn.Close()

	proxies, err := models.ProxyList(conn, globalEnv.ProductName(), nil)
	if err != nil {
		log.Warning(err)
		return -1
	}

	var total int64
	for _, p := range proxies {
		i, err := p.Ops()
		if err != nil {
			log.Warning(err)
		}
		total += i
	}
	return total
}
Example no. 20
func CheckUlimit(min int) {
	ulimitN, err := exec.Command("/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		log.Warning("get ulimit failed", err)
	}

	n, err := strconv.Atoi(strings.TrimSpace(string(ulimitN)))
	if err != nil || n < min {
		log.Fatalf("ulimit too small: %d, should be at least %d", n, min)
	}
}
Example no. 21
func apiGetServerGroup(param martini.Params) (int, string) {
	id := param["id"]
	groupId, err := strconv.Atoi(id)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	conn := CreateCoordConn()
	defer conn.Close()

	group, err := models.GetGroup(conn, globalEnv.ProductName(), groupId)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	b, err := json.MarshalIndent(group, " ", "  ")
	return 200, string(b)
}
Example no. 22
// for debug
func getAllProxyDebugVars() map[string]map[string]interface{} {
	conn := CreateCoordConn()
	defer conn.Close()

	proxies, err := models.ProxyList(conn, globalEnv.ProductName(), nil)
	if err != nil {
		log.Warning(err)
		return nil
	}

	ret := make(map[string]map[string]interface{})
	for _, p := range proxies {
		m, err := p.DebugVars()
		if err != nil {
			log.Warning(err)
		}
		ret[p.ID] = m
	}
	return ret
}
Example no. 23
func apiGetRedisSlotInfo(param martini.Params) (int, string) {
	addr := param["addr"]
	slotId, err := strconv.Atoi(param["id"])
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	slotInfo, err := utils.SlotsInfo(addr, slotId, slotId, globalEnv.StoreAuth())
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	out, _ := json.MarshalIndent(map[string]interface{}{
		"keys":    slotInfo[slotId],
		"slot_id": slotId,
	}, " ", "  ")

	return 200, string(out)
}
Example no. 24
func apiRemoveFence() (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	err := models.ForceRemoveDeadFence(conn, globalEnv.ProductName())
	if err != nil {
		log.Warning(errors.ErrorStack(err))
		return 500, err.Error()
	}
	return jsonRetSucc()
}
Example no. 25
func apiGetServerGroupList() (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	groups, err := models.ServerGroups(conn, globalEnv.ProductName())
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	b, err := json.MarshalIndent(groups, " ", "  ")
	return 200, string(b)
}
Example no. 26
func apiGetProxyList(param martini.Params) (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	proxies, err := models.ProxyList(conn, globalEnv.ProductName(), nil)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	b, err := json.MarshalIndent(proxies, " ", "  ")
	return 200, string(b)
}
Example no. 27
func apiGetSlots() (int, string) {
	conn := CreateCoordConn()
	defer conn.Close()

	slots, err := models.Slots(conn, globalEnv.ProductName())
	if err != nil {
		log.Warning("Error getting slot info, try init slots first? err: ", err)
		return 500, err.Error()
	}

	b, err := json.MarshalIndent(slots, " ", "  ")
	return 200, string(b)
}
Example no. 28
// add redis server to exist server group
func apiAddServerToGroup(server models.Server, param martini.Params) (int, string) {
	groupId, _ := strconv.Atoi(param["id"])

	conn := CreateCoordConn()
	defer conn.Close()

	lock := utils.GetCoordLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("add server to group,  %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	// check group exists first
	serverGroup := models.NewServerGroup(globalEnv.ProductName(), groupId)

	exists, err := serverGroup.Exists(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	// create new group if not exists
	if !exists {
		if err := serverGroup.Create(conn); err != nil {
			return 500, err.Error()
		}
	}

	if err := serverGroup.AddServer(conn, &server, globalEnv.StoreAuth()); err != nil {
		log.Warning(errors.ErrorStack(err))
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example no. 29
func (s *Server) stopTaskRunners() {
	wg := &sync.WaitGroup{}
	log.Warning("taskrunner count", len(s.pipeConns))
	wg.Add(len(s.pipeConns))
	for _, tr := range s.pipeConns {
		tr.in <- wg
	}
	wg.Wait()

	// remove all
	for k := range s.pipeConns {
		delete(s.pipeConns, k)
	}
}
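Example no. 7 shows the writeloop calling tr.wgClose.Done() once tr.closed is set and the task queue drains; the sketch below illustrates one way processTask could recognize the *sync.WaitGroup that stopTaskRunners sends down tr.in. The type switch and shutdown path are assumptions, not the project's actual implementation.

// hypothetical sketch: treating a *sync.WaitGroup received on tr.in as a
// shutdown sentinel; the tr.closed and tr.wgClose fields come from Example no. 7
func (tr *taskRunner) processTask(t interface{}) {
	switch v := t.(type) {
	case *sync.WaitGroup:
		// stop accepting new work and remember the wait group so the
		// writeloop can ack once outstanding tasks drain
		tr.closed = true
		tr.wgClose = v
	default:
		// regular pipeline task handling is omitted from this sketch
	}
}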
Example no. 30
func (s *Server) handleTopoEvent() {
	for {
		select {
		case r := <-s.reqCh:
			if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
				s.bufferedReq.PushBack(r)
				continue
			}

			for e := s.bufferedReq.Front(); e != nil; {
				next := e.Next()
				if s.dispatch(e.Value.(*PipelineRequest)) {
					s.bufferedReq.Remove(e)
				}
				e = next
			}

			if !s.dispatch(r) {
				log.Fatalf("should never happend, %+v, %+v", r, s.slots[r.slotIdx].slotInfo)
			}
		case e := <-s.evtbus:
			switch e.(type) {
			case *killEvent:
				s.handleMarkOffline()
				e.(*killEvent).done <- nil
			default:
				if s.top.IsSessionExpiredEvent(e) {
					log.Fatalf("session expired: %+v", e)
				}

				evtPath := GetEventPath(e)
				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.ID, e, s.lastActionSeq)
				if strings.HasPrefix(evtPath, models.GetActionResponsePath(s.conf.ProductName)) {
					seq, err := strconv.Atoi(path.Base(evtPath))
					if err != nil {
						log.Warning(err)
					} else {
						if seq < s.lastActionSeq {
							log.Infof("ignore, lastActionSeq %d, seq %d", s.lastActionSeq, seq)
							continue
						}
					}
				}

				s.processAction(e)
			}
		}
	}
}