Пример #1
0
// background loader, copy chan into map, execute dump every SAVE_DELAY
func (s *server) loader_task() {
	// keys received on s.wait accumulate here until the next dump
	pending := make(map[string]bool)
	save_timer := time.After(SAVE_DELAY)
	report_timer := time.After(COUNT_DELAY)

	// flush outstanding keys on SIGTERM before exiting
	term := make(chan os.Signal, 1)
	signal.Notify(term, syscall.SIGTERM)

	// total number of keys persisted so far
	var saved uint64

	for {
		select {
		case key := <-s.wait: // mark a key as dirty
			pending[key] = true
		case <-save_timer: // periodic persistence
			if len(pending) > 0 {
				saved += uint64(len(pending))
				s.dump(pending)
				pending = make(map[string]bool)
			}
			save_timer = time.After(SAVE_DELAY)
		case <-report_timer: // periodic stats logging
			log.Info("num records saved:", saved)
			report_timer = time.After(COUNT_DELAY)
		case <-term: // graceful shutdown: flush remaining keys, then exit
			if len(pending) > 0 {
				s.dump(pending)
			}
			log.Info("SIGTERM")
			os.Exit(0)
		}
	}
}
Пример #2
0
// get stored service name
func (p *service_pool) load_names() {
	p.service_names = make(map[string]bool)

	// borrow an etcd client from the pool; returned when done
	client := p.client_pool.Get().(*etcd.Client)
	defer p.client_pool.Put(client)

	// fetch the names file from etcd
	log.Info("reading names:", DEFAULT_NAME_FILE)
	resp, err := client.Get(DEFAULT_NAME_FILE, false, false)
	if err != nil {
		log.Error(err)
		return
	}

	// the names node must be a plain file, not a directory
	if resp.Node.Dir {
		log.Error("names is not a file")
		return
	}

	// one service name per line; register each under DEFAULT_SERVICE_PATH
	names := strings.Split(resp.Node.Value, "\n")
	log.Info("all service names:", names)
	for _, name := range names {
		p.service_names[DEFAULT_SERVICE_PATH+"/"+strings.TrimSpace(name)] = true
	}

	p.enable_name_check = true
}
Пример #3
0
// connect to all services
func (p *service_pool) connect_all(directory string) {
	// borrow an etcd client from the pool; returned when done
	client := p.client_pool.Get().(*etcd.Client)
	defer p.client_pool.Put(client)

	// recursively read everything under the directory (sorted, recursive)
	log.Info("connecting services under:", directory)
	resp, err := client.Get(directory, true, true)
	if err != nil {
		log.Error(err)
		return
	}

	// the root node must itself be a directory
	if !resp.Node.Dir {
		log.Error("not a directory")
		return
	}

	// each child directory holds one node per service instance
	for _, child := range resp.Node.Nodes {
		if !child.Dir {
			log.Warning("malformed service directory:", child.Key)
			continue
		}
		for _, instance := range child.Nodes {
			p.add_service(instance.Key, instance.Value)
		}
	}
	log.Info("services add complete")
}
Пример #4
0
func main() {
	// to catch all uncaught panic
	defer utils.PrintPanicStack()

	// expose pprof profiling endpoint
	go func() {
		log.Info(http.ListenAndServe("0.0.0.0:6060", nil))
	}()

	// set log prefix
	log.SetPrefix(SERVICE)

	// resolve address & start listening
	addr, err := net.ResolveTCPAddr("tcp4", _port)
	checkError(err)

	listener, err := net.ListenTCP("tcp", addr)
	checkError(err)

	log.Info("listening on:", listener.Addr())

	// startup
	startup()

	// accept until the die channel is closed
ACCEPT:
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Warning("accept failed:", err)
			continue
		}
		// one reader goroutine per incoming connection
		go handleClient(conn)

		// non-blocking poll of the shutdown signal
		select {
		case <-die:
			listener.Close()
			break ACCEPT
		default:
		}
	}

	// server closed, wait forever
	// other options:
	// select{} 	-- may cause deadlock detected error, not tested yet
	for {
		<-time.After(time.Second)
	}
}
Пример #5
0
// archive_task writes each pending message into the current redolog keyed by
// its "TS" field, rotates the log every REDO_ROTATE_INTERVAL, and closes the
// log cleanly on SIGTERM before exiting.
func (arch *Archiver) archive_task() {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM)
	timer := time.After(REDO_ROTATE_INTERVAL)
	db := arch.new_redolog()
	for {
		select {
		case msg := <-arch.pending:
			// decode just enough to extract the "TS" field used as the key
			var record map[string]interface{}
			err := msgpack.Unmarshal(msg, &record)
			if err != nil {
				log.Error(err)
				continue
			}

			// FIX: the transaction error was silently discarded; a failed
			// Put meant the message was lost with no trace in the log.
			if err := db.Update(func(tx *bolt.Tx) error {
				b := tx.Bucket([]byte(BOLTDB_BUCKET))
				return b.Put([]byte(fmt.Sprint(record["TS"])), msg)
			}); err != nil {
				log.Error(err)
			}
		case <-timer:
			// rotate redolog
			db.Close()
			db = arch.new_redolog()
			timer = time.After(REDO_ROTATE_INTERVAL)
		case <-sig:
			// flush & exit on SIGTERM
			db.Close()
			log.Info("SIGTERM")
			os.Exit(0)
		}
	}
}
Пример #6
0
//---------------------------------------------------------- store a file into GridFS
func SaveFile(filename string, buf []byte) bool {
	// work on a copy of the global mongo session
	ms := _global_ms.Copy()
	defer ms.Close()

	gridfs := ms.DB("").GridFS("fs")

	// remove any existing file with the same name first
	if err := gridfs.Remove(filename); err != nil {
		log.Critical("gridfs", filename, err)
		return false
	}

	// create the replacement file
	file, err := gridfs.Create(filename)
	if err != nil {
		log.Critical("gridfs", filename, err)
		return false
	}

	// write the content; n is logged for diagnostics on failure
	n, err := file.Write(buf)
	if err != nil {
		log.Critical("gridfs", filename, n, err)
		return false
	}

	// Close flushes the remaining chunks, so its error must be checked too
	if err := file.Close(); err != nil {
		log.Critical("gridfs", filename, err)
		return false
	}
	log.Info("gridfs", filename, "saved to GridFS!!")
	return true
}
Пример #7
0
func (s *server) init() {
	// etcd endpoints: default, overridable via ETCD_HOST (';'-separated)
	s.machines = []string{DEFAULT_ETCD}
	if env := os.Getenv("ETCD_HOST"); env != "" {
		s.machines = strings.Split(env, ";")
	}

	// pre-fill the client pool with CLIENT_MAX etcd clients
	s.client_pool = make(chan *etcd.Client, CLIENT_MAX)
	for i := 0; i < CLIENT_MAX; i++ {
		s.client_pool <- etcd.NewClient(s.machines)
	}

	// machine id: taken from the environment when set, derived otherwise
	env := os.Getenv(ENV_MACHINE_ID)
	if env == "" {
		s.init_machine_id()
		return
	}
	id, err := strconv.Atoi(env)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
	s.machine_id = (uint64(id) & MACHINE_ID_MASK) << 12
	log.Info("machine id specified:", id)
}
Пример #8
0
// persistence endpoints into db
// persistence_task collects dirty endpoint keys from s.pending and dumps them
// to db every CHECK_INTERVAL; on SIGTERM/SIGINT it performs a final dump,
// closes the db, and exits.
func (s *server) persistence_task() {
	timer := time.After(CHECK_INTERVAL)
	db := s.open_db()
	changes := make(map[uint64]bool)
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)

	for {
		select {
		case key := <-s.pending:
			// endpoint marked dirty; persisted on the next tick
			changes[key] = true
		case <-timer:
			s.dump(db, changes)
			if len(changes) > 0 {
				// FIX: message had a typo ("perisisted") and a dangling
				// trailing colon with no corresponding format argument
				log.Infof("persisted %v endpoints", len(changes))
			}
			changes = make(map[uint64]bool)
			timer = time.After(CHECK_INTERVAL)
		case nr := <-sig:
			// final flush before exiting on signal
			s.dump(db, changes)
			db.Close()
			log.Info(nr)
			os.Exit(0)
		}
	}
}
Пример #9
0
func (arch *Archiver) init() {
	arch.pending = make(chan []byte)
	arch.stop = make(chan bool)

	// build the nsq consumer for TOPIC/CHANNEL
	cfg := nsq.NewConfig()
	consumer, err := nsq.NewConsumer(TOPIC, CHANNEL, cfg)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}

	// forward every message body to the archiving goroutine
	consumer.AddHandler(nsq.HandlerFunc(func(msg *nsq.Message) error {
		arch.pending <- msg.Body
		return nil
	}))

	// nsqlookupd addresses: default, overridable via environment (';'-separated)
	lookupds := []string{DEFAULT_NSQLOOKUPD}
	if env := os.Getenv(ENV_NSQLOOKUPD); env != "" {
		lookupds = strings.Split(env, ";")
	}

	// connect to nsqlookupd
	log.Trace("connect to nsqlookupds ip:", lookupds)
	if err := consumer.ConnectToNSQLookupds(lookupds); err != nil {
		log.Critical(err)
		return
	}
	log.Info("nsqlookupd connected")

	// start consuming into the redolog
	go arch.archive_task()
}
Пример #10
0
func main() {
	// catch any uncaught panic
	defer utils.PrintPanicStack()

	// pprof endpoint
	go func() {
		log.Info(http.ListenAndServe("0.0.0.0:6060", nil))
	}()

	log.SetPrefix(SERVICE)

	// resolve listen address
	addr, err := net.ResolveTCPAddr("tcp4", _port)
	checkError(err)

	listener, err := net.ListenTCP("tcp", addr)
	checkError(err)

	log.Info("listening on:", listener.Addr())

	// init services
	sp.Init()

	// startup
	startup()

	// accept until the die channel is closed
ACCEPT:
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Warning("accept failed:", err)
			continue
		}
		// one goroutine per incoming connection
		go handleClient(conn)

		// non-blocking check of the close signal
		select {
		case <-die:
			listener.Close()
			break ACCEPT
		default:
		}
	}

	// server closed, wait forever
	for {
		<-time.After(time.Second)
	}
}
Пример #11
0
// handle unix signals
func sig_handler() {
	defer utils.PrintPanicStack()

	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGTERM)

	for sig := range ch {
		if sig != syscall.SIGTERM {
			continue
		}
		// shut down the agent: broadcast close, wait for agents, then exit
		close(die)
		log.Info("sigterm received")
		log.Info("waiting for agents close, please wait...")
		wg.Wait()
		log.Info("agent shutdown.")
		os.Exit(0)
	}
}
Пример #12
0
func init() {
	// padding content update procedure
	go func() {
		for {
			// refill the padding buffer with fresh bytes drawn from the LCG
			for i := range _padding {
				_padding[i] = byte(<-utils.LCG)
			}
			log.Info("Padding Updated:", _padding)
			// wait before the next refresh
			<-time.After(PADDING_UPDATE_PERIOD * time.Second)
		}
	}()
}
Пример #13
0
// main starts the gRPC ranking service listening on _port.
func main() {
	log.SetPrefix(SERVICE)

	// listen
	lis, err := net.Listen("tcp", _port)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
	log.Info("listening on ", lis.Addr())

	// register the service
	s := grpc.NewServer()
	ins := &server{}
	ins.init()
	pb.RegisterRankingServiceServer(s, ins)

	// serve
	// FIX: the error returned by Serve was silently discarded, hiding
	// listener/accept failures from the logs.
	if err := s.Serve(lis); err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
}
Пример #14
0
// archive_task batches pending messages into the current redolog under
// monotonically increasing big-endian keys, rotates the log every
// REDO_ROTATE_INTERVAL, and closes it cleanly on SIGTERM before exiting.
func (arch *Archiver) archive_task() {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM)
	timer := time.After(REDO_ROTATE_INTERVAL)
	sync_ticker := time.NewTicker(SYNC_INTERVAL)
	db := arch.new_redolog()
	key := make([]byte, 8) // scratch buffer for the sequence key
	for {
		select {
		case <-sync_ticker.C:
			// drain only what is queued right now, bounding the transaction
			n := len(arch.pending)
			if n == 0 {
				continue
			}

			// FIX: commit errors from db.Update were silently discarded,
			// potentially losing up to n drained messages without a trace.
			if err := db.Update(func(tx *bolt.Tx) error {
				b := tx.Bucket([]byte(BOLTDB_BUCKET))
				for i := 0; i < n; i++ {
					id, err := b.NextSequence()
					if err != nil {
						log.Critical(err)
						continue
					}
					binary.BigEndian.PutUint64(key, id)
					if err = b.Put(key, <-arch.pending); err != nil {
						log.Critical(err)
						continue
					}
				}
				return nil
			}); err != nil {
				log.Critical(err)
			}
		case <-timer:
			// rotate redolog
			db.Close()
			db = arch.new_redolog()
			timer = time.After(REDO_ROTATE_INTERVAL)
		case <-sig:
			// flush & exit on SIGTERM
			db.Close()
			log.Info("SIGTERM")
			os.Exit(0)
		}
	}
}
Пример #15
0
// new_redolog opens a fresh bolt database named after the current time and
// creates the archive bucket inside it. The process exits if either step
// fails, since archiving cannot proceed without a usable redolog.
func (arch *Archiver) new_redolog() *bolt.DB {
	file := DATA_DIRECTORY + time.Now().Format(REDO_TIME_FORMAT)
	log.Info(file)
	db, err := bolt.Open(file, 0600, nil)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
	// create bucket
	// FIX: a bucket-creation failure was logged but otherwise ignored,
	// returning a db whose later bucket lookups would yield nil and panic.
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte(BOLTDB_BUCKET))
		return err
	}); err != nil {
		log.Criticalf("create bucket: %s", err)
		os.Exit(-1)
	}
	return db
}
Пример #16
0
// main starts the gRPC game service listening on _port.
func main() {
	log.SetPrefix(SERVICE)

	// listen
	lis, err := net.Listen("tcp", _port)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
	log.Info("listening on ", lis.Addr())

	// register the service
	s := grpc.NewServer()
	ins := new(server)
	pb.RegisterGameServiceServer(s, ins)

	// init services
	sp.Init("snowflake")

	// serve
	// FIX: the error returned by Serve was silently discarded, hiding
	// listener/accept failures from the logs.
	if err := s.Serve(lis); err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
}
Пример #17
0
func (s *server) init() {
	s.client_pool = make(chan etcd.KeysAPI, CONCURRENT)
	s.ch_proc = make(chan chan uint64, UUID_QUEUE)

	// pre-fill the pool with CONCURRENT etcd KeysAPI clients
	for i := 0; i < CONCURRENT; i++ {
		s.client_pool <- etcdclient.KeysAPI()
	}

	// machine id: taken from the environment when set, derived otherwise
	if env := os.Getenv(ENV_MACHINE_ID); env == "" {
		s.init_machine_id()
	} else if id, err := strconv.Atoi(env); err != nil {
		log.Critical(err)
		os.Exit(-1)
	} else {
		s.machine_id = (uint64(id) & MACHINE_ID_MASK) << 12
		log.Info("machine id specified:", id)
	}

	// start serving uuid requests
	go s.uuid_task()
}
Пример #18
0
// watcher for data change in etcd directory
// Watches DEFAULT_SERVICE_PATH recursively and keeps the pool's service
// table in sync: empty values are treated as deletions, non-empty values
// as adds/updates. On watch failure it retries after RETRY_DELAY.
func (p *service_pool) watcher() {
	// take an etcd client from the pool; note this function never returns,
	// so the deferred Put never actually runs and the client is held forever
	client := p.client_pool.Get().(*etcd.Client)
	defer func() {
		p.client_pool.Put(client)
	}()

	for {
		// fresh response channel per Watch attempt; presumably closed by
		// the etcd client when the watch terminates (TODO confirm), which
		// ends the consumer goroutine below via the !ok branch
		ch := make(chan *etcd.Response, 10)
		go func() {
			for {
				if resp, ok := <-ch; ok {
					// directory events carry no service endpoint data
					if resp.Node.Dir {
						continue
					}
					key, value := resp.Node.Key, resp.Node.Value
					// empty value => node removed; otherwise add/update
					if value == "" {
						log.Tracef("node delete: %v", key)
						p.remove_service(key)
					} else {
						log.Tracef("node add: %v %v", key, value)
						p.add_service(key, value)
					}
				} else {
					// channel closed: watch ended, stop consuming
					return
				}
			}
		}()

		// blocking recursive watch from index 0; events flow into ch
		log.Info("Watching:", DEFAULT_SERVICE_PATH)
		_, err := client.Watch(DEFAULT_SERVICE_PATH, 0, true, ch, nil)
		if err != nil {
			log.Critical(err)
		}
		// back off before re-establishing the watch
		<-time.After(RETRY_DELAY)
	}
}