Example No. 1
// In the case where this server is running and another one in the cluster stops responding, at some point this server will have to just write
// requests to disk. When the downed server comes back up, it's this server's responsibility to send out any writes that were queued up. If
// the yield function returns nil then the request is committed.
func (self *WAL) RecoverServerFromRequestNumber(requestNumber uint32, shardIds []uint32, yield func(request *protocol.Request, shardId uint32) error) error {
	// nothing to replay if there are no log files yet
	if len(self.logFiles) == 0 {
		return nil
	}

	var firstLogFile int
	state := self.logFiles[len(self.logFiles)-1].state
outer:
	for _, logFile := range self.logFiles[firstLogFile:] {
		logger.Info("Replaying from %s", logFile.file.Name())
		count := 0
		ch, stopChan := logFile.replayFromRequestNumber(shardIds, requestNumber, state)
		for {
			x := <-ch
			if x == nil {
				logger.Info("%s yielded %d requests", logFile.file.Name(), count)
				continue outer
			}

			if x.err != nil {
				return x.err
			}

			if err := yield(x.request, x.shardId); err != nil {
				stopChan <- struct{}{}
				return err
			}
			count++
		}
		close(stopChan)
	}
	return nil
}
Example No. 2
func postnetwork(container *global.Container, ip string) {
	gulpPort, _ := config.GetInt("docker:gulp_port")

	url := "http://" + container.SwarmNode + ":" + strconv.Itoa(gulpPort) + "/docker/networks"
	log.Info("URL:> %s", url)

	bridge, _ := config.GetString("docker:bridge")
	gateway, _ := config.GetString("docker:gateway")

	data := &global.DockerNetworksInfo{Bridge: bridge, ContainerId: container.ContainerID, IpAddr: ip, Gateway: gateway}
	res2B, _ := json.Marshal(data)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(res2B))
	if err != nil {
		log.Error("failed to build gulpd request : %s", err)
		return
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Error("gulpd client request failed : %s", err)
		return
	}
	defer resp.Body.Close()

	log.Info("response Status : %s", resp.Status)
	log.Info("response Headers : %s", resp.Header)
	body, _ := ioutil.ReadAll(resp.Body)
	log.Info("response Body : %s", string(body))
}
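For comparison, here is a minimal, self-contained sketch of the same POST-a-JSON-body pattern using only the standard library; the payload type, URL, and field values below are illustrative and are not part of the project above.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// networkInfo is an illustrative payload, standing in for a struct like DockerNetworksInfo.
type networkInfo struct {
	Bridge      string `json:"bridge"`
	ContainerId string `json:"container_id"`
	IpAddr      string `json:"ip_addr"`
	Gateway     string `json:"gateway"`
}

// postJSON marshals the payload, POSTs it with a JSON content type, and prints the response.
func postJSON(url string, payload interface{}) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	fmt.Printf("status: %s body: %s\n", resp.Status, respBody)
	return nil
}

func main() {
	info := networkInfo{Bridge: "docker0", ContainerId: "abc123", IpAddr: "10.0.0.2", Gateway: "10.0.0.1"}
	if err := postJSON("http://localhost:6666/docker/networks", &info); err != nil {
		fmt.Println("post failed:", err)
	}
}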
Example No. 3
func (dm *DataMigrator) migrateDir(name string) {
	migrateMarkerFile := filepath.Join(dm.shardDir(name), MIGRATED_MARKER)
	if _, err := os.Stat(migrateMarkerFile); err == nil {
		log.Info("Already migrated %s. Skipping", name)
		return
	}
	log.Info("Migrating %s", name)
	shard, err := dm.getShard(name)
	if err != nil {
		log.Error("Migration error getting shard: %s", err.Error())
		return
	}
	defer shard.Close()
	databases := dm.clusterConfig.GetDatabases()
	for _, database := range databases {
		err := dm.migrateDatabaseInShard(database.Name, shard)
		if err != nil {
			log.Error("Error migrating database %s: %s", database.Name, err.Error())
			return
		}
	}
	err = ioutil.WriteFile(migrateMarkerFile, []byte("done.\n"), 0644)
	if err != nil {
		log.Error("Problem writing migration marker for shard %s: %s", name, err.Error())
	}
}
Example No. 4
func (self *Server) Checker() {
	log.Info("verifying rabbitmq")
	factor, err := amqp.Factory()
	if err != nil {
		log.Error("Error: %v\nFailed to get the queue", err)
		return
	}

	_, connerr := factor.Dial()
	if connerr != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n Please start rabbitmq service.\n", connerr)
		os.Exit(1)
	}
	log.Info("rabbitmq connected [ok]")

	log.Info("verifying riak")

	rconn, rerr := db.Conn("connection")
	if rerr != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n Please start Riak service.\n", rerr)
		os.Exit(1)
	}

	data := "sampledata"
	ferr := rconn.StoreObject("sampleobject", data)
	if ferr != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n Please start Riak service.\n", ferr)
		os.Exit(1)
	}
	defer rconn.Close()
	log.Info("riak connected [ok]")

}
Example No. 5
func (self *ClusterConfiguration) AddPotentialServer(server *ClusterServer) {
	self.serversLock.Lock()
	defer self.serversLock.Unlock()
	server.State = Potential
	self.servers = append(self.servers, server)
	server.Id = uint32(len(self.servers))
	log.Info("Added server to cluster config: %d, %s, %s", server.Id, server.RaftConnectionString, server.ProtobufConnectionString)
	log.Info("Checking whether this is the local server new: %s, local: %s\n", self.config.ProtobufConnectionString(), server.ProtobufConnectionString)
	if server.RaftName != self.LocalRaftName {
		log.Info("Connecting to ProtobufServer: %s", server.ProtobufConnectionString, self.config.ProtobufConnectionString())
		if server.connection == nil {
			server.connection = self.connectionCreator(server.ProtobufConnectionString)
			server.Connect()
		}
		writeBuffer := NewWriteBuffer(fmt.Sprintf("%d", server.GetId()), server, self.wal, server.Id, self.config.PerServerWriteBufferSize)
		self.writeBuffers = append(self.writeBuffers, writeBuffer)
		server.SetWriteBuffer(writeBuffer)
		server.StartHeartbeat()
	} else if !self.addedLocalServer {
		log.Info("Added the local server")
		self.LocalServerId = server.Id
		self.addedLocalServerWait <- true
		self.addedLocalServer = true
	}
}
Example No. 6
func cmdStoreRun(args []string) error {
	if *cpu < 1 {
		*cpu = runtime.NumCPU()
	}
	runtime.GOMAXPROCS(*cpu)
	log4go.Info("configuration path: %s", *storeConfPath)
	log4go.Info("volume path: %s", *volumeDir)
	if _, err := os.Stat(*storeConfPath); err != nil {
		if os.IsNotExist(err) {
			if err = os.MkdirAll(*storeConfPath, os.ModePerm); err != nil {
				return err
			}
			if err = ioutil.WriteFile(filepath.Join(*storeConfPath, "rabbitfs.conf.json"), []byte(defaultConfig), os.ModePerm); err != nil {
				return err
			}
		} else {
			return err
		}
	}
	httpAddr := fmt.Sprintf("%s:%d", *storeIP, *storePort)
	store, err := server.NewStoreServer(
		*storeConfPath,
		*volumeDir,
		float32(*garbageThreshold),
		httpAddr,
		time.Duration((*storeTimeout))*time.Millisecond,
	)
	if err != nil {
		return err
	}
	store.ListenAndServe()
	return nil
}
Example No. 7
// In the case where this server is running and another one in the
// cluster stops responding, at some point this server will have to
// just write requests to disk. When the downed server comes back up,
// it's this server's responsibility to send out any writes that were
// queued up. If the yield function returns nil then the request is
// committed.
func (self *WAL) RecoverServerFromRequestNumber(requestNumber uint32, shardIds []uint32, yield func(request *protocol.Request, shardId uint32) error) error {
	// don't replay if we don't have any log files yet
	if len(self.logFiles) == 0 {
		return nil
	}

	firstIndex := 0
	firstOffset := int64(-1)
	// find the log file from which replay will start; if the request
	// number isn't in range there is nothing to replay
	if !self.isInRange(requestNumber) {
		return nil
	}

	for idx, logIndex := range self.logIndex {
		logger.Debug("Trying to find request %d in %s", requestNumber, self.logFiles[idx].file.Name())
		if firstOffset = logIndex.requestOffset(requestNumber); firstOffset != -1 {
			logger.Debug("Found reqeust %d in %s at offset %d", requestNumber, self.logFiles[idx].file.Name(), firstOffset)
			firstIndex = idx
			break
		}
	}

	// the request must be at the end of the current log file
	if firstOffset == -1 {
		firstIndex = len(self.logIndex) - 1
		firstOffset = self.logIndex[firstIndex].requestOrLastOffset(requestNumber)
	}
outer:
	for idx := firstIndex; idx < len(self.logFiles); idx++ {
		logFile := self.logFiles[idx]
		if idx > firstIndex {
			firstOffset = -1
		}
		logger.Info("Replaying from %s:%d", logFile.file.Name(), firstOffset)
		count := 0
		ch, stopChan := logFile.dupAndReplayFromOffset(shardIds, firstOffset, requestNumber)
		for {
			x := <-ch
			if x == nil {
				logger.Info("%s yielded %d requests", logFile.file.Name(), count)
				continue outer
			}

			if x.err != nil {
				return x.err
			}

			logger.Debug("Yielding request %d", x.request.GetRequestNumber())
			if err := yield(x.request, x.shardId); err != nil {
				logger.Debug("Stopping replay due to error: %s", err)
				stopChan <- struct{}{}
				return err
			}
			count++
		}
		close(stopChan)
	}
	return nil
}
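A hedged sketch of how a caller might drive the replay above, based only on the signature shown; the request number, shard ids, and the body of the yield function are illustrative placeholders.
// Hypothetical caller: replay everything queued for the recovered server since
// request 42 on shards 1 and 2. Returning nil from the yield function marks the
// request as committed; returning an error aborts the replay.
resend := func(req *protocol.Request, shardId uint32) error {
	// forward req to the recovered server here (illustrative placeholder)
	return nil
}
if err := wal.RecoverServerFromRequestNumber(42, []uint32{1, 2}, resend); err != nil {
	logger.Error("Replay failed: %s", err)
}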
Example No. 8
File: sync.go Project: hushi55/Grep
// psync command
func psync() {

	runid, offset := initRedisRepilcationInfo()

	log.Info("psync cmd starting ...")
	cmd := NewStringCmd("PSYNC", runid, offset)

	addr := fmt.Sprintf("%s:%s", Conf.RedisMasterIP, Conf.RedisMasterPort)
	cn, err := net.DialTimeout("tcp", addr, time.Minute*30)

	if err != nil {
		log.Error("connect master error : %s", err)
		return
	}
	log.Info("connected to redis master : %s", addr)

	time.Sleep(time.Second * 5)

	// init runid offset
	conn := &conn{
		netcn:  cn,
		buf:    make([]byte, 1024*1024*32),
		runid:  runid,
		offset: offset,
	}
	conn.rd = bufio.NewReader(conn)

	conn.WriteTimeout = time.Minute * 30
	conn.ReadTimeout = time.Minute * 30

	redisAuth(conn)

	sync(cmd, conn)

}
Example No. 9
File: sync.go Project: hushi55/Grep
func delta(val interface{}) {

	if val == nil {
		log.Warn("redis master delta command is nil")
		return
	}

	log.Info("redis master delta command is %s", val)

	switch vv := val.(type) {
	case []interface{}:

		if vstring, ok := vv[0].(string); ok {
			if strings.ToLower(vstring) == "ping" {
				replAck <- true
				return
			} else if strings.ToLower(vstring) == "publish" && len(vv) > 1 { //"PUBLISH","__sentinel__
				if sentinel, ok := vv[1].(string); ok {
					if strings.HasPrefix(sentinel, "__sentinel__") {
						log.Info("publish sentinel info")
						return
					}
				}
			}
		}
	}

	newEvent(val)
}
Example No. 10
File: sync.go Project: hushi55/Grep
// sync command
func fullsync() {

	log.Info("full sync cmd starting ...")
	cmd := NewStringCmd("SYNC")
	//	cmd := NewStringCmd("PSYNC", "?", -1)

	addr := fmt.Sprintf("%s:%s", Conf.RedisMasterIP, Conf.RedisMasterPort)
	cn, err := net.DialTimeout("tcp", addr, time.Minute*30)

	if err != nil {
		log.Error("connect master error : %s", err)
		return
	}
	log.Info("connected to redis master : %s", addr)

	time.Sleep(time.Second * 5)

	conn := &conn{
		netcn: cn,
		buf:   make([]byte, 1024*1024*32),
	}
	conn.rd = bufio.NewReader(conn)

	conn.WriteTimeout = time.Minute * 30
	conn.ReadTimeout = time.Minute * 30

	redisAuth(conn)

	sync(cmd, conn)
}
Example No. 11
func (self *Server) ListenAndServe() error {
	go self.ProtobufServer.ListenAndServe()

	err := self.RaftServer.ListenAndServe()
	if err != nil {
		return err
	}

	log.Info("Waiting for local server to be added")
	self.ClusterConfig.WaitForLocalServerLoaded()
	self.writeLog.SetServerId(self.ClusterConfig.ServerId())

	log.Info("Recovering from log...")
	err = self.ClusterConfig.RecoverFromWAL()
	if err != nil {
		return err
	}
	log.Info("recovered")

	err = self.Coordinator.(*coordinator.CoordinatorImpl).ConnectToProtobufServers(self.Config.ProtobufConnectionString())
	if err != nil {
		return err
	}
	log.Info("Starting admin interface on port %d", self.Config.AdminHttpPort)
	go self.AdminServer.ListenAndServe()
	log.Info("Starting Http Api server on port %d", self.Config.ApiHttpPort)
	self.HttpApi.ListenAndServe()
	return nil
}
Example No. 12
func main() {
	l4g.Info("Process ID: %d", os.Getpid())

	usage := `
Usage:
  napoleon run <path_location>
  napoleon -h | --help
  napoleon -v | --version

Options:
  -h --help     Show this screen.
  -v --version  Show version.`

	options, _ := docopt.Parse(usage, nil, true, "1.0.2", false)
	l4g.Debug(options)

	c := make(<-chan int)

	if options["run"].(bool) {
		c = consumer(producer(options))
	}
	<-c

	time.Sleep(time.Millisecond)
	l4g.Info("napoleon stopped")
}
Example No. 13
func (s *RaftServer) startRaft() error {
	log.Info("Initializing Raft Server: %s", s.config.RaftConnectionString())

	// Initialize and start Raft server.
	transporter := raft.NewHTTPTransporter("/raft")
	var err error
	s.raftServer, err = raft.NewServer(s.name, s.path, transporter, s.clusterConfig, s.clusterConfig, "")
	if err != nil {
		return err
	}

	s.raftServer.SetElectionTimeout(s.config.RaftTimeout.Duration)
	s.raftServer.LoadSnapshot() // ignore errors

	s.raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventHandler)

	transporter.Install(s.raftServer, s)
	s.raftServer.Start()

	go s.CompactLog()

	if !s.raftServer.IsLogEmpty() {
		log.Info("Recovered from log")
		return nil
	}

	potentialLeaders := s.config.SeedServers

	if len(potentialLeaders) == 0 {
		log.Info("Starting as new Raft leader...")
		name := s.raftServer.Name()
		_, err := s.raftServer.Do(&InfluxJoinCommand{
			Name:                     name,
			ConnectionString:         s.config.RaftConnectionString(),
			ProtobufConnectionString: s.config.ProtobufConnectionString(),
		})

		if err != nil {
			log.Error(err)
		}
		err = s.CreateRootUser()
		return err
	}

	for {
		for _, leader := range potentialLeaders {
			log.Info("(raft:%s) Attempting to join leader: %s", s.raftServer.Name(), leader)

			if err := s.Join(leader); err == nil {
				log.Info("Joined: %s", leader)
				return nil
			}
		}

		log.Warn("Couldn't join any of the seeds, sleeping and retrying...")
		time.Sleep(100 * time.Millisecond)
	}

	return nil
}
Example No. 14
func main() {
	var err error
	// Parse cmd-line arguments
	flag.Parse()
	log.Info("web ver: \"%s\" start", ver.Version)
	if err = InitConfig(); err != nil {
		panic(err)
	}
	// Set max routine
	runtime.GOMAXPROCS(Conf.MaxProc)
	// init log
	log.LoadConfiguration(Conf.Log)
	defer log.Close()
	// init zookeeper
	zkConn, err := InitZK()
	if err != nil {
		if zkConn != nil {
			zkConn.Close()
		}
		panic(err)
	}
	// start pprof http
	perf.Init(Conf.PprofBind)
	// start http listen.
	StartHTTP()
	// process init
	if err = process.Init(Conf.User, Conf.Dir, Conf.PidFile); err != nil {
		panic(err)
	}
	// init signals, block wait signals
	signalCH := InitSignal()
	HandleSignal(signalCH)
	log.Info("web stop")
}
Example No. 15
/*
* work - process a single crawl job
*
* PARAMS:
*   - job : a pending job to process
*
 */
func (s *Spider) work(job Job) {
	defer s.wg.Done()
	// skip urls that have already been visited
	if s.visitedUrl[job.url] {
		l4g.Info("visited job, skipping. url:%s, depth:%d", job.url, job.depth)
		return
	}
	// skip jobs beyond the maximum crawl depth
	if job.depth > s.maxDepth {
		l4g.Info("exceeds max depth, skipping. url:%s, depth:%d", job.url, job.depth)
		return
	}
	// mark the url as visited
	s.visitedUrl[job.url] = true
	resp, err := http.Get(job.url)
	if err != nil {
		l4g.Error("Failed to crawl %s, err[%s]", job.url, err)
		return
	}
	defer resp.Body.Close()
	// parse the HTML, collect new urls, and enqueue them as jobs
	urls := s.parseHtml(resp.Body, job)
	for _, url := range urls {
		// push the new job onto the shared queue
		s.wg.Add(1)
		s.addJob(Job{url, job.depth + 1})
		l4g.Info("add job: %s, depth:%d", url, job.depth+1)
	}
}
Example No. 16
// Get a user channel from the ChannelList.
func (l *ChannelList) Get(key string, newOne bool) (Channel, error) {
	// validate
	if err := l.validate(key); err != nil {
		return nil, err
	}
	// get a channel bucket
	b := l.Bucket(key)
	b.Lock()
	if c, ok := b.Data[key]; !ok {
		if !Conf.Auth && newOne {
			c = NewSeqChannel()
			b.Data[key] = c
			b.Unlock()
			ChStat.IncrCreate()
			log.Info("user_key:\"%s\" create a new channel", key)
			return c, nil
		} else {
			b.Unlock()
			log.Warn("user_key:\"%s\" channle not exists", key)
			return nil, ErrChannelNotExist
		}
	} else {
		b.Unlock()
		ChStat.IncrAccess()
		log.Info("user_key:\"%s\" refresh channel bucket expire time", key)
		return c, nil
	}
}
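The Get method above relies on per-bucket locking so unrelated keys don't contend on a single mutex. A minimal, self-contained sketch of that sharded-map pattern (the value type, bucket count, and helper names here are illustrative, not the project's):
package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

const bucketCount = 16

// bucket holds one shard of the map together with its own lock.
type bucket struct {
	sync.Mutex
	data map[string]string
}

type shardedMap struct {
	buckets [bucketCount]*bucket
}

func newShardedMap() *shardedMap {
	m := &shardedMap{}
	for i := range m.buckets {
		m.buckets[i] = &bucket{data: make(map[string]string)}
	}
	return m
}

// bucketFor hashes the key so each key always maps to the same bucket.
func (m *shardedMap) bucketFor(key string) *bucket {
	h := fnv.New32a()
	h.Write([]byte(key))
	return m.buckets[h.Sum32()%bucketCount]
}

// GetOrCreate returns the value stored for key, creating it on first access.
func (m *shardedMap) GetOrCreate(key string, newValue func() string) string {
	b := m.bucketFor(key)
	b.Lock()
	defer b.Unlock()
	if v, ok := b.data[key]; ok {
		return v
	}
	v := newValue()
	b.data[key] = v
	return v
}

func main() {
	m := newShardedMap()
	v := m.GetOrCreate("user:1", func() string { return "channel-for-user:1" })
	fmt.Println(v)
}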
Example No. 17
/**
** subscribe to all of the connected queues on the queue server,
** connect the channel, and serve the messages to the handlers
**/
func (self *QueueServer) ListenAndServe() {
	factor, err := amqp.Factory()
	if err != nil {
		log.Error("Failed to get the queue instance: %s", err)
		return
	}

	pubsub, err := factor.Get(self.ListenAddress)
	if err != nil {
		log.Error("Failed to get the queue instance: %s", err)
		return
	}

	//res := &global.Message{}

	msgChan, _ := pubsub.Sub()
	for msg := range msgChan {
		log.Info(" [x] %q", msg)

		queue1, _ := config.GetString("name")
		if self.ListenAddress == queue1 {
			coordinator.Handler(msg)
		}
	}
	log.Info("Handling message %v", msgChan)
	self.chann = msgChan
}
Example No. 18
func (self *ClusterConfiguration) RecoverFromWAL() error {
	writeBuffer := NewWriteBuffer("local", self.shardStore, self.wal, self.LocalServer.Id, self.config.LocalStoreWriteBufferSize)
	self.writeBuffers = append(self.writeBuffers, writeBuffer)
	self.shardStore.SetWriteBuffer(writeBuffer)
	var waitForAll sync.WaitGroup
	for _, _server := range self.servers {
		server := _server
		waitForAll.Add(1)
		if server.RaftName == self.LocalRaftName {
			self.LocalServer = server
			go func(serverId uint32) {
				log.Info("Recovering local server")
				self.recover(serverId, self.shardStore)
				log.Info("Recovered local server")
				waitForAll.Done()
			}(server.Id)
		} else {
			go func(serverId uint32) {
				if server.connection == nil {
					server.connection = self.connectionCreator(server.ProtobufConnectionString)
					server.Connect()
				}
				log.Info("Recovering remote server %d", serverId)
				self.recover(serverId, server)
				log.Info("Recovered remote server %d", serverId)
				waitForAll.Done()
			}(server.Id)
		}
	}
	log.Info("Waiting for servers to recover")
	waitForAll.Wait()
	return nil
}
Example No. 19
func (self *ClusterConfiguration) AddPotentialServer(server *ClusterServer) {
	self.serversLock.Lock()
	defer self.serversLock.Unlock()
	server.State = Potential
	self.servers = append(self.servers, server)
	server.Id = uint32(len(self.servers))
	log.Info("Added server to cluster config: %d, %s, %s", server.Id, server.RaftConnectionString, server.ProtobufConnectionString)
	log.Info("Checking whether this is the local server local: %s, new: %s", self.config.ProtobufConnectionString(), server.ProtobufConnectionString)

	if server.RaftName == self.LocalRaftName && self.addedLocalServer {
		panic("how did we add the same server twice ?")
	}

	// if this is the local server unblock WaitForLocalServerLoaded()
	// and set the local connection string and id
	if server.RaftName == self.LocalRaftName {
		log.Info("Added the local server")
		self.LocalServer = server
		self.addedLocalServerWait <- true
		self.addedLocalServer = true
		return
	}

	// if this isn't the local server, connect to it
	log.Info("Connecting to ProtobufServer: %s from %s", server.ProtobufConnectionString, self.config.ProtobufConnectionString())
	if server.connection == nil {
		server.connection = self.connectionCreator(server.ProtobufConnectionString)
		server.Connect()
	}
	writeBuffer := NewWriteBuffer(fmt.Sprintf("%d", server.GetId()), server, self.wal, server.Id, self.config.PerServerWriteBufferSize)
	self.writeBuffers = append(self.writeBuffers, writeBuffer)
	server.SetWriteBuffer(writeBuffer)
	server.StartHeartbeat()
	return
}
Example No. 20
func (self *PiAssistant) Init(configPath string) error {
	fileData, readErr := ioutil.ReadFile(configPath)
	if readErr != nil {
		l4g.Error("Read config file error: %v", readErr)
		return readErr
	}

	l4g.Info("Loading config file: %s", configPath)
	var piAssiConf PiAssistantConfig
	unmarshalErr := json.Unmarshal(fileData, &piAssiConf)
	if unmarshalErr != nil {
		l4g.Error("Config file formt error: %v", unmarshalErr)
		return unmarshalErr
	}
	self.piAssiConf = piAssiConf

	self.piai = piai.NewPiAi(self.piAssiConf.PiAiConf.SessionTimeout)
	serviceInitErr := self.initServices()
	if serviceInitErr != nil {
		l4g.Error("Service init failed: %v", serviceInitErr)
		return serviceInitErr
	}
	l4g.Info("Initialize services successful!")

	return nil
}
Example No. 21
func (self *WAL) openLog(logFileName string) (*log, *index, error) {
	logger.Info("Opening log file %s", logFileName)

	logFile, err := os.OpenFile(logFileName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
	if err != nil {
		return nil, nil, err
	}
	log, err := newLog(logFile, self.config)
	if err != nil {
		return nil, nil, err
	}

	self.logFiles = append(self.logFiles, log)
	suffix := strings.TrimPrefix(path.Base(logFileName), "log.")
	indexFileName := path.Join(self.config.WalDir, "index."+suffix)
	logger.Info("Opening index file %s", indexFileName)
	index, err := newIndex(indexFileName)
	if err != nil {
		logger.Error("Cannot open index file %s", err)
		log.close()
		return nil, nil, err
	}
	self.logIndex = append(self.logIndex, index)
	return log, index, nil
}
Example No. 22
// StartHTTP start listen http.
func StartHTTP() {
	// external
	httpServeMux := http.NewServeMux()
	// 2
	httpServeMux.HandleFunc("/2/server/get", GetServer2)
	// 1.0
	httpServeMux.HandleFunc("/1/server/get", GetServer)
	httpServeMux.HandleFunc("/1/msg/get", GetOfflineMsg)
	httpServeMux.HandleFunc("/1/time/get", GetTime)
	// old
	httpServeMux.HandleFunc("/server/get", GetServer0)
	httpServeMux.HandleFunc("/msg/get", GetOfflineMsg0)
	httpServeMux.HandleFunc("/time/get", GetTime0)
	// internal
	httpAdminServeMux := http.NewServeMux()
	// 1.0
	httpAdminServeMux.HandleFunc("/1/admin/push/private", PushPrivate)
	httpAdminServeMux.HandleFunc("/1/admin/push/mprivate", PushMultiPrivate)
	httpAdminServeMux.HandleFunc("/1/admin/msg/del", DelPrivate)
	// old
	httpAdminServeMux.HandleFunc("/admin/push", PushPrivate)
	httpAdminServeMux.HandleFunc("/admin/msg/clean", DelPrivate)
	for _, bind := range Conf.HttpBind {
		log.Info("start http listen addr:\"%s\"", bind)
		go httpListen(httpServeMux, bind)
	}
	for _, bind := range Conf.AdminBind {
		log.Info("start admin http listen addr:\"%s\"", bind)
		go httpListen(httpAdminServeMux, bind)
	}
}
Example No. 23
func (s *RaftServer) Serve(l net.Listener) error {
	s.port = l.Addr().(*net.TCPAddr).Port
	s.listener = l

	log.Info("Initializing Raft HTTP server")

	// Initialize and start HTTP server.
	s.httpServer = &http.Server{
		Handler: s.router,
	}

	s.router.HandleFunc("/cluster_config", s.configHandler).Methods("GET")
	s.router.HandleFunc("/join", s.joinHandler).Methods("POST")
	s.router.HandleFunc("/process_command/{command_type}", s.processCommandHandler).Methods("POST")

	log.Info("Raft Server Listening at %s", s.connectionString())

	go func() {
		err := s.httpServer.Serve(l)
		if !strings.Contains(err.Error(), "closed network") {
			panic(err)
		}
	}()
	started := make(chan error)
	go func() {
		started <- s.startRaft()
	}()
	err := <-started
	//	time.Sleep(3 * time.Second)
	return err
}
Example No. 24
// RegisterTemp creates an ephemeral node and watches it; if the node is dropped it sends a SIGQUIT to itself.
func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error {
	tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	if err != nil {
		log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err)
		return err
	}
	log.Debug("create a zookeeper node:%s", tpath)
	// watch self
	go func() {
		for {
			log.Info("zk path: \"%s\" set a watch", tpath)
			exist, _, watch, err := conn.ExistsW(tpath)
			if err != nil {
				log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err)
				log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath)
				killSelf()
				return
			}
			if !exist {
				log.Warn("zk path: \"%s\" not exist, kill itself", tpath)
				killSelf()
				return
			}
			event := <-watch
			log.Info("zk path: \"%s\" receive a event %v", tpath, event)
		}
	}()
	return nil
}
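A hedged sketch of a caller for RegisterTemp, assuming the zk package above is github.com/samuel/go-zookeeper/zk; the ZooKeeper address, node path, and payload are placeholders.
// Hypothetical caller; the address, path and payload are illustrative.
func registerSelf() error {
	// zk.Connect returns the connection plus a session event channel, ignored here.
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second)
	if err != nil {
		return err
	}
	// don't close conn: the ephemeral node lives only as long as this session,
	// and RegisterTemp keeps watching it on this connection.
	return RegisterTemp(conn, "/myapp/comet/node1", []byte("10.0.0.2:8080"))
}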
Example No. 25
func (self *ShardData) QueryResponseBufferSize(querySpec *parser.QuerySpec, batchPointSize int) int {
	groupByTime := querySpec.GetGroupByInterval()
	if groupByTime == nil {
		// If the group by time is nil, we shouldn't have to use a buffer since the shards should be queried sequentially.
		// However, set this to something high just to be safe.
		log.Info("BUFFER SIZE: 1000")
		return 1000
	}
	tickCount := int(self.shardSeconds / int64(groupByTime.Seconds()))
	if tickCount < 10 {
		tickCount = 100
	} else if tickCount > 1000 {
		// cap this because each response should have up to this number of points in it.
		tickCount = tickCount / batchPointSize

		// but make sure it's at least 1k
		if tickCount < 1000 {
			tickCount = 1000
		}
	}
	columnCount := querySpec.GetGroupByColumnCount()
	if columnCount > 1 {
		// we don't really know the cardinality for any column up front. This is a just a multiplier so we'll see how this goes.
		// each response can have many points, so having a buffer of the ticks * 100 should be safe, but we'll see.
		tickCount = tickCount * 100
	}
	log.Info("BUFFER SIZE: ", tickCount)
	return tickCount
}
Example No. 26
func InitKafka() error {
	log.Info("start topic:%s consumer", Conf.KafkaTopic)
	log.Info("consumer group name:%s", KAFKA_GROUP_NAME)
	config := consumergroup.NewConfig()
	config.Offsets.Initial = sarama.OffsetNewest
	config.Offsets.ProcessingTimeout = OFFSETS_PROCESSING_TIMEOUT_SECONDS
	config.Offsets.CommitInterval = OFFSETS_COMMIT_INTERVAL
	config.Zookeeper.Chroot = Conf.ZKRoot
	kafkaTopics := []string{Conf.KafkaTopic}
	cg, err := consumergroup.JoinConsumerGroup(KAFKA_GROUP_NAME, kafkaTopics, Conf.ZKAddrs, config)
	if err != nil {
		return err
	}
	go func() {
		for err := range cg.Errors() {
			log.Error("consumer error(%v)", err)
		}
	}()
	go func() {
		for msg := range cg.Messages() {
			log.Info("deal with topic:%s, partitionId:%d, Offset:%d, Key:%s msg:%s", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
			push(string(msg.Key), msg.Value)
			cg.CommitUpto(msg)
		}
	}()
	return nil
}
Example No. 27
// Migrate migrates the portion of connections which no longer belong to this comet node.
func (l *ChannelList) Migrate(nw map[string]int) (err error) {
	migrate := false
	// check new/update node
	for k, v := range nw {
		weight, ok := nodeWeightMap[k]
		// not found or weight change
		if !ok || weight != v {
			migrate = true
			break
		}
	}
	// check del node
	if !migrate {
		for k := range nodeWeightMap {
			// node deleted
			if _, ok := nw[k]; !ok {
				migrate = true
				break
			}
		}
	}
	if !migrate {
		return
	}
	// init ketama
	ring := ketama.NewRing(ketama.Base)
	for node, weight := range nw {
		ring.AddNode(node, weight)
	}
	ring.Bake()
	// atomic update
	nodeWeightMap = nw
	CometRing = ring
	// get all the channel lock
	channels := []Channel{}
	for i, c := range l.Channels {
		c.Lock()
		for k, v := range c.Data {
			hn := ring.Hash(k)
			if hn != Conf.ZookeeperCometNode {
				channels = append(channels, v)
				delete(c.Data, k)
				log.Debug("migrate delete channel key \"%s\"", k)
			}
		}
		c.Unlock()
		log.Debug("migrate channel bucket:%d finished", i)
	}
	// close all the migrate channels
	log.Info("close all the migrate channels")
	for _, channel := range channels {
		if err := channel.Close(); err != nil {
			log.Error("channel.Close() error(%v)", err)
			continue
		}
	}
	log.Info("close all the migrate channels finished")
	return
}
Example No. 28
func dockerStateHandler(chann []byte) {

	Msg := &Message{}
	parse_err := json.Unmarshal(chann, &Msg)
	log.Info(parse_err)
	if parse_err != nil {
		log.Error("Error: Message parsing error:\n%s.", parse_err)
		return
	}

	apprequest := global.AppRequest{Id: Msg.Id}
	req, err := apprequest.Get(Msg.Id)
	log.Info(req)
	if err != nil {
		log.Error("Error: Riak didn't cooperate:\n%s.", err)
		return
	}

	assembly := global.Assembly{Id: req.AppId}
	asm, err := assembly.GetAssemblyWithComponents(req.AppId)
	if err != nil {
		log.Error("Error: Riak didn't cooperate:\n%s.", err)
		return
	}

	cont_id, perrscm := global.ParseKeyValuePair(asm.Components[0].Outputs, "id")
	if perrscm != nil {
		log.Error("Failed to get the container id : %s", perrscm)
		return
	}
	endpoint, perrscm := global.ParseKeyValuePair(asm.Components[0].Outputs, "endpoint")
	if perrscm != nil {
		log.Error("Failed to get the endpoint : %s", perrscm)
		return
	}

	pair_cpu, perrscm := global.ParseKeyValuePair(asm.Inputs, "cpu")
	if perrscm != nil {
		log.Error("Failed to get the cpu value : %s", perrscm)
	}

	pair_memory, iderr := global.ParseKeyValuePair(asm.Inputs, "ram")
	if iderr != nil {
		log.Error("Failed to get the memory value : %s", iderr)
	}

	switch req.Action {
	case "start":
		log.Info("Starting Container")
		go docker.StartContainer(&global.Container{ContainerID: cont_id.Value, Cpu: pair_cpu.Value, Ram: pair_memory.Value}, endpoint.Value)
	case "stop":
		log.Info("Stopping Container")
		go docker.StopContainer(&global.Container{ContainerID: cont_id.Value}, endpoint.Value)
	case "restart":
		log.Info("Restarting container")
		go docker.RestartContainer(&global.Container{ContainerID: cont_id.Value}, endpoint.Value)
	}
}
Example No. 29
func (self *Server) ListenAndServe() error {
	log.Info("Starting admin interface on port")

	log.Info("talking to the http api..")
	self.HttpApi.ListenAndServe()

	return nil
}
Example No. 30
func (self *ClusterConfiguration) Recovery(b []byte) error {
	log.Info("Recovering the cluster configuration")
	data := &SavedConfiguration{}

	err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
	if err != nil {
		log.Error("Error while decoding snapshot: %s", err)
		return err
	}

	self.DatabaseReplicationFactors = make(map[string]struct{}, len(data.Databases))
	for k := range data.Databases {
		self.DatabaseReplicationFactors[k] = struct{}{}
	}
	self.clusterAdmins = data.Admins
	self.dbUsers = data.DbUsers
	self.servers = data.Servers

	for _, server := range self.servers {
		log.Info("Checking whether %s is the local server %s", server.RaftName, self.LocalRaftName)
		if server.RaftName == self.LocalRaftName {
			self.LocalServer = server
			self.addedLocalServerWait <- true
			self.addedLocalServer = true
			continue
		}

		server.connection = self.connectionCreator(server.ProtobufConnectionString)
		writeBuffer := NewWriteBuffer(fmt.Sprintf("server: %d", server.GetId()), server, self.wal, server.Id, self.config.PerServerWriteBufferSize)
		self.writeBuffers = append(self.writeBuffers, writeBuffer)
		server.SetWriteBuffer(writeBuffer)
		server.Connect()
		server.StartHeartbeat()
	}

	self.shardsByIdLock.Lock()
	self.shardLock.Lock()
	defer self.shardsByIdLock.Unlock()
	defer self.shardLock.Unlock()
	self.shortTermShards = self.convertNewShardDataToShards(data.ShortTermShards)
	self.longTermShards = self.convertNewShardDataToShards(data.LongTermShards)
	for _, s := range self.shortTermShards {
		shard := s
		self.shardsById[s.id] = shard
	}
	for _, s := range self.longTermShards {
		shard := s
		self.shardsById[s.id] = shard
	}

	for db, queries := range data.ContinuousQueries {
		for _, query := range queries {
			self.addContinuousQuery(db, query)
		}
	}

	return nil
}