// acceptTCP accepts connections on the listener and serves requests
// for each incoming connection. acceptTCP blocks; the caller typically
// invokes it in a go statement.
func acceptTCP(server *Server, lis *net.TCPListener) {
	var (
		conn *net.TCPConn
		err  error
		r    int
	)
	for {
		if conn, err = lis.AcceptTCP(); err != nil {
			// if the listener was closed, return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		if err = conn.SetReadBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go serveTCP(server, conn, r)
		if r++; r == maxInt {
			r = 0
		}
	}
}
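// A minimal sketch of how the accept loop above is typically launched: one
// goroutine per listener, as the doc comment suggests. InitTCP and the
// Conf.TCPBind field are assumptions made for illustration, not names taken
// from this codebase.
func InitTCP(server *Server) error {
	for _, bind := range Conf.TCPBind { // hypothetical list of listen addresses
		addr, err := net.ResolveTCPAddr("tcp", bind)
		if err != nil {
			return err
		}
		lis, err := net.ListenTCP("tcp", addr)
		if err != nil {
			return err
		}
		// one accept loop per listener
		go acceptTCP(server, lis)
	}
	return nil
}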
func runWebApi(listener *net.TCPListener, store *StoreChan, staticDir string) (err error) {
	var sessions *webSessionFactory
	if sessions, err = NewWebSessionFactory(600 * time.Second); err != nil { // TODO: hardcoded value
		return err
	}

	http.Handle("/api/authenticate", webHandler{store, sessions, handleWebAuthenticate})
	http.Handle("/api/add", webHandler{store, sessions, handleWebAdd})
	http.Handle("/api/remove", webHandler{store, sessions, handleWebRemove})
	http.Handle("/api/update", webHandler{store, sessions, handleWebUpdate})
	http.Handle("/api/set-admin", webHandler{store, sessions, handleWebSetAdmin})
	http.Handle("/api/list", webHandler{store, sessions, handleWebList})
	http.Handle("/api/list-full", webHandler{store, sessions, handleWebListFull})
	http.Handle("/admin/", http.StripPrefix("/admin/", http.FileServer(http.Dir(staticDir))))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}
		http.Redirect(w, r, "/admin/", http.StatusTemporaryRedirect)
	})

	wl.Printf("web-api: listening on '%s'", listener.Addr())
	server := &http.Server{ReadTimeout: 60 * time.Second, WriteTimeout: 60 * time.Second}
	return server.Serve(tcpKeepAliveListener{listener})
}
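// A minimal sketch of the keep-alive wrapper that server.Serve above assumes,
// modeled on the well-known tcpKeepAliveListener pattern from net/http. The
// embedded field and the 3-minute keep-alive period are assumptions for
// illustration; the project's actual type may differ.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return nil, err
	}
	// Enable TCP keep-alives so dead peers are eventually detected.
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}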
func (s *Server) loop(listener *net.TCPListener) {
	defer s.waitGroup.Done()
	log.Printf("[DEBUG] waiting for connections...")
	for {
		select {
		case <-s.ch:
			log.Printf("[DEBUG] stopping listening on %s", listener.Addr())
			_ = listener.Close()
			return
		default:
		}
		_ = listener.SetDeadline(time.Now().Add(s.Deadline))
		conn, err := listener.AcceptTCP()
		if nil != err {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			log.Printf("[ERROR] error on loop: %s", err)
			continue // conn is nil on error; do not use it
		}
		log.Printf("[DEBUG] connected to %s", conn.RemoteAddr())
		c := &connection{
			conn:          conn,
			protocol:      s.protocolCtor(conn),
			waitGroup:     s.waitGroup,
			deadline:      s.Deadline,
			readBuffSize:  s.ReadBuffSize,
			writeBuffSize: s.WriteBuffSize,
		}
		s.waitGroup.Add(1)
		go c.handle(s.ch)
	}
}
func (self *Service) Serve(listener *net.TCPListener) {
	defer self.wg.Done()
	log.Println("initializing...")
	var err error
	self.repo, err = repository.Initialize(self.dataDir)
	log.Println("data directory: ", self.dataDir)
	if err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case <-self.ch:
			log.Println("stopping listening on", listener.Addr())
			listener.Close()
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(1e9))
		conn, err := listener.AcceptTCP()
		if nil != err {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			log.Println(err)
			continue // conn is nil on error; skip it
		}
		self.wg.Add(1)
		go self.HandleConnection(conn)
	}
}
// Accept connections and spawn a goroutine to serve each one. Stop listening
// if anything is received on the service's channel.
func (self *Service) Serve(listener *net.TCPListener) {
	defer self.waitGroup.Done()
	for {
		select {
		case <-self.done:
			log.Println("Stopping listening on", listener.Addr())
			listener.Close()
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(1e9))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			log.Println(err)
			continue // conn is nil on error; skip it
		}
		log.Println(conn.RemoteAddr(), "connected")
		connection := NewConnection(conn, make(chan []byte))
		self.dataMap[connection.Id] = connection
		self.waitGroup.Add(1)
		go self.serve(connection)
	}
}
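// A minimal usage sketch for the Serve pattern above: start the accept loop
// in a goroutine, later close the done channel and wait for it to drain.
// NewService is a hypothetical constructor, and the done/waitGroup fields are
// assumed to be accessible from the same package.
func runServiceExample() error {
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	svc := NewService() // hypothetical constructor
	svc.waitGroup.Add(1)
	go svc.Serve(listener)

	// ... later, request shutdown and wait for the accept loop to exit.
	close(svc.done)
	svc.waitGroup.Wait()
	return nil
}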
func (this *Server) Start(listener *net.TCPListener) {
	log.Printf("Start listen on %v", listener.Addr())
	this.waitGroup.Add(1)
	defer func() {
		listener.Close()
		this.waitGroup.Done()
	}()
	for {
		select {
		case <-this.exitCh:
			log.Printf("Stop listen on %v", listener.Addr())
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(this.acceptTimeout))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				// log.Printf("Accept timeout: %v", opErr)
				continue
			}
			log.Printf("Accept error: %v", err)
			continue
		}
		log.Printf("Accept: %v", conn.RemoteAddr())
		go this.handleClientConn(conn)
	}
}
func New(id circuit.WorkerID, bindAddr string, host string) *Transport {
	// Bind
	var l *net.TCPListener
	if strings.Index(bindAddr, ":") < 0 {
		bindAddr = bindAddr + ":0"
	}
	l_, err := net.Listen("tcp", bindAddr)
	if err != nil {
		panic(err)
	}
	// Build transport structure
	l = l_.(*net.TCPListener)
	t := &Transport{
		listener:   l,
		addrtabl:   makeAddrTabl(),
		pipelining: DefaultPipelining,
		remote:     make(map[circuit.WorkerID]*link),
		ach:        make(chan *conn),
	}
	// Resolve self address
	laddr := l.Addr().(*net.TCPAddr)
	t.self, err = NewAddr(id, os.Getpid(), fmt.Sprintf("%s:%d", host, laddr.Port))
	if err != nil {
		panic(err)
	}
	// This LocalAddr might be useless for connect purposes (e.g. 0.0.0.0). Consider self instead.
	t.bind = t.addrtabl.Normalize(&Addr{ID: id, PID: os.Getpid(), Addr: laddr})
	go t.loop()
	return t
}
// AcceptTCP accepts connections on the listener and serves requests
// for each incoming connection. AcceptTCP blocks; the caller typically
// invokes it in a go statement.
func (server *Server) AcceptTCP(lis *net.TCPListener, i int) {
	var (
		conn *net.TCPConn
		err  error
	)
	for {
		log.Debug("server: accept round: %d", i)
		if conn, err = lis.AcceptTCP(); err != nil {
			// if the listener was closed, return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		if err = conn.SetReadBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go server.serveConn(conn, i)
		if i++; i == maxInt {
			i = 0
		}
	}
}
func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) {
	logger.Infof("listening on %q", lis.Addr())
	srv := &Server{
		state:     s,
		statePool: state.NewStatePool(s),
		addr:      lis.Addr().(*net.TCPAddr), // cannot fail
		tag:       cfg.Tag,
		dataDir:   cfg.DataDir,
		logDir:    cfg.LogDir,
		limiter:   utils.NewLimiter(loginRateLimit),
		validator: cfg.Validator,
		adminApiFactories: map[int]adminApiFactory{
			0: newAdminApiV0,
			1: newAdminApiV1,
			2: newAdminApiV2,
		},
	}
	srv.authCtxt = newAuthContext(srv)
	tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key)
	if err != nil {
		return nil, err
	}
	// TODO(rog) check that *srvRoot is a valid type for using
	// as an RPC server.
	tlsConfig := tls.Config{
		Certificates: []tls.Certificate{tlsCert},
	}
	changeCertListener := newChangeCertListener(lis, cfg.CertChanged, tlsConfig)
	go srv.run(changeCertListener)
	return srv, nil
}
// FreePorts returns at most n TCP ports that are available for use.
func FreePorts(n int, matchFunc func(int) bool) (ports []int, err error) {
	if n > 50000 {
		return nil, fmt.Errorf("too many ports requested (%d)", n)
	}
	for len(ports) < n {
		var addr *net.TCPAddr
		addr, err = net.ResolveTCPAddr("tcp", "localhost:0")
		if err != nil {
			break
		}
		var l *net.TCPListener
		l, err = net.ListenTCP("tcp", addr)
		if err != nil {
			break
		}
		port := l.Addr().(*net.TCPAddr).Port
		l.Close()
		if matchFunc == nil {
			ports = append(ports, port)
			continue
		}
		if matchFunc(port) {
			ports = append(ports, port)
		}
	}
	return
}
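// A short usage sketch for FreePorts above: ask for three free TCP ports with
// no filtering (nil matchFunc). Purely illustrative; the caller and its error
// handling are assumptions.
func freePortsExample() error {
	ports, err := FreePorts(3, nil)
	if err != nil {
		return err
	}
	fmt.Printf("free ports: %v\n", ports)
	return nil
}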
func (tcp *Tcp) serve(listeningPoint *net.TCPListener) {
	log.Info("Begin serving TCP on address " + listeningPoint.Addr().String())
	for {
		baseConn, err := listeningPoint.Accept()
		if err != nil {
			log.Severe("Failed to accept TCP conn on address " + listeningPoint.Addr().String() + "; " + err.Error())
			continue
		}
		conn := NewConn(baseConn, tcp.output)
		log.Debug("Accepted new TCP conn %p from %s on address %s", &conn, conn.baseConn.RemoteAddr(), conn.baseConn.LocalAddr())
		tcp.connTable.Notify(baseConn.RemoteAddr().String(), conn)
	}
}
/* Wait for and accept a client. wg.Done() will be called when l is closed or
an error occurs (and the goroutine terminates). */
func accept(l *net.TCPListener, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		/* Get a connection */
		c, err := l.AcceptTCP()
		/* Notify listener and die on error */
		if err != nil {
			log.Printf("Ceasing to listen on %v: %v", l.Addr(), err)
			return
		}
		/* Handle client */
		go handle(c)
	}
}
func NewTcpAcceptor(strAddr string) (*TcpAcceptor, error) {
	var err error
	var listener *net.TCPListener
	var tcpAddr *net.TCPAddr

	if tcpAddr, err = net.ResolveTCPAddr("tcp", strAddr); err == nil {
		listener, err = net.ListenTCP("tcp", tcpAddr)
	}
	if err != nil {
		return nil, err
	}

	a := TcpAcceptor{}
	a.listener = listener
	addr := listener.Addr().String()
	a.endPoint, _ = NewTcpEndPoint(addr)
	return &a, nil
}
func listenTCP(host, port string) (err error) {
	var listener *net.TCPListener
	// Automatically assign open port
	address, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, port))
	if err != nil {
		log.WithField("error", err).Fatal("Unable to resolve tcp address")
		return
	}
	listener, err = net.ListenTCP("tcp", address)
	if err != nil {
		log.WithField("error", err).Fatal("Unable to bind to tcp on localhost")
		return
	}
	log.WithField("address", listener.Addr()).Info("Listening")
	defer listener.Close()
	serve(listener)
	return
}
func (this *Server) Run(listener *net.TCPListener) {
	defer func() {
		listener.Close()
	}()
	//go this.dealSpamConn()

	log.Infof("Starting comet server on: %s", listener.Addr().String())
	if err := storage.Instance.AddComet(this.Name); err != nil {
		log.Errorf("failed to add comet to Redis: %s", err.Error())
	}
	// keep this node's data from expiring in Redis
	this.startRefreshRoutine()
	this.startWorkers()
	for {
		select {
		case <-this.ctrl:
			log.Infof("ask me to quit")
			this.stopWorkers()
			this.stopRefreshRoutine()
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(this.acceptTimeout * time.Second))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if e, ok := err.(*net.OpError); ok && e.Timeout() {
				continue
			}
			log.Errorf("accept failed: %v\n", err)
			continue
		}
		/*
			// first packet must be sent by the client within the specified seconds
			if err = conn.SetReadDeadline(time.Now().Add(20)); err != nil {
				glog.Errorf("conn.SetReadDeadLine() error(%v)", err)
				conn.Close()
				continue
			}
		*/
		go this.handleConnection(conn)
	}
}
func (this *Server) Start(listener *net.TCPListener) {
	log.Printf("Start listen on %v\r\n", listener.Addr())
	this.waitGroup.Add(1)
	defer func() {
		listener.Close()
		this.waitGroup.Done()
	}()

	// guard against malicious connections
	go this.dealSpamConn()

	// report bookkeeping; send mail on a schedule
	go report.Work()

	for {
		select {
		case <-this.exitCh:
			log.Printf("Stop listen on %v\r\n", listener.Addr())
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(this.acceptTimeout))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				// log.Printf("Accept timeout: %v\r\n", opErr)
				continue
			}
			report.AddCount(report.TryConnect, 1)
			log.Printf("Accept error: %v\r\n", err)
			continue
		}
		report.AddCount(report.SuccessConnect, 1)

		// after connecting, wait for login verification
		handlers.ConnMapLoginStatus.Set(conn, time.Now())
		log.Printf("Accept: %v\r\n", conn.RemoteAddr())
		go this.handleClientConn(conn)
	}
}
func (this *Server) Run(listener *net.TCPListener) {
	this.waitGroup.Add(1)
	defer func() {
		listener.Close()
		this.waitGroup.Done()
	}()
	//go this.dealSpamConn()

	log.Infof("Starting comet server on: %s\n", listener.Addr().String())
	log.Infof("Comet server settings: readtimeout [%d], accepttimeout [%d], heartbeattimeout [%d]\n",
		this.readTimeout, this.acceptTimeout, this.heartbeatTimeout)
	for {
		select {
		case <-this.exitCh:
			log.Infof("Stopping comet server")
			return
		default:
		}
		listener.SetDeadline(time.Now().Add(this.acceptTimeout))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if e, ok := err.(*net.OpError); ok && e.Timeout() {
				continue
			}
			log.Errorf("accept failed: %v\n", err)
			continue
		}
		/*
			// first packet must be sent by the client within the specified seconds
			if err = conn.SetReadDeadline(time.Now().Add(20)); err != nil {
				glog.Errorf("conn.SetReadDeadLine() error(%v)", err)
				conn.Close()
				continue
			}
		*/
		go this.handleConnection(conn)
	}
}
func NewTcpServer(uri string) *TcpServer {
	if uri == "" {
		uri = "tcp://127.0.0.1:0"
	}
	var u *url.URL
	var err error
	if u, err = url.Parse(uri); err != nil {
		panic(err.Error())
	}
	var addr *net.TCPAddr
	if addr, err = net.ResolveTCPAddr(u.Scheme, u.Host); err != nil {
		panic(err.Error())
	}
	var listener *net.TCPListener
	if listener, err = net.ListenTCP(u.Scheme, addr); err != nil {
		panic(err.Error())
	}
	return &TcpServer{
		TcpService:  NewTcpService(),
		URL:         u.Scheme + "://" + listener.Addr().String(),
		TCPListener: listener,
	}
}
func (s *Server) serve(l *net.TCPListener) {
	defer s.waitGroup.Done()
	s.waitGroup.Add(1)
	for {
		select {
		case <-s.ch:
			log.Debug("stopping listening on: ", l.Addr())
			l.Close()
			return
		default:
		}
		l.SetDeadline(time.Now().Add(1e9))
		conn, err := l.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			log.Debug(err)
			continue // conn is nil on error; skip it
		}
		// handle the connection in a new goroutine. This returns to the listener
		// accepting code so that multiple connections may be served concurrently.
		keeper := NewKeeper(conn, s.storeClient)
		s.waitGroup.Add(1) // Add before starting the goroutine to avoid racing with Wait
		go func() {
			defer s.waitGroup.Done()
			log.Debug("client connected: ", conn.RemoteAddr())
			if err := keeper.Handle(); err != nil {
				log.Debug("client disconnected: ", conn.RemoteAddr(), " with error: ", err)
			} else {
				log.Debug("client disconnected: ", conn.RemoteAddr())
			}
		}()
	}
}
// Re-exec this image without dropping the listener passed to this function.
func Relaunch(l *net.TCPListener) error {
	argv0, err := exec.LookPath(os.Args[0])
	if nil != err {
		return err
	}
	wd, err := os.Getwd()
	if nil != err {
		return err
	}
	v := reflect.ValueOf(l).Elem().FieldByName("fd").Elem()
	fd := uintptr(v.FieldByName("sysfd").Int())
	if err := os.Setenv("GOAGAIN_FD", fmt.Sprint(fd)); nil != err {
		return err
	}
	if err := os.Setenv("GOAGAIN_NAME", fmt.Sprintf("tcp:%s->", l.Addr().String())); nil != err {
		return err
	}
	if err := os.Setenv("GOAGAIN_PPID", fmt.Sprint(syscall.Getpid())); nil != err {
		return err
	}
	files := make([]*os.File, fd+1)
	files[syscall.Stdin] = os.Stdin
	files[syscall.Stdout] = os.Stdout
	files[syscall.Stderr] = os.Stderr
	files[fd] = os.NewFile(fd, string(v.FieldByName("sysfile").String()))
	p, err := os.StartProcess(argv0, os.Args, &os.ProcAttr{
		Dir:   wd,
		Env:   os.Environ(),
		Files: files,
		Sys:   &syscall.SysProcAttr{},
	})
	if nil != err {
		return err
	}
	log.Printf("spawned child %d\n", p.Pid)
	return nil
}
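// A hedged sketch of the child-process side of Relaunch above: rebuild the
// inherited listener from the GOAGAIN_FD environment variable. It uses only
// standard-library calls (os.NewFile, net.FileListener); the function name and
// error handling are illustrative, not this project's actual API.
func inheritedListener() (*net.TCPListener, error) {
	fdStr := os.Getenv("GOAGAIN_FD")
	if fdStr == "" {
		return nil, fmt.Errorf("GOAGAIN_FD is not set")
	}
	fd, err := strconv.ParseUint(fdStr, 10, 64)
	if err != nil {
		return nil, err
	}
	// Wrap the inherited descriptor and turn it back into a net.Listener.
	f := os.NewFile(uintptr(fd), "listener")
	l, err := net.FileListener(f)
	if err != nil {
		return nil, err
	}
	tl, ok := l.(*net.TCPListener)
	if !ok {
		return nil, fmt.Errorf("inherited fd %d is not a TCP listener", fd)
	}
	return tl, nil
}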
func run() int {
	bytearray.EnableAutoGC(60, 74)
	runtime.SetBlockProfileRate(1000)
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()

	/*defer func() {
		// Panic error handling
		if r := recover(); r != nil {
			fmt.Println(r)
			return 1
		}
	}()*/

	var err error

	exeRoot, _ := osext.ExecutableFolder()

	var serverPort int64 = int64(DEFAULT_SERVER_IP_PORT)

	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")

	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)

	var loglvl int64 = int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace)", &loglvl, cmd.Hidden).OnChange(func() {
		core.LogLevel = int(loglvl)
	})

	// Note that datPath is not set until we have parsed the arguments; that is ok because
	// neither of the handlers starts opening files on its own.
	// TODO: remove the datPath global and pass it into the handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()

	cmd.Command("", "", func() { // Default
		serverAddr := net.TCPAddr{nil, int(serverPort), ""}

		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(errors.New(fmt.Sprintf("Error listening: %v", err.Error())))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())

		done = make(chan bool)
		defer close(done)

		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		signal.Notify(signalchan, os.Kill)
		go func() {
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()

		go connectionListener(listener)

		go func() {
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()

		// blocking channel read
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}

		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side.
	// It should be an interface to an administrative tool instead.
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		if len(cmd.Args) < 4 {
			panic(errors.New("Missing argument to adduser command"))
		}

		if !accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])}) {
			panic(errors.New("Error creating account"))
		}

		accountNameH := core.Hash([]byte(cmd.Args[2]))
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))

		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)

		block.Release()
		core.Log(core.LogInfo, "User added")
	})

	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true
		}

		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}

		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}

		start := time.Now()

		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}

		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}
		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}
		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()
			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c

				p := int(i * 100 / len(rootlist))
				fmt.Printf("%d%%\r", p)
			}
		}

		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})
	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		start := time.Now()
		if !skipSweep {
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})

	err = cmd.Parse()
	PanicOn(err)

	fmt.Println(core.MemoryStats())

	return 0
}
// NewMock creates and runs a new mock instance.
// path is the path to the mock jar.
// nodes is the total number of cluster nodes (and thus the number of mock threads).
// replicas is the number of replica nodes (a subset of the number of nodes) for each couchbase bucket.
// vbuckets is the number of vbuckets to use for each couchbase bucket.
// specs should be a list of specifications of buckets to use.
func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {
	var lsn *net.TCPListener = nil
	chAccept := make(chan bool)
	m = &Mock{}

	defer func() {
		close(chAccept)
		if lsn != nil {
			lsn.Close()
		}

		exc := recover()
		if exc == nil {
			// No errors, everything is OK
			return
		}

		// Close mock on error, destroying resources
		m.Close()
		if mExc, ok := exc.(mockError); ok {
			m = nil
			err = mExc
		} else {
			panic(exc)
		}
	}()

	if lsn, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0}); err != nil {
		throwMockError("Couldn't set up listening socket", err)
	}
	_, ctlPort, _ := net.SplitHostPort(lsn.Addr().String())
	log.Printf("Listening for control connection at %s\n", ctlPort)

	go func() {
		defer func() { chAccept <- false }()
		if m.conn, err = lsn.Accept(); err != nil {
			throwMockError("Couldn't accept incoming control connection from mock", err)
			return
		}
	}()

	if len(specs) == 0 {
		specs = []BucketSpec{BucketSpec{Name: "default", Type: BCouchbase}}
	}

	options := []string{
		"-jar", path, "--harakiri-monitor", "localhost:" + ctlPort, "--port", "0",
		"--replicas", strconv.Itoa(int(replicas)),
		"--vbuckets", strconv.Itoa(int(vbuckets)),
		"--nodes", strconv.Itoa(int(nodes)),
		"--buckets", m.buildSpecStrings(specs),
	}

	log.Printf("Invoking java %s", strings.Join(options, " "))
	m.cmd = exec.Command("java", options...)
	m.cmd.Stdout = os.Stdout
	m.cmd.Stderr = os.Stderr

	if err = m.cmd.Start(); err != nil {
		m.cmd = nil
		throwMockError("Couldn't start command", err)
	}

	select {
	case <-chAccept:
		break
	case <-time.After(mockInitTimeout):
		throwMockError("Timed out waiting for initialization", errors.New("timeout"))
	}

	m.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))

	// Read the port buffer, which is delimited by a NUL byte
	if portBytes, err := m.rw.ReadBytes(0); err != nil {
		throwMockError("Couldn't get port information", err)
	} else {
		portBytes = portBytes[:len(portBytes)-1]
		if entryPort, err := strconv.Atoi(string(portBytes)); err != nil {
			throwMockError("Incorrectly formatted port from mock", err)
		} else {
			m.EntryPort = uint16(entryPort)
		}
	}

	log.Printf("Mock HTTP port at %d\n", m.EntryPort)
	return
}
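// A short usage sketch for NewMock above. The jar path and bucket spec are
// placeholders; Close and EntryPort are assumed to be exported on Mock as used
// in the function body.
func newMockExample() {
	m, err := NewMock("/path/to/CouchbaseMock.jar", 4, 1, 64,
		BucketSpec{Name: "default", Type: BCouchbase})
	if err != nil {
		log.Fatalf("couldn't start mock: %v", err)
	}
	defer m.Close()
	log.Printf("mock REST entry port: %d", m.EntryPort)
}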
. "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/pivotal-cf-experimental/opentsdb-firehose-nozzle/poster" ) var telnetChan chan []byte var _ = Describe("OpentsdbClient Tcp", func() { var tcpListener *net.TCPListener var p *poster.TelnetPoster BeforeEach(func() { telnetChan = make(chan []byte, 1) tcpListener = NewTCPServer() p = poster.NewTelnetPoster(tcpListener.Addr().String()) }) It("posts value metrics over tcp", func() { timestamp := time.Now().Unix() metric := poster.Metric{ Metric: "origin.metricName", Value: 5, Timestamp: timestamp, Tags: poster.Tags{ Deployment: "deployment-name", Job: "doppler", Index: "SOME-GUID", IP: "10.10.10.10", },
/* Listen for a connection on *gc.Cport, handle it with handler */
func listener(init *sync.WaitGroup) {
	/* If someone's waiting on us... */
	if *gc.Cwait {
		defer WG.Done()
	}

	/* Make sure the listen address isn't just a port. */
	if _, err := strconv.Atoi(*gc.Cport); nil == err {
		*gc.Cport = ":" + *gc.Cport
	}

	/* Get tcp4 and tcp6 addresses, maybe */
	a4, e4 := net.ResolveTCPAddr("tcp4", *gc.Cport)
	a6, e6 := net.ResolveTCPAddr("tcp6", *gc.Cport)
	/* Die if neither works */
	if e4 != nil && e6 != nil {
		log.Printf("Unable to resolve %v as an IPv4 address: %v", *gc.Cport, e4)
		log.Printf("Unable to resolve %v as an IPv6 address: %v", *gc.Cport, e6)
		os.Exit(-6)
	}

	/* Waitgroup for listeners */
	wg := sync.WaitGroup{}

	/* Start listeners */
	var l4 *net.TCPListener
	var l6 *net.TCPListener
	if nil == e4 {
		l4, e4 = net.ListenTCP("tcp4", a4)
		if nil == e4 {
			log.Printf("Command listener started on %v", l4.Addr())
			wg.Add(1)
			go accept(l4, &wg)
			defer l4.Close()
		}
	} else {
		e4 = nil
	}
	if nil == e6 {
		l6, e6 = net.ListenTCP("tcp6", a6)
		if nil == e6 {
			log.Printf("Command listener started on %v", l6.Addr())
			wg.Add(1)
			go accept(l6, &wg)
			defer l6.Close()
		}
	} else {
		e6 = nil
	}
	if e4 != nil && e6 != nil {
		log.Printf("Cannot start IPv4 listener on %v: %v", a4, e4)
		log.Printf("Cannot start IPv6 listener on %v: %v", a6, e6)
		os.Exit(-7)
	}

	/* Signal end of initialization */
	init.Done()

	/* Wait for accepters to die. */
	wg.Wait()
	return
}