func (m *mainFrame) createConnection() (*beanstalk.Conn, error) { c, err := beanstalk.Dial("tcp", fmt.Sprintf("%s:%d", m.host, m.port)) if err != nil { return nil, err } return c, nil }
func TestReserveIsParallelAndWaits(t *testing.T) { count := int32(0) tubeName := strconv.Itoa(int(time.Now().Unix())) start := time.Now() mux := NewWorkMux() mux.Handle(tubeName, HandlerFunc(func(job *Job) { time.Sleep(time.Second) atomic.AddInt32(&count, 1) job.Delete() })) go func() { conn, _ := beanstalk.Dial("tcp", "localhost:11300") tube := &beanstalk.Tube{Conn: conn, Name: tubeName} tube.Put([]byte("job1"), 0, 0, time.Minute) tube.Put([]byte("job2"), 0, 0, time.Minute) tube.Put([]byte("job3"), 0, 0, time.Minute) tube.Put([]byte("job4"), 0, 0, time.Minute) tube.Put([]byte("job5"), 0, 0, time.Minute) time.Sleep(time.Millisecond * 1100) syscall.Kill(syscall.Getpid(), syscall.SIGTERM) }() ConnectAndWork("tcp", "localhost:11300", mux) if count != 5 || time.Since(start) > time.Duration(time.Millisecond*2200) { t.Fail() } }
func getTestConn(t *testing.T) *beanstalk.Conn { conn, err := beanstalk.Dial("tcp", "localhost:11300") if err != nil { t.Fail() } return conn }
func Producer(fname, tubeName string) { if fname == "" || tubeName == "" { return } c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") if err != nil { panic(err) } defer c.Close() c.Tube.Name = tubeName c.TubeSet.Name[tubeName] = true fmt.Println(fname, " [Producer] tubeName:", tubeName, " c.Tube.Name:", c.Tube.Name) for i := 0; i < 5; i++ { msg := fmt.Sprintf("for %s %d", tubeName, i) c.Put([]byte(msg), 30, 0, 120*time.Second) fmt.Println(fname, " [Producer] beanstalk put body:", msg) //time.Sleep(1 * time.Second) } c.Close() fmt.Println("Producer() end.") }
// Connect to the server func (c *Client) Connect() (err error) { if c.Conn != nil { return nil } c.Conn, err = beanstalk.Dial("tcp", c.addr) return err }
// TubesStats connects to beanstalkd and return a hash of all stats // for each tube we are watching // // Return error if fail to connect to beanstalkd of fail to get stats // for a specic tube // // Panic if a specic stat is not an integer, this should never happen func TubesStats() (stats map[string]map[string]int, err error) { conn, err := beanstalk.Dial("tcp", config.BeanstalkdAddr) if err != nil { return stats, fmt.Errorf("Failed to connect to beanstalkd: %s", err) } tubes, err := conn.ListTubes() if err != nil { return stats, fmt.Errorf("Failed to list tubes: %s", err) } stats = map[string]map[string]int{} for _, tubeName := range tubes { if !watchingTube(tubeName) { continue } tube := &beanstalk.Tube{ Name: tubeName, Conn: conn, } data, err := tube.Stats() if err != nil { return stats, fmt.Errorf("Failed to get stats for tube %s: %s", tubeName, err) } stats[tubeName] = map[string]int{ "buried": mustInt(data["current-jobs-buried"]), "ready": mustInt(data["current-jobs-ready"]), "delayed": mustInt(data["current-jobs-delayed"]), "reserver": mustInt(data["current-jobs-reserved"]), "urgent": mustInt(data["current-jobs-urgent"]), "waiting": mustInt(data["current-waiting"]), "total": mustInt(data["total-jobs"]), } } return stats, nil }
func main() { commandArgs, other := parseCommands() beansAddr := parseArgs(other) c, err := beanstalk.Dial("tcp", beansAddr) if err != nil { fmt.Printf("Warning: can't connect to beanstalk on %s (use -b arg to specify correct connection addr:port)\n", beansAddr) } else { defer c.Close() } if len(commandArgs) == 0 { printUsageInfo() os.Exit(1) } switch commandArgs[0] { case "help": helpCmd(c) os.Exit(0) case "server-info": checkConn(c) s, _ := c.Stats() fmt.Println("Global server info:\n") printStats(s) default: checkConn(c) tubeCmd(c, commandArgs[0], commandArgs[1:]) } }
func DialBeanstalk() *beanstalk.Conn { conn, err := beanstalk.Dial("tcp", *beanstalkdAddress) if err != nil { fatal(1, "Error connecting to beanstalkd:\n%v\n", err) } return conn }
func connection() (*beanstalk.Conn, error) { var ( addr string err error ) mut.Lock() if conn == nil { mut.Unlock() addr, err = config.GetString("queue-server") if err != nil { addr = "localhost:11300" } mut.Lock() if conn, err = beanstalk.Dial("tcp", addr); err != nil { mut.Unlock() return nil, err } } if _, err = conn.ListTubes(); err != nil { mut.Unlock() conn = nil return connection() } mut.Unlock() return conn, err }
/** 连接队列 */ func (beansObj *BeansCon) Init(config [2]string) { c, err := beanstalk.Dial(config[0], config[1]) if err != nil { panic(err) } beansObj.conn = c }
func (m *beanstalkdPubSub) dialSubSocket() error { c, err := beanstalk.Dial("tcp", m.address) if err != nil { return err } m.connSub = c return nil }
func main() { flag.Parse() var tb *beanstalk.TubeSet var conn_bs *beanstalk.Conn rs_timeout := time.Duration(Settings.BeanstalkdReserveTimeout) fail_wait := time.Duration(Settings.FailWait) * time.Second conn_bs, e := beanstalk.Dial("tcp", Settings.BeanstalkdAddr) if e != nil { log.Fatal("failed to connected to beanstalkd", e) } tb = beanstalk.NewTubeSet(conn_bs, Settings.BeanstalkdTube) for { // reserve a job id, job, e := tb.Reserve(rs_timeout) // timeout is valid, anything else is fatal if cerr, ok := e.(beanstalk.ConnError); ok && cerr.Err == beanstalk.ErrTimeout { time.Sleep(fail_wait) continue } else if e != nil { log.Fatal("failed to reserve job", e) } else { log.Println("read job id", id, "size", len(job), "bytes") } // connect to the gor replay server conn_gr, e := net.Dial("tcp", Settings.GorReplayAddr) if e != nil { log.Fatal("failed to connected to gor replay server", e) time.Sleep(fail_wait) } // write to gor replay server w, e := conn_gr.Write(job) if e != nil { log.Fatal("failed to write to", Settings.GorReplayAddr, "error", e) } else { log.Println("wrote", w, "bytes to", Settings.GorReplayAddr) } // close connection to gor replay server conn_gr.Close() // delete the job e = conn_bs.Delete(id) if e != nil { log.Println("failed to delete job id", id, "error", e) } } }
func connect(hostname string) *beanstalk.Conn { c, err := beanstalk.Dial("tcp", hostname) if err != nil { fmt.Fprintf(os.Stderr, "Could not connect to %s\n", hostname) os.Exit(2) } return c }
func (m *beanstalkdPubSub) dialPubConnection() error { // open connection for subscription c, err := beanstalk.Dial("tcp", m.address) if err != nil { return err } m.connPub = c return nil }
func main() { c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") if err != nil { fmt.Println("beanstakd not started") os.Exit(1) } id, err := c.Put([]byte("hello"), 1, 0, 120*time.Second) fmt.Println(id, err) }
func (c *Command) Init() error { var err error c.conn, err = beanstalk.Dial("tcp", c.Host) if err != nil { return err } return nil }
// Helper function for retrying a job. This accepts an error and a // number of times that a unit of work should be retried. An optional // DelayDecay func is accepted for setting the amount of time, based // on number of releases, that a unit of work should be delayed before // acting against it again. func (r *Request) RetryJob(err error, maxRetries int, delay DelayDecay) Response { if delay == nil { delay = defaultDecay } beanConn, dialErr := beanstalk.Dial("tcp", r.host) if dialErr != nil { // send it back as retry = 1 return Response{ Result: ReleaseJob, Error: err.Error(), Delay: delay(1), } } defer beanConn.Close() stats, statsErr := beanConn.StatsJob(r.id) if statsErr != nil { // send it back as retry = 1 return Response{ Result: ReleaseJob, Error: err.Error(), Delay: delay(1), } } _, ok := stats["releases"] if !ok { // send it back as retry = 1 return Response{ Result: ReleaseJob, Error: err.Error(), Delay: delay(1), } } releases, strErr := strconv.Atoi(stats["releases"]) if strErr != nil { // send it back as retry = 1 return Response{ Result: ReleaseJob, Error: err.Error(), Delay: delay(1), } } if releases >= maxRetries { return r.BuryJob(err) } return Response{ Result: ReleaseJob, Error: err.Error(), Delay: delay(releases), } }
func (t *BeanWorker) Open(tube string) error { conn, err := beanstalk.Dial("tcp", t.address) if err != nil { return err } conn.Tube = beanstalk.Tube{conn, tube} conn.TubeSet = *beanstalk.NewTubeSet(conn, tube) t.conn = conn return nil }
// main wires up the HTTP API server: loads configuration, connects to
// MongoDB and beanstalkd (storing both in package-level vars), then
// registers Gin routes and runs the server.
func main() {
	// Initialize config.
	// NOTE(review): hard-coded absolute GOPATH location — presumably a
	// dev-only setup; confirm before deploying anywhere else.
	cfg, err := Load("/root/go/src/github.com/pistarlabs/server/config.json")
	if err != nil {
		panic(err)
	}

	// Get development environment configuration (mode is defined
	// elsewhere in this package).
	cfg, err = cfg.Get(mode)
	if err != nil {
		panic(err)
	}

	// Establishing connection to MongoDB (package-level sess).
	sess, err = mgo.Dial("mongodb://" + cfg.UString("database.host") + "/" + cfg.UString("database.name"))
	if err != nil {
		panic(err)
	}

	// Establishing connection to Beanstalkd (package-level queue).
	queue, err = beanstalk.Dial("tcp", cfg.UString("queue.host")+":"+cfg.UString("queue.port"))
	if err != nil {
		panic(err)
	}

	// Router using Gin.
	r := gin.Default()

	// Global middleware (NOTE(review): r.Use() with no arguments is a
	// no-op — was a middleware meant to be registered here?).
	r.Use()

	// No route handler.
	r.NoRoute(noRoute)

	// Route group by API.
	api := r.Group("/api")
	{
		api.GET("/", home)
		api.GET("/token", token)
		api.POST("/send", send)
		// NOTE(review): the routes below register no handler function;
		// gin panics at startup on handler-less routes — confirm these
		// are intentional stubs awaiting handlers.
		api.GET("/messages")
		api.GET("/messages/:id")
		api.POST("/subscribe/:channel")
		api.POST("/unsubscribe/:channel")
		api.POST("/broadcast/:channel")
	}

	// Run server on the configured port.
	r.Run(":" + cfg.UString("server.port"))
}
func NewBeanstalkd(numberOfMessages int, testLatency bool) *Beanstalkd { pub, _ := beanstalk.Dial("tcp", "localhost:11300") sub, _ := beanstalk.Dial("tcp", "localhost:11300") var handler benchmark.MessageHandler if testLatency { handler = &benchmark.LatencyMessageHandler{ NumberOfMessages: numberOfMessages, Latencies: []float32{}, } } else { handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} } return &Beanstalkd{ handler: handler, pub: pub, sub: sub, } }
func newKVStore(proposeC chan<- string, commitC <-chan *string, errorC <-chan error, id *int) *bstalk { var c *beanstalk.Conn //var err error if *id == 1 { log.Printf("connecting to remote beanstalkd") c, _ = beanstalk.Dial("tcp", "127.0.0.1:1234") } else if *id == 2 { log.Printf("connecting to remote beanstalkd") c, _ = beanstalk.Dial("tcp", "127.0.0.1:2234") } else { log.Printf("connecting to local") c, _ = beanstalk.Dial("tcp", "127.0.0.1:3234") } s := &bstalk{proposeC: proposeC, conn: c} // replay log into key-value map s.readCommits(commitC, errorC) // read commits from raft into kvStore map until error go s.readCommits(commitC, errorC) return s }
// NewApnsWorker return a APNS worker instance func NewApnsWorker(name string) *ApnsWorker { dsn := fmt.Sprintf("%s:%s", "0.0.0.0", "11300") queue, err := beanstalk.Dial("tcp", dsn) if err != nil { panic(err) } return &ApnsWorker{ TubeName: name, Conn: queue, } }
func main() { c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") if err != nil { fmt.Println("beanstalkd not started") os.Exit(1) } for { id, body, err := c.Reserve(5 * time.Hour) fmt.Println(id, string(body), err) } }
func Example() { c, err := beanstalk.Dial("tcp", "127.0.0.1:11300") if err != nil { panic(err) } c.Put([]byte("hello"), 1, 0, 120*time.Second) id, body, err := c.Reserve(5 * time.Second) if err != nil { panic(err) } fmt.Println("job", id) fmt.Println(string(body)) }
func assertJobStat(t *testing.T, id uint64, key, value string) { c, err := beanstalk.Dial("tcp", address) if err != nil { t.Fatal(err) } stats, err := c.StatsJob(id) if err != nil { t.Fatal(err) } if stats[key] != value { t.Fatalf("job %d %s = %s, expected %s", id, key, stats[key], value) } }
// NewClient creates a new Client and initializes the beanstalk connection + tubes func NewClient(bstalk string, e *etcd.Client) (*Client, error) { conn, err := beanstalk.Dial("tcp", bstalk) if err != nil { return nil, err } client := &Client{ beanConn: conn, etcd: e, tubes: newTubes(conn), } return client, nil }
func (w *worker) sendFeedback(job *Request, jsonRes []byte) error { beanConn, err := beanstalk.Dial("tcp", w.options.Host) if err != nil { return ErrBeanstalkConnect } defer beanConn.Close() beanConn.Tube.Name = w.tube + "_" + strconv.FormatUint(job.id, 10) _, err = beanConn.Put(jsonRes, w.options.Priority, w.options.Delay, w.options.TTR) if err != nil { return err } return nil }
// connection lazily dials the beanstalkd server named by the
// "queue-server" config key and caches it in the package-level conn.
// Returns an error if the key is missing or the dial fails.
//
// NOTE(review): the check and assignment of the shared conn are not
// guarded by any lock here, so concurrent callers race on it — confirm
// this is only ever called from a single goroutine, or add a mutex.
func connection() (*beanstalk.Conn, error) {
	var (
		addr string
		err  error
	)
	if conn == nil {
		addr, err = config.GetString("queue-server")
		if err != nil {
			return nil, errors.New(`"queue-server" is not defined in config file.`)
		}
		conn, err = beanstalk.Dial("tcp", addr)
	}
	// On a failed dial this returns (nil, err); on a cache hit err is
	// nil from the declaration above.
	return conn, err
}
// NewPeer creates and returns a new Peer for communicating with Beanstalkd. func NewPeer(host string) (*Peer, error) { conn, err := beanstalk.Dial("tcp", host) if err != nil { return nil, err } return &Peer{ conn: conn, messages: make(chan []byte, 10000), send: make(chan []byte), errors: make(chan error, 1), done: make(chan bool), }, nil }
func testWorker(h string, count int, size int, ch chan int) { conn, e := beanstalk.Dial("tcp", h) defer conn.Close() data := make([]byte, size) if e != nil { log.Fatal(e) } for i := 0; i < count; i++ { _, err := conn.Put(data, 0, 0, 120*time.Second) if err != nil { log.Fatal(err) } } ch <- 1 }