func withRandomJob(t *testing.T, conn *beanstalk.Conn) *Job { id, err := conn.Put([]byte{}, 0, 0, time.Minute*5) if err != nil { t.Fail() } return NewJob(conn, "default", id, []byte{}) }
func (this *Beanstalkd) handle(id uint64, body []byte, conn *beanstalk.Conn) { this.processRaw(string(body)) time.Sleep(time.Second * 2) if err := conn.Delete(id); err != nil { this.Log.Error("Could not delete job %d: %s", id, err.Error()) } }
// deleteJobs deletes the jobs with specified state func (m *mainFrame) deleteJobs(c *beanstalk.Conn, tubeName, state string) (int, error) { var id uint64 var err error t := &beanstalk.Tube{c, tubeName} n := 0 for { switch state { case "ready": id, _, err = t.PeekReady() case "buried": id, _, err = t.PeekBuried() case "delayed": id, _, err = t.PeekDelayed() } if err != nil { return n, err } if err := c.Delete(id); err != nil { return n, err } n++ } return n, nil }
func withRandomReservedJob(t *testing.T, conn *beanstalk.Conn) *Job { withRandomJob(t, conn) id, body, err := conn.Reserve(0) if err != nil { t.Fail() } return NewJob(conn, "default", id, body) }
func main() { flag.Parse() var tb *beanstalk.TubeSet var conn_bs *beanstalk.Conn rs_timeout := time.Duration(Settings.BeanstalkdReserveTimeout) fail_wait := time.Duration(Settings.FailWait) * time.Second conn_bs, e := beanstalk.Dial("tcp", Settings.BeanstalkdAddr) if e != nil { log.Fatal("failed to connected to beanstalkd", e) } tb = beanstalk.NewTubeSet(conn_bs, Settings.BeanstalkdTube) for { // reserve a job id, job, e := tb.Reserve(rs_timeout) // timeout is valid, anything else is fatal if cerr, ok := e.(beanstalk.ConnError); ok && cerr.Err == beanstalk.ErrTimeout { time.Sleep(fail_wait) continue } else if e != nil { log.Fatal("failed to reserve job", e) } else { log.Println("read job id", id, "size", len(job), "bytes") } // connect to the gor replay server conn_gr, e := net.Dial("tcp", Settings.GorReplayAddr) if e != nil { log.Fatal("failed to connected to gor replay server", e) time.Sleep(fail_wait) } // write to gor replay server w, e := conn_gr.Write(job) if e != nil { log.Fatal("failed to write to", Settings.GorReplayAddr, "error", e) } else { log.Println("wrote", w, "bytes to", Settings.GorReplayAddr) } // close connection to gor replay server conn_gr.Close() // delete the job e = conn_bs.Delete(id) if e != nil { log.Println("failed to delete job id", id, "error", e) } } }
func helpCmd(c *beanstalk.Conn) { printUsageInfo() fmt.Println("\nAvailable tube actions:") fmt.Println("- info") fmt.Println("- kick [bound]") fmt.Println("- delete all|{id}") fmt.Println("- put {data} [pri] [delay] [ttr]") if c != nil { tubes, _ := c.ListTubes() fmt.Printf("\nAvailable tubes: %s\n", strings.Join(tubes, " | ")) } fmt.Println("\nGlobal args:") fmt.Println("-b=127.0.0.1:11300\tBeanstalkd [addr]:port") fmt.Println("") }
func NewClient(addr string, tubes []string) (client *Client, err error) { var conn *beanstalk.Conn if conn, err = beanstalk.Dial("tcp", addr); err != nil { return } conn.TubeSet = *beanstalk.NewTubeSet(conn, tubes...) client = &Client{ conn: conn, mu: new(sync.Mutex), ReserveTimeout: time.Duration(5 * time.Second), } return }
func (this *Beanstalkd) selectTubes(conn *beanstalk.Conn) *beanstalk.Tube { tubes, err := conn.ListTubes() if err != nil { this.Log.Error("Could not retrieve list of tubes: %s", err.Error()) } for _, tube := range tubes { t := &beanstalk.Tube{conn, tube} stats, err := t.Stats() if err != nil { this.Log.Error("Could not get stats for tube %s", t) } ready, _ := strconv.Atoi(stats["current-jobs-ready"]) watching, _ := strconv.Atoi(stats["current-watching"]) if ready > 0 && watching < this.MaxWatchers { return t } } return nil }
func consumer() { var c *beanstalk.Conn = nil for { // connect to beanstalkd, infinite loop, if c == nil { var err error = nil c, err = beanstalk.Dial("tcp", CONN_BS_HOST+":"+CONN_BS_PORT) if err != nil { fmt.Println("Error connect to beanstalkd:", err.Error()) } else { fmt.Println("Consumer connect to beanstalkd successfully.") } } /* keys := make([]string, 0, len(mSNConn)) for k := range mSNConn { keys = append(keys, k) } tubeSet := beanstalk.NewTubeSet(c, keys...) id, body, err := tubeSet.Reserve(5*time.Second) */ // blocking api id, body, err := c.Reserve(120 * time.Second) if err != nil { // this err indicate the job queue is empty //fmt.Println("Error comsume beanstalk:", err.Error()) } else { // val, ok := mSNConn[string(body[:3])] if ok { val.Write(body) } // delete job c.Delete(id) fmt.Printf("task id is: 【%d】; task content is 【%s】\n", id, string(body)) } } }
func main() { flag.Parse() if len(ignoreChannels) > 0 { common.SetIgnores(ignoreChannels) } var ( bs *beanstalk.Conn err error ) if len(flag.Args()) >= 1 { bs, err = beanstalk.Dial("tcp", flag.Args()[0]) if err != nil { log.Fatal(err) } if !quiet { log.Printf("Connected to [%s]", flag.Args()[0]) } if len(flag.Args()) >= 2 { bs.Tube.Name = flag.Args()[1] } } else { log.Fatalf("provide the beanstalk publisher! like example.com:11300") } //Clear out the old messages before we start back up for { id, msg, err := bs.Reserve(5 * time.Second) if !quiet { noti_msg := common.IrcNotify{} json.Unmarshal(msg, ¬i_msg) log.Printf("removing old message [%s]", noti_msg.Message) } if err != nil { break } err = bs.Delete(id) if err != nil { log.Fatal(err) } } for { id, msg, err := bs.Reserve(5 * time.Second) if err == nil { noti_msg := common.IrcNotify{} json.Unmarshal(msg, ¬i_msg) go common.Display(noti_msg, linger, quiet) err = bs.Delete(id) if err != nil { log.Fatal(err) } } time.Sleep(500 * time.Millisecond) } }
// QueueDelete removes the job with the given id from the queue, returning
// any error reported by the server.
func QueueDelete(conn *beanstalk.Conn, id uint64) error {
	return conn.Delete(id)
}
// QueueDisconnect closes the beanstalkd connection. The close error is
// deliberately discarded; there is nothing useful to do with it at teardown.
func QueueDisconnect(conn *beanstalk.Conn) {
	_ = conn.Close()
}
func jobExists(t *testing.T, conn *beanstalk.Conn, id uint64) bool { _, err := conn.Peek(id) return err == nil || !strings.HasSuffix(err.Error(), "not found") }
// getBury buries the reserved job with priority zero. The body parameter is
// unused but kept so the function matches the shared handler signature; the
// server error is deliberately ignored.
func getBury(c *beanstalk.Conn, id uint64, body []byte) {
	_ = c.Bury(id, 0)
}
// getDelete removes the job from the server. The body parameter is unused
// but kept so the function matches the shared handler signature; the server
// error is deliberately ignored.
func getDelete(c *beanstalk.Conn, id uint64, body []byte) {
	_ = c.Delete(id)
}
// getRelease puts the reserved job back onto the ready queue with priority
// PRI and no delay. The body parameter is unused but kept so the function
// matches the shared handler signature; the server error is deliberately
// ignored.
func getRelease(c *beanstalk.Conn, id uint64, body []byte) {
	_ = c.Release(id, PRI, 0)
}