func (this *Beanstalkd) handle(id uint64, body []byte, conn *beanstalk.Conn) {
	this.processRaw(string(body))
	time.Sleep(time.Second * 2)
	if err := conn.Delete(id); err != nil {
		this.Log.Error("Could not delete job %d: %s", id, err.Error())
	}
}
// deleteJobs deletes all jobs in the given tube that are in the specified state.
func (m *mainFrame) deleteJobs(c *beanstalk.Conn, tubeName, state string) (int, error) {
	var id uint64
	var err error
	t := &beanstalk.Tube{Conn: c, Name: tubeName}
	n := 0
	for {
		switch state {
		case "ready":
			id, _, err = t.PeekReady()
		case "buried":
			id, _, err = t.PeekBuried()
		case "delayed":
			id, _, err = t.PeekDelayed()
		}
		if err != nil {
			// Peek fails once no more jobs remain in this state.
			return n, err
		}
		if err := c.Delete(id); err != nil {
			return n, err
		}
		n++
	}
}
func main() {
	flag.Parse()

	var tb *beanstalk.TubeSet
	var conn_bs *beanstalk.Conn

	rs_timeout := time.Duration(Settings.BeanstalkdReserveTimeout)
	fail_wait := time.Duration(Settings.FailWait) * time.Second

	conn_bs, e := beanstalk.Dial("tcp", Settings.BeanstalkdAddr)
	if e != nil {
		log.Fatal("failed to connect to beanstalkd ", e)
	}
	tb = beanstalk.NewTubeSet(conn_bs, Settings.BeanstalkdTube)

	for {
		// reserve a job; a timeout is expected, anything else is fatal
		id, job, e := tb.Reserve(rs_timeout)
		if cerr, ok := e.(beanstalk.ConnError); ok && cerr.Err == beanstalk.ErrTimeout {
			time.Sleep(fail_wait)
			continue
		} else if e != nil {
			log.Fatal("failed to reserve job ", e)
		} else {
			log.Println("read job id", id, "size", len(job), "bytes")
		}

		// connect to the gor replay server
		conn_gr, e := net.Dial("tcp", Settings.GorReplayAddr)
		if e != nil {
			log.Fatal("failed to connect to gor replay server ", e)
		}

		// write the job payload to the gor replay server
		w, e := conn_gr.Write(job)
		if e != nil {
			log.Fatal("failed to write to ", Settings.GorReplayAddr, " error ", e)
		} else {
			log.Println("wrote", w, "bytes to", Settings.GorReplayAddr)
		}

		// close the connection to the gor replay server
		conn_gr.Close()

		// delete the job
		e = conn_bs.Delete(id)
		if e != nil {
			log.Println("failed to delete job id", id, "error", e)
		}
	}
}
func consumer() {
	var c *beanstalk.Conn
	for {
		// Connect to beanstalkd; keep retrying inside the loop until it succeeds.
		if c == nil {
			var err error
			c, err = beanstalk.Dial("tcp", CONN_BS_HOST+":"+CONN_BS_PORT)
			if err != nil {
				fmt.Println("Error connecting to beanstalkd:", err.Error())
				continue
			}
			fmt.Println("Consumer connected to beanstalkd successfully.")
		}
		/*
			keys := make([]string, 0, len(mSNConn))
			for k := range mSNConn {
				keys = append(keys, k)
			}
			tubeSet := beanstalk.NewTubeSet(c, keys...)
			id, body, err := tubeSet.Reserve(5 * time.Second)
		*/
		// Blocking call: waits up to 120 seconds for a job.
		id, body, err := c.Reserve(120 * time.Second)
		if err != nil {
			// A timeout here means the job queue is currently empty.
			//fmt.Println("Error consuming beanstalk:", err.Error())
			continue
		}
		// Route the payload by its three-byte prefix.
		val, ok := mSNConn[string(body[:3])]
		if ok {
			val.Write(body)
		}
		// Delete the job once it has been dispatched.
		c.Delete(id)
		fmt.Printf("task id is: 【%d】; task content is 【%s】\n", id, string(body))
	}
}
func main() {
	flag.Parse()
	if len(ignoreChannels) > 0 {
		common.SetIgnores(ignoreChannels)
	}

	var (
		bs  *beanstalk.Conn
		err error
	)
	if len(flag.Args()) >= 1 {
		bs, err = beanstalk.Dial("tcp", flag.Args()[0])
		if err != nil {
			log.Fatal(err)
		}
		if !quiet {
			log.Printf("Connected to [%s]", flag.Args()[0])
		}
		if len(flag.Args()) >= 2 {
			bs.Tube.Name = flag.Args()[1]
		}
	} else {
		log.Fatalf("provide the beanstalk publisher address, e.g. example.com:11300")
	}

	// Clear out the old messages before we start back up.
	for {
		id, msg, err := bs.Reserve(5 * time.Second)
		if err != nil {
			break
		}
		if !quiet {
			noti_msg := common.IrcNotify{}
			json.Unmarshal(msg, &noti_msg)
			log.Printf("removing old message [%s]", noti_msg.Message)
		}
		err = bs.Delete(id)
		if err != nil {
			log.Fatal(err)
		}
	}

	for {
		id, msg, err := bs.Reserve(5 * time.Second)
		if err == nil {
			noti_msg := common.IrcNotify{}
			json.Unmarshal(msg, &noti_msg)
			go common.Display(noti_msg, linger, quiet)
			err = bs.Delete(id)
			if err != nil {
				log.Fatal(err)
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// QueueDelete removes the job with the given id from beanstalkd.
func QueueDelete(conn *beanstalk.Conn, id uint64) error {
	return conn.Delete(id)
}
// getDelete deletes the job with the given id; the body argument is accepted
// but unused, and the Delete error is discarded.
func getDelete(c *beanstalk.Conn, id uint64, body []byte) {
	c.Delete(id)
}
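// For reference, a minimal, self-contained sketch (not taken from any of the
// snippets above) of the reserve/process/delete loop they all share, assuming
// the github.com/beanstalkd/go-beanstalk client. The address, timeout values,
// and the handleJob helper are placeholders, not part of the original code.
package main

import (
	"log"
	"time"

	"github.com/beanstalkd/go-beanstalk"
)

// handleJob stands in for whatever processing a real worker would do.
func handleJob(body []byte) error {
	log.Printf("processing %d bytes", len(body))
	return nil
}

func main() {
	conn, err := beanstalk.Dial("tcp", "127.0.0.1:11300")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		// Reserve a job; a timeout only means no job was available.
		id, body, err := conn.Reserve(5 * time.Second)
		if cerr, ok := err.(beanstalk.ConnError); ok && cerr.Err == beanstalk.ErrTimeout {
			continue
		} else if err != nil {
			log.Fatal(err)
		}

		if err := handleJob(body); err != nil {
			// Release the job so another worker can retry it later.
			if rerr := conn.Release(id, 1, 10*time.Second); rerr != nil {
				log.Println("release failed:", rerr)
			}
			continue
		}

		// Delete the job only after it has been handled successfully.
		if err := conn.Delete(id); err != nil {
			log.Println("delete failed:", err)
		}
	}
}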