func (me *Orchestrator) Orchestrate(delivery amqp.Delivery) (*Instructions, error) {
    defer func() {
        err := delivery.Ack(false)
        if err != nil {
            // If this fails, and I don't exit, will I leak memory?
            me.Logger.Printf("amqp - Error acking delivery %+v: %+v\n", delivery, err)
        }
    }()

    var instructions *Instructions
    var err error

    switch {
    case ApplicationJsonRegex.MatchString(delivery.ContentType):
        instructions, err = me.parseJson(delivery.Body)
    default:
        instructions, err = me.parseJson(delivery.Body)
    }
    if err != nil {
        return nil, err
    }

    instructions.AuthToken = me.AuthToken
    return instructions, nil
}
func processMetrics(d *amqp.Delivery) error {
    metrics := make([]*metricdef.IndvMetric, 0)
    if err := json.Unmarshal(d.Body, &metrics); err != nil {
        return err
    }
    logger.Debugf("parsed JSON: %v", metrics)

    for _, m := range metrics {
        logger.Debugf("processing %s", m.Name)
        id := fmt.Sprintf("%d.%s", m.OrgId, m.Name)
        if m.Id == "" {
            m.Id = id
        }
        if err := metricDefs.CheckMetricDef(id, m); err != nil {
            return err
        }
        if err := storeMetric(m); err != nil {
            return err
        }
    }

    if err := d.Ack(false); err != nil {
        return err
    }
    return nil
}
func handle_retry(ch *amqp.Channel, d amqp.Delivery) {
    m := new(models.Task)
    if err := Decode(d.Body, m); err != nil {
        Log.Warn("decode retry message err: %v", err)
    }
    Log.Debug("sender retry message: %+v", m)
    defer d.Ack(false)
}
// receiveQueueMessage receives a message from the DB queue and adds it to the
// queries channel.
func receiveQueueMessage(msg models.QueueMessage, d amqp.Delivery) {
    // Add to queries channel
    db.Worker.Ch <- msg

    // TODO: Pass this delivery object along and send the ACK only after finishing everything?
    d.Ack(false)
}
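A possible answer to the TODO above, sketched under assumptions: the delivery travels through the worker channel together with the message, and the worker acknowledges only after it has finished. The workItem type, the work channel, and handle() are hypothetical names, not part of the original code.

// Hypothetical sketch: defer the ACK until processing is done by passing the
// delivery along with the message. workItem, queueWorker, and handle are
// illustrative names and are not part of the original code.
type workItem struct {
    Msg      models.QueueMessage
    Delivery amqp.Delivery
}

func receiveQueueMessageDeferredAck(msg models.QueueMessage, d amqp.Delivery, work chan<- workItem) {
    // No ack here; the worker acknowledges once it has handled the message.
    work <- workItem{Msg: msg, Delivery: d}
}

func queueWorker(work <-chan workItem) {
    for item := range work {
        if err := handle(item.Msg); err != nil { // handle is an assumed processing func
            item.Delivery.Nack(false, true) // requeue so the message can be retried
            continue
        }
        item.Delivery.Ack(false) // ack only after successful processing
    }
}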
func work(d amqp.Delivery) {
    // Acknowledge whether the job succeeded or not - we do not want it to stay in the queue.
    defer d.Ack(false)

    qe := QueueEntry{}
    if err := json.Unmarshal(d.Body, &qe); err != nil {
        glog.Errorln("worker:work could not unmarshal JSON from queue")
        return
    }

    glog.Infoln("worker:work on job", qe.JobId, qe.Filename)
    if err := handleJob(qe.JobId, qe.Filename); err != nil {
        jobError(qe.JobId, err.Error())
    }
}
// DefaultErrHandler handles delivery errors. It returns true when the message
// was acked (a redelivered message headed for the maintenance queue) and false
// when it was put back on the queue.
func (c *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
    if delivery.Redelivered {
        c.log.Error("Redelivered message gave error again, putting to maintenance queue", err)
        delivery.Ack(false)
        return true
    }

    c.log.Error("an error occurred while sending email: %+v", err.Error())
    delivery.Nack(false, true)
    return false
}
func processEvent(d *amqp.Delivery) error {
    event, err := eventdef.EventFromJSON(d.Body)
    if err != nil {
        return err
    }
    if err = event.Save(); err != nil {
        return err
    }
    if err := d.Ack(false); err != nil {
        return err
    }
    return nil
}
// Consume a single message
func (amqpBroker *AMQPBroker) consumeOne(d amqp.Delivery, taskProcessor TaskProcessor, errorsChan chan error) {
    if len(d.Body) == 0 {
        d.Nack(false, false) // multiple=false, requeue=false: discard
        errorsChan <- errors.New("Received an empty message.") // RabbitMQ down?
        return
    }

    log.Printf("Received new message: %s", d.Body)

    signature := signatures.TaskSignature{}
    if err := json.Unmarshal(d.Body, &signature); err != nil {
        d.Nack(false, false) // multiple=false, requeue=false: discard
        errorsChan <- err
        return
    }

    // If the task is not registered, we nack it and requeue;
    // there might be different workers for processing specific tasks.
    if !amqpBroker.IsTaskRegistered(signature.Name) {
        d.Nack(false, true) // multiple=false, requeue=true
        return
    }

    if err := taskProcessor.Process(&signature); err != nil {
        errorsChan <- err
    }

    d.Ack(false) // multiple=false
}
func (ai *AMQPInput) Run(ir InputRunner, h PluginHelper) (err error) {
    atomic.StoreUint32(&ai.stopped, 0)

    var (
        n   int
        e   error
        msg amqp.Delivery
        ok  bool
    )

    stream, err := ai.ch.Consume(ai.config.Queue, "", false, ai.config.QueueExclusive,
        false, false, nil)
    if err != nil {
        return fmt.Errorf("Cannot consume from queue %s: %s", ai.config.Queue, err)
    }

    sRunner := ir.NewSplitterRunner("")
    if !sRunner.UseMsgBytes() {
        sRunner.SetPackDecorator(ai.packDecorator)
    }

    defer func() {
        ai.usageWg.Done()
        sRunner.Done()
    }()

    for {
        e = nil
        if msg, ok = <-stream; !ok {
            break
        }
        n, e = sRunner.SplitBytes(msg.Body, nil)
        if e != nil {
            ir.LogError(fmt.Errorf("processing message of type %s: %s", msg.Type, e.Error()))
        }
        if n > 0 && n != len(msg.Body) {
            ir.LogError(fmt.Errorf("extra data in message of type %s dropped", msg.Type))
        }
        msg.Ack(false)
    }

    if atomic.LoadUint32(&ai.stopped) == 0 {
        // `Stop` wasn't called; return an error message to trigger a potential
        // restart of the plugin.
        err = fmt.Errorf("Channel closed while reading from queue %s", ai.config.Queue)
    }
    return err
}
func (p *DefaultDeliverer) Deliver(d amqp.Delivery, cfg config.EndpointConfig) {
    queueName, ok := cfg.QueueConfig["queuename"].(string)
    if !ok {
        queueName = "(unknown)"
    }
    log.Printf("Received a message on %s", queueName)

    requeue, err := processMsg(d, cfg)
    if err != nil {
        log.Printf("%s: %s", cfg.Name, err)
        d.Nack(false, requeue)
    } else {
        log.Printf("%s: Message Processed", cfg.Name)
        d.Ack(false)
    }
}
// receiveQueueMessage receives a message from the MCP queue and calls the handler.
func receiveQueueMessage(msg models.QueueMessage, d amqp.Delivery) {
    // Look up the original sender from the message.
    switch msg.Source {
    // From websockets origin, send response back
    case "mcp.ws":
        outMsg := models.WsMessage{BaseMessage: msg.BaseMessage}
        HandleWsResponseMessage(outMsg)
        // TODO: Handle malformed/other messages here
    }

    // TODO: Pass this delivery object along and send the ACK only after finishing everything?
    d.Ack(false)
}
func dispatch_task(ch *amqp.Channel, d amqp.Delivery) {
    t := new(models.Task)
    if err := Decode(d.Body, t); err != nil {
        Log.Warn("decode dispatch task err: %v", err)
    }
    if err := models.CreateTask(Session, t); err != nil {
        Log.Error("create task err: %v", err)
    }
    defer d.Ack(false)

    t.Channel = "HY"
    if err := Publish(EX_INCOMING, "send."+t.SendType, t); err != nil {
        Log.Error("dispatch task %s err: %v", t.SendType, err)
    }
}
func main() {
    conn, err := amqp.Dial("amqp://*****:*****@localhost:5672/")
    failOnError(err, "Failed to connect to RabbitMQ")
    defer conn.Close()

    ch, err := conn.Channel()
    failOnError(err, "Failed to open a channel")
    defer ch.Close()

    q, err := ch.QueueDeclare(
        "task_queue", // name
        true,         // durable
        false,        // delete when unused
        false,        // exclusive
        false,        // noWait
        nil,          // arguments
    )
    failOnError(err, "Failed to declare a queue")

    ch.Qos(3, 0, false)

    msgs, err := ch.Consume(q.Name, "", false, false, false, false, nil)
    failOnError(err, "Failed to register a consumer")

    done := make(chan bool)
    go func() {
        for d := range msgs {
            log.Printf("Received a message: %s", d.Body)
            d.Ack(false)
            done <- true
        }
    }()

    log.Printf(" [*] Waiting for messages. To exit press CTRL+C")
    <-done
    log.Printf("Done")
    os.Exit(0)
}
func (as *AmqpService) handle(delivery *amqp.Delivery) {
    // Ack with multiple=true, which also acknowledges all earlier unacked
    // deliveries on this channel.
    defer delivery.Ack(true)

    log.WithFields(log.Fields{
        "id":   delivery.MessageId,
        "when": delivery.Timestamp,
    }).Info("AMQP delivery received")

    var chat Chat
    err := proto.Unmarshal(delivery.Body, &chat)
    if err != nil {
        log.WithFields(log.Fields{
            "error": err,
        }).Warn("Unable to unmarshal delivery from AMQP")
    } else {
        as.SendChatCommands <- chat
    }
}
func handleReq(ch *amqp.Channel, d amqp.Delivery, handler rpcHandler) {
    contentType, body := handler(d.ContentType, d.Body)

    err := ch.Publish(
        "",        // exchange
        d.ReplyTo, // routing key
        false,     // mandatory
        false,     // immediate
        amqp.Publishing{
            ContentType:   contentType,
            CorrelationId: d.CorrelationId,
            Body:          body,
        })
    if err != nil {
        log.Error("Failed to publish a message: " + err.Error())
        return
    }

    d.Ack(false)
}
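For context, a minimal client-side counterpart to the handler above, assuming the usual RabbitMQ RPC pattern with streadway/amqp: the client declares an exclusive reply queue, publishes with ReplyTo and CorrelationId set, and waits for the matching response. The "rpc_requests" queue name and the rpcCall signature are illustrative, not taken from the original code.

// Sketch of an RPC client (assumption: requests go to a named queue via the
// default exchange; errors is the standard library package).
func rpcCall(ch *amqp.Channel, body []byte, corrID string) ([]byte, error) {
    // Exclusive, auto-delete reply queue; the server publishes back to d.ReplyTo.
    replyQ, err := ch.QueueDeclare("", false, true, true, false, nil)
    if err != nil {
        return nil, err
    }
    msgs, err := ch.Consume(replyQ.Name, "", true, true, false, false, nil)
    if err != nil {
        return nil, err
    }
    err = ch.Publish(
        "",             // default exchange
        "rpc_requests", // illustrative request queue name
        false, false,
        amqp.Publishing{
            ContentType:   "application/octet-stream",
            CorrelationId: corrID,
            ReplyTo:       replyQ.Name,
            Body:          body,
        })
    if err != nil {
        return nil, err
    }
    // Wait for the reply that matches our correlation id.
    for d := range msgs {
        if d.CorrelationId == corrID {
            return d.Body, nil
        }
    }
    return nil, errors.New("reply channel closed before a matching response arrived")
}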
// ProcessMessage processes an amqp.Delivery within a context
func ProcessMessage(c *ctx.Context, m *amqp.Delivery) {
    // Decode the payload
    var payload models.EventTrackingPayload
    err := json.Unmarshal(m.Body, &payload)
    if err != nil {
        c.Logger.Errorf("Impossible to decode payload from message - Error: %s", err.Error())
        // We can ignore the err from Nack because auto-ack is false
        m.Nack(false, true)
        return
    }

    err = c.StorageDb.StoreBatchEvents(&payload)
    if err != nil {
        c.Logger.Errorf("Impossible to store payload from message - Error: %s", err.Error())
        // We can ignore the err from Nack because auto-ack is false
        m.Nack(false, true)
        return
    }

    // ACK that the message has been processed successfully
    c.Logger.Infof("Successfully processed message: %s", m.MessageId)
    err = m.Ack(false)
    if err != nil {
        c.Logger.Infof("Could not ack message: %s", err.Error())
    }
}
func send_mail(ch *amqp.Channel, d amqp.Delivery) {
    t := new(models.Task)
    if err := Decode(d.Body, t); err != nil {
        Log.Warn("decode send message err: %v", err)
    } else {
        Log.Info("consuming queue: %s", t.TaskId)
    }

    sender := get_sender(t)
    res, err := sender.Send()
    r := res.(mailer.Result)

    code := 200
    if err == nil && r != nil {
        if !r.IsSuccess() {
            code = 400
            Log.Error("something wrong with send email %s: %v", t.TaskId, r.Error())
            e := Publish(EX_INCOMING, RK_RETRY, t)
            if e != nil {
                Log.Warn("publish retry task %s err: %v", t.TaskId, e)
            }
        }
    } else if err != nil {
        code = 500
        Log.Error("mail send error: %v", err)
    }

    defer d.Ack(false)

    err = models.SendLog(Session, t, code, r.TaskId())
    if err != nil {
        // TODO: refund money when the SP returns an unsuccessful message
        Log.Error("save sendlog %s err: %v", t.TaskId, err)
    }

    intervals := map[string]int{
        "notice": 1,
        "fanout": 3,
    }
    <-time.After(time.Second * time.Duration(intervals[t.SendType]))
}
func emptyQueue(amqpURI, queueName string) error {
    var conn *amqp.Connection
    var channel *amqp.Channel
    var err error
    var ok bool
    var msg amqp.Delivery

    log.Printf("dialing %q", amqpURI)
    conn, err = amqp.Dial(amqpURI)
    if err != nil {
        return fmt.Errorf("Dial: %s", err)
    }

    log.Printf("got Connection, getting Channel")
    channel, err = conn.Channel()
    if err != nil {
        return fmt.Errorf("Channel: %s", err)
    }

    for {
        if msg, ok, err = channel.Get(
            queueName, // name of the queue
            false,     // autoAck
        ); err != nil {
            return fmt.Errorf("Queue Get: %s", err)
        }
        if !ok {
            log.Printf("Queue empty")
            break
        }
        log.Printf(
            "got %dB delivery: [%v] %q",
            len(msg.Body),
            msg.DeliveryTag,
            msg.Body,
        )
        msg.Ack(false)
    }

    return nil
}
func consumer(id int, db *sql.DB, conn *amqp.Connection) {
    fmt.Println("starting processor")

    channel, _ := conn.Channel()
    channel.QueueDeclare("repos", false, false, false, false, nil)
    channel.QueueDeclare("repos-priority", false, false, false, false, nil)
    channel.Qos(1, 0, true)

    priRepos, _ := channel.Consume("repos-priority", "consumer-"+strconv.Itoa(rand.Int()), false, false, false, false, nil)
    regRepos, _ := channel.Consume("repos", "consumer-"+strconv.Itoa(rand.Int()), false, false, false, false, nil)

    for {
        var message amqp.Delivery
        select {
        case message = <-priRepos:
            processRepo(string(message.Body), db, id)
            message.Ack(false)
        case message = <-regRepos:
            processRepo(string(message.Body), db, id)
            message.Ack(false)
        }
    }
}
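One caveat in the consumer above: when both queues have messages ready, Go's select picks a case at random, so "repos-priority" is not strictly preferred. A sketch that drains the priority queue first, reusing the two consumer channels from the snippet above (the function name is illustrative):

// Sketch of strict priority: always try the priority channel first and only
// fall back to the regular queue when it is empty.
func consumeWithPriority(priRepos, regRepos <-chan amqp.Delivery, db *sql.DB, id int) {
    for {
        // Non-blocking check of the priority queue first.
        select {
        case message := <-priRepos:
            processRepo(string(message.Body), db, id)
            message.Ack(false)
            continue
        default:
        }
        // Otherwise block on whichever queue delivers next.
        select {
        case message := <-priRepos:
            processRepo(string(message.Body), db, id)
            message.Ack(false)
        case message := <-regRepos:
            processRepo(string(message.Body), db, id)
            message.Ack(false)
        }
    }
}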
func (ai *AMQPInput) Run(ir InputRunner, h PluginHelper) (err error) {
    var (
        n   int
        e   error
        msg amqp.Delivery
        ok  bool
    )

    defer ai.usageWg.Done()

    stream, err := ai.ch.Consume(ai.config.Queue, "", false, ai.config.QueueExclusive,
        false, false, nil)
    if err != nil {
        return
    }

    sRunner := ir.NewSplitterRunner("")
    if !sRunner.UseMsgBytes() {
        sRunner.SetPackDecorator(ai.packDecorator)
    }

    for {
        e = nil
        if msg, ok = <-stream; !ok {
            break
        }
        n, e = sRunner.SplitBytes(msg.Body, nil)
        if e != nil {
            ir.LogError(fmt.Errorf("processing message of type %s: %s", msg.Type, e.Error()))
        }
        if n > 0 && n != len(msg.Body) {
            ir.LogError(fmt.Errorf("extra data in message of type %s dropped", msg.Type))
        }
        msg.Ack(false)
    }

    return nil
}
// receiveQueueMessage receives a message from the basic modules queue and
// dispatches it to the appropriate handler.
func receiveQueueMessage(msg models.QueueMessage, d amqp.Delivery) {
    // Get source exchange channel
    exchange := strings.Split(msg.Sender, ".")[0]

    // Look up the original exchange channel from the message, then call the logic and send a response.
    switch exchange {
    // Coming from MCP
    case "mcp":
        handleMcpMessage(msg)
    // Coming from a Worker
    case "workers":
        handleWorkerMessage(msg)
    // Unknown source
    default:
        fmt.Println("Unknown message exchange source for Basic Module")
    }

    // TODO: Pass this delivery object along and send the ACK only after finishing everything?
    d.Ack(false)
}
func (c *Consumer) handleError(err error, delivery amqp.Delivery) {
    switch err {
    case nil:
        c.withCounter("success", func() {
            delivery.Ack(false)
        })
    case ErrNoHandlerFound:
        c.withCounter("handlernotfound", func() {
            delivery.Ack(false)
        })
        c.Log.Debug("unknown event type (%s) received, deleting message from RMQ", delivery.Type)
    case gorm.RecordNotFound:
        c.withCounter("gormrecordnotfound", func() {
            delivery.Ack(false)
        })
        c.Log.Warning("Record not found in our db (%s) received, deleting message from RMQ", string(delivery.Body))
    case mgo.ErrNotFound:
        c.withCounter("mgorecordnotfound", func() {
            delivery.Ack(false)
        })
        c.Log.Warning("Record not found in our mongo db (%s) received, deleting message from RMQ", string(delivery.Body))
    default:
        c.withCounter("othererror", func() {
            // The default err handler should handle the ack process.
            if c.context.DefaultErrHandler(delivery, err) {
                if c.MaintenancePublisher == nil {
                    return
                }
                data, err := json.Marshal(delivery)
                if err != nil {
                    return
                }
                msg := amqp.Publishing{
                    Body:  data,
                    AppId: c.WorkerName,
                }
                c.withCounter("publishedtomaintenancequeue", func() {
                    c.MaintenancePublisher.Publish(msg)
                })
            }
        })
    }
}
// DefaultErrHandler logs the error, acks the delivery and always returns false.
func (c *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
    c.logger.Error("an error occurred deleting dispatcher event: %s", err)
    delivery.Ack(false)
    return false
}
// DefaultErrHandler handles the error by nacking the delivery so it is put
// back on the queue; it always returns false.
func (c *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
    c.log.Error("an error occurred, putting the message back on the queue", err)
    delivery.Nack(false, true)
    return false
}
func (s *Server) handleDelivery(d amqp.Delivery) {
    defer func() {
        <-s.parallelMethods
        s.wg.Done()
    }()

    if d.CorrelationId == "" || d.ReplyTo == "" {
        d.Nack(false, false) // drop message
        logf("dropped message: %+v", d)
        return
    }

    var (
        msg rpcMsg
        ret []byte
        err error
    )
    if err = json.Unmarshal(d.Body, &msg); err == nil {
        f, ok := s.methods[msg.Method]
        if ok {
            ret, err = f(d.CorrelationId, msg.Data)
        } else {
            err = errors.New("method has not been registered")
        }
    } else {
        err = errors.New("cannot unmarshal message")
    }

    errStr := ""
    if err != nil {
        errStr = err.Error()
    }

    result := &Result{
        UUID: d.CorrelationId,
        Data: ret,
        Err:  errStr,
    }
    body, err := json.Marshal(result)
    if err != nil {
        d.Nack(false, true) // requeue message
        logf("requeued message: %+v", d)
        return
    }

    // Guarantee that the received ack/nack corresponds with this publishing.
    s.ac.mu.Lock()
    defer s.ac.mu.Unlock()

    err = s.ac.channel.Publish(
        "",        // exchange
        d.ReplyTo, // key
        false,     // mandatory
        false,     // immediate
        amqp.Publishing{ // msg
            CorrelationId: d.CorrelationId,
            ReplyTo:       d.ReplyTo,
            ContentType:   "application/json",
            Body:          body,
            DeliveryMode:  uint8(s.DeliveryMode),
        },
    )
    if err != nil {
        d.Nack(false, true) // requeue message
        return
    }

    select {
    case _, ok := <-s.ac.acks:
        if ok {
            d.Ack(false)
            return
        }
    case tag, ok := <-s.ac.nacks:
        if ok {
            logf("nack received (%v)", tag)
            d.Nack(false, true) // requeue message
            return
        }
    }

    logf("missing ack/nack")
    d.Nack(false, true) // requeue message
}
// HandleDelivery handles the amqp.Delivery object and either prints it out or
// writes it to a file.
func HandleDelivery(delivery amqp.Delivery, debugger amqptools.Debugger) {
    addlData := make(map[string]interface{})
    addlData["BodyAsString"] = string(delivery.Body)

    deliveryPlus := &amqptools.DeliveryPlus{
        delivery,
        addlData,
    }

    var jsonBytes []byte
    var err error

    // Necessary because otherwise the delivery isn't marshalable.
    deliveryPlus.RawDelivery.Acknowledger = nil

    if *prettyPrint {
        jsonBytes, err = json.MarshalIndent(deliveryPlus, "", "\t")
        if debugger.WithError(err, "Unable to marshal delivery into JSON.") {
            return
        }
    } else {
        jsonBytes, err = json.Marshal(deliveryPlus)
        if debugger.WithError(err, "Unable to marshal delivery into JSON.") {
            return
        }
    }

    if len(*outDirFlag) == 0 {
        fmt.Println(string(jsonBytes))
    } else {
        var folderName string
        if len(delivery.MessageId) > 0 {
            folderName = delivery.MessageId
        } else {
            h := sha1.New()
            fmt.Fprintf(h, "%s", jsonBytes)
            folderName = fmt.Sprintf("%x", h.Sum(nil))
        }

        var exchangeStr string
        if len(delivery.Exchange) == 0 {
            exchangeStr = "_"
        } else {
            exchangeStr = delivery.Exchange
        }

        pathParts := []string{
            strings.TrimRight(*outDirFlag, string(os.PathSeparator)),
            exchangeStr,
            folderName,
        }
        fullPath := strings.Join(pathParts, string(os.PathSeparator))
        fileName := fmt.Sprintf("%s%smessage.json", fullPath, string(os.PathSeparator))

        err = os.MkdirAll(fullPath, os.ModeDir|os.ModePerm)
        if debugger.WithError(err, fmt.Sprintf("Unable to create output directory '%s'.", fullPath)) {
            return
        }

        file, err := os.Create(fileName)
        if debugger.WithError(err, fmt.Sprintf("Unable to create file '%s'.", fileName)) {
            return
        }

        _, err = file.Write(jsonBytes)
        if debugger.WithError(err, fmt.Sprintf("Unable to write data into buffer for '%s'.", fileName)) {
            return
        }

        debugger.Print(fmt.Sprintf("Data written to %s", fileName))

        err = file.Close()
        debugger.WithError(err, fmt.Sprintf("Unable to close file '%s'.", fileName))
    }

    if *keepMessages {
        err = delivery.Reject(true)
    } else {
        err = delivery.Ack(false)
    }
    if debugger.WithError(err, "Unable to ack/reject the message.") {
        return
    }
}
// DefaultErrHandler handles the errors for collaboration worker
func (t *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
    delivery.Nack(false, true)
    return false
}
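An unconditional Nack with requeue, as above, can bounce a poison message forever. A hedged variant that reuses the Redelivered check from the earlier Controller handler in this collection, acking on the second failure instead of requeueing again; the method name is illustrative:

// Sketch: requeue once, then drop (ack) on redelivery to avoid an endless
// redelivery loop. Mirrors the delivery.Redelivered check used earlier.
func (t *Controller) errHandlerWithRedeliveryGuard(delivery amqp.Delivery, err error) bool {
    if delivery.Redelivered {
        // Second failure: acknowledge so the message leaves the queue.
        delivery.Ack(false)
        return true
    }
    // First failure: put it back on the queue for one retry.
    delivery.Nack(false, true)
    return false
}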
// DefaultErrHandler logs the error, acks the delivery and always returns false.
func (r *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
    r.log.Error("an error occurred deleting realtime event", err)
    delivery.Ack(false)
    return false
}
func parseMessage(msg amqp.Delivery) {
    debug.Println("Msg:", string(msg.Body))

    m := &totemResult{}
    err := json.Unmarshal(msg.Body, m)
    if err != nil {
        warning.Printf("Could not decode msg: %s\n", msg.Body)
        msg.Nack(false, false)
        return
    }

    /*
        // This approach has been revised since the data is now
        // saved as a string and not as a pure JSON document.

        // Since totem sends the results as a JSON-encoded string
        // (which contains JSON) we need to unmarshal the data
        // and save it this way.
        var resData interface{}
        err = json.Unmarshal([]byte(m.Data), &resData)
        if err != nil {
            warning.Printf("Could not decode data: %s\n", m.Data)
            msg.Nack(false, false)
            return
        }
    */

    // TODO: Add validation to the received msg
    //m.Validate()

    // TODO: Totem needs to send more data
    // TODO: Totem needs to send hashes lowercase
    result := &storerGeneric.Result{
        Id:                "",                        // will be filled by the storage engine
        SHA256:            strings.ToLower(m.SHA256), // totem currently sends the hash all upper case
        SchemaVersion:     "1",
        UserId:            "NotSend",
        SourceId:          []string{"NotSend"},
        SourceTag:         []string{"NotSend"},
        ServiceName:       strings.SplitN(msg.RoutingKey, ".", 2)[0],
        ServiceVersion:    "NotSend",
        ServiceConfig:     "NotSend",
        ObjectCategory:    []string{"NotSend"},
        ObjectType:        "sample",
        Results:           m.Data,
        Tags:              m.Tags,
        StartedDateTime:   time.Now(),
        FinishedDateTime:  time.Now(),
        WatchguardStatus:  "NotImplemented",
        WatchguardLog:     []string{"NotImplemented"},
        WatchguardVersion: "NotImplemented",
    }

    err = mainStorer.StoreResult(result)
    if err != nil {
        if strings.Contains(err.Error(), "Size must be between 0 and 16793600") {
            warning.Println("Message too large, dropped!", err.Error())
            msg.Ack(false)
            return
        }
        warning.Println("Failed to save result:", err.Error(), "SHA256:", m.SHA256)
        msg.Nack(false, true)
        return
    }

    debug.Println("Msg saved successfully!")
    msg.Ack(false)
}