Example #1
func (m *MessageHandler) handleEvent(timestamp int64, event, delivery string, payload json.RawMessage) error {
	// Check if we are subscribed to this particular event type.
	if !m.repo.IsSubscribed(event) {
		log.Debugf("ignoring event %q for repository %s", event, m.repo.PrettyName())
		return nil
	}
	log.Infof("receive event %q for repository %q", event, m.repo.PrettyName())

	// Create the blob object and fill in any data that still needs to be set.
	b, err := blob.NewBlobFromPayload(event, delivery, payload)
	if err != nil {
		log.Errorf("creating blob for event %q: %v", event, err)
		return err
	}
	if err := m.prepareForStorage(b); err != nil {
		log.Errorf("preparing event %q for storage: %v", event, err)
		return err
	}

	// Take the timestamp from the NSQ Message (useful if the queue was put on
	// hold or if the process is catching up). This timestamp is a UnixNano.
	b.Timestamp = time.Unix(0, timestamp)
	return m.store.Store(storage.StoreLiveEvent, m.repo, b)
}
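
The timestamp handed to handleEvent comes straight off the NSQ message, which go-nsq exposes as a UnixNano int64. Here is a minimal sketch of how the handler could be wired up, assuming (hypothetically) that the message body is a small JSON envelope carrying the event type, delivery ID and raw payload:

// Sketch only: partialMessage and its fields are assumptions; only
// msg.Timestamp and msg.Body are part of the real go-nsq API.
type partialMessage struct {
	Event    string          `json:"event"`
	Delivery string          `json:"delivery"`
	Payload  json.RawMessage `json:"payload"`
}

// HandleMessage implements the go-nsq Handler interface and delegates to
// handleEvent above.
func (m *MessageHandler) HandleMessage(msg *nsq.Message) error {
	var p partialMessage
	if err := json.Unmarshal(msg.Body, &p); err != nil {
		return err
	}
	// msg.Timestamp is a UnixNano int64, exactly what handleEvent expects.
	return m.handleEvent(msg.Timestamp, p.Event, p.Delivery, p.Payload)
}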
Example #2
// indexingProc takes input from the toIndex channel and pushes the content to
// the Elastic Search backend.
func (s *syncCmd) indexingProc(r *storage.Repository) {
	// Signal the WaitGroup as soon as this worker returns.
	defer s.wgIndex.Done()
	for i := range s.toIndex {
		// We have to serialize back to JSON in order to transform the payload
		// as we wish. This could be optimized out if we were to read the raw
		// GitHub data rather than rely on the typed go-github package.
		payload, err := json.Marshal(i)
		if err != nil {
			log.Errorf("error marshaling githubIndexedItem %q (%s): %v", i.ID(), i.Type(), err)
			continue
		}
		// We create a blob from the payload, which essentially deserializes
		// the object back from JSON...
		b, err := blob.NewBlobFromPayload(i.Type(), i.ID(), payload)
		if err != nil {
			log.Errorf("creating blob from payload %q (%s): %v", i.ID(), i.Type(), err)
			continue
		}
		// Persist the object in Elastic Search.
		if err := s.blobStore.Store(s.options.Storage, r, b); err != nil {
			log.Error(err)
		}
	}
}
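
indexingProc is written as a channel-draining worker that signals s.wgIndex once s.toIndex is closed, which implies a fan-out setup on the producer side. A rough sketch of that setup follows; the worker count and the listItems helper are assumptions, only toIndex, wgIndex and indexingProc come from the code above:

// Sketch only: numIndexers and listItems are hypothetical.
const numIndexers = 4

func (s *syncCmd) indexRepository(r *storage.Repository) {
	s.wgIndex.Add(numIndexers)
	for n := 0; n < numIndexers; n++ {
		go s.indexingProc(r)
	}
	// Feed every item to the workers, then close the channel so each
	// worker's range loop terminates and its deferred wgIndex.Done runs.
	for _, item := range s.listItems(r) {
		s.toIndex <- item
	}
	close(s.toIndex)
	s.wgIndex.Wait()
}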