Example #1
File: indexer.go Project: mrares/no
func processor(partition int, conn *net.UDPConn, tick <-chan compactionTick, quit chan struct{}, wg *sync.WaitGroup) {
	// Note: wg.Add here races with a caller that invokes wg.Wait right after
	// starting the goroutine; ideally the caller does wg.Add(1) beforehand.
	wg.Add(1)
	defer wg.Done()

	shouldQuit := false
	buf := make([]byte, 65536+4) // 4-byte length prefix + max UDP payload
	index := make(map[string]*deque)
	data := pb.Data{}
	totalMsg := 0

	log.Printf("new processor %d", partition)

	var file *os.File
	defer func() {
		// Evaluate file at exit time; a plain defer file.Close() would bind
		// the nil value file holds here and never close the real file.
		if file != nil {
			file.Close()
		}
	}()
	// getFile returns the open data file for the current epoch, creating it
	// and writing the header on first use, plus the current write offset.
	getFile := func(t time.Time) (*os.File, int64) {
		if file == nil {
			_, fileName, err := makePath(t)
			if err != nil {
				log.Println("Failed to create path", fileName, err)
				return nil, 0
			}

			fileName += fmt.Sprintf("%d.%d.data", t.Unix(), partition)
			log.Printf("[%d] open new file %s for epoch %d", partition, fileName, t.Unix())

			if file, err = createFile(fileName); err != nil {
				log.Println("Failed to open file", fileName, err)
				return nil, 0
			}

			if _, err = file.WriteString(dataFileHeader); err != nil {
				log.Println("Failed to write to file", err)
				return nil, 0
			}
		}

		offset, _ := file.Seek(0, io.SeekCurrent) // current write position = record offset
		return file, offset
	}

	ctick := <-tick

loop:
	for {
		select {
		case <-quit:
			shouldQuit = true
			log.Printf("[%d] will quit", partition)

		case ct := <-tick:
			log.Printf("[%d] send job to compactor %d", partition, ctick.t.Unix())
			ctick.ch <- compactionJob{partition, totalMsg, index}
			index = make(map[string]*deque)
			if file != nil {
				file.Close()
				file = nil
			}
			totalMsg = 0
			ctick = ct

			if shouldQuit {
				log.Printf("[%d] quiting", partition)
				break loop
			}

		default:
			// Poll with a short read deadline so the quit and tick channels
			// are re-checked roughly every millisecond.
			conn.SetReadDeadline(time.Now().Add(time.Millisecond))
			length, err := conn.Read(buf[4:])
			if err != nil {
				if nerr, ok := err.(net.Error); ok && !nerr.Timeout() {
					log.Println("UDP read error", err)
				}
				continue
			}

			data.Reset()
			if err := data.Unmarshal(buf[4 : length+4]); err != nil {
				log.Println("Failed to decode", err)
				continue
			}

			// getFile caches the open file in the closed-over file variable;
			// a distinct local name avoids shadowing it here.
			f, offset := getFile(ctick.t)
			if f == nil {
				log.Println("Failed to get file")
				continue
			}

			// Frame the record: 4-byte length prefix, then the raw payload.
			intToByteArray(uint32(length), buf[0:4])
			if _, err := f.Write(buf[:length+4]); err != nil {
				log.Println("Failed to write to file", err)
				continue
			}

			tags := data.GetHeader().GetTags()
			if len(tags) > maxTagsInMessage {
				log.Println("Too many tags in message ", len(tags))
				continue
			}

			for _, tag := range tags {
				if len(tag) > maxTagLength {
					log.Println("Too long tag")
					continue
				}

				// Index this record's file offset under each of its tags.
				deque, ok := index[tag]
				if !ok {
					deque = newDeque(partition)
					index[tag] = deque
				}

				deque.Append(uint32(offset))
			}

			totalMsg++
		}
	}
}
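
Both examples frame every record with a four-byte length prefix via intToByteArray (and byteArrayToInt on the read side in Example #2), but the listing does not include those helpers. A minimal sketch consistent with the call sites, assuming big-endian byte order (the actual encoding is not shown), could look like this:

import "encoding/binary"

const uint32Size = 4

// intToByteArray encodes v into the first four bytes of b and returns that
// prefix. Big-endian order is an assumption; the listing does not show it.
func intToByteArray(v uint32, b []byte) []byte {
	binary.BigEndian.PutUint32(b[:uint32Size], v)
	return b[:uint32Size]
}

// byteArrayToInt decodes a length prefix written by intToByteArray.
func byteArrayToInt(b []byte) int {
	return int(binary.BigEndian.Uint32(b[:uint32Size]))
}

With these helpers, the record layout on disk and on the wire is simply a 4-byte length followed by the protobuf payload, repeated.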
Example #2
func handler(w http.ResponseWriter, r *http.Request) {
	log.Println("new request from", r.RemoteAddr)
	defer r.Body.Close()

	var q query
	var err error
	dec := json.NewDecoder(r.Body)
	if err = dec.Decode(&q); err != nil {
		log.Println("failed to decode request", err)
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	t := time.Unix(int64(q.From), 0).UTC()
	idx, err := mcache.getIndex(t)
	if err != nil {
		log.Println("failed to get index", err)
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	idx.borrow()
	defer idx.release()

	// Use the AND terms when present; otherwise fall back to the OR terms.
	var v indexPartitions
	if len(q.And) > 0 {
		v = parseQuery(idx, mergeAnd, q.And)
	} else {
		v = parseQuery(idx, mergeOr, q.Or)
	}

	// Optional "sub" query parameters select which substreams to return.
	substreams := r.URL.Query()["sub"]

	log.Println("going to send N offsets", v.size(), substreams)

	// Headers must be set on the response writer, not the request.
	h := w.Header()
	h.Set("Content-Type", "text/event-stream")
	h.Set("Cache-Control", "no-cache")

	var buf []byte
	buf4 := make([]byte, 4)       // scratch for the 4-byte length prefix
	buf64K := make([]byte, 65536) // scratch for re-encoded filtered messages

	// send writes one length-prefixed record: 4-byte length, then payload,
	// matching the framing the indexer writes to disk.
	send := func(b []byte) error {
		if _, err = w.Write(intToByteArray(uint32(len(b)), buf4)); err != nil {
			return err
		}

		if _, err = w.Write(b); err != nil {
			return err
		}

		return nil
	}

	data := pb.Data{}
	for p, offsets := range v {
		db, err := mcache.getDatabase(int(p), t)
		if err != nil {
			log.Println(err)
			continue
		}

		db.borrow()
		// Deferred releases accumulate: every borrowed database stays held
		// until the entire response has been streamed.
		defer db.release()

		for _, off := range offsets {
			// Each offset points at a length-prefixed record in db.data;
			// validate the prefix and payload bounds before slicing.
			offset := int(off)
			if offset+uint32Size > len(db.data) {
				log.Println("invalid offset", offset, len(db.data))
				break
			}

			length := byteArrayToInt(db.data[offset : offset+uint32Size])
			if offset+length+uint32Size > len(db.data) {
				log.Println("invalid length", offset+length+uint32Size, len(db.data))
				break
			}

			buf = db.data[offset+uint32Size : offset+uint32Size+length]

			if len(substreams) > 0 {
				data.Reset()
				if err = data.Unmarshal(buf); err != nil {
					log.Println("Failed to decode", err)
					break
				}

				var payload []*pb.Payload
				for _, sub := range substreams {
					for _, frame := range data.GetFrames() {
						if frame.GetId() == sub {
							payload = append(payload, frame)
						}
					}
				}

				// Filtering only drops frames, so the re-encoded message is
				// never larger than the original and fits in the 64K buffer.
				data.Frames = payload
				if length, err = data.MarshalTo(buf64K); err != nil {
					log.Println("Failed to encode", err)
					break
				}

				if err = send(buf64K[:length]); err != nil {
					log.Println(err)
					break
				}
			} else {
				if err = send(buf); err != nil {
					log.Println(err)
					break
				}
			}
		}
	}
}
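
For completeness, a minimal client sketch for the handler above. The mount path, address, and the query struct's JSON keys ("from", "and") are assumptions, since the listing shows neither the routing nor the struct tags; the framing matches the 4-byte length prefix used by send:

package main

import (
	"bytes"
	"encoding/binary"
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical endpoint and body; adjust to the real routing and tags.
	body := bytes.NewBufferString(`{"from": 1500000000, "and": ["some-tag"]}`)
	resp, err := http.Post("http://localhost:8080/query?sub=some-substream",
		"application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	prefix := make([]byte, 4)
	for {
		// Each record: 4-byte big-endian length, then the payload.
		if _, err := io.ReadFull(resp.Body, prefix); err != nil {
			break // io.EOF once all records have been streamed
		}
		record := make([]byte, binary.BigEndian.Uint32(prefix))
		if _, err := io.ReadFull(resp.Body, record); err != nil {
			log.Fatal(err)
		}
		log.Printf("received %d-byte record", len(record))
	}
}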