Example #1
func writeFromPointers(queryid string, f io.Writer, pointers []resultPointer) error {
	var capnbuf bytes.Buffer
	firstPathRank := state[queryid].FirstPathRank

	state[queryid].tempFilesMu.Lock()
	defer state[queryid].tempFilesMu.Unlock()

	if _, err := f.Write([]byte("[")); err != nil {
		return err
	}
	for idx, pointer := range pointers {
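		// Each result pointer records which backend produced the result and at
		// which byte offset its raw Cap'n Proto message starts in that
		// backend's temp file.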
		src := state[queryid].perBackend[pointer.backendidx].tempFile
		if _, err := src.Seek(pointer.offset, io.SeekStart); err != nil {
			return err
		}
		if idx > 0 {
			if _, err := f.Write([]byte(",")); err != nil {
				return err
			}
		}
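		// Decode the packed Cap'n Proto message that queryBackend() teed into
		// the temp file.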
		seg, err := capn.ReadFromPackedStream(src, &capnbuf)
		if err != nil {
			return err
		}
		z := proto.ReadRootZ(seg)
		if z.Which() != proto.Z_MATCH {
			return fmt.Errorf("Expected to find a proto.Z_MATCH, instead got %d", z.Which())
		}
		result := z.Match()
		// We need to fix the ranking here because we persist raw results from
		// the dcs-source-backend in queryBackend(), but then modify the
		// ranking in storeResult().
		result.SetRanking(result.Pathrank() + ((firstPathRank * 0.1) * result.Ranking()))
		if err := result.WriteJSON(f); err != nil {
			return err
		}
	}
	if _, err := f.Write([]byte("]\n")); err != nil {
		return err
	}
	return nil
}
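
For context, here is a minimal sketch (not part of the original source) of how writeFromPointers might be driven from an HTTP handler: the result pointers collected for a query are streamed as one JSON array directly to the client. The handler itself and the field state[queryid].resultPointers are assumptions made for illustration.

func serveQueryResults(w http.ResponseWriter, r *http.Request, queryid string) {
	w.Header().Set("Content-Type", "application/json")
	// Assumption: state[queryid].resultPointers holds the pointers for this
	// query, already sorted by ranking.
	if err := writeFromPointers(queryid, w, state[queryid].resultPointers); err != nil {
		// Parts of the response may already have been written, so all we can
		// do here is log the error.
		log.Printf("[%s] could not stream results: %v\n", queryid, err)
	}
}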
Example #2
func queryBackend(queryid string, backend string, backendidx int, sourceQuery []byte) {
	// When exiting this function, check that all results were processed. If
	// not, the backend query must have failed for some reason. Send a progress
	// update to prevent the query from running forever.
	defer func() {
		filesTotal := state[queryid].filesTotal[backendidx]

		if state[queryid].filesProcessed[backendidx] == filesTotal {
			return
		}

		// filesTotal == -1 means this backend never reported how many files it
		// was going to send.
		if filesTotal == -1 {
			filesTotal = 0
		}

		// Report filesprocessed == filestotal so that this backend counts as
		// finished and the query can terminate.
		seg := capn.NewBuffer(nil)
		p := proto.NewProgressUpdate(seg)
		p.SetFilesprocessed(uint64(filesTotal))
		p.SetFilestotal(uint64(filesTotal))
		storeProgress(queryid, backendidx, p)

		addEventMarshal(queryid, &Error{
			Type:      "error",
			ErrorType: "backendunavailable",
		})
	}()

	// TODO: do this port switch via the configuration instead of hard-coding it here
	log.Printf("[%s] [src:%s] connecting...\n", queryid, backend)
	conn, err := net.DialTimeout("tcp", strings.Replace(backend, "28082", "26082", -1), 5*time.Second)
	if err != nil {
		log.Printf("[%s] [src:%s] Connection failed: %v\n", queryid, backend, err)
		return
	}
	defer conn.Close()
	if _, err := conn.Write(sourceQuery); err != nil {
		log.Printf("[%s] [src:%s] could not send query: %v\n", queryid, backend, err)
		return
	}

	bufferedReader := bufio.NewReaderSize(conn, 65536)
	bstate := state[queryid].perBackend[backendidx]
	tempFileWriter := bstate.tempFileWriter
	var capnbuf bytes.Buffer
	var written countingWriter

	for !state[queryid].done {
		conn.SetReadDeadline(time.Now().Add(10 * time.Second))

		written = 0
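		// Everything read from the backend is teed into the per-backend temp
		// file and counted, so that writeFromPointers() can later seek back to
		// an individual result by its byte offset.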
		tee := io.TeeReader(bufferedReader, io.MultiWriter(
			tempFileWriter, &written))

		seg, err := capn.ReadFromPackedStream(tee, &capnbuf)
		if err != nil {
			if err == io.EOF {
				log.Printf("[%s] [src:%s] EOF\n", queryid, backend)
			} else {
				log.Printf("[%s] [src:%s] Error decoding result stream: %v\n", queryid, backend, err)
			}
			return
		}

		z := proto.ReadRootZ(seg)
		if z.Which() == proto.Z_PROGRESSUPDATE {
			storeProgress(queryid, backendidx, z.Progressupdate())
		} else {
			storeResult(queryid, backendidx, z.Match())
		}

		bstate.tempFileOffset += int64(written)
	}
	log.Printf("[%s] [src:%s] query done, disconnecting\n", queryid, backend)
}
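
Similarly, here is a minimal sketch (again an assumption rather than the original code) of how queryBackend might be fanned out: one goroutine per configured source backend, each writing its results and progress into the shared per-query state. The sourceBackends slice stands in for however the backend addresses are actually configured.

func queryAllBackends(queryid string, sourceBackends []string, sourceQuery []byte) {
	for idx, backend := range sourceBackends {
		// Each backend is queried concurrently; queryBackend takes care of
		// progress updates and error events for its own backendidx.
		go queryBackend(queryid, backend, idx, sourceQuery)
	}
}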