// GetArtifactContent writes the content of the given artifact to the HTTP
// response, serving it from S3 or from log chunks in the database depending
// on the artifact's current state.
func GetArtifactContent(ctx context.Context, r render.Render, req *http.Request, res http.ResponseWriter, db database.Database, s3bucket *s3.Bucket, artifact *model.Artifact) {
	if artifact == nil {
		LogAndRespondWithErrorf(ctx, r, http.StatusBadRequest, "No artifact specified")
		return
	}

	switch artifact.State {
	case model.UPLOADED:
		// Fetch from S3
		url := s3bucket.SignedURL(artifact.S3URL, time.Now().Add(30*time.Minute))
		rq, err := http.NewRequest("GET", url, nil)
		if err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}
		if byteRanges := req.Header.Get("Range"); byteRanges != "" {
			// If the request contains a Range header, pass it through to S3 unchanged.
			// TODO(anupc): Validation? We're sending user input through to the data store.
			rq.Header.Add("Range", byteRanges)
		}
		resp, err := http.DefaultClient.Do(rq)
		if err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
			LogAndRespondWithErrorf(ctx, r, http.StatusInternalServerError, "Bad status code %d received from S3", resp.StatusCode)
			return
		}
		contentdisposition.SetFilename(res, filepath.Base(artifact.RelativePath))
		res.Header().Add("Content-Length", strconv.Itoa(int(artifact.Size)))
		if n, err := io.CopyN(res, resp.Body, artifact.Size); err != nil {
			sentry.ReportError(ctx, fmt.Errorf("error transferring artifact %s/%s (%d/%d bytes read): %s", artifact.BucketId, artifact.Name, n, artifact.Size, err))
			return
		}
		return
	case model.UPLOADING:
		// Not done uploading to S3 yet. Error.
		LogAndRespondWithErrorf(ctx, r, http.StatusNotFound, "Waiting for content to complete uploading")
		return
	case model.APPENDING:
		fallthrough
	case model.APPEND_COMPLETE:
		// Pick from log chunks
		contentdisposition.SetFilename(res, filepath.Base(artifact.RelativePath))
		// All written bytes are immutable, so unless the size changes, previously
		// read content can be cached. The size doubles as the ETag; RFC 7232
		// requires the value to be quoted for conditional requests to match.
		res.Header().Add("ETag", fmt.Sprintf(`"%d"`, artifact.Size))
		http.ServeContent(res, req, filepath.Base(artifact.RelativePath), time.Time{}, newLogChunkReaderWithReadahead(artifact, db))
		return
	case model.WAITING_FOR_UPLOAD:
		// Not started yet. Error
		LogAndRespondWithErrorf(ctx, r, http.StatusNotFound, "Waiting for content to get uploaded")
		return
	}
}
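
// validateRangeHeader is a minimal sketch of the validation hinted at by the
// TODO in GetArtifactContent above; it is an assumption about what such a
// check could look like, not part of the original handler. It accepts only a
// single ascending "bytes=start-end" range that lies within an artifact of
// the given size, rejecting multi-range and open-ended requests.
func validateRangeHeader(header string, size int64) bool {
	var start, end int64
	var rest string
	// Sscanf matches the literal prefix and both integers; anything trailing
	// the second integer is captured by %s, making n == 3, so it is rejected.
	if n, _ := fmt.Sscanf(header, "bytes=%d-%d%s", &start, &end, &rest); n != 2 {
		return false
	}
	return start >= 0 && start <= end && end < size
}
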
// GetArtifactContentChunks returns artifact content in chunked form, which is useful
// for polling for updates to chunked artifacts. All artifact types are supported, and
// chunks can be requested from arbitrary byte offsets within an artifact.
//
// This is primarily meant for the Changes UI to follow logs. If you need to fetch raw
// byte ranges from the store, they are available directly at /content.
//
// The URL query parameters offset and limit control the byte range to fetch:
//   offset -> byte offset of the start of the range (defaults to the beginning of the artifact)
//   limit  -> maximum number of bytes to fetch (defaults to 100KB)
//
// A negative value for either query parameter is treated as 0 (the default).
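//
// For example (illustrative values only; the route itself is registered
// elsewhere), ?offset=1024&limit=100 against a fully uploaded artifact might
// return:
//
//	{"chunks": [{"id": 0, "offset": 1024, "size": 100, "text": "..."}],
//	 "eof": false, "nextOffset": 1124}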
func GetArtifactContentChunks(ctx context.Context, r render.Render, req *http.Request, res http.ResponseWriter, db database.Database, s3bucket *s3.Bucket, artifact *model.Artifact) {
	if artifact == nil {
		LogAndRespondWithErrorf(ctx, r, http.StatusBadRequest, "No artifact specified")
		return
	}

	type Chunk struct {
		ID     int64  `json:"id"`
		Offset int64  `json:"offset"`
		Size   int64  `json:"size"`
		Text   string `json:"text"`
	}

	type Result struct {
		Chunks     []Chunk `json:"chunks"`
		EOF        bool    `json:"eof"`
		NextOffset int64   `json:"nextOffset"`
	}

	byteRangeBegin, byteRangeEnd, err := getByteRangeFromRequest(req, artifact)
	if err != nil {
		// If the given range is not valid, steer the client towards a valid range.
		r.JSON(http.StatusOK, &Result{
			Chunks:     []Chunk{},
			EOF:        err == errReadBeyondEOF && artifact.State == model.UPLOADED,
			NextOffset: byteRangeEnd,
		})
		return
	}

	switch artifact.State {
	case model.UPLOADING:
		// No data to report right now. Wait till upload to S3 completes.
		fallthrough
	case model.WAITING_FOR_UPLOAD:
		// Upload hasn't started. No data to report. Try again later.
		r.JSON(http.StatusOK, &Result{Chunks: []Chunk{}, NextOffset: byteRangeBegin})
		return
	case model.UPLOADED:
		// Fetch from S3
		url := s3bucket.SignedURL(artifact.S3URL, time.Now().Add(30*time.Minute))
		rq, err := http.NewRequest("GET", url, nil)
		if err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}
		rq.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", byteRangeBegin, byteRangeEnd))
		resp, err := http.DefaultClient.Do(rq)
		if err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
			LogAndRespondWithErrorf(ctx, r, http.StatusInternalServerError, "Bad status code %d received from S3", resp.StatusCode)
			return
		}
		var buf bytes.Buffer
		n, err := buf.ReadFrom(resp.Body)
		if err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}

		nextOffset := byteRangeBegin + int64(n)
		r.JSON(http.StatusOK, &Result{
			Chunks:     []Chunk{{Offset: byteRangeBegin, Size: int64(n), Text: buf.String()}},
			EOF:        nextOffset == artifact.Size,
			NextOffset: nextOffset,
		})
		return
	case model.APPENDING:
		fallthrough
	case model.APPEND_COMPLETE:
		// Pick from log chunks
		rd := newLogChunkReader(artifact, db)
		if _, err := rd.Seek(byteRangeBegin, os.SEEK_SET); err != nil {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}

		bts := make([]byte, byteRangeEnd-byteRangeBegin+1)
		n, err := runeLimitedRead(rd, bts)
		if err != nil && err != io.EOF {
			LogAndRespondWithError(ctx, r, http.StatusInternalServerError, err)
			return
		}

		if n > 0 {
			r.JSON(http.StatusOK, &Result{
				Chunks:     []Chunk{{Offset: byteRangeBegin, Size: int64(n), Text: string(bts[:n])}},
				NextOffset: byteRangeBegin + int64(n),
			})
		} else {
			r.JSON(http.StatusOK, &Result{Chunks: []Chunk{}, NextOffset: byteRangeBegin})
		}
		return
	}
}
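
// A hypothetical client-side log follower built on the response shape above
// might look like the sketch below. fetchChunks stands in for an HTTP GET
// against this endpoint, and artifactURL and pollInterval are client-chosen;
// none of these are part of this package:
//
//	offset := int64(0)
//	for {
//		result := fetchChunks(artifactURL, offset, 100*1024)
//		for _, c := range result.Chunks {
//			os.Stdout.WriteString(c.Text)
//		}
//		offset = result.NextOffset
//		if result.EOF {
//			break
//		}
//		time.Sleep(pollInterval)
//	}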