// GetArtifactContent writes the content of an artifact to the HTTP response, reading it either
// from S3 (if the artifact has finished uploading) or from the logchunks stored in the database
// (if it is still being appended to or awaiting merge).
func GetArtifactContent(r render.Render, req *http.Request, res http.ResponseWriter, db database.Database, params martini.Params, s3bucket *s3.Bucket, artifact *model.Artifact) {
	if artifact == nil {
		JsonErrorf(r, http.StatusBadRequest, "Error: no artifact specified")
		return
	}

	switch artifact.State {
	case model.UPLOADED:
		// Fetch from S3
		reader, err := s3bucket.GetReader(artifact.S3URL)
		if err != nil {
			JsonErrorf(r, http.StatusInternalServerError, err.Error())
			return
		}
		defer reader.Close()

		// Ideally, we'd use a Hijacker to take over the connection so that we can employ an
		// io.Writer instead of loading the entire file into memory before writing it back out.
		// For now, we run the risk of OOM if large files need to be served.
		var buf bytes.Buffer
		if _, err = buf.ReadFrom(reader); err != nil {
			JsonErrorf(r, http.StatusInternalServerError, "Error reading upload buffer: %s", err.Error())
			return
		}
		res.Write(buf.Bytes())
		return
	case model.UPLOADING:
		// Not done uploading to S3 yet. Error.
		r.JSON(http.StatusNotFound, map[string]string{"error": "Waiting for content to complete uploading"})
		return
	case model.APPENDING:
		fallthrough
	case model.APPEND_COMPLETE:
		// Pick from log chunks
		logChunks, err := db.ListLogChunksInArtifact(artifact.Id)
		if err != nil {
			JsonErrorf(r, http.StatusInternalServerError, err.Error())
			return
		}

		var buf bytes.Buffer
		for _, logChunk := range logChunks {
			buf.WriteString(logChunk.Content)
		}
		res.Write(buf.Bytes())
		return
	case model.WAITING_FOR_UPLOAD:
		// Not started yet. Error.
		JsonErrorf(r, http.StatusNotFound, "Waiting for content to get uploaded")
		return
	}
}
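// A minimal sketch (not part of the original handlers) of how the UPLOADED case above could
// stream the S3 object directly to the client with io.Copy instead of buffering the whole file
// in memory, as the Hijacker comment hints. The helper name streamS3Artifact is hypothetical;
// it assumes the same s3bucket.GetReader call used above.
func streamS3Artifact(res http.ResponseWriter, s3bucket *s3.Bucket, artifact *model.Artifact) error {
	reader, err := s3bucket.GetReader(artifact.S3URL)
	if err != nil {
		return err
	}
	defer reader.Close()

	// io.Copy moves data through a fixed-size buffer, so memory use stays constant
	// regardless of how large the artifact is.
	_, err = io.Copy(res, reader)
	return err
}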
// MergeLogChunks merges all of the individual chunks into a single object and stores it on S3.
// The log chunks are stored in the database, while the merged object is uploaded to S3.
func MergeLogChunks(artifact *model.Artifact, db database.Database, s3bucket *s3.Bucket) error {
	switch artifact.State {
	case model.APPEND_COMPLETE:
		// TODO: Reimplement using GorpDatabase
		// If the file is empty, don't bother creating an object on S3.
		if artifact.Size == 0 {
			artifact.State = model.CLOSED_WITHOUT_DATA
			artifact.S3URL = ""

			// Conversion between *DatabaseError and error is tricky. If we don't do this, a nil
			// *DatabaseError can become a non-nil error.
			return db.UpdateArtifact(artifact).GetError()
		}

		// XXX Do we need to commit here or is this handled transparently?
		artifact.State = model.UPLOADING
		if err := db.UpdateArtifact(artifact); err != nil {
			return err
		}

		logChunks, err := db.ListLogChunksInArtifact(artifact.Id)
		if err != nil {
			return err
		}

		r, w := io.Pipe()
		errChan := make(chan error)
		uploadCompleteChan := make(chan bool)
		fileName := artifact.DefaultS3URL()

		// Asynchronously upload the object to S3 while reading from the r, w pipe.
		// Thus anything written to "w" will be sent to S3.
		go func() {
			defer close(errChan)
			defer close(uploadCompleteChan)
			defer r.Close()
			if err := s3bucket.PutReader(fileName, r, artifact.Size, "binary/octet-stream", s3.PublicRead); err != nil {
				errChan <- fmt.Errorf("Error uploading to S3: %s", err)
				return
			}

			uploadCompleteChan <- true
		}()

		for _, logChunk := range logChunks {
			w.Write([]byte(logChunk.Content))
		}

		w.Close()

		// Wait either for the S3 upload to complete or for it to fail with an error.
		// XXX This is a long operation and should probably be asynchronous from the
		// actual HTTP request, and the client should poll to check when it's uploaded.
		select {
		case <-uploadCompleteChan:
			artifact.State = model.UPLOADED
			artifact.S3URL = fileName
			if err := db.UpdateArtifact(artifact); err != nil {
				return err
			}

			// From this point onwards, we will not send any errors back to the user. If we are
			// unable to delete logchunks, we log it to Sentry instead.
			if n, err := db.DeleteLogChunksForArtifact(artifact.Id); err != nil {
				// TODO: Send this error to Sentry
				log.Printf("Error deleting logchunks for artifact %d: %v\n", artifact.Id, err)
				return nil
			} else if n != int64(len(logChunks)) {
				// TODO: Send this error to Sentry
				log.Printf("Mismatch in number of logchunks while deleting logchunks for artifact %d: "+
					"Expected: %d Actual: %d\n", artifact.Id, len(logChunks), n)
			}

			return nil
		case err := <-errChan:
			return err
		}
	case model.WAITING_FOR_UPLOAD:
		fallthrough
	case model.ERROR:
		fallthrough
	case model.APPENDING:
		fallthrough
	case model.UPLOADED:
		fallthrough
	case model.UPLOADING:
		return fmt.Errorf("Artifact can only be merged when in APPEND_COMPLETE state, but state is %s", artifact.State)
	default:
		return fmt.Errorf("Illegal artifact state! State code is %d", artifact.State)
	}
}
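// A minimal sketch (not in the original code) of how the merge could be kicked off asynchronously,
// as the XXX comment in MergeLogChunks suggests: the HTTP handler returns immediately and the
// client polls the artifact state until it reaches UPLOADED. The handler name
// PostCloseArtifactAsync and the polling contract are hypothetical assumptions, not part of the
// existing API.
func PostCloseArtifactAsync(r render.Render, db database.Database, s3bucket *s3.Bucket, artifact *model.Artifact) {
	go func() {
		if err := MergeLogChunks(artifact, db, s3bucket); err != nil {
			// TODO: Send this error to Sentry
			log.Printf("Error merging log chunks for artifact %d: %v\n", artifact.Id, err)
		}
	}()

	// 202 Accepted: the merge has started in the background; clients would poll the artifact
	// (e.g. via GetArtifactContent's state handling) until it becomes UPLOADED.
	r.JSON(http.StatusAccepted, map[string]string{"status": "merge started"})
}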