func AppendLogChunk(db database.Database, artifact *model.Artifact, logChunk *model.LogChunk) *HttpError { if artifact.State != model.APPENDING { return NewHttpError(http.StatusBadRequest, fmt.Sprintf("Unexpected artifact state: %s", artifact.State)) } if logChunk.Size <= 0 { return NewHttpError(http.StatusBadRequest, "Invalid chunk size %d", logChunk.Size) } if logChunk.Content == "" { return NewHttpError(http.StatusBadRequest, "Empty content string") } if int64(len(logChunk.Content)) != logChunk.Size { return NewHttpError(http.StatusBadRequest, "Content length does not match indicated size") } // Find previous chunk in DB - append only if nextByteOffset, err := db.GetLastByteSeenForArtifact(artifact.Id); err != nil { return NewHttpError(http.StatusInternalServerError, "Error while checking for previous byte range: %s", err) } else if nextByteOffset != logChunk.ByteOffset { return NewHttpError(http.StatusBadRequest, "Overlapping ranges detected, expected offset: %d, actual offset: %d", nextByteOffset, logChunk.ByteOffset) } logChunk.ArtifactId = artifact.Id // Expand artifact size - redundant after above change. if artifact.Size < logChunk.ByteOffset+logChunk.Size { artifact.Size = logChunk.ByteOffset + logChunk.Size if err := db.UpdateArtifact(artifact); err != nil { return NewHttpError(http.StatusInternalServerError, err.Error()) } } if err := db.InsertLogChunk(logChunk); err != nil { return NewHttpError(http.StatusBadRequest, "Error updating log chunk: %s", err) } return nil }
// CreateArtifact creates a new artifact in a open bucket. // // If an artifact with the same name already exists in the same bucket, we attempt to rename the // artifact by adding a suffix. // If the request specifies a chunked artifact, the size field is ignored and always set to zero. // If the request is for a streamed artifact, size is mandatory. // A relative path field may be specified to preserve the original file name and path. If no path is // specified, the original artifact name is used by default. func CreateArtifact(req createArtifactReq, bucket *model.Bucket, db database.Database) (*model.Artifact, *HttpError) { if len(req.Name) == 0 { return nil, NewHttpError(http.StatusBadRequest, "Artifact name not provided") } if bucket.State != model.OPEN { return nil, NewHttpError(http.StatusBadRequest, "Bucket is already closed") } artifact := new(model.Artifact) artifact.Name = req.Name artifact.BucketId = bucket.Id artifact.DateCreated = time.Now() if req.DeadlineMins == 0 { artifact.DeadlineMins = DEFAULT_DEADLINE } else { artifact.DeadlineMins = req.DeadlineMins } if req.Chunked { artifact.State = model.APPENDING } else { if req.Size == 0 { return nil, NewHttpError(http.StatusBadRequest, "Cannot create a new upload artifact without size.") } else if req.Size > MaxArtifactSizeBytes { return nil, NewHttpError(http.StatusRequestEntityTooLarge, fmt.Sprintf("Entity '%s' (size %d) is too large (limit %d)", req.Name, req.Size, MaxArtifactSizeBytes)) } artifact.Size = req.Size artifact.State = model.WAITING_FOR_UPLOAD } if req.RelativePath == "" { // Use artifact name provided as default relativePath artifact.RelativePath = req.Name } else { artifact.RelativePath = req.RelativePath } // Attempt to insert artifact and retry with a different name if it fails. 
if err := db.InsertArtifact(artifact); err != nil { for attempt := 1; attempt <= MaxDuplicateFileNameResolutionAttempts; attempt++ { // Unable to create new artifact - if an artifact already exists, the above insert failed // because of a collision. if _, err := db.GetArtifactByName(bucket.Id, artifact.Name); err != nil { // This could be a transient DB error (down/unreachable), in which case we expect the client // to retry. There is no value in attempting alternate artifact names. // // We have no means of verifying there was a name collision - bail with an internal error. return nil, NewHttpError(http.StatusInternalServerError, err.Error()) } // File name collision - attempt to resolve artifact.Name = fmt.Sprintf(DuplicateArtifactNameFormat, req.Name, randString(5)) if err := db.InsertArtifact(artifact); err == nil { return artifact, nil } } return nil, NewHttpError(http.StatusInternalServerError, "Exceeded retry limit avoiding duplicates") } return artifact, nil }
// AppendLogChunk appends a logchunk to an artifact.
// If the logchunk position does not match the current end of artifact, an error is returned.
// An exception to this is made when the last seen logchunk is repeated, which is silently ignored
// without an error.
func AppendLogChunk(ctx context.Context, db database.Database, artifact *model.Artifact, logChunkReq *createLogChunkReq) *HttpError {
	// Chunks may only be appended while the artifact is still accepting writes.
	if artifact.State != model.APPENDING {
		return NewHttpError(http.StatusBadRequest, fmt.Sprintf("Unexpected artifact state: %s", artifact.State))
	}

	if logChunkReq.Size <= 0 {
		return NewHttpError(http.StatusBadRequest, "Invalid chunk size %d", logChunkReq.Size)
	}

	// The request may carry the payload either as raw Bytes or as a Content
	// string; Bytes takes precedence when non-empty. Either way the declared
	// Size must match the actual payload length exactly.
	var contentBytes []byte
	if len(logChunkReq.Bytes) != 0 {
		// If request sent Bytes, use Bytes.
		if int64(len(logChunkReq.Bytes)) != logChunkReq.Size {
			return NewHttpError(http.StatusBadRequest, "Content length %d does not match indicated size %d", len(logChunkReq.Bytes), logChunkReq.Size)
		}
		contentBytes = logChunkReq.Bytes
	} else {
		// Otherwise, allow Content, for now.
		if len(logChunkReq.Content) == 0 {
			return NewHttpError(http.StatusBadRequest, "Empty content string")
		}
		if int64(len(logChunkReq.Content)) != logChunkReq.Size {
			return NewHttpError(http.StatusBadRequest, "Content length %d does not match indicated size %d", len(logChunkReq.Content), logChunkReq.Size)
		}
		contentBytes = []byte(logChunkReq.Content)
	}

	// Find previous chunk in DB - append only.
	// artifact.Size is used as the next expected byte offset (rather than a
	// separate DB lookup), so the chunk must start exactly at artifact.Size.
	nextByteOffset := artifact.Size
	if nextByteOffset != logChunkReq.ByteOffset {
		// There is a possibility the previous logchunk is being retried - we need to handle cases where
		// a server/proxy time out caused the client not to get an ACK when it successfully uploaded the
		// previous logchunk, due to which it is retrying.
		//
		// This is a best-effort check - if we encounter DB errors or any mismatch in the chunk
		// contents, we ignore this test and claim that a range mismatch occurred.
		//
		// A retry of the last chunk is recognized when the incoming chunk ends
		// exactly at the current artifact size AND its offset, size, and
		// contents all match the last chunk recorded in the DB.
		if nextByteOffset != 0 && nextByteOffset == logChunkReq.ByteOffset+logChunkReq.Size {
			if prevLogChunk, err := db.GetLastLogChunkSeenForArtifact(artifact.Id); err == nil {
				if prevLogChunk != nil && prevLogChunk.ByteOffset == logChunkReq.ByteOffset && prevLogChunk.Size == logChunkReq.Size && bytes.Equal(prevLogChunk.ContentBytes, contentBytes) {
					// Duplicate of the last chunk: report for visibility and
					// ACK it (return nil) without inserting anything.
					sentry.ReportMessage(ctx, fmt.Sprintf("Received duplicate chunk for artifact %v of size %d at byte %d", artifact.Id, logChunkReq.Size, logChunkReq.ByteOffset))
					return nil
				}
			}
		}

		return NewHttpError(http.StatusBadRequest, "Overlapping ranges detected, expected offset: %d, actual offset: %d", nextByteOffset, logChunkReq.ByteOffset)
	}

	// Expand artifact size - redundant after above change.
	// NOTE(review): at this point nextByteOffset == logChunkReq.ByteOffset ==
	// artifact.Size and Size > 0, so this condition is always true; the guard
	// is a leftover from when the offset came from a DB lookup instead.
	if artifact.Size < logChunkReq.ByteOffset+logChunkReq.Size {
		artifact.Size = logChunkReq.ByteOffset + logChunkReq.Size
		if err := db.UpdateArtifact(artifact); err != nil {
			return NewHttpError(http.StatusInternalServerError, err.Error())
		}
	}

	// Persist the chunk itself only after the artifact size has been extended,
	// so readers never see a chunk beyond the advertised artifact size.
	logChunk := &model.LogChunk{
		ArtifactId:   artifact.Id,
		ByteOffset:   logChunkReq.ByteOffset,
		ContentBytes: contentBytes,
		Size:         logChunkReq.Size,
	}

	if err := db.InsertLogChunk(logChunk); err != nil {
		return NewHttpError(http.StatusBadRequest, "Error updating log chunk: %s", err)
	}
	return nil
}