func (b *s3Backend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if append {
		// This is a hack; the next easiest thing to do, if we need to handle
		// upload resumption, is to finalize the multipart upload when the
		// client disconnects and, when the rest of the data arrives, start a
		// new multipart upload copying the existing object as the first part
		// (which is supported by S3 as a specific API call). This requires
		// replacing the simple uploader, so it was not done in the first pass.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}
	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}
	u := s3manager.NewUploaderWithClient(b.client)
	_, err := u.Upload(&s3manager.UploadInput{
		Bucket:      &b.bucket,
		Key:         &info.ExternalID,
		ContentType: &info.Type,
		Body:        r,
	})
	return err
}
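// appendViaMultipartCopy is a hypothetical sketch (not part of the backend) of
// the copy-based resumption described in the comment above, assuming the aws
// and s3 packages from aws-sdk-go plus bytes and fmt from the standard
// library: start a new multipart upload, copy the already-uploaded object in
// as part 1 via the UploadPartCopy API, upload the remaining data as part 2,
// then complete the upload. Note that S3 requires every part except the last
// to be at least 5MB, so the existing object must meet that minimum.
func appendViaMultipartCopy(client *s3.S3, bucket, key, existingKey string, rest []byte) error {
	mp, err := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: &bucket,
		Key:    &key,
	})
	if err != nil {
		return err
	}
	// Part 1: server-side copy of the data that was already uploaded
	copyRes, err := client.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:     &bucket,
		Key:        &key,
		UploadId:   mp.UploadId,
		PartNumber: aws.Int64(1),
		CopySource: aws.String(fmt.Sprintf("%s/%s", bucket, existingKey)),
	})
	if err != nil {
		return err
	}
	// Part 2: the newly arrived data
	partRes, err := client.UploadPart(&s3.UploadPartInput{
		Bucket:     &bucket,
		Key:        &key,
		UploadId:   mp.UploadId,
		PartNumber: aws.Int64(2),
		Body:       bytes.NewReader(rest),
	})
	if err != nil {
		return err
	}
	// Completing the upload stitches both parts into a single object
	_, err = client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   &bucket,
		Key:      &key,
		UploadId: mp.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: copyRes.CopyPartResult.ETag, PartNumber: aws.Int64(1)},
				{ETag: partRes.ETag, PartNumber: aws.Int64(2)},
			},
		},
	})
	return err
}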
func (b *gcsBackend) Open(tx *postgres.DBTx, info FileInfo, txControl bool) (FileStream, error) {
	if txControl {
		// We don't need the database transaction, so clean it up
		tx.Rollback()
	}
	url, err := storage.SignedURL(b.bucketName, info.ExternalID, b.signOpts())
	return newRedirectFileStream(url), err
}
func (b *gcsBackend) Copy(tx *postgres.DBTx, dst, src FileInfo) error {
	dst.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", dst.ID, dst.ExternalID); err != nil {
		return err
	}
	_, err := b.bucket.Object(src.ExternalID).CopyTo(context.Background(), b.bucket.Object(dst.ExternalID), nil)
	return err
}
func (b *azureBackend) Open(tx *postgres.DBTx, info FileInfo, txControl bool) (FileStream, error) {
	if txControl {
		// We don't need the database transaction, so clean it up
		tx.Rollback()
	}
	url, err := b.client.GetBlobSASURI(b.container, info.ExternalID, time.Now().Add(10*time.Minute), "r")
	return newRedirectFileStream(url), err
}
func (b *azureBackend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, appendBlob bool) error {
	if appendBlob {
		// TODO(titanous): This is a hack, we should modify the block list.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}
	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}

	// Create the blob that will be filled with blocks
	if err := b.client.CreateBlockBlob(b.container, info.ExternalID); err != nil {
		return err
	}

	// Upload the data as a sequence of uncommitted blocks
	var blocks []storage.Block
	buf := make([]byte, azureMaxBlockSize)
	for {
		n, err := io.ReadFull(r, buf)
		if err == io.EOF {
			break
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		data := buf[:n]
		md5sum := md5.Sum(data)
		blockID := base64.StdEncoding.EncodeToString(random.Bytes(16))
		if err := b.client.PutBlockWithLength(
			b.container,
			info.ExternalID,
			blockID,
			uint64(n),
			bytes.NewReader(data),
			map[string]string{"Content-MD5": base64.StdEncoding.EncodeToString(md5sum[:])},
		); err != nil {
			return err
		}
		blocks = append(blocks, storage.Block{ID: blockID, Status: storage.BlockStatusUncommitted})
		// a short read means the reader is exhausted
		if err == io.ErrUnexpectedEOF {
			break
		}
	}

	// Commit the list of blocks to the blob
	return b.client.PutBlockList(b.container, info.ExternalID, blocks)
}
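// appendBlocks is a hypothetical sketch of the block-list approach hinted at
// in the TODO above: instead of re-reading and re-uploading the existing blob,
// fetch its committed block list, upload only the new data as additional
// uncommitted blocks, and re-commit the combined list. It assumes GetBlockList
// from the classic Azure storage SDK; uploadBlocks is a hypothetical helper
// that would chunk r into azureMaxBlockSize blocks exactly as Put does,
// returning the uncommitted block entries.
func appendBlocks(client storage.BlobStorageClient, container, name string, r io.Reader) error {
	// the existing committed blocks stay in place; we only extend the list
	list, err := client.GetBlockList(container, name, storage.BlockListTypeCommitted)
	if err != nil {
		return err
	}
	blocks := make([]storage.Block, 0, len(list.CommittedBlocks))
	for _, b := range list.CommittedBlocks {
		blocks = append(blocks, storage.Block{ID: b.Name, Status: storage.BlockStatusCommitted})
	}
	newBlocks, err := uploadBlocks(client, container, name, r) // hypothetical helper
	if err != nil {
		return err
	}
	// committing the combined list atomically appends the new data
	return client.PutBlockList(container, name, append(blocks, newBlocks...))
}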
// migrateProcessData populates ProcessType.Volumes if ProcessType.Data is set
func migrateProcessData(tx *postgres.DBTx) error {
	type Release struct {
		ID string

		// use map[string]interface{} for process types so we can just
		// update Volumes and Data and leave other fields untouched
		Processes map[string]map[string]interface{}
	}
	var releases []Release
	rows, err := tx.Query("SELECT release_id, processes FROM releases")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var release Release
		if err := rows.Scan(&release.ID, &release.Processes); err != nil {
			return err
		}
		releases = append(releases, release)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	for _, release := range releases {
		for typ, proc := range release.Processes {
			v, ok := proc["data"]
			if !ok {
				continue
			}
			data, ok := v.(bool)
			if !ok || !data {
				continue
			}
			proc["volumes"] = []struct {
				Path string `json:"path"`
			}{
				{Path: "/data"},
			}
			delete(proc, "data")
			release.Processes[typ] = proc
		}

		// save the processes back to the db
		if err := tx.Exec("UPDATE releases SET processes = $1 WHERE release_id = $2", release.Processes, release.ID); err != nil {
			return err
		}
	}
	return nil
}
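// Illustration (hypothetical data, not part of the migration): a process type
// that previously declared the boolean `data` flag
//
//	{"web": {"data": true}}
//
// is rewritten by migrateProcessData to an explicit volume mounted at /data:
//
//	{"web": {"volumes": [{"path": "/data"}]}}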
func (b *s3Backend) Copy(tx *postgres.DBTx, dst, src FileInfo) error {
	dst.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", dst.ID, dst.ExternalID); err != nil {
		return err
	}
	_, err := b.client.CopyObject(&s3.CopyObjectInput{
		Bucket:      &b.bucket,
		CopySource:  aws.String(fmt.Sprintf("%s/%s", b.bucket, src.ExternalID)),
		Key:         &dst.ExternalID,
		ContentType: &dst.Type,
	})
	return err
}
func (b *s3Backend) Open(tx *postgres.DBTx, info FileInfo, txControl bool) (FileStream, error) {
	if txControl {
		// We don't need the database transaction, so clean it up
		tx.Rollback()
	}
	req, _ := b.client.GetObjectRequest(&s3.GetObjectInput{
		Bucket: &b.bucket,
		Key:    &info.ExternalID,
	})
	url, err := req.Presign(10 * time.Minute)
	if err != nil {
		return nil, err
	}
	return newRedirectFileStream(url), nil
}
func (p pg) Open(tx *postgres.DBTx, info FileInfo, txControl bool) (FileStream, error) {
	if info.Oid == nil {
		return nil, ErrNotFound
	}
	lo, err := tx.LargeObjects()
	if err != nil {
		return nil, err
	}
	obj, err := lo.Open(*info.Oid, pgx.LargeObjectModeRead)
	if err != nil {
		return nil, err
	}
	f := &pgFile{LargeObject: obj, size: info.Size}
	if txControl {
		// we control the transaction, so keep it open until the stream is
		// closed (large objects are only readable within their transaction)
		f.tx = tx
	}
	return f, nil
}
func (b *gcsBackend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if append {
		// TODO(titanous): This is a hack, we should use resumable uploads.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}
	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}
	w := b.bucket.Object(info.ExternalID).NewWriter(context.Background())
	w.ContentType = info.Type
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
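// putResumable is a minimal sketch of the resumable-upload direction mentioned
// in the TODO above, assuming the cloud.google.com/go/storage client (the
// function name is hypothetical). Writer sends the object as a resumable
// upload in ChunkSize-byte chunks and retries each chunk on transient failure,
// so the reader does not have to be replayed from the start; true resumption
// across client disconnects would additionally require persisting the
// resumable session rather than re-reading the existing object.
func putResumable(bucket *storage.BucketHandle, name, contentType string, r io.Reader) error {
	w := bucket.Object(name).NewWriter(context.Background())
	w.ContentType = contentType
	w.ChunkSize = 8 << 20 // upload in 8MB chunks; each chunk is retried on failure
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}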
func (p pg) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if !append {
		// allocate a new large object and associate it with the file
		if err := tx.QueryRow("UPDATE files SET file_oid = lo_create(0) WHERE file_id = $1 RETURNING file_oid", info.ID).Scan(&info.Oid); err != nil {
			return err
		}
	}
	lo, err := tx.LargeObjects()
	if err != nil {
		return err
	}
	obj, err := lo.Open(*info.Oid, pgx.LargeObjectModeWrite)
	if err != nil {
		return err
	}
	if append {
		// continue writing where the existing data ends
		if _, err := obj.Seek(info.Size, io.SeekStart); err != nil {
			return err
		}
	}
	if _, err := io.Copy(obj, r); err != nil {
		return err
	}
	return nil
}
// migrateProcessArgs sets ProcessType.Args from Entrypoint / Cmd for every
// release, and also prepends an explicit entrypoint for system and slug apps
// (they will no longer use the Dockerfile Entrypoint as they have some args
// like `scheduler` for the controller scheduler and `start web` for slugs).
func migrateProcessArgs(tx *postgres.DBTx) error {
	type Release struct {
		ID      string
		AppName *string
		AppMeta map[string]string
		Meta    map[string]string

		// use map[string]interface{} for process types so we can
		// just update Args and leave other fields untouched
		Processes map[string]map[string]interface{}
	}

	// get all the releases with the associated app name if set
	var releases []Release
	rows, err := tx.Query("SELECT r.release_id, r.meta, r.processes, a.name, a.meta FROM releases r LEFT JOIN apps a ON a.release_id = r.release_id")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var release Release
		if err := rows.Scan(&release.ID, &release.Meta, &release.Processes, &release.AppName, &release.AppMeta); err != nil {
			return err
		}
		releases = append(releases, release)
	}
	if err := rows.Err(); err != nil {
		return err
	}

	for _, release := range releases {
		for typ, proc := range release.Processes {
			// if the release is for a system app which has a Cmd,
			// explicitly set the Entrypoint
			var cmd []interface{}
			if v, ok := proc["cmd"]; ok {
				cmd = v.([]interface{})
			}
			if release.AppName != nil && release.AppMeta["flynn-system-app"] == "true" && len(cmd) > 0 {
				switch *release.AppName {
				case "postgres":
					proc["entrypoint"] = []interface{}{"/bin/start-flynn-postgres"}
				case "controller":
					proc["entrypoint"] = []interface{}{"/bin/start-flynn-controller"}
				case "redis":
					proc["entrypoint"] = []interface{}{"/bin/start-flynn-redis"}
				case "mariadb":
					proc["entrypoint"] = []interface{}{"/bin/start-flynn-mariadb"}
				case "mongodb":
					proc["entrypoint"] = []interface{}{"/bin/start-flynn-mongodb"}
				case "router":
					proc["entrypoint"] = []interface{}{"/bin/flynn-router"}
				case "logaggregator":
					proc["entrypoint"] = []interface{}{"/bin/logaggregator"}
				default:
					if strings.HasPrefix(*release.AppName, "redis-") {
						proc["entrypoint"] = []interface{}{"/bin/start-flynn-redis"}
					} else {
						panic(fmt.Sprintf("migration failed to set entrypoint for system app %s", *release.AppName))
					}
				}
			}

			// git releases use the slugrunner, which needs an explicit Entrypoint
			if release.Meta["git"] == "true" {
				proc["entrypoint"] = []interface{}{"/runner/init"}
			}

			// construct Args by appending Cmd to Entrypoint
			var args []interface{}
			if v, ok := proc["entrypoint"]; ok {
				args = v.([]interface{})
			}
			proc["args"] = append(args, cmd...)
			release.Processes[typ] = proc
		}

		// save the processes back to the db
		if err := tx.Exec("UPDATE releases SET processes = $1 WHERE release_id = $2", release.Processes, release.ID); err != nil {
			return err
		}
	}
	return nil
}
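// Illustration (hypothetical data, not part of the migration): a git-deployed
// release whose process type has
//
//	{"cmd": ["start", "web"]}
//
// gains the slugrunner entrypoint and ends up with
//
//	{"entrypoint": ["/runner/init"], "cmd": ["start", "web"], "args": ["/runner/init", "start", "web"]}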
func (p pg) Delete(tx *postgres.DBTx, info FileInfo) error {
	if err := tx.Exec("SELECT lo_unlink($1)", info.Oid); err != nil {
		return err
	}
	return tx.Exec("UPDATE files SET file_oid = NULL WHERE file_id = $1", info.ID)
}
func (f *FileRepo) SetBackend(tx *postgres.DBTx, id, name string) error {
	return tx.Exec("UPDATE files SET backend = $2 WHERE file_id = $1", id, name)
}