func InitConfig() { client, err := google.DefaultClient(context.Background(), scope) if err != nil { log.Fatalf("Unable to get default client: %v", err) } service, err = storage.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } oService = storage.NewObjectsService(service) if err != nil { log.Fatalf("Unable to create objects storage service: %v", err) } }
// WriteStream stores the contents of the provided io.Reader at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
//
// Resumption works by uploading the new data as a temporary "part" object
// and then using the GCS compose operation to append it to the existing
// object. Returns the number of bytes consumed from reader (including any
// bytes skipped to realign with the current object size) and any error.
func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	// Negative offsets are a caller error.
	if offset < 0 {
		return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
	}

	// A zero offset means a fresh write: replace the object wholesale
	// rather than going through the compose path.
	if offset == 0 {
		return d.writeCompletely(context, path, 0, reader)
	}

	service, err := storageapi.New(d.client)
	if err != nil {
		return 0, err
	}
	objService := storageapi.NewObjectsService(service)

	// Fetch the current object's metadata (size, generation, component
	// count), retrying transient failures up to 5 times.
	var obj *storageapi.Object
	err = retry(5, func() error {
		o, err := objService.Get(d.bucket, d.pathToKey(path)).Do()
		obj = o
		return err
	})
	// obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do)
	if err != nil {
		return 0, err
	}

	// cannot append more chunks, so redo from scratch
	// NOTE(review): GCS caps composite objects at 1024 components; the
	// 1023 threshold presumably leaves room for the one part added below —
	// confirm against the GCS compose documentation.
	if obj.ComponentCount >= 1023 {
		return d.writeCompletely(context, path, offset, reader)
	}

	// skip from reader
	// If the caller's offset lies before the current end of the object,
	// discard reader bytes up to the current size so the append resumes
	// exactly where the stored object ends. nn counts the skipped bytes.
	objSize := int64(obj.Size)
	nn, err := skip(reader, objSize-offset)
	if err != nil {
		return nn, err
	}

	// Size <= offset
	// Name the temporary part after the key plus the current component
	// count so concurrent resumptions of different generations don't collide.
	partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount)
	gcsContext := d.context(context)
	wc := storage.NewWriter(gcsContext, d.bucket, partName)
	wc.ContentType = "application/octet-stream"

	// If the object is shorter than the requested offset, pad the gap
	// with zeros before copying the caller's data.
	if objSize < offset {
		err = writeZeros(wc, offset-objSize)
		if err != nil {
			wc.CloseWithError(err)
			return nn, err
		}
	}
	n, err := io.Copy(wc, reader)
	if err != nil {
		// Abort the part upload so no partial temporary object is committed.
		wc.CloseWithError(err)
		return nn, err
	}
	err = wc.Close()
	if err != nil {
		return nn, err
	}
	// wc was closed successfully, so the temporary part exists, schedule it for deletion at the end
	// of the function
	defer storageDeleteObject(gcsContext, d.bucket, partName)

	// Compose the existing object with the freshly uploaded part back onto
	// the original name, pinning both source generations to guard against
	// concurrent writers.
	req := &storageapi.ComposeRequest{
		Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType},
		SourceObjects: []*storageapi.ComposeRequestSourceObjects{
			{
				Name:       obj.Name,
				Generation: obj.Generation,
			}, {
				Name:       partName,
				Generation: wc.Object().Generation,
			}},
	}
	err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err })
	// Only count the copied bytes toward the total once the compose has
	// actually succeeded; on failure the caller may retry from nn.
	if err == nil {
		nn = nn + n
	}
	return nn, err
}