func TestNestedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: containsNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: containsDoubleNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema: Schema{
						{
							Name:     "InsideNested",
							Required: true,
							Type:     "RECORD",
							Schema:   Schema{reqField("Inside", "INTEGER")},
						},
					},
				},
			},
		},
		{
			in: ptrNested{},
			want: Schema{
				&FieldSchema{
					Name:     "Ptr",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
func TestStructLoaderFieldOverlap(t *testing.T) {
	// It's OK if the struct has fields that the schema does not, and vice versa.
	type S1 struct {
		I int
		X [][]int // not in the schema; does not even correspond to a valid BigQuery type
		// many schema fields missing
	}
	var s1 S1
	if err := load(&s1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want1 := S1{I: 7}
	if !reflect.DeepEqual(s1, want1) {
		t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
	}

	// It's even valid to have no overlapping fields at all.
	type S2 struct{ Z int }
	var s2 S2
	if err := load(&s2, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := S2{}
	if !reflect.DeepEqual(s2, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
	}
}
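// A hedged sketch of the overlap rule above as seen through the public API:
// when rows are read into a struct with RowIterator.Next (as
// TestIntegration_UploadAndReadStructs does below), only struct fields whose
// names match schema fields are populated; extra struct fields and extra
// schema columns are ignored. The struct type and column name here are
// illustrative, not taken from this package.
func sketchPartialStructRead(ctx context.Context, table *Table) error {
	type partial struct {
		I           int      // matches an assumed INTEGER column "I"
		NotInSchema []string // no matching column; left at its zero value
	}
	it := table.Read(ctx)
	for {
		var p partial
		err := it.Next(&p)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		_ = p.I // use the loaded value
	}
	return nil
}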
func TestIntegration_Tables(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Iterate over tables in the dataset.
	it := dataset.Tables(ctx)
	var tables []*Table
	for {
		tbl, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		tables = append(tables, tbl)
	}
	// Other tests may be running with this dataset, so there might be more
	// than just our table in the list. So don't try for an exact match; just
	// make sure that our table is there somewhere.
	found := false
	for _, tbl := range tables {
		if reflect.DeepEqual(tbl, table) {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Tables: got %v\nshould see %v in the list",
			pretty.Value(tables), pretty.Value(table))
	}
}
func TestQuote(t *testing.T) {
	ptr := func(s string) *string { return &s }

	for _, test := range []struct {
		quote string
		force bool
		want  *string
	}{
		{"", false, nil},
		{"", true, ptr("")},
		{"-", false, ptr("-")},
		{"-", true, ptr("")},
	} {
		fc := FileConfig{
			Quote:          test.quote,
			ForceZeroQuote: test.force,
		}
		got := fc.quote()
		if (got == nil) != (test.want == nil) {
			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
		}
		if got != nil && test.want != nil && *got != *test.want {
			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
		}
	}
}
func TestTagInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in:   withTags{},
			want: withTagsSchema,
		},
		{
			in: withTagsNested{},
			want: Schema{
				&FieldSchema{
					Name:     "nested",
					Required: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in: withTagsRepeated{},
			want: Schema{
				&FieldSchema{
					Name:     "repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in:   withTagsEmbedded{},
			want: withTagsSchema,
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
func TestEmbeddedInference(t *testing.T) {
	got, err := InferSchema(nestedEmbedded{})
	if err != nil {
		t.Fatal(err)
	}
	want := Schema{
		reqField("Embedded", "INTEGER"),
		reqField("Embedded2", "INTEGER"),
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
	}
}
func TestRepeatedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: repeated{},
			want: Schema{
				reqField("NotRepeated", "BYTES"),
				repField("RepeatedByteSlice", "BYTES"),
				repField("Slice", "INTEGER"),
				repField("Array", "BOOLEAN"),
			},
		},
		{
			in: nestedRepeated{},
			want: Schema{
				reqField("NotRepeated", "INTEGER"),
				{
					Name:     "Repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
				{
					Name:     "RepeatedPtr",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
func TestValueSavers(t *testing.T) {
	ts := &testSaver{ir: &insertionRow{}}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
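// A hedged sketch of a hand-written ValueSaver, the interface whose values
// the test above expects valueSavers to pass through untouched. The field
// names and the insert-ID scheme are illustrative; the behavior exercised by
// the test is only that Save returns a row, an insert ID, and a nil error.
type sketchItem struct {
	Name  string
	Count int
}

func (i *sketchItem) Save() (map[string]Value, string, error) {
	return map[string]Value{
		"name":  i.Name,
		"count": i.Count,
	}, i.Name, nil // reuse the name as a best-effort dedup insert ID
}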
func TestPopulateLoadConfig(t *testing.T) {
	hyphen := "-"
	fc := FileConfig{
		SourceFormat:        CSV,
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            UTF_8,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: Schema{
			stringFieldSchema(),
			nestedFieldSchema(),
		},
		Quote: hyphen,
	}
	want := &bq.JobConfigurationLoad{
		SourceFormat:        "CSV",
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            "UTF-8",
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		Quote: &hyphen,
	}
	got := &bq.JobConfigurationLoad{}
	fc.populateLoadConfig(got)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
	}
}
func TestStructLoader(t *testing.T) {
	var ts1 testStruct1
	if err := load(&ts1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	// Note: the schema field named "s" gets matched to the exported struct
	// field "S", not the unexported "s".
	want := &testStruct1{
		B:      true,
		I:      7,
		F:      3.14,
		times:  times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
		S:      "x",
		S2:     "y",
		By:     []byte{1, 2, 3},
		Nested: nested{NestS: "nested", NestI: 17},
		Tagged: "z",
	}
	if !reflect.DeepEqual(&ts1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
		d, _, err := pretty.Diff(*want, ts1)
		if err == nil {
			t.Logf("diff:\n%s", d)
		}
	}

	// Test pointers to nested structs.
	type nestedPtr struct{ Nested *nested }
	var np nestedPtr
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
	if !reflect.DeepEqual(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}

	// Existing values should be reused.
	nst := &nested{NestS: "x", NestI: -10}
	np = nestedPtr{Nested: nst}
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}
	if np.Nested != nst {
		t.Error("nested struct pointers not equal")
	}
}
func TestStructLoaderRepeated(t *testing.T) {
	var r1 repStruct
	if err := load(&r1, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	want := repStruct{
		Nums:      []int{1, 2, 3},
		ShortNums: [...]int{1, 2}, // extra values discarded
		LongNums:  [...]int{1, 2, 3, 0, 0},
		Nested:    []*nested{{"x", 1}, {"y", 2}},
	}
	if !reflect.DeepEqual(r1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
	}

	r2 := repStruct{
		Nums:     []int{-1, -2, -3, -4, -5},    // truncated to zero and appended to
		LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
	}
	if err := load(&r2, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(r2, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
	}
	if got, want := cap(r2.Nums), 5; got != want {
		t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
	}

	// Short slice case.
	r3 := repStruct{Nums: []int{-1}}
	if err := load(&r3, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(r3, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
	}
	if got, want := cap(r3.Nums), 3; got != want {
		t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
	}
}
func TestSchemaConversion(t *testing.T) {
	testCases := []struct {
		schema   Schema
		bqSchema *bq.TableSchema
	}{
		{
			// required
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, true),
			},
		},
		{
			// repeated
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", true, false),
			},
		},
		{
			// nullable, string
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, false),
			},
		},
		{
			// integer
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "INTEGER", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "INTEGER", false, false),
			},
		},
		{
			// float
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "FLOAT", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "FLOAT", false, false),
			},
		},
		{
			// boolean
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "BOOLEAN", false, false),
			},
		},
		{
			// timestamp
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "TIMESTAMP", false, false),
			},
		},
		{
			// civil times
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "f1", "TIME", ""),
					bqTableFieldSchema("desc", "f2", "DATE", ""),
					bqTableFieldSchema("desc", "f3", "DATETIME", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "f1", "TIME", false, false),
				fieldSchema("desc", "f2", "DATE", false, false),
				fieldSchema("desc", "f3", "DATETIME", false, false),
			},
		},
		{
			// nested
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					{
						Description: "An outer schema wrapping a nested schema",
						Name:        "outer",
						Mode:        "REQUIRED",
						Type:        "RECORD",
						Fields: []*bq.TableFieldSchema{
							bqTableFieldSchema("inner field", "inner", "STRING", ""),
						},
					},
				},
			},
			schema: Schema{
				&FieldSchema{
					Description: "An outer schema wrapping a nested schema",
					Name:        "outer",
					Required:    true,
					Type:        "RECORD",
					Schema: []*FieldSchema{
						{
							Description: "inner field",
							Name:        "inner",
							Type:        "STRING",
						},
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		bqSchema := tc.schema.asTableSchema()
		if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := convertTableSchema(tc.bqSchema)
		if !reflect.DeepEqual(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
	}
}
func TestSimpleInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: allSignedIntegers{},
			want: Schema{
				reqField("Int64", "INTEGER"),
				reqField("Int32", "INTEGER"),
				reqField("Int16", "INTEGER"),
				reqField("Int8", "INTEGER"),
				reqField("Int", "INTEGER"),
			},
		},
		{
			in: allUnsignedIntegers{},
			want: Schema{
				reqField("Uint32", "INTEGER"),
				reqField("Uint16", "INTEGER"),
				reqField("Uint8", "INTEGER"),
			},
		},
		{
			in: allFloat{},
			want: Schema{
				reqField("Float64", "FLOAT"),
				reqField("Float32", "FLOAT"),
			},
		},
		{
			in: allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: &allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: allTime{},
			want: Schema{
				reqField("Timestamp", "TIMESTAMP"),
				reqField("Time", "TIME"),
				reqField("Date", "DATE"),
				reqField("DateTime", "DATETIME"),
			},
		},
		{
			in: allStrings{},
			want: Schema{
				reqField("String", "STRING"),
				reqField("ByteSlice", "BYTES"),
			},
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
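// A hedged sketch of how a caller would use InferSchema, the function the
// inference tests above exercise. The struct type is illustrative; per the
// cases above, each basic field becomes a required column of the
// corresponding BigQuery type, in declaration order.
func sketchInferSchema() (Schema, error) {
	type event struct {
		Name  string
		Count int
		When  time.Time
	}
	schema, err := InferSchema(event{})
	if err != nil {
		return nil, err
	}
	// schema now holds three *FieldSchema values:
	//   {Name: "Name", Required: true, Type: "STRING"}
	//   {Name: "Count", Required: true, Type: "INTEGER"}
	//   {Name: "When", Required: true, Type: "TIMESTAMP"}
	return schema, nil
}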
func TestIntegration_Update(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Test Update of non-schema fields.
	tm, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantDescription := tm.Description + "more"
	wantName := tm.Name + "more"
	got, err := table.Update(ctx, TableMetadataToUpdate{
		Description: wantDescription,
		Name:        wantName,
	})
	if err != nil {
		t.Fatal(err)
	}
	if got.Description != wantDescription {
		t.Errorf("Description: got %q, want %q", got.Description, wantDescription)
	}
	if got.Name != wantName {
		t.Errorf("Name: got %q, want %q", got.Name, wantName)
	}
	if !reflect.DeepEqual(got.Schema, schema) {
		t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
	}

	// Test schema update.
	// Columns can be added. schema2 is the same as schema, except for the
	// added column in the middle.
	nested := Schema{
		{Name: "nested", Type: BooleanFieldType},
		{Name: "other", Type: StringFieldType},
	}
	schema2 := Schema{
		schema[0],
		{Name: "rec", Type: RecordFieldType, Schema: nested},
		schema[1],
	}
	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2})
	if err != nil {
		t.Fatal(err)
	}
	// Wherever you add the column, it appears at the end.
	schema3 := Schema{schema2[0], schema2[2], schema2[1]}
	if !reflect.DeepEqual(got.Schema, schema3) {
		t.Errorf("add field:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}

	// Updating with the empty schema succeeds, but is a no-op.
	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}})
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got.Schema, schema3) {
		t.Errorf("empty schema:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}

	// Error cases.
	for _, test := range []struct {
		desc   string
		fields []*FieldSchema
	}{
		{"change from optional to required", []*FieldSchema{
			schema3[0],
			{Name: "num", Type: IntegerFieldType, Required: true},
			schema3[2],
		}},
		{"add a required field", []*FieldSchema{
			schema3[0], schema3[1], schema3[2],
			{Name: "req", Type: StringFieldType, Required: true},
		}},
		{"remove a field", []*FieldSchema{schema3[0], schema3[1]}},
		{"remove a nested field", []*FieldSchema{
			schema3[0], schema3[1],
			{Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
		{"remove all nested fields", []*FieldSchema{
			schema3[0], schema3[1],
			{Name: "rec", Type: RecordFieldType, Schema: Schema{}}}},
	} {
		for {
			_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)})
			if !hasStatusCode(err, 403) {
				break
			}
			// We've hit the rate limit for updates. Wait a bit and retry.
			t.Logf("%s: retrying after getting %v", test.desc, err)
			time.Sleep(4 * time.Second)
		}
		if err == nil {
			t.Errorf("%s: want error, got nil", test.desc)
		} else if !hasStatusCode(err, 400) {
			t.Errorf("%s: want 400, got %v", test.desc, err)
		}
	}
}
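// A hedged sketch of the schema-update path TestIntegration_Update verifies:
// fetch the current schema, append a new column, and send it back with
// Table.Update. Per the error cases above, the change must be additive; a new
// column may not be Required, and existing columns may not be removed or
// tightened. The column name is illustrative.
func sketchAddColumn(ctx context.Context, table *Table) error {
	md, err := table.Metadata(ctx)
	if err != nil {
		return err
	}
	// Copy the existing schema and add one nullable STRING column.
	newSchema := append(Schema{}, md.Schema...)
	newSchema = append(newSchema, &FieldSchema{Name: "note", Type: StringFieldType})
	_, err = table.Update(ctx, TableMetadataToUpdate{Schema: newSchema})
	return err
}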
func TestIntegration_UploadAndReadStructs(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	schema, err := InferSchema(TestStruct{})
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Populate the table.
	upl := table.Uploader()
	structs := []*TestStruct{
		{Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{false}, {true}}},
		{Name: "b", Nums: []int{1}, Subs: []*Sub{{false}, nil, {true}}},
		nil,
		{Name: "c", Sub: Sub{B: true}},
	}
	var savers []*StructSaver
	for _, s := range structs {
		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
	}
	if err := upl.Put(ctx, savers); err != nil {
		t.Fatal(err)
	}

	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}

	// Test iteration with structs.
	it := table.Read(ctx)
	var got []*TestStruct
	for {
		var g TestStruct
		err := it.Next(&g)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		got = append(got, &g)
	}
	sort.Sort(byName(got))

	// BigQuery elides nils, both at top level and in nested structs.
	// This may be surprising, but the client library is faithfully
	// rendering these nils into JSON, so we should not change it.
	// structs[1].Subs[1] and structs[2] are nil.
	want := []*TestStruct{structs[0], structs[1], structs[3]}
	want[1].Subs = []*Sub{want[1].Subs[0], want[1].Subs[2]}

	for i, g := range got {
		if i >= len(want) {
			t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
		} else if w := want[i]; !reflect.DeepEqual(g, w) {
			t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
		}
	}
}
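// A hedged sketch of the simplest streaming-insert path, relying on the
// conversion exercised in TestValueSavers: passing a slice of plain structs
// to Uploader.Put, where each element is wrapped in a StructSaver with an
// inferred schema. The row type is illustrative, not part of this package.
func sketchPutStructs(ctx context.Context, table *Table) error {
	type row struct {
		Name string
		Num  int
	}
	upl := table.Uploader()
	rows := []row{
		{Name: "a", Num: 1},
		{Name: "b", Num: 2},
	}
	return upl.Put(ctx, rows)
}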
func TestLoad(t *testing.T) {
	c := &Client{projectID: "project-id"}

	testCases := []struct {
		dst    *Table
		src    LoadSource
		config LoadConfig
		want   *bq.Job
	}{
		{
			dst:  c.Dataset("dataset-id").Table("table-id"),
			src:  NewGCSReference("uri"),
			want: defaultLoadJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			config: LoadConfig{
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
				JobID:             "ajob",
			},
			src: NewGCSReference("uri"),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
				j.JobReference = &bq.JobReference{
					JobId:     "ajob",
					ProjectId: "project-id",
				}
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.MaxBadRecords = 1
				g.AllowJaggedRows = true
				g.AllowQuotedNewlines = true
				g.IgnoreUnknownValues = true
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.MaxBadRecords = 1
				j.Configuration.Load.AllowJaggedRows = true
				j.Configuration.Load.AllowQuotedNewlines = true
				j.Configuration.Load.IgnoreUnknownValues = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Schema = Schema{
					stringFieldSchema(),
					nestedFieldSchema(),
				}
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.Schema = &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqStringFieldSchema(),
						bqNestedFieldSchema(),
					}}
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.SkipLeadingRows = 1
				g.SourceFormat = JSON
				g.Encoding = UTF_8
				g.FieldDelimiter = "\t"
				g.Quote = "-"
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.SkipLeadingRows = 1
				j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Load.Encoding = "UTF-8"
				j.Configuration.Load.FieldDelimiter = "\t"
				hyphen := "-"
				j.Configuration.Load.Quote = &hyphen
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: NewGCSReference("uri"),
			want: func() *bq.Job {
				j := defaultLoadJob()
				// Quote is left unset in GCSReference, so should be nil here.
				j.Configuration.Load.Quote = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.ForceZeroQuote = true
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				empty := ""
				j.Configuration.Load.Quote = &empty
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *ReaderSource {
				r := NewReaderSource(strings.NewReader("foo"))
				r.SkipLeadingRows = 1
				r.SourceFormat = JSON
				r.Encoding = UTF_8
				r.FieldDelimiter = "\t"
				r.Quote = "-"
				return r
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.SourceUris = nil
				j.Configuration.Load.SkipLeadingRows = 1
				j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Load.Encoding = "UTF-8"
				j.Configuration.Load.FieldDelimiter = "\t"
				hyphen := "-"
				j.Configuration.Load.Quote = &hyphen
				return j
			}(),
		},
	}

	for i, tc := range testCases {
		s := &testService{}
		c.service = s
		loader := tc.dst.LoaderFrom(tc.src)
		tc.config.Src = tc.src
		tc.config.Dst = tc.dst
		loader.LoadConfig = tc.config
		if _, err := loader.Run(context.Background()); err != nil {
			t.Errorf("%d: err calling Loader.Run: %v", i, err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("loading %d: got:\n%v\nwant:\n%v",
				i, pretty.Value(s.Job), pretty.Value(tc.want))
		}
	}
}
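// A hedged sketch of the client-side flow the TestLoad cases model: configure
// a GCSReference, attach it to a destination table with LoaderFrom, adjust
// the LoadConfig, and call Run. The bucket URI and disposition choices are
// illustrative; a real caller would then poll the returned job for completion.
func sketchLoadFromGCS(ctx context.Context, client *Client) error {
	gcsRef := NewGCSReference("gs://my-bucket/data.csv") // hypothetical URI
	gcsRef.SkipLeadingRows = 1
	gcsRef.AllowJaggedRows = true

	loader := client.Dataset("dataset-id").Table("table-id").LoaderFrom(gcsRef)
	loader.LoadConfig.CreateDisposition = CreateNever
	loader.LoadConfig.WriteDisposition = WriteTruncate

	_, err := loader.Run(ctx)
	return err
}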