You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 

1919 lines
51 KiB

  1. // Copyright 2015 Google LLC
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package bigquery
  15. import (
  16. "errors"
  17. "flag"
  18. "fmt"
  19. "log"
  20. "math/big"
  21. "net/http"
  22. "os"
  23. "sort"
  24. "strings"
  25. "testing"
  26. "time"
  27. "github.com/google/go-cmp/cmp"
  28. "github.com/google/go-cmp/cmp/cmpopts"
  29. gax "github.com/googleapis/gax-go"
  30. "cloud.google.com/go/civil"
  31. "cloud.google.com/go/internal"
  32. "cloud.google.com/go/internal/pretty"
  33. "cloud.google.com/go/internal/testutil"
  34. "cloud.google.com/go/internal/uid"
  35. "cloud.google.com/go/storage"
  36. "golang.org/x/net/context"
  37. "google.golang.org/api/googleapi"
  38. "google.golang.org/api/iterator"
  39. "google.golang.org/api/option"
  40. )
var (
	// client and storageClient are set by initIntegrationTest; they remain
	// nil when integration tests are skipped, which each test checks for.
	client        *Client
	storageClient *storage.Client
	// dataset is the shared dataset used by most tests; it is created in
	// initIntegrationTest and deleted (with contents) by the cleanup func.
	dataset *Dataset
	// schema is the default table schema used by newTable-based tests.
	schema = Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "nums", Type: IntegerFieldType, Repeated: true},
		{Name: "rec", Type: RecordFieldType, Schema: Schema{
			{Name: "bool", Type: BooleanFieldType},
		}},
	}
	// testTableExpiration is the expiration assigned to test tables;
	// set once in initIntegrationTest.
	testTableExpiration time.Time
	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
	// with underscores.
	datasetIDs = uid.NewSpace("dataset", &uid.Options{Sep: '_'})
	tableIDs   = uid.NewSpace("table", &uid.Options{Sep: '_'})
)
  58. // Note: integration tests cannot be run in parallel, because TestIntegration_Location
  59. // modifies the client.
  60. func TestMain(m *testing.M) {
  61. cleanup := initIntegrationTest()
  62. r := m.Run()
  63. cleanup()
  64. os.Exit(r)
  65. }
  66. func getClient(t *testing.T) *Client {
  67. if client == nil {
  68. t.Skip("Integration tests skipped")
  69. }
  70. return client
  71. }
// If integration tests will be run, create a unique bucket for them.
// initIntegrationTest returns a cleanup function that deletes the shared
// dataset. In short mode or without credentials it is a no-op and leaves
// the package-level client nil, so each test skips itself.
func initIntegrationTest() func() {
	flag.Parse() // needed for testing.Short()
	if testing.Short() {
		return func() {}
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		// No credentials available; tests detect this via client == nil.
		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
		return func() {}
	}
	projID := testutil.ProjID()
	var err error
	client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	storageClient, err = storage.NewClient(ctx,
		option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	// Create the shared dataset used by most tests in this file.
	dataset = client.Dataset(datasetIDs.New())
	if err := dataset.Create(ctx, nil); err != nil {
		log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
	}
	testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
	return func() {
		// Best-effort cleanup; failure is logged, not fatal.
		if err := dataset.DeleteWithContents(ctx); err != nil {
			log.Printf("could not delete %s", dataset.DatasetID)
		}
	}
}
  106. func TestIntegration_TableCreate(t *testing.T) {
  107. // Check that creating a record field with an empty schema is an error.
  108. if client == nil {
  109. t.Skip("Integration tests skipped")
  110. }
  111. table := dataset.Table("t_bad")
  112. schema := Schema{
  113. {Name: "rec", Type: RecordFieldType, Schema: Schema{}},
  114. }
  115. err := table.Create(context.Background(), &TableMetadata{
  116. Schema: schema,
  117. ExpirationTime: time.Now().Add(5 * time.Minute),
  118. })
  119. if err == nil {
  120. t.Fatal("want error, got nil")
  121. }
  122. if !hasStatusCode(err, http.StatusBadRequest) {
  123. t.Fatalf("want a 400 error, got %v", err)
  124. }
  125. }
  126. func TestIntegration_TableCreateView(t *testing.T) {
  127. if client == nil {
  128. t.Skip("Integration tests skipped")
  129. }
  130. ctx := context.Background()
  131. table := newTable(t, schema)
  132. defer table.Delete(ctx)
  133. // Test that standard SQL views work.
  134. view := dataset.Table("t_view_standardsql")
  135. query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
  136. dataset.ProjectID, dataset.DatasetID, table.TableID)
  137. err := view.Create(context.Background(), &TableMetadata{
  138. ViewQuery: query,
  139. UseStandardSQL: true,
  140. })
  141. if err != nil {
  142. t.Fatalf("table.create: Did not expect an error, got: %v", err)
  143. }
  144. if err := view.Delete(ctx); err != nil {
  145. t.Fatal(err)
  146. }
  147. }
  148. func TestIntegration_TableMetadata(t *testing.T) {
  149. if client == nil {
  150. t.Skip("Integration tests skipped")
  151. }
  152. ctx := context.Background()
  153. table := newTable(t, schema)
  154. defer table.Delete(ctx)
  155. // Check table metadata.
  156. md, err := table.Metadata(ctx)
  157. if err != nil {
  158. t.Fatal(err)
  159. }
  160. // TODO(jba): check md more thorougly.
  161. if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
  162. t.Errorf("metadata.FullID: got %q, want %q", got, want)
  163. }
  164. if got, want := md.Type, RegularTable; got != want {
  165. t.Errorf("metadata.Type: got %v, want %v", got, want)
  166. }
  167. if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) {
  168. t.Errorf("metadata.Type: got %v, want %v", got, want)
  169. }
  170. // Check that timePartitioning is nil by default
  171. if md.TimePartitioning != nil {
  172. t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
  173. }
  174. // Create tables that have time partitioning
  175. partitionCases := []struct {
  176. timePartitioning TimePartitioning
  177. wantExpiration time.Duration
  178. wantField string
  179. }{
  180. {TimePartitioning{}, time.Duration(0), ""},
  181. {TimePartitioning{Expiration: time.Second}, time.Second, ""},
  182. {
  183. TimePartitioning{
  184. Expiration: time.Second,
  185. Field: "date",
  186. }, time.Second, "date"},
  187. }
  188. schema2 := Schema{
  189. {Name: "name", Type: StringFieldType},
  190. {Name: "date", Type: DateFieldType},
  191. }
  192. for i, c := range partitionCases {
  193. table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
  194. err = table.Create(context.Background(), &TableMetadata{
  195. Schema: schema2,
  196. TimePartitioning: &c.timePartitioning,
  197. ExpirationTime: time.Now().Add(5 * time.Minute),
  198. })
  199. if err != nil {
  200. t.Fatal(err)
  201. }
  202. defer table.Delete(ctx)
  203. md, err = table.Metadata(ctx)
  204. if err != nil {
  205. t.Fatal(err)
  206. }
  207. got := md.TimePartitioning
  208. want := &TimePartitioning{
  209. Expiration: c.wantExpiration,
  210. Field: c.wantField,
  211. }
  212. if !testutil.Equal(got, want) {
  213. t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
  214. }
  215. }
  216. }
  217. func TestIntegration_DatasetCreate(t *testing.T) {
  218. if client == nil {
  219. t.Skip("Integration tests skipped")
  220. }
  221. ctx := context.Background()
  222. ds := client.Dataset(datasetIDs.New())
  223. wmd := &DatasetMetadata{Name: "name", Location: "EU"}
  224. err := ds.Create(ctx, wmd)
  225. if err != nil {
  226. t.Fatal(err)
  227. }
  228. gmd, err := ds.Metadata(ctx)
  229. if err != nil {
  230. t.Fatal(err)
  231. }
  232. if got, want := gmd.Name, wmd.Name; got != want {
  233. t.Errorf("name: got %q, want %q", got, want)
  234. }
  235. if got, want := gmd.Location, wmd.Location; got != want {
  236. t.Errorf("location: got %q, want %q", got, want)
  237. }
  238. if err := ds.Delete(ctx); err != nil {
  239. t.Fatalf("deleting dataset %v: %v", ds, err)
  240. }
  241. }
  242. func TestIntegration_DatasetMetadata(t *testing.T) {
  243. if client == nil {
  244. t.Skip("Integration tests skipped")
  245. }
  246. ctx := context.Background()
  247. md, err := dataset.Metadata(ctx)
  248. if err != nil {
  249. t.Fatal(err)
  250. }
  251. if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
  252. t.Errorf("FullID: got %q, want %q", got, want)
  253. }
  254. jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
  255. if md.CreationTime.Before(jan2016) {
  256. t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime)
  257. }
  258. if md.LastModifiedTime.Before(jan2016) {
  259. t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime)
  260. }
  261. // Verify that we get a NotFound for a nonexistent dataset.
  262. _, err = client.Dataset("does_not_exist").Metadata(ctx)
  263. if err == nil || !hasStatusCode(err, http.StatusNotFound) {
  264. t.Errorf("got %v, want NotFound error", err)
  265. }
  266. }
  267. func TestIntegration_DatasetDelete(t *testing.T) {
  268. if client == nil {
  269. t.Skip("Integration tests skipped")
  270. }
  271. ctx := context.Background()
  272. ds := client.Dataset(datasetIDs.New())
  273. if err := ds.Create(ctx, nil); err != nil {
  274. t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
  275. }
  276. if err := ds.Delete(ctx); err != nil {
  277. t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err)
  278. }
  279. }
  280. func TestIntegration_DatasetDeleteWithContents(t *testing.T) {
  281. if client == nil {
  282. t.Skip("Integration tests skipped")
  283. }
  284. ctx := context.Background()
  285. ds := client.Dataset(datasetIDs.New())
  286. if err := ds.Create(ctx, nil); err != nil {
  287. t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
  288. }
  289. table := ds.Table(tableIDs.New())
  290. if err := table.Create(ctx, nil); err != nil {
  291. t.Fatalf("creating table %s in dataset %s: %v", table.TableID, table.DatasetID, err)
  292. }
  293. // We expect failure here
  294. if err := ds.Delete(ctx); err == nil {
  295. t.Fatalf("non-recursive delete of dataset %s succeeded unexpectedly.", ds.DatasetID)
  296. }
  297. if err := ds.DeleteWithContents(ctx); err != nil {
  298. t.Fatalf("deleting recursively dataset %s: %v", ds.DatasetID, err)
  299. }
  300. }
// TestIntegration_DatasetUpdateETags checks optimistic-concurrency behavior:
// a blind write succeeds, a write with a stale ETag fails after an
// intervening write, and a write with the latest ETag succeeds.
func TestIntegration_DatasetUpdateETags(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	// check asserts the description and name of an update result.
	check := func(md *DatasetMetadata, wantDesc, wantName string) {
		if md.Description != wantDesc {
			t.Errorf("description: got %q, want %q", md.Description, wantDesc)
		}
		if md.Name != wantName {
			t.Errorf("name: got %q, want %q", md.Name, wantName)
		}
	}
	ctx := context.Background()
	md, err := dataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if md.ETag == "" {
		t.Fatal("empty ETag")
	}
	// Write without ETag succeeds.
	desc := md.Description + "d2"
	name := md.Name + "n2"
	md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "")
	if err != nil {
		t.Fatal(err)
	}
	check(md2, desc, name)
	// Write with original ETag fails because of intervening write.
	_, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag)
	if err == nil {
		t.Fatal("got nil, want error")
	}
	// Write with most recent ETag succeeds.
	md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag)
	if err != nil {
		t.Fatal(err)
	}
	check(md3, "", "")
}
// TestIntegration_DatasetUpdateDefaultExpiration checks setting, preserving,
// and clearing a dataset's default table expiration.
func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	md, err := dataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Set the default expiration time.
	md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
	if err != nil {
		t.Fatal(err)
	}
	if md.DefaultTableExpiration != time.Hour {
		t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
	}
	// Omitting DefaultTableExpiration doesn't change it.
	md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "")
	if err != nil {
		t.Fatal(err)
	}
	if md.DefaultTableExpiration != time.Hour {
		t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
	}
	// Setting it to 0 deletes it (which looks like a 0 duration).
	md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "")
	if err != nil {
		t.Fatal(err)
	}
	if md.DefaultTableExpiration != 0 {
		t.Fatalf("got %s, want 0", md.DefaultTableExpiration)
	}
}
// TestIntegration_DatasetUpdateAccess appends an access entry to the shared
// dataset's ACL, verifies it, and restores the original list on exit.
func TestIntegration_DatasetUpdateAccess(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	md, err := dataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Copy the original access list so it can be restored afterwards
	// (append below may share md.Access's backing array).
	origAccess := append([]*AccessEntry(nil), md.Access...)
	newEntry := &AccessEntry{
		Role:       ReaderRole,
		Entity:     "Joe@example.com",
		EntityType: UserEmailEntity,
	}
	newAccess := append(md.Access, newEntry)
	dm := DatasetMetadataToUpdate{Access: newAccess}
	md, err = dataset.Update(ctx, dm, md.ETag)
	if err != nil {
		t.Fatal(err)
	}
	// Restore the original access list; best-effort, so only log on failure.
	// Note md now holds the post-update metadata, so md.ETag is current.
	defer func() {
		_, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag)
		if err != nil {
			t.Log("could not restore dataset access list")
		}
	}()
	if diff := testutil.Diff(md.Access, newAccess); diff != "" {
		t.Fatalf("got=-, want=+:\n%s", diff)
	}
}
  406. func TestIntegration_DatasetUpdateLabels(t *testing.T) {
  407. if client == nil {
  408. t.Skip("Integration tests skipped")
  409. }
  410. ctx := context.Background()
  411. md, err := dataset.Metadata(ctx)
  412. if err != nil {
  413. t.Fatal(err)
  414. }
  415. var dm DatasetMetadataToUpdate
  416. dm.SetLabel("label", "value")
  417. md, err = dataset.Update(ctx, dm, "")
  418. if err != nil {
  419. t.Fatal(err)
  420. }
  421. if got, want := md.Labels["label"], "value"; got != want {
  422. t.Errorf("got %q, want %q", got, want)
  423. }
  424. dm = DatasetMetadataToUpdate{}
  425. dm.DeleteLabel("label")
  426. md, err = dataset.Update(ctx, dm, "")
  427. if err != nil {
  428. t.Fatal(err)
  429. }
  430. if _, ok := md.Labels["label"]; ok {
  431. t.Error("label still present after deletion")
  432. }
  433. }
  434. func TestIntegration_TableUpdateLabels(t *testing.T) {
  435. if client == nil {
  436. t.Skip("Integration tests skipped")
  437. }
  438. ctx := context.Background()
  439. table := newTable(t, schema)
  440. defer table.Delete(ctx)
  441. var tm TableMetadataToUpdate
  442. tm.SetLabel("label", "value")
  443. md, err := table.Update(ctx, tm, "")
  444. if err != nil {
  445. t.Fatal(err)
  446. }
  447. if got, want := md.Labels["label"], "value"; got != want {
  448. t.Errorf("got %q, want %q", got, want)
  449. }
  450. tm = TableMetadataToUpdate{}
  451. tm.DeleteLabel("label")
  452. md, err = table.Update(ctx, tm, "")
  453. if err != nil {
  454. t.Fatal(err)
  455. }
  456. if _, ok := md.Labels["label"]; ok {
  457. t.Error("label still present after deletion")
  458. }
  459. }
// TestIntegration_Tables lists the dataset's tables and checks that a newly
// created table eventually appears, retrying to absorb eventual consistency.
func TestIntegration_Tables(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	wantName := table.FullyQualifiedName()
	// This test is flaky due to eventual consistency.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	// Retry until the table shows up or the 10s context expires.
	err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		// Iterate over tables in the dataset.
		it := dataset.Tables(ctx)
		var tableNames []string
		for {
			tbl, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				// Hard failure: stop retrying and surface the error.
				return false, err
			}
			tableNames = append(tableNames, tbl.FullyQualifiedName())
		}
		// Other tests may be running with this dataset, so there might be more
		// than just our table in the list. So don't try for an exact match; just
		// make sure that our table is there somewhere.
		for _, tn := range tableNames {
			if tn == wantName {
				return true, nil
			}
		}
		return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName)
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestIntegration_UploadAndRead streams three rows into a table, then reads
// them back through every API surface: table read, query read, job read,
// job statistics, []Value destinations, and map destinations.
func TestIntegration_UploadAndRead(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Populate the table.
	upl := table.Uploader()
	var (
		wantRows  [][]Value
		saverRows []*ValuesSaver
	)
	for i, name := range []string{"a", "b", "c"} {
		row := []Value{name, []Value{int64(i)}, []Value{true}}
		wantRows = append(wantRows, row)
		saverRows = append(saverRows, &ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      row,
		})
	}
	if err := upl.Put(ctx, saverRows); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Read the table.
	checkRead(t, "upload", table.Read(ctx), wantRows)
	// Query the table.
	q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
	q.DefaultProjectID = dataset.ProjectID
	q.DefaultDatasetID = dataset.DatasetID
	rit, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "query", rit, wantRows)
	// Query the long way: run a job, re-fetch it by ID, read its results.
	job1, err := q.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if job1.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	job2, err := client.JobFromID(ctx, job1.ID())
	if err != nil {
		t.Fatal(err)
	}
	if job2.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	rit, err = job2.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "job.Read", rit, wantRows)
	// Get statistics.
	jobStatus, err := job2.Status(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if jobStatus.Statistics == nil {
		t.Fatal("jobStatus missing statistics")
	}
	if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok {
		t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details)
	}
	// Test reading directly into a []Value.
	// NOTE: schema here shadows the package-level schema variable for the
	// remainder of this function.
	valueLists, schema, _, err := readAll(table.Read(ctx))
	if err != nil {
		t.Fatal(err)
	}
	it := table.Read(ctx)
	for i, vl := range valueLists {
		var got []Value
		if err := it.Next(&got); err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(it.Schema, schema) {
			t.Fatalf("got schema %v, want %v", it.Schema, schema)
		}
		want := []Value(vl)
		if !testutil.Equal(got, want) {
			t.Errorf("%d: got %v, want %v", i, got, want)
		}
	}
	// Test reading into a map.
	it = table.Read(ctx)
	for _, vl := range valueLists {
		var vm map[string]Value
		if err := it.Next(&vm); err != nil {
			t.Fatal(err)
		}
		if got, want := len(vm), len(vl); got != want {
			t.Fatalf("valueMap len: got %d, want %d", got, want)
		}
		// With maps, structs become nested maps.
		vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]}
		for i, v := range vl {
			if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) {
				t.Errorf("%d, name=%s: got %#v, want %#v",
					i, schema[i].Name, got, want)
			}
		}
	}
}
// SubSubTestStruct is the innermost record used in nested-schema tests.
type SubSubTestStruct struct {
	Integer int64
}
// SubTestStruct is a mid-level record containing a scalar, a nested record,
// and a repeated nested record, used in nested-schema tests.
type SubTestStruct struct {
	String      string
	Record      SubSubTestStruct
	RecordArray []SubSubTestStruct
}
// TestStruct exercises schema inference and round-tripping: one field per
// scalar type, the repeated form of each, plus nested and repeated records.
type TestStruct struct {
	Name           string
	Bytes          []byte
	Integer        int64
	Float          float64
	Boolean        bool
	Timestamp      time.Time
	Date           civil.Date
	Time           civil.Time
	DateTime       civil.DateTime
	Numeric        *big.Rat
	StringArray    []string
	IntegerArray   []int64
	FloatArray     []float64
	BooleanArray   []bool
	TimestampArray []time.Time
	DateArray      []civil.Date
	TimeArray      []civil.Time
	DateTimeArray  []civil.DateTime
	NumericArray   []*big.Rat
	Record         SubTestStruct
	RecordArray    []SubTestStruct
}
// Round times to the microsecond for comparison purposes.
// Passed as a cmp option to testutil.Diff in the round-trip tests below.
var roundToMicros = cmp.Transformer("RoundToMicros",
	func(t time.Time) time.Time { return t.Round(time.Microsecond) })
// TestIntegration_UploadAndReadStructs infers a schema from TestStruct,
// uploads two rows via StructSaver, and reads them back as structs,
// comparing against the originals.
func TestIntegration_UploadAndReadStructs(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	schema, err := InferSchema(TestStruct{})
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Fixed date/time values shared by both rows.
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
	ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
	dtm := civil.DateTime{Date: d, Time: tm}
	d2 := civil.Date{Year: 1994, Month: 5, Day: 15}
	tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0}
	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
	dtm2 := civil.DateTime{Date: d2, Time: tm2}
	// Populate the table.
	upl := table.Uploader()
	// First row sets every field positionally; second row sets only the
	// scalar fields, leaving arrays and records as zero values.
	want := []*TestStruct{
		{
			"a",
			[]byte("byte"),
			42,
			3.14,
			true,
			ts,
			d,
			tm,
			dtm,
			big.NewRat(57, 100),
			[]string{"a", "b"},
			[]int64{1, 2},
			[]float64{1, 1.41},
			[]bool{true, false},
			[]time.Time{ts, ts2},
			[]civil.Date{d, d2},
			[]civil.Time{tm, tm2},
			[]civil.DateTime{dtm, dtm2},
			[]*big.Rat{big.NewRat(1, 2), big.NewRat(3, 5)},
			SubTestStruct{
				"string",
				SubSubTestStruct{24},
				[]SubSubTestStruct{{1}, {2}},
			},
			[]SubTestStruct{
				{String: "empty"},
				{
					"full",
					SubSubTestStruct{1},
					[]SubSubTestStruct{{1}, {2}},
				},
			},
		},
		{
			Name:      "b",
			Bytes:     []byte("byte2"),
			Integer:   24,
			Float:     4.13,
			Boolean:   false,
			Timestamp: ts,
			Date:      d,
			Time:      tm,
			DateTime:  dtm,
			Numeric:   big.NewRat(4499, 10000),
		},
	}
	var savers []*StructSaver
	for _, s := range want {
		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
	}
	if err := upl.Put(ctx, savers); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Test iteration with structs.
	it := table.Read(ctx)
	var got []*TestStruct
	for {
		var g TestStruct
		err := it.Next(&g)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		got = append(got, &g)
	}
	// Row order is not asserted by the read; sort by name before comparing.
	sort.Sort(byName(got))
	// BigQuery does not elide nils. It reports an error for nil fields.
	for i, g := range got {
		if i >= len(want) {
			t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
		} else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" {
			t.Errorf("%d: got=-, want=+:\n%s", i, diff)
		}
	}
}
  749. type byName []*TestStruct
  750. func (b byName) Len() int { return len(b) }
  751. func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
  752. func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// TestIntegration_UploadAndReadNullable round-trips nullable types: first a
// zero-value struct (all nulls), then a fully populated one.
func TestIntegration_UploadAndReadNullable(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
	cdt := civil.DateTime{Date: testDate, Time: ctm}
	rat := big.NewRat(33, 100)
	// A zero-value struct should read back as a row of all-nil Values.
	testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
	testUploadAndReadNullable(t, testStructNullable{
		String:    NullString{"x", true},
		Bytes:     []byte{1, 2, 3},
		Integer:   NullInt64{1, true},
		Float:     NullFloat64{2.3, true},
		Boolean:   NullBool{true, true},
		Timestamp: NullTimestamp{testTimestamp, true},
		Date:      NullDate{testDate, true},
		Time:      NullTime{ctm, true},
		DateTime:  NullDateTime{cdt, true},
		Numeric:   rat,
		Record:    &subNullable{X: NullInt64{4, true}},
	},
		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, rat, []Value{int64(4)}})
}
// testUploadAndReadNullable uploads a single testStructNullable row and
// verifies it reads back both as wantRow (a []Value) and as an equal struct.
func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
	ctx := context.Background()
	table := newTable(t, testStructNullableSchema)
	defer table.Delete(ctx)
	// Populate the table.
	upl := table.Uploader()
	if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Read into a []Value.
	iter := table.Read(ctx)
	gotRows, _, _, err := readAll(iter)
	if err != nil {
		t.Fatal(err)
	}
	if len(gotRows) != 1 {
		t.Fatalf("got %d rows, want 1", len(gotRows))
	}
	if diff := testutil.Diff(gotRows[0], wantRow, roundToMicros); diff != "" {
		t.Error(diff)
	}
	// Read into a struct.
	want := ts
	var sn testStructNullable
	it := table.Read(ctx)
	if err := it.Next(&sn); err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(sn, want, roundToMicros); diff != "" {
		t.Error(diff)
	}
}
  813. func TestIntegration_TableUpdate(t *testing.T) {
  814. if client == nil {
  815. t.Skip("Integration tests skipped")
  816. }
  817. ctx := context.Background()
  818. table := newTable(t, schema)
  819. defer table.Delete(ctx)
  820. // Test Update of non-schema fields.
  821. tm, err := table.Metadata(ctx)
  822. if err != nil {
  823. t.Fatal(err)
  824. }
  825. wantDescription := tm.Description + "more"
  826. wantName := tm.Name + "more"
  827. wantExpiration := tm.ExpirationTime.Add(time.Hour * 24)
  828. got, err := table.Update(ctx, TableMetadataToUpdate{
  829. Description: wantDescription,
  830. Name: wantName,
  831. ExpirationTime: wantExpiration,
  832. }, tm.ETag)
  833. if err != nil {
  834. t.Fatal(err)
  835. }
  836. if got.Description != wantDescription {
  837. t.Errorf("Description: got %q, want %q", got.Description, wantDescription)
  838. }
  839. if got.Name != wantName {
  840. t.Errorf("Name: got %q, want %q", got.Name, wantName)
  841. }
  842. if got.ExpirationTime != wantExpiration {
  843. t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration)
  844. }
  845. if !testutil.Equal(got.Schema, schema) {
  846. t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
  847. }
  848. // Blind write succeeds.
  849. _, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "")
  850. if err != nil {
  851. t.Fatal(err)
  852. }
  853. // Write with old etag fails.
  854. _, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag)
  855. if err == nil {
  856. t.Fatal("Update with old ETag succeeded, wanted failure")
  857. }
  858. // Test schema update.
  859. // Columns can be added. schema2 is the same as schema, except for the
  860. // added column in the middle.
  861. nested := Schema{
  862. {Name: "nested", Type: BooleanFieldType},
  863. {Name: "other", Type: StringFieldType},
  864. }
  865. schema2 := Schema{
  866. schema[0],
  867. {Name: "rec2", Type: RecordFieldType, Schema: nested},
  868. schema[1],
  869. schema[2],
  870. }
  871. got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "")
  872. if err != nil {
  873. t.Fatal(err)
  874. }
  875. // Wherever you add the column, it appears at the end.
  876. schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]}
  877. if !testutil.Equal(got.Schema, schema3) {
  878. t.Errorf("add field:\ngot %v\nwant %v",
  879. pretty.Value(got.Schema), pretty.Value(schema3))
  880. }
  881. // Updating with the empty schema succeeds, but is a no-op.
  882. got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "")
  883. if err != nil {
  884. t.Fatal(err)
  885. }
  886. if !testutil.Equal(got.Schema, schema3) {
  887. t.Errorf("empty schema:\ngot %v\nwant %v",
  888. pretty.Value(got.Schema), pretty.Value(schema3))
  889. }
  890. // Error cases when updating schema.
  891. for _, test := range []struct {
  892. desc string
  893. fields Schema
  894. }{
  895. {"change from optional to required", Schema{
  896. {Name: "name", Type: StringFieldType, Required: true},
  897. schema3[1],
  898. schema3[2],
  899. schema3[3],
  900. }},
  901. {"add a required field", Schema{
  902. schema3[0], schema3[1], schema3[2], schema3[3],
  903. {Name: "req", Type: StringFieldType, Required: true},
  904. }},
  905. {"remove a field", Schema{schema3[0], schema3[1], schema3[2]}},
  906. {"remove a nested field", Schema{
  907. schema3[0], schema3[1], schema3[2],
  908. {Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
  909. {"remove all nested fields", Schema{
  910. schema3[0], schema3[1], schema3[2],
  911. {Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
  912. } {
  913. _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "")
  914. if err == nil {
  915. t.Errorf("%s: want error, got nil", test.desc)
  916. } else if !hasStatusCode(err, 400) {
  917. t.Errorf("%s: want 400, got %v", test.desc, err)
  918. }
  919. }
  920. }
// TestIntegration_Load loads CSV data from an in-memory reader, checks the
// job's reported configuration against the loader's, waits for completion,
// and verifies the loaded rows.
func TestIntegration_Load(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	// CSV data can't be loaded into a repeated field, so we use a different schema.
	table := newTable(t, Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "nums", Type: IntegerFieldType},
	})
	defer table.Delete(ctx)
	// Load the table from a reader.
	r := strings.NewReader("a,0\nb,1\nc,2\n")
	wantRows := [][]Value{
		{"a", int64(0)},
		{"b", int64(1)},
		{"c", int64(2)},
	}
	rs := NewReaderSource(r)
	loader := table.LoaderFrom(rs)
	loader.WriteDisposition = WriteTruncate
	loader.Labels = map[string]string{"test": "go"}
	job, err := loader.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if job.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	// The job's configuration should round-trip to the loader's LoadConfig.
	conf, err := job.Config()
	if err != nil {
		t.Fatal(err)
	}
	config, ok := conf.(*LoadConfig)
	if !ok {
		t.Fatalf("got %T, want LoadConfig", conf)
	}
	diff := testutil.Diff(config, &loader.LoadConfig,
		cmp.AllowUnexported(Table{}),
		cmpopts.IgnoreUnexported(Client{}, ReaderSource{}),
		// returned schema is at top level, not in the config
		cmpopts.IgnoreFields(FileConfig{}, "Schema"))
	if diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
	// Block until the load job finishes, then check the table contents.
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "reader load", table.Read(ctx), wantRows)
}
// TestIntegration_DML inserts rows via a DML INSERT statement and verifies
// they can be read back with the expected repeated and nested values.
func TestIntegration_DML(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec)
			VALUES ('a', [0], STRUCT<BOOL>(TRUE)),
				('b', [1], STRUCT<BOOL>(FALSE)),
				('c', [2], STRUCT<BOOL>(TRUE))`,
		table.DatasetID, table.TableID)
	if err := dmlInsert(ctx, sql); err != nil {
		t.Fatal(err)
	}
	wantRows := [][]Value{
		{"a", []Value{int64(0)}, []Value{true}},
		{"b", []Value{int64(1)}, []Value{false}},
		{"c", []Value{int64(2)}, []Value{true}},
	}
	checkRead(t, "DML", table.Read(ctx), wantRows)
}
  993. func dmlInsert(ctx context.Context, sql string) error {
  994. // Retry insert; sometimes it fails with INTERNAL.
  995. return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
  996. // Use DML to insert.
  997. q := client.Query(sql)
  998. job, err := q.Run(ctx)
  999. if err != nil {
  1000. if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
  1001. return true, err // fail on 4xx
  1002. }
  1003. return false, err
  1004. }
  1005. if err := wait(ctx, job); err != nil {
  1006. if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
  1007. return true, err // fail on 4xx
  1008. }
  1009. return false, err
  1010. }
  1011. return true, nil
  1012. })
  1013. }
// TestIntegration_TimeTypes round-trips DATE, TIME, DATETIME and TIMESTAMP
// values through both the upload path and a DML INSERT, and verifies the
// same row comes back from each.
func TestIntegration_TimeTypes(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	dtSchema := Schema{
		{Name: "d", Type: DateFieldType},
		{Name: "t", Type: TimeFieldType},
		{Name: "dt", Type: DateTimeFieldType},
		{Name: "ts", Type: TimestampFieldType},
	}
	table := newTable(t, dtSchema)
	defer table.Delete(ctx)
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
	dtm := civil.DateTime{Date: d, Time: tm}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	wantRows := [][]Value{
		{d, tm, dtm, ts},
	}
	upl := table.Uploader()
	if err := upl.Put(ctx, []*ValuesSaver{
		{Schema: dtSchema, Row: wantRows[0]},
	}); err != nil {
		t.Fatal(putError(err))
	}
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// SQL wants DATETIMEs with a space between date and time, but the service
	// returns them in RFC3339 form, with a "T" between.
	query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+
		"VALUES ('%s', '%s', '%s', '%s')",
		table.DatasetID, table.TableID,
		d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
	if err := dmlInsert(ctx, query); err != nil {
		t.Fatal(err)
	}
	// The DML insert duplicates the uploaded row, so expect it twice.
	wantRows = append(wantRows, wantRows[0])
	checkRead(t, "TimeTypes", table.Read(ctx), wantRows)
}
// TestIntegration_StandardQuery runs a series of standard-SQL SELECT
// expressions and verifies the Go values the client decodes for each
// BigQuery type, including NUMERIC, arrays and nested structs.
func TestIntegration_StandardQuery(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	dtm := ts.Format("2006-01-02 15:04:05")
	// Constructs Value slices made up of int64s.
	ints := func(args ...int) []Value {
		vals := make([]Value, len(args))
		for i, arg := range args {
			vals[i] = int64(arg)
		}
		return vals
	}
	testCases := []struct {
		query   string
		wantRow []Value
	}{
		{"SELECT 1", ints(1)},
		{"SELECT 1.3", []Value{1.3}},
		{"SELECT CAST(1.3 AS NUMERIC)", []Value{big.NewRat(13, 10)}},
		{"SELECT NUMERIC '0.25'", []Value{big.NewRat(1, 4)}},
		{"SELECT TRUE", []Value{true}},
		{"SELECT 'ABC'", []Value{"ABC"}},
		{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
		{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
		{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
		{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
		{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}},
		{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
		{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
		{"SELECT (1, 2)", []Value{ints(1, 2)}},
		{"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}},
		{"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}},
		{"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}},
		{"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}},
		{"SELECT ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		it, err := q.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "StandardQuery", it, [][]Value{c.wantRow})
	}
}
// TestIntegration_LegacyQuery runs simple SELECTs with UseLegacySQL set and
// verifies the decoded values. Note that legacy SQL returns DATE/TIME values
// as strings, unlike standard SQL.
func TestIntegration_LegacyQuery(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	dtm := ts.Format("2006-01-02 15:04:05")
	testCases := []struct {
		query   string
		wantRow []Value
	}{
		{"SELECT 1", []Value{int64(1)}},
		{"SELECT 1.3", []Value{1.3}},
		{"SELECT TRUE", []Value{true}},
		{"SELECT 'ABC'", []Value{"ABC"}},
		{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
		{fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}},
		{fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}},
		{fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		q.UseLegacySQL = true
		it, err := q.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow})
	}
}
// TestIntegration_QueryParameters binds Go values of many types as named
// query parameters, runs "SELECT @val"-style queries, and verifies both the
// returned row and the parameter value echoed back in the job configuration.
func TestIntegration_QueryParameters(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008}
	rtm := tm
	rtm.Nanosecond = 3000 // round to microseconds
	dtm := civil.DateTime{Date: d, Time: tm}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	rat := big.NewRat(13, 10)
	// Struct types used to exercise STRUCT parameter binding.
	type ss struct {
		String string
	}
	type s struct {
		Timestamp      time.Time
		StringArray    []string
		SubStruct      ss
		SubStructArray []ss
	}
	testCases := []struct {
		query      string
		parameters []QueryParameter
		wantRow    []Value
		wantConfig interface{}
	}{
		{
			"SELECT @val",
			[]QueryParameter{{"val", 1}},
			[]Value{int64(1)},
			int64(1),
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", 1.3}},
			[]Value{1.3},
			1.3,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", rat}},
			[]Value{rat},
			rat,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", true}},
			[]Value{true},
			true,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", "ABC"}},
			[]Value{"ABC"},
			"ABC",
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", []byte("foo")}},
			[]Value{[]byte("foo")},
			[]byte("foo"),
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", ts}},
			[]Value{ts},
			ts,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", []time.Time{ts, ts}}},
			[]Value{[]Value{ts, ts}},
			[]interface{}{ts, ts},
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", dtm}},
			[]Value{civil.DateTime{Date: d, Time: rtm}},
			civil.DateTime{Date: d, Time: rtm},
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", d}},
			[]Value{d},
			d,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", tm}},
			[]Value{rtm},
			rtm,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}},
			map[string]interface{}{
				"Timestamp":   ts,
				"StringArray": []interface{}{"a", "b"},
				"SubStruct":   map[string]interface{}{"String": "c"},
				"SubStructArray": []interface{}{
					map[string]interface{}{"String": "d"},
					map[string]interface{}{"String": "e"},
				},
			},
		},
		{
			"SELECT @val.Timestamp, @val.SubStruct.String",
			[]QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}},
			[]Value{ts, "a"},
			map[string]interface{}{
				"Timestamp":      ts,
				"SubStruct":      map[string]interface{}{"String": "a"},
				"StringArray":    nil,
				"SubStructArray": nil,
			},
		},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		q.Parameters = c.parameters
		job, err := q.Run(ctx)
		if err != nil {
			t.Fatal(err)
		}
		if job.LastStatus() == nil {
			t.Error("no LastStatus")
		}
		it, err := job.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "QueryParameters", it, [][]Value{c.wantRow})
		// The job configuration should echo back the bound parameter value.
		config, err := job.Config()
		if err != nil {
			t.Fatal(err)
		}
		got := config.(*QueryConfig).Parameters[0].Value
		if !testutil.Equal(got, c.wantConfig) {
			t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)",
				c.parameters[0].Value, got, c.wantConfig)
		}
	}
}
// TestIntegration_QueryDryRun runs a query with DryRun set and verifies that
// the job completes immediately with statistics and a result schema but no
// actual execution.
func TestIntegration_QueryDryRun(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	q := client.Query("SELECT word from " + stdName + " LIMIT 10")
	q.DryRun = true
	job, err := q.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	s := job.LastStatus()
	// Dry-run jobs are reported as Done as soon as Run returns.
	if s.State != Done {
		t.Errorf("state is %v, expected Done", s.State)
	}
	if s.Statistics == nil {
		t.Fatal("no statistics")
	}
	if s.Statistics.Details.(*QueryStatistics).Schema == nil {
		t.Fatal("no schema")
	}
}
// TestIntegration_ExtractExternal extracts a table to a CSV object in GCS,
// then queries that object both via TableDefinitions and via an external
// table, verifying rows and the external table's metadata.
func TestIntegration_ExtractExternal(t *testing.T) {
	// Create a table, extract it to GCS, then query it externally.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	schema := Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	}
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Insert table data.
	sql := fmt.Sprintf(`INSERT %s.%s (name, num)
			VALUES ('a', 1), ('b', 2), ('c', 3)`,
		table.DatasetID, table.TableID)
	if err := dmlInsert(ctx, sql); err != nil {
		t.Fatal(err)
	}
	// Extract to a GCS object as CSV.
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	job, err := e.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conf, err := job.Config()
	if err != nil {
		t.Fatal(err)
	}
	config, ok := conf.(*ExtractConfig)
	if !ok {
		t.Fatalf("got %T, want ExtractConfig", conf)
	}
	diff := testutil.Diff(config, &e.ExtractConfig,
		cmp.AllowUnexported(Table{}),
		cmpopts.IgnoreUnexported(Client{}))
	if diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	edc := &ExternalDataConfig{
		SourceFormat: CSV,
		SourceURIs:   []string{uri},
		Schema:       schema,
		// The extract wrote a header row; skip it when reading back.
		Options: &CSVOptions{SkipLeadingRows: 1},
	}
	// Query that CSV file directly.
	q := client.Query("SELECT * FROM csv")
	q.TableDefinitions = map[string]ExternalData{"csv": edc}
	wantRows := [][]Value{
		{"a", int64(1)},
		{"b", int64(2)},
		{"c", int64(3)},
	}
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "external query", iter, wantRows)
	// Make a table pointing to the file, and query it.
	// BigQuery does not allow a Table.Read on an external table.
	table = dataset.Table(tableIDs.New())
	err = table.Create(context.Background(), &TableMetadata{
		Schema:             schema,
		ExpirationTime:     testTableExpiration,
		ExternalDataConfig: edc,
	})
	if err != nil {
		t.Fatal(err)
	}
	q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	iter, err = q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "external table", iter, wantRows)
	// While we're here, check that the table metadata is correct.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// One difference: since BigQuery returns the schema as part of the ordinary
	// table metadata, it does not populate ExternalDataConfig.Schema.
	md.ExternalDataConfig.Schema = md.Schema
	if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" {
		t.Errorf("got=-, want=+\n%s", diff)
	}
}
// TestIntegration_ReadNullIntoStruct uploads a row with a NULL name and
// verifies that scanning it into a struct with a non-pointer string field
// yields an error rather than a panic.
func TestIntegration_ReadNullIntoStruct(t *testing.T) {
	// Reading a null into a struct field should return an error (not panic).
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	upl := table.Uploader()
	row := &ValuesSaver{
		Schema: schema,
		Row:    []Value{nil, []Value{}, []Value{nil}},
	}
	if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
		t.Fatal(putError(err))
	}
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	q := client.Query(fmt.Sprintf("select name from %s", table.TableID))
	q.DefaultProjectID = dataset.ProjectID
	q.DefaultDatasetID = dataset.DatasetID
	it, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	type S struct{ Name string }
	var s S
	if err := it.Next(&s); err == nil {
		t.Fatal("got nil, want error")
	}
}
// Fully-qualified names of a public sample table, in standard-SQL and
// legacy-SQL syntax respectively.
const (
	stdName    = "`bigquery-public-data.samples.shakespeare`"
	legacyName = "[bigquery-public-data:samples.shakespeare]"
)
// These tests exploit the fact that the two SQL versions have different syntaxes for
// fully-qualified table names. Using the wrong syntax for the selected
// dialect, or setting both std and legacy at once, is expected to fail.
var useLegacySqlTests = []struct {
	t           string // name of table
	std, legacy bool   // use standard/legacy SQL
	err         bool   // do we expect an error?
}{
	{t: legacyName, std: false, legacy: true, err: false},
	{t: legacyName, std: true, legacy: false, err: true},
	{t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default
	{t: legacyName, std: true, legacy: true, err: true},
	{t: stdName, std: false, legacy: true, err: true},
	{t: stdName, std: true, legacy: false, err: false},
	{t: stdName, std: false, legacy: false, err: false}, // standard SQL is default
	{t: stdName, std: true, legacy: true, err: true},
}
// TestIntegration_QueryUseLegacySQL runs the useLegacySqlTests table through
// Query.Read, checking that each dialect/table-name combination succeeds or
// fails as expected.
func TestIntegration_QueryUseLegacySQL(t *testing.T) {
	// Test the UseLegacySQL and UseStandardSQL options for queries.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	for _, test := range useLegacySqlTests {
		q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
		q.UseStandardSQL = test.std
		q.UseLegacySQL = test.legacy
		_, err := q.Read(ctx)
		gotErr := err != nil
		if gotErr && !test.err {
			t.Errorf("%+v:\nunexpected error: %v", test, err)
		} else if !gotErr && test.err {
			t.Errorf("%+v:\nsucceeded, but want error", test)
		}
	}
}
// TestIntegration_TableUseLegacySQL runs the useLegacySqlTests table through
// view creation (Table.Create with a ViewQuery), checking that each
// dialect/table-name combination succeeds or fails as expected.
func TestIntegration_TableUseLegacySQL(t *testing.T) {
	// Test UseLegacySQL and UseStandardSQL for Table.Create.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	for i, test := range useLegacySqlTests {
		view := dataset.Table(fmt.Sprintf("t_view_%d", i))
		tm := &TableMetadata{
			ViewQuery:      fmt.Sprintf("SELECT word from %s", test.t),
			UseStandardSQL: test.std,
			UseLegacySQL:   test.legacy,
		}
		err := view.Create(ctx, tm)
		gotErr := err != nil
		if gotErr && !test.err {
			t.Errorf("%+v:\nunexpected error: %v", test, err)
		} else if !gotErr && test.err {
			t.Errorf("%+v:\nsucceeded, but want error", test)
		}
		// Best-effort cleanup; the view may not exist if creation failed.
		_ = view.Delete(ctx)
	}
}
// TestIntegration_ListJobs lists a handful of jobs from the test project and
// asserts that at least one exists.
func TestIntegration_ListJobs(t *testing.T) {
	// It's difficult to test the list of jobs, because we can't easily
	// control what's in it. Also, there are many jobs in the test project,
	// and it takes considerable time to list them all.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	// About all we can do is list a few jobs.
	const max = 20
	var jobs []*Job
	it := client.Jobs(ctx)
	for {
		job, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		jobs = append(jobs, job)
		if len(jobs) >= max {
			break
		}
	}
	// We expect that there is at least one job in the last few months.
	if len(jobs) == 0 {
		t.Fatal("did not get any jobs")
	}
}
// tokyo is the non-US BigQuery location used by the location tests below.
const tokyo = "asia-northeast1"
// TestIntegration_Location exercises testLocation twice: once specifying the
// location on each job with an unset client location, and once relying on the
// client-level location. The client location is restored on exit.
func TestIntegration_Location(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	client.Location = ""
	testLocation(t, tokyo)
	client.Location = tokyo
	defer func() {
		client.Location = ""
	}()
	testLocation(t, "")
}
// testLocation runs load, query, copy and extract jobs against a dataset in
// the tokyo location, setting loc as the per-job location. When loc is empty
// the client's Location field (set by the caller) must supply it. It also
// checks JobFromID/JobFromIDLocation behavior for cross-location lookups.
func testLocation(t *testing.T, loc string) {
	ctx := context.Background()
	tokyoDataset := client.Dataset("tokyo")
	err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc})
	if err != nil && !hasStatusCode(err, 409) { // 409 = already exists
		t.Fatal(err)
	}
	// Whatever loc was passed, the dataset was first created in tokyo, so its
	// metadata must report that location.
	md, err := tokyoDataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if md.Location != tokyo {
		t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo)
	}
	table := tokyoDataset.Table(tableIDs.New())
	err = table.Create(context.Background(), &TableMetadata{
		Schema: Schema{
			{Name: "name", Type: StringFieldType},
			{Name: "nums", Type: IntegerFieldType},
		},
		ExpirationTime: testTableExpiration,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer table.Delete(ctx)
	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n")))
	loader.Location = loc
	job, err := loader.Run(ctx)
	if err != nil {
		t.Fatal("loader.Run", err)
	}
	if job.Location() != tokyo {
		t.Fatalf("job location: got %s, want %s", job.Location(), tokyo)
	}
	// JobFromID only finds the job when the client location matches.
	_, err = client.JobFromID(ctx, job.ID())
	if client.Location == "" && err == nil {
		t.Error("JobFromID with Tokyo job, no client location: want error, got nil")
	}
	if client.Location != "" && err != nil {
		t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err)
	}
	// Looking up a tokyo job in the US location must fail.
	_, err = client.JobFromIDLocation(ctx, job.ID(), "US")
	if err == nil {
		t.Error("JobFromIDLocation with US: want error, got nil")
	}
	job2, err := client.JobFromIDLocation(ctx, job.ID(), loc)
	if loc == tokyo && err != nil {
		t.Errorf("loc=tokyo: %v", err)
	}
	if loc == "" && err == nil {
		t.Error("loc empty: got nil, want error")
	}
	if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) {
		t.Errorf("got id %s loc %s, want id%s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	// Cancel should succeed even if the job is done.
	if err := job.Cancel(ctx); err != nil {
		t.Fatal(err)
	}
	q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	q.Location = loc
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantRows := [][]Value{
		{"a", int64(0)},
		{"b", int64(1)},
		{"c", int64(2)},
	}
	checkRead(t, "location", iter, wantRows)
	// Copy within the tokyo location.
	table2 := tokyoDataset.Table(tableIDs.New())
	copier := table2.CopierFrom(table)
	copier.Location = loc
	if _, err := copier.Run(ctx); err != nil {
		t.Fatal(err)
	}
	// Extract to GCS with the job location set.
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	e.Location = loc
	if _, err := e.Run(ctx); err != nil {
		t.Fatal(err)
	}
}
// TestIntegration_NumericErrors uploads a big.Rat outside the NUMERIC range
// and expects the upload to fail.
func TestIntegration_NumericErrors(t *testing.T) {
	// Verify that the service returns an error for a big.Rat that's too large.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	schema := Schema{{Name: "n", Type: NumericFieldType}}
	table := newTable(t, schema)
	defer table.Delete(ctx)
	tooBigRat := &big.Rat{}
	// 1e40 exceeds NUMERIC's precision (38 decimal digits).
	if _, ok := tooBigRat.SetString("1e40"); !ok {
		t.Fatal("big.Rat.SetString failed")
	}
	upl := table.Uploader()
	err := upl.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
	if err == nil {
		t.Fatal("got nil, want error")
	}
}
  1649. func TestIntegration_QueryErrors(t *testing.T) {
  1650. // Verify that a bad query returns an appropriate error.
  1651. if client == nil {
  1652. t.Skip("Integration tests skipped")
  1653. }
  1654. ctx := context.Background()
  1655. q := client.Query("blah blah broken")
  1656. _, err := q.Read(ctx)
  1657. const want = "invalidQuery"
  1658. if !strings.Contains(err.Error(), want) {
  1659. t.Fatalf("got %q, want substring %q", err, want)
  1660. }
  1661. }
  1662. // Creates a new, temporary table with a unique name and the given schema.
  1663. func newTable(t *testing.T, s Schema) *Table {
  1664. table := dataset.Table(tableIDs.New())
  1665. err := table.Create(context.Background(), &TableMetadata{
  1666. Schema: s,
  1667. ExpirationTime: testTableExpiration,
  1668. })
  1669. if err != nil {
  1670. t.Fatal(err)
  1671. }
  1672. return table
  1673. }
  1674. func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
  1675. if msg2, ok := compareRead(it, want, false); !ok {
  1676. t.Errorf("%s: %s", msg, msg2)
  1677. }
  1678. }
  1679. func checkReadAndTotalRows(t *testing.T, msg string, it *RowIterator, want [][]Value) {
  1680. if msg2, ok := compareRead(it, want, true); !ok {
  1681. t.Errorf("%s: %s", msg, msg2)
  1682. }
  1683. }
  1684. func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) {
  1685. got, _, totalRows, err := readAll(it)
  1686. if err != nil {
  1687. return err.Error(), false
  1688. }
  1689. if len(got) != len(want) {
  1690. return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false
  1691. }
  1692. if compareTotalRows && len(got) != int(totalRows) {
  1693. return fmt.Sprintf("got %d rows, but totalRows = %d", len(got), totalRows), false
  1694. }
  1695. sort.Sort(byCol0(got))
  1696. for i, r := range got {
  1697. gotRow := []Value(r)
  1698. wantRow := want[i]
  1699. if !testutil.Equal(gotRow, wantRow) {
  1700. return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false
  1701. }
  1702. }
  1703. return "", true
  1704. }
  1705. func readAll(it *RowIterator) ([][]Value, Schema, uint64, error) {
  1706. var (
  1707. rows [][]Value
  1708. schema Schema
  1709. totalRows uint64
  1710. )
  1711. for {
  1712. var vals []Value
  1713. err := it.Next(&vals)
  1714. if err == iterator.Done {
  1715. return rows, schema, totalRows, nil
  1716. }
  1717. if err != nil {
  1718. return nil, nil, 0, err
  1719. }
  1720. rows = append(rows, vals)
  1721. schema = it.Schema
  1722. totalRows = it.TotalRows
  1723. }
  1724. }
  1725. type byCol0 [][]Value
  1726. func (b byCol0) Len() int { return len(b) }
  1727. func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
  1728. func (b byCol0) Less(i, j int) bool {
  1729. switch a := b[i][0].(type) {
  1730. case string:
  1731. return a < b[j][0].(string)
  1732. case civil.Date:
  1733. return a.Before(b[j][0].(civil.Date))
  1734. default:
  1735. panic("unknown type")
  1736. }
  1737. }
  1738. func hasStatusCode(err error, code int) bool {
  1739. if e, ok := err.(*googleapi.Error); ok && e.Code == code {
  1740. return true
  1741. }
  1742. return false
  1743. }
  1744. // wait polls the job until it is complete or an error is returned.
  1745. func wait(ctx context.Context, job *Job) error {
  1746. status, err := job.Wait(ctx)
  1747. if err != nil {
  1748. return err
  1749. }
  1750. if status.Err() != nil {
  1751. return fmt.Errorf("job status error: %#v", status.Err())
  1752. }
  1753. if status.Statistics == nil {
  1754. return errors.New("nil Statistics")
  1755. }
  1756. if status.Statistics.EndTime.IsZero() {
  1757. return errors.New("EndTime is zero")
  1758. }
  1759. if status.Statistics.Details == nil {
  1760. return errors.New("nil Statistics.Details")
  1761. }
  1762. return nil
  1763. }
  1764. // waitForRow polls the table until it contains a row.
  1765. // TODO(jba): use internal.Retry.
  1766. func waitForRow(ctx context.Context, table *Table) error {
  1767. for {
  1768. it := table.Read(ctx)
  1769. var v []Value
  1770. err := it.Next(&v)
  1771. if err == nil {
  1772. return nil
  1773. }
  1774. if err != iterator.Done {
  1775. return err
  1776. }
  1777. time.Sleep(1 * time.Second)
  1778. }
  1779. }
  1780. func putError(err error) string {
  1781. pme, ok := err.(PutMultiError)
  1782. if !ok {
  1783. return err.Error()
  1784. }
  1785. var msgs []string
  1786. for _, err := range pme {
  1787. msgs = append(msgs, err.Error())
  1788. }
  1789. return strings.Join(msgs, "\n")
  1790. }