You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 

2218 lines
59 KiB

  1. // Copyright 2015 Google LLC
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package bigquery
  15. import (
  16. "context"
  17. "encoding/json"
  18. "errors"
  19. "flag"
  20. "fmt"
  21. "log"
  22. "math/big"
  23. "net/http"
  24. "os"
  25. "sort"
  26. "strings"
  27. "testing"
  28. "time"
  29. "cloud.google.com/go/civil"
  30. "cloud.google.com/go/httpreplay"
  31. "cloud.google.com/go/internal"
  32. "cloud.google.com/go/internal/pretty"
  33. "cloud.google.com/go/internal/testutil"
  34. "cloud.google.com/go/internal/uid"
  35. "cloud.google.com/go/storage"
  36. "github.com/google/go-cmp/cmp"
  37. "github.com/google/go-cmp/cmp/cmpopts"
  38. gax "github.com/googleapis/gax-go/v2"
  39. "google.golang.org/api/googleapi"
  40. "google.golang.org/api/iterator"
  41. "google.golang.org/api/option"
  42. )
// replayFilename is the HTTP replay log that lets these tests run without a
// live backend (see initIntegrationTest).
const replayFilename = "bigquery.replay"

// -record re-records the replay file against a real backend.
var record = flag.Bool("record", false, "record RPCs")

var (
	// client is the shared BigQuery client; nil means integration tests are skipped.
	client *Client
	// storageClient is a Cloud Storage client used by tests elsewhere in this file.
	storageClient *storage.Client
	// dataset is the per-run dataset that holds all test tables.
	dataset *Dataset
	// schema is the default table schema used by most tests in this file.
	schema = Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "nums", Type: IntegerFieldType, Repeated: true},
		{Name: "rec", Type: RecordFieldType, Schema: Schema{
			{Name: "bool", Type: BooleanFieldType},
		}},
	}
	// testTableExpiration is the expiration time applied to test tables.
	testTableExpiration time.Time
	// datasetIDs and tableIDs generate unique, time-seeded resource IDs
	// (underscore-separated, since BigQuery rejects hyphens in IDs).
	datasetIDs, tableIDs *uid.Space
)
  59. // Note: integration tests cannot be run in parallel, because TestIntegration_Location
  60. // modifies the client.
  61. func TestMain(m *testing.M) {
  62. cleanup := initIntegrationTest()
  63. r := m.Run()
  64. cleanup()
  65. os.Exit(r)
  66. }
  67. func getClient(t *testing.T) *Client {
  68. if client == nil {
  69. t.Skip("Integration tests skipped")
  70. }
  71. return client
  72. }
// If integration tests will be run, create a unique dataset for them.
// Return a cleanup function.
//
// Four modes, chosen by flags and environment:
//   - -short with -record: invalid combination, fatal.
//   - -short with a replay file present: replay recorded RPCs, no creds needed.
//   - -short otherwise: skip integration tests entirely (client left nil).
//   - default: run against the real backend, optionally recording with -record.
func initIntegrationTest() func() {
	ctx := context.Background()
	flag.Parse() // needed for testing.Short()
	projID := testutil.ProjID()
	switch {
	case testing.Short() && *record:
		log.Fatal("cannot combine -short and -record")
		return func() {}
	case testing.Short() && httpreplay.Supported() && testutil.CanReplay(replayFilename) && projID != "":
		// go test -short with a replay file will replay the integration tests if the
		// environment variables are set.
		log.Printf("replaying from %s", replayFilename)
		httpreplay.DebugHeaders()
		replayer, err := httpreplay.NewReplayer(replayFilename)
		if err != nil {
			log.Fatal(err)
		}
		// The recording stored the wall-clock time of the recording run as its
		// initial state; restore it so IDs and expirations are reproducible.
		var t time.Time
		if err := json.Unmarshal(replayer.Initial(), &t); err != nil {
			log.Fatal(err)
		}
		hc, err := replayer.Client(ctx) // no creds needed
		if err != nil {
			log.Fatal(err)
		}
		client, err = NewClient(ctx, projID, option.WithHTTPClient(hc))
		if err != nil {
			log.Fatal(err)
		}
		storageClient, err = storage.NewClient(ctx, option.WithHTTPClient(hc))
		if err != nil {
			log.Fatal(err)
		}
		cleanup := initTestState(client, t)
		return func() {
			cleanup()
			_ = replayer.Close() // No actionable error returned.
		}
	case testing.Short():
		// go test -short without a replay file skips the integration tests.
		if testutil.CanReplay(replayFilename) && projID != "" {
			log.Print("replay not supported for Go versions before 1.8")
		}
		client = nil
		storageClient = nil
		return func() {}
	default: // Run integration tests against a real backend.
		ts := testutil.TokenSource(ctx, Scope)
		if ts == nil {
			log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
			return func() {}
		}
		bqOpt := option.WithTokenSource(ts)
		sOpt := option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl))
		cleanup := func() {}
		now := time.Now().UTC()
		if *record {
			if !httpreplay.Supported() {
				log.Print("record not supported for Go versions before 1.8")
			} else {
				// Store the current time as the recording's initial state so a
				// future replay run can reconstruct the same IDs (see above).
				nowBytes, err := json.Marshal(now)
				if err != nil {
					log.Fatal(err)
				}
				recorder, err := httpreplay.NewRecorder(replayFilename, nowBytes)
				if err != nil {
					log.Fatalf("could not record: %v", err)
				}
				log.Printf("recording to %s", replayFilename)
				// Route both the BigQuery and Storage clients through the recorder.
				hc, err := recorder.Client(ctx, bqOpt)
				if err != nil {
					log.Fatal(err)
				}
				bqOpt = option.WithHTTPClient(hc)
				hc, err = recorder.Client(ctx, sOpt)
				if err != nil {
					log.Fatal(err)
				}
				sOpt = option.WithHTTPClient(hc)
				cleanup = func() {
					if err := recorder.Close(); err != nil {
						log.Printf("saving recording: %v", err)
					}
				}
			}
		}
		var err error
		client, err = NewClient(ctx, projID, bqOpt)
		if err != nil {
			log.Fatalf("NewClient: %v", err)
		}
		storageClient, err = storage.NewClient(ctx, sOpt)
		if err != nil {
			log.Fatalf("storage.NewClient: %v", err)
		}
		c := initTestState(client, now)
		// Delete the dataset first, then flush the recording.
		return func() { c(); cleanup() }
	}
}
  174. func initTestState(client *Client, t time.Time) func() {
  175. // BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
  176. // with underscores.
  177. ctx := context.Background()
  178. opts := &uid.Options{Sep: '_', Time: t}
  179. datasetIDs = uid.NewSpace("dataset", opts)
  180. tableIDs = uid.NewSpace("table", opts)
  181. testTableExpiration = t.Add(10 * time.Minute).Round(time.Second)
  182. // For replayability, seed the random source with t.
  183. Seed(t.UnixNano())
  184. dataset = client.Dataset(datasetIDs.New())
  185. if err := dataset.Create(ctx, nil); err != nil {
  186. log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
  187. }
  188. return func() {
  189. if err := dataset.DeleteWithContents(ctx); err != nil {
  190. log.Printf("could not delete %s", dataset.DatasetID)
  191. }
  192. }
  193. }
  194. func TestIntegration_TableCreate(t *testing.T) {
  195. // Check that creating a record field with an empty schema is an error.
  196. if client == nil {
  197. t.Skip("Integration tests skipped")
  198. }
  199. table := dataset.Table("t_bad")
  200. schema := Schema{
  201. {Name: "rec", Type: RecordFieldType, Schema: Schema{}},
  202. }
  203. err := table.Create(context.Background(), &TableMetadata{
  204. Schema: schema,
  205. ExpirationTime: testTableExpiration.Add(5 * time.Minute),
  206. })
  207. if err == nil {
  208. t.Fatal("want error, got nil")
  209. }
  210. if !hasStatusCode(err, http.StatusBadRequest) {
  211. t.Fatalf("want a 400 error, got %v", err)
  212. }
  213. }
  214. func TestIntegration_TableCreateView(t *testing.T) {
  215. if client == nil {
  216. t.Skip("Integration tests skipped")
  217. }
  218. ctx := context.Background()
  219. table := newTable(t, schema)
  220. defer table.Delete(ctx)
  221. // Test that standard SQL views work.
  222. view := dataset.Table("t_view_standardsql")
  223. query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
  224. dataset.ProjectID, dataset.DatasetID, table.TableID)
  225. err := view.Create(context.Background(), &TableMetadata{
  226. ViewQuery: query,
  227. UseStandardSQL: true,
  228. })
  229. if err != nil {
  230. t.Fatalf("table.create: Did not expect an error, got: %v", err)
  231. }
  232. if err := view.Delete(ctx); err != nil {
  233. t.Fatal(err)
  234. }
  235. }
  236. func TestIntegration_TableMetadata(t *testing.T) {
  237. t.Skip("Internal bug 128670231")
  238. if client == nil {
  239. t.Skip("Integration tests skipped")
  240. }
  241. ctx := context.Background()
  242. table := newTable(t, schema)
  243. defer table.Delete(ctx)
  244. // Check table metadata.
  245. md, err := table.Metadata(ctx)
  246. if err != nil {
  247. t.Fatal(err)
  248. }
  249. // TODO(jba): check md more thorougly.
  250. if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
  251. t.Errorf("metadata.FullID: got %q, want %q", got, want)
  252. }
  253. if got, want := md.Type, RegularTable; got != want {
  254. t.Errorf("metadata.Type: got %v, want %v", got, want)
  255. }
  256. if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) {
  257. t.Errorf("metadata.Type: got %v, want %v", got, want)
  258. }
  259. // Check that timePartitioning is nil by default
  260. if md.TimePartitioning != nil {
  261. t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
  262. }
  263. // Create tables that have time partitioning
  264. partitionCases := []struct {
  265. timePartitioning TimePartitioning
  266. wantExpiration time.Duration
  267. wantField string
  268. wantPruneFilter bool
  269. }{
  270. {TimePartitioning{}, time.Duration(0), "", false},
  271. {TimePartitioning{Expiration: time.Second}, time.Second, "", false},
  272. {TimePartitioning{RequirePartitionFilter: true}, time.Duration(0), "", true},
  273. {
  274. TimePartitioning{
  275. Expiration: time.Second,
  276. Field: "date",
  277. RequirePartitionFilter: true,
  278. }, time.Second, "date", true},
  279. }
  280. schema2 := Schema{
  281. {Name: "name", Type: StringFieldType},
  282. {Name: "date", Type: DateFieldType},
  283. }
  284. clustering := &Clustering{
  285. Fields: []string{"name"},
  286. }
  287. // Currently, clustering depends on partitioning. Interleave testing of the two features.
  288. for i, c := range partitionCases {
  289. table := dataset.Table(fmt.Sprintf("t_metadata_partition_nocluster_%v", i))
  290. clusterTable := dataset.Table(fmt.Sprintf("t_metadata_partition_cluster_%v", i))
  291. // Create unclustered, partitioned variant and get metadata.
  292. err = table.Create(context.Background(), &TableMetadata{
  293. Schema: schema2,
  294. TimePartitioning: &c.timePartitioning,
  295. ExpirationTime: testTableExpiration,
  296. })
  297. if err != nil {
  298. t.Fatal(err)
  299. }
  300. defer table.Delete(ctx)
  301. md, err := table.Metadata(ctx)
  302. if err != nil {
  303. t.Fatal(err)
  304. }
  305. // Created clustered table and get metadata.
  306. err = clusterTable.Create(context.Background(), &TableMetadata{
  307. Schema: schema2,
  308. TimePartitioning: &c.timePartitioning,
  309. ExpirationTime: testTableExpiration,
  310. Clustering: clustering,
  311. })
  312. if err != nil {
  313. t.Fatal(err)
  314. }
  315. clusterMD, err := clusterTable.Metadata(ctx)
  316. if err != nil {
  317. t.Fatal(err)
  318. }
  319. for _, v := range []*TableMetadata{md, clusterMD} {
  320. got := v.TimePartitioning
  321. want := &TimePartitioning{
  322. Expiration: c.wantExpiration,
  323. Field: c.wantField,
  324. RequirePartitionFilter: c.wantPruneFilter,
  325. }
  326. if !testutil.Equal(got, want) {
  327. t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
  328. }
  329. // check that RequirePartitionFilter can be inverted.
  330. mdUpdate := TableMetadataToUpdate{
  331. TimePartitioning: &TimePartitioning{
  332. Expiration: v.TimePartitioning.Expiration,
  333. RequirePartitionFilter: !want.RequirePartitionFilter,
  334. },
  335. }
  336. newmd, err := table.Update(ctx, mdUpdate, "")
  337. if err != nil {
  338. t.Errorf("failed to invert RequirePartitionFilter on %s: %v", table.FullyQualifiedName(), err)
  339. }
  340. if newmd.TimePartitioning.RequirePartitionFilter == want.RequirePartitionFilter {
  341. t.Errorf("inverting RequirePartitionFilter on %s failed, want %t got %t", table.FullyQualifiedName(), !want.RequirePartitionFilter, newmd.TimePartitioning.RequirePartitionFilter)
  342. }
  343. }
  344. if md.Clustering != nil {
  345. t.Errorf("metadata.Clustering was not nil on unclustered table %s", table.TableID)
  346. }
  347. got := clusterMD.Clustering
  348. want := clustering
  349. if clusterMD.Clustering != clustering {
  350. if !testutil.Equal(got, want) {
  351. t.Errorf("metadata.Clustering: got %v, want %v", got, want)
  352. }
  353. }
  354. }
  355. }
  356. func TestIntegration_RemoveTimePartitioning(t *testing.T) {
  357. if client == nil {
  358. t.Skip("Integration tests skipped")
  359. }
  360. ctx := context.Background()
  361. table := dataset.Table(tableIDs.New())
  362. want := 24 * time.Hour
  363. err := table.Create(ctx, &TableMetadata{
  364. ExpirationTime: testTableExpiration,
  365. TimePartitioning: &TimePartitioning{
  366. Expiration: want,
  367. },
  368. })
  369. if err != nil {
  370. t.Fatal(err)
  371. }
  372. defer table.Delete(ctx)
  373. md, err := table.Metadata(ctx)
  374. if err != nil {
  375. t.Fatal(err)
  376. }
  377. if got := md.TimePartitioning.Expiration; got != want {
  378. t.Fatalf("TimeParitioning expiration want = %v, got = %v", want, got)
  379. }
  380. // Remove time partitioning expiration
  381. md, err = table.Update(context.Background(), TableMetadataToUpdate{
  382. TimePartitioning: &TimePartitioning{Expiration: 0},
  383. }, md.ETag)
  384. if err != nil {
  385. t.Fatal(err)
  386. }
  387. want = time.Duration(0)
  388. if got := md.TimePartitioning.Expiration; got != want {
  389. t.Fatalf("TimeParitioning expiration want = %v, got = %v", want, got)
  390. }
  391. }
  392. func TestIntegration_DatasetCreate(t *testing.T) {
  393. if client == nil {
  394. t.Skip("Integration tests skipped")
  395. }
  396. ctx := context.Background()
  397. ds := client.Dataset(datasetIDs.New())
  398. wmd := &DatasetMetadata{Name: "name", Location: "EU"}
  399. err := ds.Create(ctx, wmd)
  400. if err != nil {
  401. t.Fatal(err)
  402. }
  403. gmd, err := ds.Metadata(ctx)
  404. if err != nil {
  405. t.Fatal(err)
  406. }
  407. if got, want := gmd.Name, wmd.Name; got != want {
  408. t.Errorf("name: got %q, want %q", got, want)
  409. }
  410. if got, want := gmd.Location, wmd.Location; got != want {
  411. t.Errorf("location: got %q, want %q", got, want)
  412. }
  413. if err := ds.Delete(ctx); err != nil {
  414. t.Fatalf("deleting dataset %v: %v", ds, err)
  415. }
  416. }
  417. func TestIntegration_DatasetMetadata(t *testing.T) {
  418. if client == nil {
  419. t.Skip("Integration tests skipped")
  420. }
  421. ctx := context.Background()
  422. md, err := dataset.Metadata(ctx)
  423. if err != nil {
  424. t.Fatal(err)
  425. }
  426. if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
  427. t.Errorf("FullID: got %q, want %q", got, want)
  428. }
  429. jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
  430. if md.CreationTime.Before(jan2016) {
  431. t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime)
  432. }
  433. if md.LastModifiedTime.Before(jan2016) {
  434. t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime)
  435. }
  436. // Verify that we get a NotFound for a nonexistent dataset.
  437. _, err = client.Dataset("does_not_exist").Metadata(ctx)
  438. if err == nil || !hasStatusCode(err, http.StatusNotFound) {
  439. t.Errorf("got %v, want NotFound error", err)
  440. }
  441. }
  442. func TestIntegration_DatasetDelete(t *testing.T) {
  443. if client == nil {
  444. t.Skip("Integration tests skipped")
  445. }
  446. ctx := context.Background()
  447. ds := client.Dataset(datasetIDs.New())
  448. if err := ds.Create(ctx, nil); err != nil {
  449. t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
  450. }
  451. if err := ds.Delete(ctx); err != nil {
  452. t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err)
  453. }
  454. }
  455. func TestIntegration_DatasetDeleteWithContents(t *testing.T) {
  456. if client == nil {
  457. t.Skip("Integration tests skipped")
  458. }
  459. ctx := context.Background()
  460. ds := client.Dataset(datasetIDs.New())
  461. if err := ds.Create(ctx, nil); err != nil {
  462. t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
  463. }
  464. table := ds.Table(tableIDs.New())
  465. if err := table.Create(ctx, nil); err != nil {
  466. t.Fatalf("creating table %s in dataset %s: %v", table.TableID, table.DatasetID, err)
  467. }
  468. // We expect failure here
  469. if err := ds.Delete(ctx); err == nil {
  470. t.Fatalf("non-recursive delete of dataset %s succeeded unexpectedly.", ds.DatasetID)
  471. }
  472. if err := ds.DeleteWithContents(ctx); err != nil {
  473. t.Fatalf("deleting recursively dataset %s: %v", ds.DatasetID, err)
  474. }
  475. }
  476. func TestIntegration_DatasetUpdateETags(t *testing.T) {
  477. if client == nil {
  478. t.Skip("Integration tests skipped")
  479. }
  480. check := func(md *DatasetMetadata, wantDesc, wantName string) {
  481. if md.Description != wantDesc {
  482. t.Errorf("description: got %q, want %q", md.Description, wantDesc)
  483. }
  484. if md.Name != wantName {
  485. t.Errorf("name: got %q, want %q", md.Name, wantName)
  486. }
  487. }
  488. ctx := context.Background()
  489. md, err := dataset.Metadata(ctx)
  490. if err != nil {
  491. t.Fatal(err)
  492. }
  493. if md.ETag == "" {
  494. t.Fatal("empty ETag")
  495. }
  496. // Write without ETag succeeds.
  497. desc := md.Description + "d2"
  498. name := md.Name + "n2"
  499. md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "")
  500. if err != nil {
  501. t.Fatal(err)
  502. }
  503. check(md2, desc, name)
  504. // Write with original ETag fails because of intervening write.
  505. _, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag)
  506. if err == nil {
  507. t.Fatal("got nil, want error")
  508. }
  509. // Write with most recent ETag succeeds.
  510. md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag)
  511. if err != nil {
  512. t.Fatal(err)
  513. }
  514. check(md3, "", "")
  515. }
  516. func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
  517. if client == nil {
  518. t.Skip("Integration tests skipped")
  519. }
  520. ctx := context.Background()
  521. _, err := dataset.Metadata(ctx)
  522. if err != nil {
  523. t.Fatal(err)
  524. }
  525. // Set the default expiration time.
  526. md, err := dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
  527. if err != nil {
  528. t.Fatal(err)
  529. }
  530. if md.DefaultTableExpiration != time.Hour {
  531. t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
  532. }
  533. // Omitting DefaultTableExpiration doesn't change it.
  534. md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "")
  535. if err != nil {
  536. t.Fatal(err)
  537. }
  538. if md.DefaultTableExpiration != time.Hour {
  539. t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
  540. }
  541. // Setting it to 0 deletes it (which looks like a 0 duration).
  542. md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "")
  543. if err != nil {
  544. t.Fatal(err)
  545. }
  546. if md.DefaultTableExpiration != 0 {
  547. t.Fatalf("got %s, want 0", md.DefaultTableExpiration)
  548. }
  549. }
// TestIntegration_DatasetUpdateAccess appends a reader entry to the dataset's
// access list, verifies the update, and restores the original list afterward.
func TestIntegration_DatasetUpdateAccess(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	md, err := dataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Copy the access list before appending below, so the deferred restore
	// uses the pristine slice rather than one aliased by the append.
	origAccess := append([]*AccessEntry(nil), md.Access...)
	newEntry := &AccessEntry{
		Role:       ReaderRole,
		Entity:     "Joe@example.com",
		EntityType: UserEmailEntity,
	}
	newAccess := append(md.Access, newEntry)
	dm := DatasetMetadataToUpdate{Access: newAccess}
	// md is reassigned here, so the deferred restore uses the post-update ETag.
	md, err = dataset.Update(ctx, dm, md.ETag)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		// Best-effort restore; a failure here only pollutes the shared dataset.
		_, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag)
		if err != nil {
			t.Log("could not restore dataset access list")
		}
	}()
	if diff := testutil.Diff(md.Access, newAccess); diff != "" {
		t.Fatalf("got=-, want=+:\n%s", diff)
	}
}
  581. func TestIntegration_DatasetUpdateLabels(t *testing.T) {
  582. if client == nil {
  583. t.Skip("Integration tests skipped")
  584. }
  585. ctx := context.Background()
  586. _, err := dataset.Metadata(ctx)
  587. if err != nil {
  588. t.Fatal(err)
  589. }
  590. var dm DatasetMetadataToUpdate
  591. dm.SetLabel("label", "value")
  592. md, err := dataset.Update(ctx, dm, "")
  593. if err != nil {
  594. t.Fatal(err)
  595. }
  596. if got, want := md.Labels["label"], "value"; got != want {
  597. t.Errorf("got %q, want %q", got, want)
  598. }
  599. dm = DatasetMetadataToUpdate{}
  600. dm.DeleteLabel("label")
  601. md, err = dataset.Update(ctx, dm, "")
  602. if err != nil {
  603. t.Fatal(err)
  604. }
  605. if _, ok := md.Labels["label"]; ok {
  606. t.Error("label still present after deletion")
  607. }
  608. }
  609. func TestIntegration_TableUpdateLabels(t *testing.T) {
  610. if client == nil {
  611. t.Skip("Integration tests skipped")
  612. }
  613. ctx := context.Background()
  614. table := newTable(t, schema)
  615. defer table.Delete(ctx)
  616. var tm TableMetadataToUpdate
  617. tm.SetLabel("label", "value")
  618. md, err := table.Update(ctx, tm, "")
  619. if err != nil {
  620. t.Fatal(err)
  621. }
  622. if got, want := md.Labels["label"], "value"; got != want {
  623. t.Errorf("got %q, want %q", got, want)
  624. }
  625. tm = TableMetadataToUpdate{}
  626. tm.DeleteLabel("label")
  627. md, err = table.Update(ctx, tm, "")
  628. if err != nil {
  629. t.Fatal(err)
  630. }
  631. if _, ok := md.Labels["label"]; ok {
  632. t.Error("label still present after deletion")
  633. }
  634. }
  635. func TestIntegration_Tables(t *testing.T) {
  636. if client == nil {
  637. t.Skip("Integration tests skipped")
  638. }
  639. ctx := context.Background()
  640. table := newTable(t, schema)
  641. defer table.Delete(ctx)
  642. wantName := table.FullyQualifiedName()
  643. // This test is flaky due to eventual consistency.
  644. ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
  645. defer cancel()
  646. err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
  647. // Iterate over tables in the dataset.
  648. it := dataset.Tables(ctx)
  649. var tableNames []string
  650. for {
  651. tbl, err := it.Next()
  652. if err == iterator.Done {
  653. break
  654. }
  655. if err != nil {
  656. return false, err
  657. }
  658. tableNames = append(tableNames, tbl.FullyQualifiedName())
  659. }
  660. // Other tests may be running with this dataset, so there might be more
  661. // than just our table in the list. So don't try for an exact match; just
  662. // make sure that our table is there somewhere.
  663. for _, tn := range tableNames {
  664. if tn == wantName {
  665. return true, nil
  666. }
  667. }
  668. return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName)
  669. })
  670. if err != nil {
  671. t.Fatal(err)
  672. }
  673. }
// TestIntegration_InsertAndRead streams rows into a table, then reads them
// back several ways: directly, via a query, via a job, and into []Value and
// map destinations.
func TestIntegration_InsertAndRead(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Populate the table.
	ins := table.Inserter()
	var (
		wantRows  [][]Value
		saverRows []*ValuesSaver
	)
	// Rows follow the package-level schema: name, repeated nums, record rec.
	for i, name := range []string{"a", "b", "c"} {
		row := []Value{name, []Value{int64(i)}, []Value{true}}
		wantRows = append(wantRows, row)
		saverRows = append(saverRows, &ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      row,
		})
	}
	if err := ins.Put(ctx, saverRows); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Read the table.
	checkRead(t, "upload", table.Read(ctx), wantRows)
	// Query the table.
	q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
	q.DefaultProjectID = dataset.ProjectID
	q.DefaultDatasetID = dataset.DatasetID
	rit, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "query", rit, wantRows)
	// Query the long way: run a job, re-fetch it by ID, read its results.
	job1, err := q.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if job1.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	job2, err := client.JobFromID(ctx, job1.ID())
	if err != nil {
		t.Fatal(err)
	}
	if job2.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	rit, err = job2.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "job.Read", rit, wantRows)
	// Get statistics.
	jobStatus, err := job2.Status(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if jobStatus.Statistics == nil {
		t.Fatal("jobStatus missing statistics")
	}
	if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok {
		t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details)
	}
	// Test reading directly into a []Value.
	// Note: the local schema below shadows the package-level schema variable.
	valueLists, schema, _, err := readAll(table.Read(ctx))
	if err != nil {
		t.Fatal(err)
	}
	it := table.Read(ctx)
	for i, vl := range valueLists {
		var got []Value
		if err := it.Next(&got); err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(it.Schema, schema) {
			t.Fatalf("got schema %v, want %v", it.Schema, schema)
		}
		want := []Value(vl)
		if !testutil.Equal(got, want) {
			t.Errorf("%d: got %v, want %v", i, got, want)
		}
	}
	// Test reading into a map.
	it = table.Read(ctx)
	for _, vl := range valueLists {
		var vm map[string]Value
		if err := it.Next(&vm); err != nil {
			t.Fatal(err)
		}
		if got, want := len(vm), len(vl); got != want {
			t.Fatalf("valueMap len: got %d, want %d", got, want)
		}
		// With maps, structs become nested maps.
		vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]}
		for i, v := range vl {
			if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) {
				t.Errorf("%d, name=%s: got %#v, want %#v",
					i, schema[i].Name, got, want)
			}
		}
	}
}
// SubSubTestStruct is the innermost nested record used by TestStruct.
type SubSubTestStruct struct {
	Integer int64
}

// SubTestStruct is a nested record containing a scalar, a sub-record, and a
// repeated sub-record.
type SubTestStruct struct {
	String      string
	Record      SubSubTestStruct
	RecordArray []SubSubTestStruct
}
// TestStruct has one field of every type exercised by the struct
// insert/read round-trip tests: scalars, arrays, and nested records.
// Field order matters: test values elsewhere use positional literals.
type TestStruct struct {
	Name           string
	Bytes          []byte
	Integer        int64
	Float          float64
	Boolean        bool
	Timestamp      time.Time
	Date           civil.Date
	Time           civil.Time
	DateTime       civil.DateTime
	Numeric        *big.Rat
	Geography      string // geography as text, e.g. "POINT(...)" in the test values
	StringArray    []string
	IntegerArray   []int64
	FloatArray     []float64
	BooleanArray   []bool
	TimestampArray []time.Time
	DateArray      []civil.Date
	TimeArray      []civil.Time
	DateTimeArray  []civil.DateTime
	NumericArray   []*big.Rat
	GeographyArray []string
	Record         SubTestStruct
	RecordArray    []SubTestStruct
}
// Round times to the microsecond for comparison purposes — presumably because
// the service does not preserve sub-microsecond precision (TODO: confirm).
var roundToMicros = cmp.Transformer("RoundToMicros",
	func(t time.Time) time.Time { return t.Round(time.Microsecond) })
// TestIntegration_InsertAndReadStructs round-trips TestStruct values through
// the streaming inserter and verifies them after reading back with it.Next.
func TestIntegration_InsertAndReadStructs(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	schema, err := InferSchema(TestStruct{})
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Values used to populate the two test rows.
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
	ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
	dtm := civil.DateTime{Date: d, Time: tm}
	d2 := civil.Date{Year: 1994, Month: 5, Day: 15}
	tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0}
	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
	dtm2 := civil.DateTime{Date: d2, Time: tm2}
	g := "POINT(-122.350220 47.649154)"
	g2 := "POINT(-122.0836791 37.421827)"
	// Populate the table.
	ins := table.Inserter()
	// First row sets every field via a positional literal (order must match
	// TestStruct's field order); second row sets only a subset, leaving the
	// remaining fields at their zero values.
	want := []*TestStruct{
		{
			"a",
			[]byte("byte"),
			42,
			3.14,
			true,
			ts,
			d,
			tm,
			dtm,
			big.NewRat(57, 100),
			g,
			[]string{"a", "b"},
			[]int64{1, 2},
			[]float64{1, 1.41},
			[]bool{true, false},
			[]time.Time{ts, ts2},
			[]civil.Date{d, d2},
			[]civil.Time{tm, tm2},
			[]civil.DateTime{dtm, dtm2},
			[]*big.Rat{big.NewRat(1, 2), big.NewRat(3, 5)},
			[]string{g, g2},
			SubTestStruct{
				"string",
				SubSubTestStruct{24},
				[]SubSubTestStruct{{1}, {2}},
			},
			[]SubTestStruct{
				{String: "empty"},
				{
					"full",
					SubSubTestStruct{1},
					[]SubSubTestStruct{{1}, {2}},
				},
			},
		},
		{
			Name:      "b",
			Bytes:     []byte("byte2"),
			Integer:   24,
			Float:     4.13,
			Boolean:   false,
			Timestamp: ts,
			Date:      d,
			Time:      tm,
			DateTime:  dtm,
			Numeric:   big.NewRat(4499, 10000),
		},
	}
	var savers []*StructSaver
	for _, s := range want {
		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
	}
	if err := ins.Put(ctx, savers); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Test iteration with structs.
	it := table.Read(ctx)
	var got []*TestStruct
	for {
		var g TestStruct
		err := it.Next(&g)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		got = append(got, &g)
	}
	// Sort by Name so row order matches want ("a" before "b").
	sort.Sort(byName(got))
	// BigQuery does not elide nils. It reports an error for nil fields.
	for i, g := range got {
		if i >= len(want) {
			t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
		} else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" {
			t.Errorf("%d: got=-, want=+:\n%s", i, diff)
		}
	}
}
// byName implements sort.Interface, ordering TestStructs by Name ascending.
type byName []*TestStruct

func (b byName) Len() int           { return len(b) }
func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// TestIntegration_InsertAndReadNullable round-trips a testStructNullable
// through the streaming inserter and a table read in three configurations:
// all-zero struct, explicitly invalidated Null* wrappers, and fully
// populated values.
func TestIntegration_InsertAndReadNullable(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
	cdt := civil.DateTime{Date: testDate, Time: ctm}
	rat := big.NewRat(33, 100)
	geo := "POINT(-122.198939 47.669865)"
	// Nil fields in the struct.
	testInsertAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
	// Explicitly invalidate the Null* types within the struct.
	testInsertAndReadNullable(t, testStructNullable{
		String:    NullString{Valid: false},
		Integer:   NullInt64{Valid: false},
		Float:     NullFloat64{Valid: false},
		Boolean:   NullBool{Valid: false},
		Timestamp: NullTimestamp{Valid: false},
		Date:      NullDate{Valid: false},
		Time:      NullTime{Valid: false},
		DateTime:  NullDateTime{Valid: false},
		Geography: NullGeography{Valid: false},
	},
		make([]Value, len(testStructNullableSchema)))
	// Populate the struct with values. The expected row mirrors the
	// struct fields in schema order.
	testInsertAndReadNullable(t, testStructNullable{
		String:    NullString{"x", true},
		Bytes:     []byte{1, 2, 3},
		Integer:   NullInt64{1, true},
		Float:     NullFloat64{2.3, true},
		Boolean:   NullBool{true, true},
		Timestamp: NullTimestamp{testTimestamp, true},
		Date:      NullDate{testDate, true},
		Time:      NullTime{ctm, true},
		DateTime:  NullDateTime{cdt, true},
		Numeric:   rat,
		Geography: NullGeography{geo, true},
		Record:    &subNullable{X: NullInt64{4, true}},
	},
		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, rat, geo, []Value{int64(4)}})
}
// testInsertAndReadNullable inserts ts into a fresh table, then verifies the
// stored row both as a raw []Value (against wantRow) and by reading it back
// into a testStructNullable (against ts itself).
func testInsertAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
	ctx := context.Background()
	table := newTable(t, testStructNullableSchema)
	defer table.Delete(ctx)
	// Populate the table.
	ins := table.Inserter()
	if err := ins.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
		t.Fatal(putError(err))
	}
	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// Read into a []Value.
	iter := table.Read(ctx)
	gotRows, _, _, err := readAll(iter)
	if err != nil {
		t.Fatal(err)
	}
	if len(gotRows) != 1 {
		t.Fatalf("got %d rows, want 1", len(gotRows))
	}
	// roundToMicros tolerates the service's microsecond precision on times.
	if diff := testutil.Diff(gotRows[0], wantRow, roundToMicros); diff != "" {
		t.Error(diff)
	}
	// Read into a struct.
	want := ts
	var sn testStructNullable
	it := table.Read(ctx)
	if err := it.Next(&sn); err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(sn, want, roundToMicros); diff != "" {
		t.Error(diff)
	}
}
// TestIntegration_TableUpdate exercises Table.Update: metadata changes with
// and without ETag preconditions, additive schema changes, and a set of
// schema changes the service must reject with HTTP 400.
func TestIntegration_TableUpdate(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Test Update of non-schema fields.
	tm, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantDescription := tm.Description + "more"
	wantName := tm.Name + "more"
	wantExpiration := tm.ExpirationTime.Add(time.Hour * 24)
	// Update with the current ETag so the write is conditional.
	got, err := table.Update(ctx, TableMetadataToUpdate{
		Description:    wantDescription,
		Name:           wantName,
		ExpirationTime: wantExpiration,
	}, tm.ETag)
	if err != nil {
		t.Fatal(err)
	}
	if got.Description != wantDescription {
		t.Errorf("Description: got %q, want %q", got.Description, wantDescription)
	}
	if got.Name != wantName {
		t.Errorf("Name: got %q, want %q", got.Name, wantName)
	}
	if got.ExpirationTime != wantExpiration {
		t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration)
	}
	if !testutil.Equal(got.Schema, schema) {
		t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
	}
	// Blind write succeeds.
	_, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "")
	if err != nil {
		t.Fatal(err)
	}
	// Write with old etag fails.
	_, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag)
	if err == nil {
		t.Fatal("Update with old ETag succeeded, wanted failure")
	}
	// Test schema update.
	// Columns can be added. schema2 is the same as schema, except for the
	// added column in the middle.
	nested := Schema{
		{Name: "nested", Type: BooleanFieldType},
		{Name: "other", Type: StringFieldType},
	}
	schema2 := Schema{
		schema[0],
		{Name: "rec2", Type: RecordFieldType, Schema: nested},
		schema[1],
		schema[2],
	}
	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "")
	if err != nil {
		t.Fatal(err)
	}
	// Wherever you add the column, it appears at the end.
	schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]}
	if !testutil.Equal(got.Schema, schema3) {
		t.Errorf("add field:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}
	// Updating with the empty schema succeeds, but is a no-op.
	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "")
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got.Schema, schema3) {
		t.Errorf("empty schema:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}
	// Error cases when updating schema. Each of these mutations is
	// disallowed by the service and must return HTTP 400.
	for _, test := range []struct {
		desc   string
		fields Schema
	}{
		{"change from optional to required", Schema{
			{Name: "name", Type: StringFieldType, Required: true},
			schema3[1],
			schema3[2],
			schema3[3],
		}},
		{"add a required field", Schema{
			schema3[0], schema3[1], schema3[2], schema3[3],
			{Name: "req", Type: StringFieldType, Required: true},
		}},
		{"remove a field", Schema{schema3[0], schema3[1], schema3[2]}},
		{"remove a nested field", Schema{
			schema3[0], schema3[1], schema3[2],
			{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
		{"remove all nested fields", Schema{
			schema3[0], schema3[1], schema3[2],
			{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
	} {
		_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "")
		if err == nil {
			t.Errorf("%s: want error, got nil", test.desc)
		} else if !hasStatusCode(err, 400) {
			t.Errorf("%s: want 400, got %v", test.desc, err)
		}
	}
}
// TestIntegration_Load loads CSV data from an in-memory reader into a table,
// checks that the job's reported configuration matches the loader's, and
// verifies the loaded rows.
func TestIntegration_Load(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	// CSV data can't be loaded into a repeated field, so we use a different schema.
	table := newTable(t, Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "nums", Type: IntegerFieldType},
	})
	defer table.Delete(ctx)
	// Load the table from a reader.
	r := strings.NewReader("a,0\nb,1\nc,2\n")
	wantRows := [][]Value{
		{"a", int64(0)},
		{"b", int64(1)},
		{"c", int64(2)},
	}
	rs := NewReaderSource(r)
	loader := table.LoaderFrom(rs)
	loader.WriteDisposition = WriteTruncate
	loader.Labels = map[string]string{"test": "go"}
	job, err := loader.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if job.LastStatus() == nil {
		t.Error("no LastStatus")
	}
	// The job's Config must round-trip to the LoadConfig we submitted.
	conf, err := job.Config()
	if err != nil {
		t.Fatal(err)
	}
	config, ok := conf.(*LoadConfig)
	if !ok {
		t.Fatalf("got %T, want LoadConfig", conf)
	}
	diff := testutil.Diff(config, &loader.LoadConfig,
		cmp.AllowUnexported(Table{}),
		cmpopts.IgnoreUnexported(Client{}, ReaderSource{}),
		// returned schema is at top level, not in the config
		cmpopts.IgnoreFields(FileConfig{}, "Schema"))
	if diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "reader load", table.Read(ctx), wantRows)
}
// TestIntegration_DML inserts rows via a DML statement (with retries for
// transient failures) and verifies the table contents.
func TestIntegration_DML(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec)
						VALUES ('a', [0], STRUCT<BOOL>(TRUE)),
							('b', [1], STRUCT<BOOL>(FALSE)),
							('c', [2], STRUCT<BOOL>(TRUE))`,
		table.DatasetID, table.TableID)
	if err := runDML(ctx, sql); err != nil {
		t.Fatal(err)
	}
	wantRows := [][]Value{
		{"a", []Value{int64(0)}, []Value{true}},
		{"b", []Value{int64(1)}, []Value{false}},
		{"c", []Value{int64(2)}, []Value{true}},
	}
	checkRead(t, "DML", table.Read(ctx), wantRows)
}
  1191. func runDML(ctx context.Context, sql string) error {
  1192. // Retry insert; sometimes it fails with INTERNAL.
  1193. return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
  1194. ri, err := client.Query(sql).Read(ctx)
  1195. if err != nil {
  1196. if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
  1197. return true, err // fail on 4xx
  1198. }
  1199. return false, err
  1200. }
  1201. // It is OK to try to iterate over DML results. The first call to Next
  1202. // will return iterator.Done.
  1203. err = ri.Next(nil)
  1204. if err == nil {
  1205. return true, errors.New("want iterator.Done on the first call, got nil")
  1206. }
  1207. if err == iterator.Done {
  1208. return true, nil
  1209. }
  1210. if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
  1211. return true, err // fail on 4xx
  1212. }
  1213. return false, err
  1214. })
  1215. }
// TestIntegration_TimeTypes round-trips DATE, TIME, DATETIME and TIMESTAMP
// values both through the streaming inserter and through a DML INSERT,
// then verifies both rows read back identically.
func TestIntegration_TimeTypes(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	dtSchema := Schema{
		{Name: "d", Type: DateFieldType},
		{Name: "t", Type: TimeFieldType},
		{Name: "dt", Type: DateTimeFieldType},
		{Name: "ts", Type: TimestampFieldType},
	}
	table := newTable(t, dtSchema)
	defer table.Delete(ctx)
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
	dtm := civil.DateTime{Date: d, Time: tm}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	wantRows := [][]Value{
		{d, tm, dtm, ts},
	}
	ins := table.Inserter()
	if err := ins.Put(ctx, []*ValuesSaver{
		{Schema: dtSchema, Row: wantRows[0]},
	}); err != nil {
		t.Fatal(putError(err))
	}
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}
	// SQL wants DATETIMEs with a space between date and time, but the service
	// returns them in RFC3339 form, with a "T" between.
	query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+
		"VALUES ('%s', '%s', '%s', '%s')",
		table.DatasetID, table.TableID,
		d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
	if err := runDML(ctx, query); err != nil {
		t.Fatal(err)
	}
	// Both the streamed row and the DML row hold the same values.
	wantRows = append(wantRows, wantRows[0])
	checkRead(t, "TimeTypes", table.Read(ctx), wantRows)
}
// TestIntegration_StandardQuery runs a series of standard-SQL SELECTs over
// literal values and checks that each scalar, array and struct result maps
// to the expected Go type.
func TestIntegration_StandardQuery(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	dtm := ts.Format("2006-01-02 15:04:05")
	// Constructs Value slices made up of int64s.
	ints := func(args ...int) []Value {
		vals := make([]Value, len(args))
		for i, arg := range args {
			vals[i] = int64(arg)
		}
		return vals
	}
	testCases := []struct {
		query   string
		wantRow []Value
	}{
		{"SELECT 1", ints(1)},
		{"SELECT 1.3", []Value{1.3}},
		{"SELECT CAST(1.3 AS NUMERIC)", []Value{big.NewRat(13, 10)}},
		{"SELECT NUMERIC '0.25'", []Value{big.NewRat(1, 4)}},
		{"SELECT TRUE", []Value{true}},
		{"SELECT 'ABC'", []Value{"ABC"}},
		{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
		{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
		{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
		{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
		{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}},
		{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
		{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
		{"SELECT (1, 2)", []Value{ints(1, 2)}},
		{"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}},
		{"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}},
		{"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}},
		{"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}},
		{"SELECT ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		it, err := q.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "StandardQuery", it, [][]Value{c.wantRow})
	}
}
// TestIntegration_LegacyQuery runs a series of legacy-SQL SELECTs over
// literal values. Note that in legacy SQL, DATE and TIME results come back
// as strings rather than civil types.
func TestIntegration_LegacyQuery(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	dtm := ts.Format("2006-01-02 15:04:05")
	testCases := []struct {
		query   string
		wantRow []Value
	}{
		{"SELECT 1", []Value{int64(1)}},
		{"SELECT 1.3", []Value{1.3}},
		{"SELECT TRUE", []Value{true}},
		{"SELECT 'ABC'", []Value{"ABC"}},
		{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
		{fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}},
		{fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}},
		{fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		q.UseLegacySQL = true
		it, err := q.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow})
	}
}
// TestIntegration_QueryParameters runs parameterized queries over a range of
// Go parameter types (scalars, times, structs, arrays), verifying both the
// query result and the parameter value echoed back in the job config.
func TestIntegration_QueryParameters(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008}
	rtm := tm
	rtm.Nanosecond = 3000 // round to microseconds
	dtm := civil.DateTime{Date: d, Time: tm}
	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
	rat := big.NewRat(13, 10)
	type ss struct {
		String string
	}
	type s struct {
		Timestamp      time.Time
		StringArray    []string
		SubStruct      ss
		SubStructArray []ss
	}
	testCases := []struct {
		query      string
		parameters []QueryParameter
		wantRow    []Value
		// wantConfig is the parameter value expected back from job.Config.
		wantConfig interface{}
	}{
		{
			"SELECT @val",
			[]QueryParameter{{"val", 1}},
			[]Value{int64(1)},
			int64(1),
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", 1.3}},
			[]Value{1.3},
			1.3,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", rat}},
			[]Value{rat},
			rat,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", true}},
			[]Value{true},
			true,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", "ABC"}},
			[]Value{"ABC"},
			"ABC",
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", []byte("foo")}},
			[]Value{[]byte("foo")},
			[]byte("foo"),
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", ts}},
			[]Value{ts},
			ts,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", []time.Time{ts, ts}}},
			[]Value{[]Value{ts, ts}},
			[]interface{}{ts, ts},
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", dtm}},
			[]Value{civil.DateTime{Date: d, Time: rtm}},
			civil.DateTime{Date: d, Time: rtm},
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", d}},
			[]Value{d},
			d,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", tm}},
			[]Value{rtm},
			rtm,
		},
		{
			"SELECT @val",
			[]QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}},
			map[string]interface{}{
				"Timestamp":   ts,
				"StringArray": []interface{}{"a", "b"},
				"SubStruct":   map[string]interface{}{"String": "c"},
				"SubStructArray": []interface{}{
					map[string]interface{}{"String": "d"},
					map[string]interface{}{"String": "e"},
				},
			},
		},
		{
			"SELECT @val.Timestamp, @val.SubStruct.String",
			[]QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}},
			[]Value{ts, "a"},
			map[string]interface{}{
				"Timestamp":      ts,
				"SubStruct":      map[string]interface{}{"String": "a"},
				"StringArray":    nil,
				"SubStructArray": nil,
			},
		},
	}
	for _, c := range testCases {
		q := client.Query(c.query)
		q.Parameters = c.parameters
		job, err := q.Run(ctx)
		if err != nil {
			t.Fatal(err)
		}
		if job.LastStatus() == nil {
			t.Error("no LastStatus")
		}
		it, err := job.Read(ctx)
		if err != nil {
			t.Fatal(err)
		}
		checkRead(t, "QueryParameters", it, [][]Value{c.wantRow})
		// The parameter value must survive the round trip through the
		// job configuration.
		config, err := job.Config()
		if err != nil {
			t.Fatal(err)
		}
		got := config.(*QueryConfig).Parameters[0].Value
		if !testutil.Equal(got, c.wantConfig) {
			t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)",
				c.parameters[0].Value, got, c.wantConfig)
		}
	}
}
  1482. func TestIntegration_QueryDryRun(t *testing.T) {
  1483. if client == nil {
  1484. t.Skip("Integration tests skipped")
  1485. }
  1486. ctx := context.Background()
  1487. q := client.Query("SELECT word from " + stdName + " LIMIT 10")
  1488. q.DryRun = true
  1489. job, err := q.Run(ctx)
  1490. if err != nil {
  1491. t.Fatal(err)
  1492. }
  1493. s := job.LastStatus()
  1494. if s.State != Done {
  1495. t.Errorf("state is %v, expected Done", s.State)
  1496. }
  1497. if s.Statistics == nil {
  1498. t.Fatal("no statistics")
  1499. }
  1500. if s.Statistics.Details.(*QueryStatistics).Schema == nil {
  1501. t.Fatal("no schema")
  1502. }
  1503. if s.Statistics.Details.(*QueryStatistics).TotalBytesProcessedAccuracy == "" {
  1504. t.Fatal("no cost accuracy")
  1505. }
  1506. }
// TestIntegration_ExtractExternal creates a table, extracts it to GCS as
// CSV, then queries that file both via a TableDefinitions query and via a
// table backed by an ExternalDataConfig, verifying rows and metadata.
func TestIntegration_ExtractExternal(t *testing.T) {
	// Create a table, extract it to GCS, then query it externally.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	schema := Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	}
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Insert table data.
	sql := fmt.Sprintf(`INSERT %s.%s (name, num)
						VALUES ('a', 1), ('b', 2), ('c', 3)`,
		table.DatasetID, table.TableID)
	if err := runDML(ctx, sql); err != nil {
		t.Fatal(err)
	}
	// Extract to a GCS object as CSV.
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	job, err := e.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// The job's Config must round-trip to the ExtractConfig we submitted.
	conf, err := job.Config()
	if err != nil {
		t.Fatal(err)
	}
	config, ok := conf.(*ExtractConfig)
	if !ok {
		t.Fatalf("got %T, want ExtractConfig", conf)
	}
	diff := testutil.Diff(config, &e.ExtractConfig,
		cmp.AllowUnexported(Table{}),
		cmpopts.IgnoreUnexported(Client{}))
	if diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	edc := &ExternalDataConfig{
		SourceFormat: CSV,
		SourceURIs:   []string{uri},
		Schema:       schema,
		Options: &CSVOptions{
			SkipLeadingRows: 1,
			// This is the default. Since we use edc as an expectation later on,
			// let's just be explicit.
			FieldDelimiter: ",",
		},
	}
	// Query that CSV file directly.
	q := client.Query("SELECT * FROM csv")
	q.TableDefinitions = map[string]ExternalData{"csv": edc}
	wantRows := [][]Value{
		{"a", int64(1)},
		{"b", int64(2)},
		{"c", int64(3)},
	}
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "external query", iter, wantRows)
	// Make a table pointing to the file, and query it.
	// BigQuery does not allow a Table.Read on an external table.
	table = dataset.Table(tableIDs.New())
	err = table.Create(context.Background(), &TableMetadata{
		Schema:             schema,
		ExpirationTime:     testTableExpiration,
		ExternalDataConfig: edc,
	})
	if err != nil {
		t.Fatal(err)
	}
	q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	iter, err = q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkReadAndTotalRows(t, "external table", iter, wantRows)
	// While we're here, check that the table metadata is correct.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// One difference: since BigQuery returns the schema as part of the ordinary
	// table metadata, it does not populate ExternalDataConfig.Schema.
	md.ExternalDataConfig.Schema = md.Schema
	if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" {
		t.Errorf("got=-, want=+\n%s", diff)
	}
}
  1608. func TestIntegration_ReadNullIntoStruct(t *testing.T) {
  1609. // Reading a null into a struct field should return an error (not panic).
  1610. if client == nil {
  1611. t.Skip("Integration tests skipped")
  1612. }
  1613. ctx := context.Background()
  1614. table := newTable(t, schema)
  1615. defer table.Delete(ctx)
  1616. ins := table.Inserter()
  1617. row := &ValuesSaver{
  1618. Schema: schema,
  1619. Row: []Value{nil, []Value{}, []Value{nil}},
  1620. }
  1621. if err := ins.Put(ctx, []*ValuesSaver{row}); err != nil {
  1622. t.Fatal(putError(err))
  1623. }
  1624. if err := waitForRow(ctx, table); err != nil {
  1625. t.Fatal(err)
  1626. }
  1627. q := client.Query(fmt.Sprintf("select name from %s", table.TableID))
  1628. q.DefaultProjectID = dataset.ProjectID
  1629. q.DefaultDatasetID = dataset.DatasetID
  1630. it, err := q.Read(ctx)
  1631. if err != nil {
  1632. t.Fatal(err)
  1633. }
  1634. type S struct{ Name string }
  1635. var s S
  1636. if err := it.Next(&s); err == nil {
  1637. t.Fatal("got nil, want error")
  1638. }
  1639. }
// Fully-qualified names for a public sample table, in standard and legacy
// SQL syntax respectively.
const (
	stdName = "`bigquery-public-data.samples.shakespeare`"
	legacyName = "[bigquery-public-data:samples.shakespeare]"
)
// These tests exploit the fact that the two SQL versions have different syntaxes for
// fully-qualified table names.
//
// Each case records which table-name syntax is used together with the
// UseStandardSQL/UseLegacySQL settings, and whether that combination must fail.
var useLegacySQLTests = []struct {
	t           string // name of table
	std, legacy bool   // use standard/legacy SQL
	err         bool   // do we expect an error?
}{
	{t: legacyName, std: false, legacy: true, err: false},
	{t: legacyName, std: true, legacy: false, err: true},
	{t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default
	{t: legacyName, std: true, legacy: true, err: true},
	{t: stdName, std: false, legacy: true, err: true},
	{t: stdName, std: true, legacy: false, err: false},
	{t: stdName, std: false, legacy: false, err: false}, // standard SQL is default
	{t: stdName, std: true, legacy: true, err: true},
}
  1660. func TestIntegration_QueryUseLegacySQL(t *testing.T) {
  1661. // Test the UseLegacySQL and UseStandardSQL options for queries.
  1662. if client == nil {
  1663. t.Skip("Integration tests skipped")
  1664. }
  1665. ctx := context.Background()
  1666. for _, test := range useLegacySQLTests {
  1667. q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
  1668. q.UseStandardSQL = test.std
  1669. q.UseLegacySQL = test.legacy
  1670. _, err := q.Read(ctx)
  1671. gotErr := err != nil
  1672. if gotErr && !test.err {
  1673. t.Errorf("%+v:\nunexpected error: %v", test, err)
  1674. } else if !gotErr && test.err {
  1675. t.Errorf("%+v:\nsucceeded, but want error", test)
  1676. }
  1677. }
  1678. }
// TestIntegration_TableUseLegacySQL runs each useLegacySQLTests case as a
// view creation and checks that errors occur exactly when expected.
func TestIntegration_TableUseLegacySQL(t *testing.T) {
	// Test UseLegacySQL and UseStandardSQL for Table.Create.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	for i, test := range useLegacySQLTests {
		view := dataset.Table(fmt.Sprintf("t_view_%d", i))
		tm := &TableMetadata{
			ViewQuery:      fmt.Sprintf("SELECT word from %s", test.t),
			UseStandardSQL: test.std,
			UseLegacySQL:   test.legacy,
		}
		err := view.Create(ctx, tm)
		gotErr := err != nil
		if gotErr && !test.err {
			t.Errorf("%+v:\nunexpected error: %v", test, err)
		} else if !gotErr && test.err {
			t.Errorf("%+v:\nsucceeded, but want error", test)
		}
		// Best-effort cleanup; the view may not exist if Create failed.
		_ = view.Delete(ctx)
	}
}
  1704. func TestIntegration_ListJobs(t *testing.T) {
  1705. // It's difficult to test the list of jobs, because we can't easily
  1706. // control what's in it. Also, there are many jobs in the test project,
  1707. // and it takes considerable time to list them all.
  1708. if client == nil {
  1709. t.Skip("Integration tests skipped")
  1710. }
  1711. ctx := context.Background()
  1712. // About all we can do is list a few jobs.
  1713. const max = 20
  1714. var jobs []*Job
  1715. it := client.Jobs(ctx)
  1716. for {
  1717. job, err := it.Next()
  1718. if err == iterator.Done {
  1719. break
  1720. }
  1721. if err != nil {
  1722. t.Fatal(err)
  1723. }
  1724. jobs = append(jobs, job)
  1725. if len(jobs) >= max {
  1726. break
  1727. }
  1728. }
  1729. // We expect that there is at least one job in the last few months.
  1730. if len(jobs) == 0 {
  1731. t.Fatal("did not get any jobs")
  1732. }
  1733. }
  1734. const tokyo = "asia-northeast1"
  1735. func TestIntegration_Location(t *testing.T) {
  1736. if client == nil {
  1737. t.Skip("Integration tests skipped")
  1738. }
  1739. client.Location = ""
  1740. testLocation(t, tokyo)
  1741. client.Location = tokyo
  1742. defer func() {
  1743. client.Location = ""
  1744. }()
  1745. testLocation(t, "")
  1746. }
// testLocation exercises dataset/table/job operations against a Tokyo
// dataset, with loc as the per-operation location ("" means rely on the
// client's Location). It checks that jobs land in tokyo, that JobFromID /
// JobFromIDLocation succeed or fail as appropriate, and that query, copy
// and extract all work with the given location setting.
func testLocation(t *testing.T, loc string) {
	ctx := context.Background()
	tokyoDataset := client.Dataset("tokyo")
	err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc})
	if err != nil && !hasStatusCode(err, 409) { // 409 = already exists
		t.Fatal(err)
	}
	md, err := tokyoDataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if md.Location != tokyo {
		t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo)
	}
	table := tokyoDataset.Table(tableIDs.New())
	err = table.Create(context.Background(), &TableMetadata{
		Schema: Schema{
			{Name: "name", Type: StringFieldType},
			{Name: "nums", Type: IntegerFieldType},
		},
		ExpirationTime: testTableExpiration,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer table.Delete(ctx)
	// Load CSV data; the resulting job must be located in tokyo.
	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n")))
	loader.Location = loc
	job, err := loader.Run(ctx)
	if err != nil {
		t.Fatal("loader.Run", err)
	}
	if job.Location() != tokyo {
		t.Fatalf("job location: got %s, want %s", job.Location(), tokyo)
	}
	// JobFromID uses the client's location; it should fail for a Tokyo job
	// when the client location is unset.
	_, err = client.JobFromID(ctx, job.ID())
	if client.Location == "" && err == nil {
		t.Error("JobFromID with Tokyo job, no client location: want error, got nil")
	}
	if client.Location != "" && err != nil {
		t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err)
	}
	// An explicit wrong location must fail.
	_, err = client.JobFromIDLocation(ctx, job.ID(), "US")
	if err == nil {
		t.Error("JobFromIDLocation with US: want error, got nil")
	}
	job2, err := client.JobFromIDLocation(ctx, job.ID(), loc)
	if loc == tokyo && err != nil {
		t.Errorf("loc=tokyo: %v", err)
	}
	if loc == "" && err == nil {
		t.Error("loc empty: got nil, want error")
	}
	if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) {
		t.Errorf("got id %s loc %s, want id%s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	// Cancel should succeed even if the job is done.
	if err := job.Cancel(ctx); err != nil {
		t.Fatal(err)
	}
	q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	q.Location = loc
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantRows := [][]Value{
		{"a", int64(0)},
		{"b", int64(1)},
		{"c", int64(2)},
	}
	checkRead(t, "location", iter, wantRows)
	// A copy within the Tokyo dataset must also work with this location.
	table2 := tokyoDataset.Table(tableIDs.New())
	copier := table2.CopierFrom(table)
	copier.Location = loc
	if _, err := copier.Run(ctx); err != nil {
		t.Fatal(err)
	}
	// Finally, extract to GCS with the same location setting.
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	e.Location = loc
	if _, err := e.Run(ctx); err != nil {
		t.Fatal(err)
	}
}
  1840. func TestIntegration_NumericErrors(t *testing.T) {
  1841. // Verify that the service returns an error for a big.Rat that's too large.
  1842. if client == nil {
  1843. t.Skip("Integration tests skipped")
  1844. }
  1845. ctx := context.Background()
  1846. schema := Schema{{Name: "n", Type: NumericFieldType}}
  1847. table := newTable(t, schema)
  1848. defer table.Delete(ctx)
  1849. tooBigRat := &big.Rat{}
  1850. if _, ok := tooBigRat.SetString("1e40"); !ok {
  1851. t.Fatal("big.Rat.SetString failed")
  1852. }
  1853. ins := table.Inserter()
  1854. err := ins.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
  1855. if err == nil {
  1856. t.Fatal("got nil, want error")
  1857. }
  1858. }
  1859. func TestIntegration_QueryErrors(t *testing.T) {
  1860. // Verify that a bad query returns an appropriate error.
  1861. if client == nil {
  1862. t.Skip("Integration tests skipped")
  1863. }
  1864. ctx := context.Background()
  1865. q := client.Query("blah blah broken")
  1866. _, err := q.Read(ctx)
  1867. const want = "invalidQuery"
  1868. if !strings.Contains(err.Error(), want) {
  1869. t.Fatalf("got %q, want substring %q", err, want)
  1870. }
  1871. }
// TestIntegration_Model exercises the BigQuery ML lifecycle: build a
// logistic-regression model from a table via DDL, run ml.PREDICT against
// it, confirm the model appears in the dataset's table listing, and
// delete it.
func TestIntegration_Model(t *testing.T) {
	// Create an ML model.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	schema := Schema{
		{Name: "input", Type: IntegerFieldType},
		{Name: "label", Type: IntegerFieldType},
	}
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Insert table data.
	tableName := fmt.Sprintf("%s.%s", table.DatasetID, table.TableID)
	sql := fmt.Sprintf(`INSERT %s (input, label)
		VALUES (1, 0), (2, 1), (3, 0), (4, 1)`,
		tableName)
	// ml.PREDICT yields one output row per input row, so the prediction
	// result below is expected to match the number of rows inserted here.
	wantNumRows := 4
	if err := runDML(ctx, sql); err != nil {
		t.Fatal(err)
	}
	// Models live in a dataset alongside tables, so a Table handle is
	// used to address (and later delete) the model.
	model := dataset.Table("my_model")
	modelName := fmt.Sprintf("%s.%s", model.DatasetID, model.TableID)
	sql = fmt.Sprintf(`CREATE MODEL %s OPTIONS (model_type='logistic_reg') AS SELECT input, label FROM %s`,
		modelName, tableName)
	if err := runDML(ctx, sql); err != nil {
		t.Fatal(err)
	}
	defer model.Delete(ctx)
	// Run a prediction query over the training table itself.
	sql = fmt.Sprintf(`SELECT * FROM ml.PREDICT(MODEL %s, TABLE %s)`, modelName, tableName)
	q := client.Query(sql)
	ri, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	rows, _, _, err := readAll(ri)
	if err != nil {
		t.Fatal(err)
	}
	if got := len(rows); got != wantNumRows {
		t.Fatalf("got %d rows in prediction table, want %d", got, wantNumRows)
	}
	// The model should be visible when listing the dataset's tables.
	iter := dataset.Tables(ctx)
	seen := false
	for {
		tbl, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if tbl.TableID == "my_model" {
			seen = true
		}
	}
	if !seen {
		t.Fatal("model not listed in dataset")
	}
	// Explicit delete (in addition to the deferred one) verifies that
	// deleting a model through a Table handle succeeds.
	if err := model.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}
  1935. // Creates a new, temporary table with a unique name and the given schema.
  1936. func newTable(t *testing.T, s Schema) *Table {
  1937. table := dataset.Table(tableIDs.New())
  1938. err := table.Create(context.Background(), &TableMetadata{
  1939. Schema: s,
  1940. ExpirationTime: testTableExpiration,
  1941. })
  1942. if err != nil {
  1943. t.Fatal(err)
  1944. }
  1945. return table
  1946. }
  1947. func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
  1948. if msg2, ok := compareRead(it, want, false); !ok {
  1949. t.Errorf("%s: %s", msg, msg2)
  1950. }
  1951. }
  1952. func checkReadAndTotalRows(t *testing.T, msg string, it *RowIterator, want [][]Value) {
  1953. if msg2, ok := compareRead(it, want, true); !ok {
  1954. t.Errorf("%s: %s", msg, msg2)
  1955. }
  1956. }
  1957. func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) {
  1958. got, _, totalRows, err := readAll(it)
  1959. if err != nil {
  1960. return err.Error(), false
  1961. }
  1962. if len(got) != len(want) {
  1963. return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false
  1964. }
  1965. if compareTotalRows && len(got) != int(totalRows) {
  1966. return fmt.Sprintf("got %d rows, but totalRows = %d", len(got), totalRows), false
  1967. }
  1968. sort.Sort(byCol0(got))
  1969. for i, r := range got {
  1970. gotRow := []Value(r)
  1971. wantRow := want[i]
  1972. if !testutil.Equal(gotRow, wantRow) {
  1973. return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false
  1974. }
  1975. }
  1976. return "", true
  1977. }
  1978. func readAll(it *RowIterator) ([][]Value, Schema, uint64, error) {
  1979. var (
  1980. rows [][]Value
  1981. schema Schema
  1982. totalRows uint64
  1983. )
  1984. for {
  1985. var vals []Value
  1986. err := it.Next(&vals)
  1987. if err == iterator.Done {
  1988. return rows, schema, totalRows, nil
  1989. }
  1990. if err != nil {
  1991. return nil, nil, 0, err
  1992. }
  1993. rows = append(rows, vals)
  1994. schema = it.Schema
  1995. totalRows = it.TotalRows
  1996. }
  1997. }
  1998. type byCol0 [][]Value
  1999. func (b byCol0) Len() int { return len(b) }
  2000. func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
  2001. func (b byCol0) Less(i, j int) bool {
  2002. switch a := b[i][0].(type) {
  2003. case string:
  2004. return a < b[j][0].(string)
  2005. case civil.Date:
  2006. return a.Before(b[j][0].(civil.Date))
  2007. default:
  2008. panic("unknown type")
  2009. }
  2010. }
  2011. func hasStatusCode(err error, code int) bool {
  2012. if e, ok := err.(*googleapi.Error); ok && e.Code == code {
  2013. return true
  2014. }
  2015. return false
  2016. }
  2017. // wait polls the job until it is complete or an error is returned.
  2018. func wait(ctx context.Context, job *Job) error {
  2019. status, err := job.Wait(ctx)
  2020. if err != nil {
  2021. return err
  2022. }
  2023. if status.Err() != nil {
  2024. return fmt.Errorf("job status error: %#v", status.Err())
  2025. }
  2026. if status.Statistics == nil {
  2027. return errors.New("nil Statistics")
  2028. }
  2029. if status.Statistics.EndTime.IsZero() {
  2030. return errors.New("EndTime is zero")
  2031. }
  2032. if status.Statistics.Details == nil {
  2033. return errors.New("nil Statistics.Details")
  2034. }
  2035. return nil
  2036. }
  2037. // waitForRow polls the table until it contains a row.
  2038. // TODO(jba): use internal.Retry.
  2039. func waitForRow(ctx context.Context, table *Table) error {
  2040. for {
  2041. it := table.Read(ctx)
  2042. var v []Value
  2043. err := it.Next(&v)
  2044. if err == nil {
  2045. return nil
  2046. }
  2047. if err != iterator.Done {
  2048. return err
  2049. }
  2050. time.Sleep(1 * time.Second)
  2051. }
  2052. }
  2053. func putError(err error) string {
  2054. pme, ok := err.(PutMultiError)
  2055. if !ok {
  2056. return err.Error()
  2057. }
  2058. var msgs []string
  2059. for _, err := range pme {
  2060. msgs = append(msgs, err.Error())
  2061. }
  2062. return strings.Join(msgs, "\n")
  2063. }