Compare commits

...

184 Commits

Author SHA1 Message Date
  Fionera 610dd0b2c1 test 3 years ago
  JustAnotherArchivist a99d3146da Fix go.mod: replace for package fork and zstd dependency 3 years ago
  JustAnotherArchivist 19687483c2 Replace transfer.sh-web 3 years ago
  JustAnotherArchivist 11aec07c2e Prevent sending empty Content-Type header 3 years ago
  JustAnotherArchivist 40e2cf850e On-the-fly Zstd decompression 3 years ago
  JustAnotherArchivist ab7a007ed2 Disable file preview page 3 years ago
  Andrea Spacca ba21621d66 bump frontend 3 years ago
  Andrea Spacca 7cf688e86e fix bug in purge for local storage 3 years ago
  Andrea Spacca 1abc7cfb0e bump version 3 years ago
  Andrea Spacca be6dea2546 Merge pull request #339 from dutchcoders/PR-298 3 years ago
  Andrea Spacca d554eb8e2d clone url 3 years ago
  Andrea Spacca fe9e699255 Merge pull request #328 from dutchcoders/fix-readme-build-instructions 3 years ago
  Andrea Spacca d3b6fd5e90 update disclaimer 3 years ago
  Andrea Spacca 322310d47f fix bug in gdrive purge 3 years ago
  Andrea Spacca 76fd83b599 Merge pull request #337 from dutchcoders/PURGE_FEATURE 3 years ago
  Andrea Spacca d3381a2293 GOFMT 3 years ago
  Andrea Spacca 6ac6c8fa99 PURGE FEATURE 3 years ago
  Andrea Spacca 2721ece759 Merge pull request #336 from dutchcoders/ISSUE-332 3 years ago
  Andrea Spacca e53d599e09 ISSUE-332 3 years ago
  Andrea Spacca bfb34634de Merge pull request #266 from stefanbenten/master 3 years ago
  Stefan Benten 614a7fd23d Merge remote-tracking branch 'dutchcoders/master' 3 years ago
  Andrea Spacca fb8f0e781b Merge pull request #334 from dutchcoders/REMOVE_FUZZIT 3 years ago
  Andrea Spacca 06d87eac8e Remove fuzzit 3 years ago
  Andrea Spacca 580cd8d978 Remove fuzzit 3 years ago
  Stefan Benten 111f1794fb Update dependencies to latest versions and tidy go.mod/go.sum 3 years ago
  Stefan Benten b372dc97b5 Merge remote-tracking branch 'dutchcoders/master' 3 years ago
  Andrea Spacca ae00f8ceaa Merge pull request #331 from zuh0/official-docker-buildx-ghaction 3 years ago
  Gabriel Duque ec9db7a42e ci: switch to official docker buildx GitHub action 3 years ago
  Andrea Spacca bcf40c15a2 Merge pull request #330 from stillwondering/show-version-in-version-command 3 years ago
  stillwondering fe1cde6614 Include version string in version command output 3 years ago
  Andrea Spacca e182376100 Update README.md 3 years ago
  Andrea Spacca 4a2fce4344 Update README.md 3 years ago
  Andrea Spacca ffb2cf0011 Merge pull request #317 from zuh0/multi-architecture-docker-images 3 years ago
  Gabriel Duque 085e9f8414 ci: deploy multi-architecture Docker images using buildx 3 years ago
  Andrea Spacca a4bf102d29 Merge pull request #315 from dutchcoders/revert-314-ISSUE-313-BUILDX 3 years ago
  Andrea Spacca c8497fb27d Revert "ISSUE-313-BUILDX" 3 years ago
  Andrea Spacca 68212d100e Merge pull request #314 from dutchcoders/ISSUE-313-BUILDX 3 years ago
  Andrea Spacca 18f4b42cf1 ISSUE-313-BUILDX 3 years ago
  Andrea Spacca 92055f1b3c Merge pull request #274 from eksoverzero/os_and_arch 3 years ago
  Andrea Spacca 9430e53689 Merge pull request #307 from cheeseandcereal/add_proxy_port 3 years ago
  Andrea Spacca a26b32dd86 Merge pull request #311 from cheeseandcereal/crypto_seed 3 years ago
  Adam Crowder 0a6b5817a9 use cryptographically secure rng seed 3 years ago
  Andrea Spacca 5e7e3a1b39 v1.1.7 4 years ago
  Andrea Spacca 42adceb4c6 Merge pull request #309 from cheeseandcereal/fix_arbitrary_reading 4 years ago
  Adam Crowder f909ad3ce2 fix missing metadata security vuln 4 years ago
  Adam Crowder d830bf1afc revert println 4 years ago
  Adam Crowder f366e8217e add proxy-port for port override when running behind a proxy 4 years ago
  Andrea Spacca 8a5c737140 UPDATE README.md 4 years ago
  Andrea Spacca b920eb842a Merge pull request #304 from tofuSCHNITZEL/master 4 years ago
  Tobias Perschon 45e0967a37 Update cmd.go 4 years ago
  Andrea Spacca 28614c991d Merge pull request #297 from dutchcoders/ISSUE-296 4 years ago
  Andrea Spacca ef28bcb28f ISSUE-296 add CORS 4 years ago
  Andrea Spacca 2dd23bff3c Merge pull request #291 from dutchcoders/ISSUE-278 4 years ago
  Andrea Spacca 663c59e754 ISSUE 278 4 years ago
  stefanbenten c89b7f56c2 use general errors package 4 years ago
  stefanbenten 8bb5094ccc handle obscure Errors properly 4 years ago
  stefanbenten b7deec505b Update Dependencies to Release Version and Interface Compliance 4 years ago
  stefanbenten c62e5757ef Merge remote-tracking branch 'origin/master' 4 years ago
  Andrea Spacca 38d643e12b fix fuzzy 4 years ago
  Andrea Spacca 0411db0443 Min go 1.13 required 4 years ago
  Andrea Spacca 3b1b5b890a Removed contentType from storage, rely on metadata 4 years ago
  stefanbenten fb3d410004 Update Dependencies to latest RC Candidate 4 years ago
  stefanbenten 924275e45b Code Review Tweaks 4 years ago
  stefanbenten fdca501681 Fix Typo's 4 years ago
  stefanbenten 38996c9c7f Update to RC 4 years ago
  stefanbenten 9f9e1b66b2 CR Changes 4 years ago
  stefanbenten b3f2319cba Further Cleanup 4 years ago
  stefanbenten 867471aa9c Clean up documentation and remove unnecessary flag 4 years ago
  stefanbenten 96fb3cbe61 Commit the upload 4 years ago
  stefanbenten 800dd6658e Remove old and reduce dependencies 4 years ago
  Andrea Spacca 2e33f7c716 v1.1.3 4 years ago
  Andrea Spacca 5fab83bde8 v1.1.3 4 years ago
  Stefan Benten dcaf572f3d Remove Error Wrapping and update Dependencies 4 years ago
  Stefan Benten ce4e015641 Adding Setup Instructions to README 4 years ago
  Stefan Benten 9c15248adc Merge pull request #2 from dutchcoders/master 4 years ago
  Stefan Benten d8e1248949 Further Doc Polishing 4 years ago
  Stefan Benten 7dc3982c77 Adding missing IsNotExist Function and proper Path Handling 4 years ago
  Stefan Benten 82617a25c1 Change CLI Flags to match 4 years ago
  Stefan Benten fcbddcee17 Switch to Scope Handling 4 years ago
  Andrea Spacca 8b92f5bac4 Merge pull request #275 from thanakijwanavit/patch-1 4 years ago
  Nic Wanavit 8aa835f21f add google drive instruction in readme 4 years ago
  Steven Eksteen 9297c253aa Remove explicit OS and Arch builds 4 years ago
  stefanbenten b22a410a78 Actually append Config to Uplink Creation 4 years ago
  stefanbenten 229482fa3d Cleanup Dependencies 4 years ago
  stefanbenten f22bfbf804 Merge remote-tracking branch 'origin/master' 4 years ago
  stefanbenten 8ecc94ed7c Merge remote-tracking branch 'origin/master' 4 years ago
  stefanbenten 0ee2dfd7e1 Update Dependencies 4 years ago
  Andrea Spacca b90bd685e7 Merge pull request #269 from dutchcoders/FIX-BUILD 4 years ago
  Andrea Spacca 73e2bca999 FIX BUILD 4 years ago
  Andrea Spacca d09f4fdd81 FIX BUILD 4 years ago
  Andrea Spacca 356412c838 FIX BUILD 4 years ago
  Andrea Spacca 6b249d21c4 FIX BUILD 4 years ago
  Andrea Spacca c0d5d99e5d FIX BUILD 4 years ago
  Andrea Spacca 9e27485bbb FIX BUILD 4 years ago
  Andrea Spacca b8ca25fb9b FIX BUILD 4 years ago
  Andrea Spacca bef766f605 FIX BUILD 4 years ago
  Stefan Benten c2a152c622 Revert 1.13 Build Check 4 years ago
  stefanbenten c403f9a2f1 Update Dependencies and use go 1.13 4 years ago
  stefanbenten 8d196039ba Roll back to keep dependencies compatible 4 years ago
  stefanbenten 11ad244584 Update README 4 years ago
  stefanbenten 060dbdf152 Add Ability to run against local test-network 4 years ago
  Andrea Spacca 2127b4f1e4 Merge pull request #267 from dutchcoders/fuzz 4 years ago
  Andrea Spacca 8c5ef8f2e1 Fuzz local storage test 4 years ago
  Book Moons b40e9d1fb8 fuzz: Qualify target ID 4 years ago
  Book Moons 071ecb491c fuzz: Remove API key requirement 4 years ago
  Book Moons 806006a0b8 Add Fuzzit badge 4 years ago
  Book Moons 76f00c5d04 Configure Fuzzit 4 years ago
  Book Moons 6b148d3574 Define fuzz targets 4 years ago
  stefanbenten 6eabb248da Rename variable for consistency 4 years ago
  stefanbenten 5bb0e84fa5 Wrap Errors and rename class 4 years ago
  Stefan Benten 1e815d382c Merge pull request #1 from stefanbenten/storj-storage-adapter 4 years ago
  stefanbenten cf00af36f2 Changing Error Handling and increase Logging 4 years ago
  stefanbenten 79c5130066 Changes according to CR 4 years ago
  stefanbenten c1691c58dc Switch to Salted Key Generation 4 years ago
  stefanbenten d5e07fe2f4 Add Env Vars 4 years ago
  stefanbenten 874268d80e Add Storj Storage Adapter - add Binding 4 years ago
  stefanbenten 59014f2110 Add Storj Storage Adapter - Base Functions 4 years ago
  Andrea Spacca 6c54ddccef Merge pull request #250 from n8225/fixS3MultiPartUpload 4 years ago
  n8225 a2ab63c7b8 Remove commented debug line. 4 years ago
  Andrea Spacca 11dffa22b4 v1.1.2 4 years ago
  Andrea Spacca 758cd881ce Merge pull request #262 from dutchcoders/BINARY-RELEASES 4 years ago
  Andrea Spacca f00209973e BINARY RELEASES 4 years ago
  Andrea Spacca 0b27adba9d Merge pull request #261 from dutchcoders/ISSUE-256 4 years ago
  Andrea Spacca 8463d4169b Merge pull request #260 from dopessoa/ISSUE-257 4 years ago
  Andrea Spacca e340ad9e45 Merge pull request #259 from dopessoa/ISSUE-230 4 years ago
  Andrea Spacca 4e288cc4e4 Remove go1.9 4 years ago
  Andrea Spacca ee7011d55b ISSUE-256 4 years ago
  Douglas Pessoa 6c73f13fd0 Fixes "mkdir /basedir: no such file or directory" by creating the directory recursively 4 years ago
  Douglas Pessoa e5b5ee7140 Create examples.md and update README.md 4 years ago
  n8225 6a956a445f rmv s3-part-size from NewS3Storage call. 4 years ago
  n8225 ce7e008bb3 Remove maxUploadParts and PartSize options 4 years ago
  Andrea Spacca b9a03ac180 Merge pull request #254 from randomwangran/master 4 years ago
  Ran Wang cccbaac6d7 fix typo 4 years ago
  Andrea Spacca a09bcf3bf0 Merge pull request #251 from different55/patch-1 4 years ago
  Different55 0b23354473 Fix fish alias for busybox systems 4 years ago
  nrew225 372ff5f368 Fix s3 100MB max upload issue. 4 years ago
  Andrea Spacca ce043efca0 Merge pull request #249 from n8225/addS3PathStyle 4 years ago
  nrew225 6cb2de3cff Add support for S3 path style URLS. 4 years ago
  Andrea Spacca 58edf68fd7 no need for abs path flag param 4 years ago
  Andrea Spacca 2e41852860 fix resolved url 4 years ago
  Andrea Spacca 2536b39220 Merge pull request #247 from dutchcoders/ISSUE-238 4 years ago
  Andrea Spacca 82fc38856d Merge pull request #246 from dutchcoders/ISSUE-240 4 years ago
  Andrea Spacca 682a2bd3d8 ISSUE-238 fix version 4 years ago
  Andrea Spacca 1809ffe8b5 ISSUE-240 negative max metadata instead of big enough 4 years ago
  Andrea Spacca 8a3b87c9cf Merge pull request #245 from dutchcoders/ISSUE-241 4 years ago
  Andrea Spacca 2ee646590d ISSUE-241 check metadata on preview 4 years ago
  Andrea Spacca 4a1697ee60 error checking 4 years ago
  Andrea Spacca 6fa94d6631 Merge pull request #243 from dutchcoders/ISSUE-242 4 years ago
  Andrea Spacca 2cebb7520c ISSUE-242 just some small refactoring 4 years ago
  Andrea Spacca cc1a1b8487 Merge pull request #244 from computeralex92/ISSUE-242 4 years ago
  Alexander Lauster e5e411cdaa Add new options to Readme 4 years ago
  Alexander Lauster cb17b4a1cc Remove default endpoint because S3 SDK is automatically set the correct one based on the region 4 years ago
  Alexander Lauster 6e03965c8b Merge branch 'ISSUE-242' of https://github.com/dutchcoders/transfer.sh into ISSUE-242 4 years ago
  Andrea Spacca c072345869 ISSUE-242 fix head/get 4 years ago
  Alexander Lauster 47d4907a06 Add option to set the S3 region to support other regions as eu-west-1 4 years ago
  Alexander Lauster 818e5bc444 Add SSL CA Certificates to the image to allow access to S3 Buckets with active HTTPS 4 years ago
  Andrea Spacca 9355bab9d4 ISSUE-242 fix forgotten dead code 4 years ago
  Andrea Spacca 703987c516 ISSUE-242 4 years ago
  Andrea Spacca 14c48cb4d8 Merge pull request #234 from dopessoa/ISSUE-229 4 years ago
  Andrea Spacca b34bec004e Merge pull request #231 from dopessoa/ISSUE-219 4 years ago
  Douglas Pessoa b8faedc92f Add X-Remaining-Downloads and X-Remaining-Days headers to HEAD and GET responses 4 years ago
  Andrea Spacca 7d5611da94 Merge pull request #236 from JustAnotherArchivist/fix-filename-spaces 4 years ago
  JustAnotherArchivist 568f917c67 Fix encoding of spaces in filenames from + to %20 4 years ago
  Douglas Pessoa 8c91a2284a Update Dockerfile generating smaller image 4 years ago
  Andrea Spacca b3d7af058f bump transfer.sh-web 4 years ago
  Andrea Spacca 46c53c1cab Merge pull request #228 from dutchcoders/ISSUE-221 4 years ago
  Andrea Spacca e8ccf543f6 ISSUE-221 4 years ago
  Andrea Spacca db38a31b26 Merge pull request #224 from dutchcoders/ISSUE-223 4 years ago
  Andrea Spacca 464fc37d68 Merge pull request #227 from sente/master 5 years ago
  Stuart Powers 75ea4c8d5f Fix the bash functions to handle files with spaces 5 years ago
  Andrea Spacca d16dc4298f Merge pull request #225 from dopessoa/ISSUE-177 5 years ago
  Andrea Spacca 6ac8ae20e3 ISSUE-223 5 years ago
  Douglas Pessoa 10a332b672 Document supported headers (#177) 5 years ago
  Andrea Spacca e7380750f8 ISSUE-223 5 years ago
  Andrea Spacca a4e4cafc30 ISSUE-223 5 years ago
  Andrea Spacca 415d23393d ISSUE-223 5 years ago
  Andrea Spacca 2e64726628 ISSUE-223 5 years ago
  Andrea Spacca 4d88a2fbc4 ISSUE-223 5 years ago
  Andrea Spacca 9148f20aee ISSUE-203 5 years ago
  Andrea Spacca cc401433a6 ISSUE-203 5 years ago
  Andrea Spacca 5bbafa9ed7 Add disclaimer 5 years ago
  Andrea Spacca 61a4b4fe33 ISSUE-203 5 years ago
  Andrea Spacca 314d24a876 Add disclaimer 5 years ago
  Andrea Spacca 6653cfd251 Merge pull request #218 from dutchcoders/ISSUE-217 5 years ago
100 changed files with 1811 additions and 59042 deletions
  1. +89 -0 .github/workflows/build-docker-images.yml
  2. +36 -7 .travis.yml
  3. +16 -6 Dockerfile
  4. +129 -83 README.md
  5. +205 -63 cmd/cmd.go
  6. +176 -0 examples.md
  7. +35 -0 go.mod
  8. +280 -0 go.sum
  9. +0 -203 lock.json
  10. +261 -91 server/handlers.go
  11. +212 -0 server/ip_filter.go
  12. +96 -2 server/server.go
  13. +266 -170 server/storage.go
  14. +10 -41 server/utils.go
  15. +0 -15 vendor/cloud.google.com/go/AUTHORS
  16. +0 -1122 vendor/cloud.google.com/go/CHANGES.md
  17. +0 -44 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
  18. +0 -234 vendor/cloud.google.com/go/CONTRIBUTING.md
  19. +0 -40 vendor/cloud.google.com/go/CONTRIBUTORS
  20. +0 -202 vendor/cloud.google.com/go/LICENSE
  21. +0 -504 vendor/cloud.google.com/go/README.md
  22. +0 -47 vendor/cloud.google.com/go/RELEASING.md
  23. +0 -249 vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go
  24. +0 -75 vendor/cloud.google.com/go/asset/apiv1beta1/asset_client_example_test.go
  25. +0 -89 vendor/cloud.google.com/go/asset/apiv1beta1/doc.go
  26. +0 -266 vendor/cloud.google.com/go/asset/apiv1beta1/mock_test.go
  27. +0 -248 vendor/cloud.google.com/go/asset/v1beta1/asset_client.go
  28. +0 -75 vendor/cloud.google.com/go/asset/v1beta1/asset_client_example_test.go
  29. +0 -89 vendor/cloud.google.com/go/asset/v1beta1/doc.go
  30. +0 -266 vendor/cloud.google.com/go/asset/v1beta1/mock_test.go
  31. +0 -73 vendor/cloud.google.com/go/authexample_test.go
  32. +0 -8 vendor/cloud.google.com/go/bigquery/benchmarks/README.md
  33. +0 -85 vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
  34. +0 -10 vendor/cloud.google.com/go/bigquery/benchmarks/queries.json
  35. +0 -162 vendor/cloud.google.com/go/bigquery/bigquery.go
  36. +0 -26493 vendor/cloud.google.com/go/bigquery/bigquery.replay
  37. +0 -107 vendor/cloud.google.com/go/bigquery/copy.go
  38. +0 -163 vendor/cloud.google.com/go/bigquery/copy_test.go
  39. +0 -536 vendor/cloud.google.com/go/bigquery/dataset.go
  40. +0 -326 vendor/cloud.google.com/go/bigquery/dataset_test.go
  41. +0 -67 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go
  42. +0 -625 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
  43. +0 -289 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go
  44. +0 -90 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go
  45. +0 -1146 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go
  46. +0 -135 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go
  47. +0 -310 vendor/cloud.google.com/go/bigquery/doc.go
  48. +0 -83 vendor/cloud.google.com/go/bigquery/error.go
  49. +0 -109 vendor/cloud.google.com/go/bigquery/error_test.go
  50. +0 -829 vendor/cloud.google.com/go/bigquery/examples_test.go
  51. +0 -400 vendor/cloud.google.com/go/bigquery/external.go
  52. +0 -143 vendor/cloud.google.com/go/bigquery/external_test.go
  53. +0 -110 vendor/cloud.google.com/go/bigquery/extract.go
  54. +0 -116 vendor/cloud.google.com/go/bigquery/extract_test.go
  55. +0 -137 vendor/cloud.google.com/go/bigquery/file.go
  56. +0 -98 vendor/cloud.google.com/go/bigquery/file_test.go
  57. +0 -75 vendor/cloud.google.com/go/bigquery/gcs.go
  58. +0 -238 vendor/cloud.google.com/go/bigquery/inserter.go
  59. +0 -210 vendor/cloud.google.com/go/bigquery/inserter_test.go
  60. +0 -2217 vendor/cloud.google.com/go/bigquery/integration_test.go
  61. +0 -222 vendor/cloud.google.com/go/bigquery/iterator.go
  62. +0 -362 vendor/cloud.google.com/go/bigquery/iterator_test.go
  63. +0 -830 vendor/cloud.google.com/go/bigquery/job.go
  64. +0 -95 vendor/cloud.google.com/go/bigquery/job_test.go
  65. +0 -153 vendor/cloud.google.com/go/bigquery/load.go
  66. +0 -298 vendor/cloud.google.com/go/bigquery/load_test.go
  67. +0 -348 vendor/cloud.google.com/go/bigquery/nulls.go
  68. +0 -75 vendor/cloud.google.com/go/bigquery/nulls_test.go
  69. +0 -38 vendor/cloud.google.com/go/bigquery/oc_test.go
  70. +0 -370 vendor/cloud.google.com/go/bigquery/params.go
  71. +0 -385 vendor/cloud.google.com/go/bigquery/params_test.go
  72. +0 -328 vendor/cloud.google.com/go/bigquery/query.go
  73. +0 -408 vendor/cloud.google.com/go/bigquery/query_test.go
  74. +0 -56 vendor/cloud.google.com/go/bigquery/random.go
  75. +0 -233 vendor/cloud.google.com/go/bigquery/read_test.go
  76. +0 -518 vendor/cloud.google.com/go/bigquery/schema.go
  77. +0 -1044 vendor/cloud.google.com/go/bigquery/schema_test.go
  78. +0 -255 vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client.go
  79. +0 -132 vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client_example_test.go
  80. +0 -89 vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/doc.go
  81. +0 -452 vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/mock_test.go
  82. +0 -629 vendor/cloud.google.com/go/bigquery/table.go
  83. +0 -372 vendor/cloud.google.com/go/bigquery/table_test.go
  84. +0 -892 vendor/cloud.google.com/go/bigquery/value.go
  85. +0 -1240 vendor/cloud.google.com/go/bigquery/value_test.go
  86. +0 -1133 vendor/cloud.google.com/go/bigtable/admin.go
  87. +0 -578 vendor/cloud.google.com/go/bigtable/admin_test.go
  88. +0 -914 vendor/cloud.google.com/go/bigtable/bigtable.go
  89. +0 -1255 vendor/cloud.google.com/go/bigtable/bigtable_test.go
  90. +0 -83 vendor/cloud.google.com/go/bigtable/bttest/example_test.go
  91. +0 -1406 vendor/cloud.google.com/go/bigtable/bttest/inmem.go
  92. +0 -1193 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
  93. +0 -1614 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
  94. +0 -173 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go
  95. +0 -425 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go
  96. +0 -215 vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy.go
  97. +0 -196 vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy_test.go
  98. +0 -52 vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go
  99. +0 -205 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
  100. +0 -155 vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go

.github/workflows/build-docker-images.yml (+89 -0)

@@ -0,0 +1,89 @@
name: Deploy multi-architecture Docker images for transfer.sh with buildx

on:
  schedule:
    - cron: '0 0 * * *' # everyday at midnight UTC
  pull_request:
    branches: master
  push:
    branches: master
    tags:
      - v*

jobs:
  buildx:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Prepare
        id: prepare
        run: |
          DOCKER_IMAGE=dutchcoders/transfer.sh
          DOCKER_PLATFORMS=linux/amd64,linux/arm/v7,linux/arm64,linux/386
          VERSION=edge

          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/v}
          fi

          if [ "${{ github.event_name }}" = "schedule" ]; then
            VERSION=nightly
          fi

          TAGS="--tag ${DOCKER_IMAGE}:${VERSION}"

          if [ $VERSION = edge -o $VERSION = nightly ]; then
            TAGS="$TAGS --tag ${DOCKER_IMAGE}:latest"
          fi

          echo ::set-output name=docker_image::${DOCKER_IMAGE}
          echo ::set-output name=version::${VERSION}
          echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \
            --build-arg VERSION=${VERSION} \
            --build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
            --build-arg VCS_REF=${GITHUB_SHA::8} \
            ${TAGS} .
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all
      -
        name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest
      -
        name: Available platforms
        run: echo ${{ steps.buildx.outputs.platforms }}
      -
        name: Docker Buildx (build)
        run: |
          docker buildx build --no-cache --pull --output "type=image,push=false" ${{ steps.prepare.outputs.buildx_args }}
      -
        name: Docker Login
        if: success() && github.event_name != 'pull_request'
        env:
          DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
          DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username "${DOCKER_USERNAME}" --password-stdin
      -
        name: Docker Buildx (push)
        if: success() && github.event_name != 'pull_request'
        run: |
          docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }}
      -
        name: Docker Check Manifest
        if: always() && github.event_name != 'pull_request'
        run: |
          docker run --rm mplatform/mquery ${{ steps.prepare.outputs.docker_image }}:${{ steps.prepare.outputs.version }}
      -
        name: Clear
        if: always() && github.event_name != 'pull_request'
        run: |
          rm -f ${HOME}/.docker/config.json

.travis.yml (+36 -7)

@@ -4,16 +4,45 @@ sudo: false
os:
- linux

services:
- docker

go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.13.x
- 1.14.x
- 1.15.x
- tip

env:
  global:
  - GO111MODULE=on

install:
- echo "This is an override of the default install deps step in travis."
- go get -t -u -v ./...
- go build -v .
- go vet ./...

script:
- go build -v .
- go vet $(go list ./... | grep -v vendor)
- go test $(go list ./... | grep -v vendor)
- go test ./...

before_deploy:
- mkdir -p release
- "GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-s -w -extldflags -static' -o release/transfersh-$TRAVIS_TAG-linux-amd64"
- "GOOS=linux GOARCH=arm GOARM=7 CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-s -w -extldflags -static' -o release/transfersh-$TRAVIS_TAG-linux-armv7"
- "GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-s -w -extldflags -static' -o release/transfersh-$TRAVIS_TAG-darwin-amd64"
- "GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-s -w -extldflags -static' -o release/transfersh-$TRAVIS_TAG-win-amd64.exe"

deploy:
  provider: releases
  api_key:
    secure: cOuMGyvrl/9GX3TZFL+Vq++2Bv5Hlb3VfXSYONfeAj+1AXI3Y+tPruy/XnWpa1MUxkvFuIhea3sUAiKfwhHip9csCmMUhDJtaTU9apsxRkyF/OFrWb7/FlbnqYuAwnp91ImvtSlnubg2VHTjhBA6ycNQF7WZcJEMVMsAtC/nSY4=
  file:
  - "release/transfersh-$TRAVIS_TAG-linux-amd64"
  - "release/transfersh-$TRAVIS_TAG-linux-armv7"
  - "release/transfersh-$TRAVIS_TAG-darwin-amd64"
  - "release/transfersh-$TRAVIS_TAG-win-amd64.exe"
  skip_cleanup: true
  on:
    tags: true
    go: 1.15.x
  overwrite: true

Dockerfile (+16 -6)

@@ -1,14 +1,24 @@
FROM golang:1.11-alpine as build
LABEL maintainer="Remco Verhoef <remco@dutchcoders.io>"
# Default to Go 1.13
ARG GO_VERSION=1.13
FROM golang:${GO_VERSION}-alpine as build

# Necessary to run 'go get' and to compile the linked binary
RUN apk add git musl-dev

# Copy the local package files to the container's workspace.
ADD . /go/src/github.com/dutchcoders/transfer.sh

WORKDIR /go/src/github.com/dutchcoders/transfer.sh

ENV GO111MODULE=on

# build & install server
RUN go build -o /go/bin/transfersh github.com/dutchcoders/transfer.sh
RUN go get -u ./... && CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-w -extldflags "-static"' -o /go/bin/transfersh github.com/dutchcoders/transfer.sh

FROM scratch AS final
LABEL maintainer="Andrea Spacca <andrea.spacca@gmail.com>"

FROM golang:1.11-alpine
COPY --from=build /go/bin/transfersh /go/bin/transfersh
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt

ENTRYPOINT ["/go/bin/transfersh", "--listener", ":8080"]



README.md (+129 -83)

@@ -1,9 +1,16 @@
# transfer.sh [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/dutchcoders/transfer.sh?utm_source=badge&utm_medium=badge&utm_campaign=&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/dutchcoders/transfer.sh)](https://goreportcard.com/report/github.com/dutchcoders/transfer.sh) [![Docker pulls](https://img.shields.io/docker/pulls/dutchcoders/transfer.sh.svg)](https://hub.docker.com/r/dutchcoders/transfer.sh/) [![Build Status](https://travis-ci.org/dutchcoders/transfer.sh.svg?branch=master)](https://travis-ci.org/dutchcoders/transfer.sh)
# transfer.sh [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/dutchcoders/transfer.sh?utm_source=badge&utm_medium=badge&utm_campaign=&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/dutchcoders/transfer.sh)](https://goreportcard.com/report/github.com/dutchcoders/transfer.sh) [![Docker pulls](https://img.shields.io/docker/pulls/dutchcoders/transfer.sh.svg)](https://hub.docker.com/r/dutchcoders/transfer.sh/) [![Build Status](https://travis-ci.com/dutchcoders/transfer.sh.svg?branch=master)](https://travis-ci.com/dutchcoders/transfer.sh)

Easy and fast file sharing from the command-line. This code contains the server with everything you need to create your own instance.

Transfer.sh currently supports the s3 (Amazon S3) and gdrive (Google Drive) providers, as well as the local file system (local).

## Disclaimer
This project repository has no relation to the service at https://transfer.sh, which is managed by https://storj.io.
We are therefore unable to address any issues related to the service at https://transfer.sh.

The service at https://transfersh.com is of unknown origin and reported as cloud malware.


## Usage

### Upload:
@@ -26,74 +33,36 @@ $ curl https://transfer.sh/1lDau/test.txt|gpg -o- > /tmp/hello.txt
$ curl -X PUT --upload-file nhgbhhj https://transfer.sh/test.txt/virustotal
```

## Add alias to .bashrc or .zshrc

### Using curl
### Deleting
```bash
transfer() {
    curl --progress-bar --upload-file "$1" https://transfer.sh/$(basename $1) | tee /dev/null;
}

alias transfer=transfer
$ curl -X DELETE <X-Url-Delete Response Header URL>
```

### Using wget
```bash
transfer() {
    wget -t 1 -qO - --method=PUT --body-file="$1" --header="Content-Type: $(file -b --mime-type $1)" https://transfer.sh/$(basename $1);
}
## Request Headers

alias transfer=transfer
### Max-Downloads
```bash
$ curl --upload-file ./hello.txt https://transfer.sh/hello.txt -H "Max-Downloads: 1" # Limit the number of downloads
```

## Add alias for fish-shell

### Using curl
```fish
function transfer --description 'Upload a file to transfer.sh'
    if [ $argv[1] ]
        # write to output to tmpfile because of progress bar
        set -l tmpfile ( mktemp -t transferXXX )
        curl --progress-bar --upload-file "$argv[1]" https://transfer.sh/(basename $argv[1]) >> $tmpfile
        cat $tmpfile
        command rm -f $tmpfile
    else
        echo 'usage: transfer FILE_TO_TRANSFER'
    end
end

funcsave transfer
### Max-Days
```bash
$ curl --upload-file ./hello.txt https://transfer.sh/hello.txt -H "Max-Days: 1" # Set the number of days before deletion
```

### Using wget
```fish
function transfer --description 'Upload a file to transfer.sh'
    if [ $argv[1] ]
        wget -t 1 -qO - --method=PUT --body-file="$argv[1]" --header="Content-Type: (file -b --mime-type $argv[1])" https://transfer.sh/(basename $argv[1])
    else
        echo 'usage: transfer FILE_TO_TRANSFER'
    end
end

funcsave transfer
```
## Response Headers

### X-Url-Delete

Now run it like this:
The URL used to request the deletion of a file. Returned as a response header.
```bash
$ transfer test.txt
curl -sD - --upload-file ./hello https://transfer.sh/hello.txt | grep 'X-Url-Delete'
X-Url-Delete: https://transfer.sh/hello.txt/BAYh0/hello.txt/PDw0NHPcqU
```

## Add alias on Windows
## Examples

Put a file called `transfer.cmd` somewhere in your PATH with this inside it:
```cmd
@echo off
setlocal
:: use env vars to pass names to PS, to avoid escaping issues
set FN=%~nx1
set FULL=%1
powershell -noprofile -command "$(Invoke-Webrequest -Method put -Infile $Env:FULL https://transfer.sh/$Env:FN).Content"
```
See good usage examples in [examples.md](examples.md)

## Link aliases

@@ -109,38 +78,53 @@ https://transfer.sh/1lDau/test.txt --> https://transfer.sh/inline/1lDau/test.txt

Parameter | Description | Value | Env
--- | --- | --- | ---
listener | port to use for http (:80) | |
profile-listener | port to use for profiler (:6060)| |
force-https | redirect to https | false |
tls-listener | port to use for https (:443) | |
tls-listener-only | flag to enable tls listener only | |
tls-cert-file | path to tls certificate | |
tls-private-key | path to tls private key | |
http-auth-user | user for basic http auth on upload | |
http-auth-pass | pass for basic http auth on upload | |
temp-path | path to temp folder | system temp |
web-path | path to static web files (for development) | |
ga-key | google analytics key for the front end | |
uservoice-key | user voice key for the front end | |
provider | which storage provider to use | (s3, grdrive or local) |
aws-access-key | aws access key | | AWS_ACCESS_KEY
aws-secret-key | aws access key | | AWS_SECRET_KEY
bucket | aws bucket | | BUCKET
s3-no-multipart | disables s3 multipart upload | false | |
basedir | path storage for local/gdrive provider| |
gdrive-client-json-filepath | path to oauth client json config for gdrive provider| |
gdrive-local-config-path | path to store local transfer.sh config cache for gdrive provider| |
gdrive-chunk-size | chunk size for gdrive upload in megabytes, must be lower than available memory (8 MB) | |
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma seperated) | |
log | path to log file| |
listener | port to use for http (:80) | | LISTENER |
profile-listener | port to use for profiler (:6060) | | PROFILE_LISTENER |
force-https | redirect to https | false | FORCE_HTTPS
tls-listener | port to use for https (:443) | | TLS_LISTENER |
tls-listener-only | flag to enable tls listener only | | TLS_LISTENER_ONLY |
tls-cert-file | path to tls certificate | | TLS_CERT_FILE |
tls-private-key | path to tls private key | | TLS_PRIVATE_KEY |
http-auth-user | user for basic http auth on upload | | HTTP_AUTH_USER |
http-auth-pass | pass for basic http auth on upload | | HTTP_AUTH_PASS |
ip-whitelist | comma separated list of ips allowed to connect to the service | | IP_WHITELIST |
ip-blacklist | comma separated list of ips not allowed to connect to the service | | IP_BLACKLIST |
temp-path | path to temp folder | system temp | TEMP_PATH |
web-path | path to static web files (for development or custom front end) | | WEB_PATH |
proxy-path | path prefix when service is run behind a proxy | | PROXY_PATH |
proxy-port | port of the proxy when the service is run behind a proxy | | PROXY_PORT |
ga-key | google analytics key for the front end | | GA_KEY |
provider | which storage provider to use | (s3, storj, gdrive or local) |
uservoice-key | user voice key for the front end | | USERVOICE_KEY |
aws-access-key | aws access key | | AWS_ACCESS_KEY |
aws-secret-key | aws secret key | | AWS_SECRET_KEY |
bucket | aws bucket | | BUCKET |
s3-endpoint | Custom S3 endpoint. | | S3_ENDPOINT |
s3-region | region of the s3 bucket | eu-west-1 | S3_REGION |
s3-no-multipart | disables s3 multipart upload | false | S3_NO_MULTIPART |
s3-path-style | Forces path style URLs, required for Minio. | false | S3_PATH_STYLE |
storj-access | Access for the project | | STORJ_ACCESS |
storj-bucket | Bucket to use within the project | | STORJ_BUCKET |
basedir | path storage for local/gdrive provider | | BASEDIR |
gdrive-client-json-filepath | path to oauth client json config for gdrive provider | | GDRIVE_CLIENT_JSON_FILEPATH |
gdrive-local-config-path | path to store local transfer.sh config cache for gdrive provider| | GDRIVE_LOCAL_CONFIG_PATH |
gdrive-chunk-size | chunk size for gdrive upload in megabytes, must be lower than available memory (8 MB) | | GDRIVE_CHUNK_SIZE |
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma separated) | | HOSTS |
log | path to log file| | LOG |
cors-domains | comma separated list of domains for CORS, setting it enable CORS | | CORS_DOMAINS |
clamav-host | host for clamav feature | | CLAMAV_HOST |
rate-limit | request per minute | | RATE_LIMIT |
max-upload-size | max upload size in kilobytes | | MAX_UPLOAD_SIZE |
purge-days | number of days after the uploads are purged automatically | | PURGE_DAYS |
purge-interval | interval in hours to run the automatic purge for (not applicable to S3 and Storj) | | PURGE_INTERVAL |

If you want to use TLS using lets encrypt certificates, set lets-encrypt-hosts to your domain, set tls-listener to :443 and enable force-https.

If you want to use TLS using your own certificates, set tls-listener to :443, force-https, tls-cert=file and tls-private-key.
If you want to use TLS using your own certificates, set tls-listener to :443, force-https, tls-cert-file and tls-private-key.
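
For example, the two TLS setups might look like the following sketch; the host name, certificate paths, and the local storage flags are placeholders, not defaults:

```bash
# Let's Encrypt certificates; example.com is a placeholder host
transfer.sh --provider local --basedir /tmp/ --lets-encrypt-hosts example.com --tls-listener :443 --force-https

# Your own certificates; the cert and key paths are placeholders
transfer.sh --provider local --basedir /tmp/ --tls-listener :443 --force-https \
  --tls-cert-file /path/to/cert.pem --tls-private-key /path/to/key.pem
```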

## Development

Make sure your GOPATH is set correctly.
Switched to GO111MODULE

```bash
go run main.go --provider=local --listener :8080 --temp-path=/tmp/ --basedir=/tmp/
@@ -149,7 +133,9 @@ go run main.go --provider=local --listener :8080 --temp-path=/tmp/ --basedir=/tm
## Build

```bash
go build -o transfersh main.go
$ git clone git@github.com:dutchcoders/transfer.sh.git
$ cd transfer.sh
$ go build -o transfersh main.go
```

## Docker
@@ -160,6 +146,66 @@ For easy deployment, we've created a Docker container.
docker run --publish 8080:8080 dutchcoders/transfer.sh:latest --provider local --basedir /tmp/
```

## S3 Usage

For usage with an AWS S3 bucket, you just need to specify the following options:
- provider
- aws-access-key
- aws-secret-key
- bucket
- s3-region

If you specify the s3-region, you don't need to set the endpoint URL since the correct endpoint will be used automatically.
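
As a minimal sketch (the access key, secret key, and bucket name in angle brackets are placeholders), an AWS S3 invocation could look like:

```bash
# All angle-bracket values are placeholders
transfer.sh --provider s3 --aws-access-key <ACCESS_KEY> --aws-secret-key <SECRET_KEY> \
  --bucket <BUCKET> --s3-region eu-west-1
```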

### Custom S3 providers

To use a custom non-AWS S3 provider, you need to specify the endpoint as defined by your cloud provider.
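
For instance, a hypothetical Minio-style deployment would combine a custom endpoint with path-style URLs; the endpoint, credentials, and bucket below are placeholders:

```bash
# minio.example.com and the angle-bracket values are placeholders
transfer.sh --provider s3 --s3-endpoint https://minio.example.com --s3-path-style \
  --aws-access-key <ACCESS_KEY> --aws-secret-key <SECRET_KEY> --bucket <BUCKET>
```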

## Storj Network Provider

To use the Storj Network as storage provider you need to specify the following flags:
- provider `--provider storj`
- storj-access _(either via flag or environment variable STORJ_ACCESS)_
- storj-bucket _(either via flag or environment variable STORJ_BUCKET)_

### Creating Bucket and Scope

In preparation you need to create a scope (or copy it from the uplink configuration) and a bucket.

To get started, download the latest uplink from the release page: https://github.com/storj/storj/releases

After extracting, execute `uplink setup`. The wizard asks for the Satellite to use, the API Key
(which you can retrieve via the Satellite UI), and an Encryption Key.
Once the uplink is set up, create the bucket using the following schema:
`uplink mb sj://<BUCKET>`, where <BUCKET> is your desired name.

Afterwards you can copy the SCOPE out of the uplink configuration file and then start the
transfer.sh endpoint. For enhanced security, it is recommended to provide both the scope and the bucket name as environment variables.

Example:
```
export STORJ_BUCKET=transfersh
export STORJ_ACCESS=<SCOPE>
transfer.sh --provider storj
```

## Google Drive Usage

For usage with Google Drive, you need to specify the following options:
- provider
- gdrive-client-json-filepath
- gdrive-local-config-path
- basedir

### Creating Gdrive Client Json

You need to create an OAuth client ID from console.cloud.google.com,
download the file, and place it into a safe directory.

### Usage example

```bash
go run main.go --provider gdrive --basedir /tmp/ --gdrive-client-json-filepath /[credential_dir] --gdrive-local-config-path [directory_to_save_config]
```

## Contributions

Contributions are welcome.


cmd/cmd.go (+205 -63)

@@ -8,11 +8,11 @@ import (

"github.com/dutchcoders/transfer.sh/server"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/urfave/cli"
"google.golang.org/api/googleapi"
)

var Version = "0.1"
var Version = "1.2.1"
var helpTemplate = `NAME:
{{.Name}} - {{.Usage}}

@@ -34,69 +34,99 @@ VERSION:

var globalFlags = []cli.Flag{
cli.StringFlag{
Name: "listener",
Usage: "127.0.0.1:8080",
Value: "127.0.0.1:8080",
Name: "listener",
Usage: "127.0.0.1:8080",
Value: "127.0.0.1:8080",
EnvVar: "LISTENER",
},
// redirect to https?
// hostnames
cli.StringFlag{
Name: "profile-listener",
Usage: "127.0.0.1:6060",
Value: "",
Name: "profile-listener",
Usage: "127.0.0.1:6060",
Value: "",
EnvVar: "PROFILE_LISTENER",
},
cli.BoolFlag{
Name: "force-https",
Usage: "",
Name: "force-https",
Usage: "",
EnvVar: "FORCE_HTTPS",
},
cli.StringFlag{
Name: "tls-listener",
Usage: "127.0.0.1:8443",
Value: "",
Name: "tls-listener",
Usage: "127.0.0.1:8443",
Value: "",
EnvVar: "TLS_LISTENER",
},
cli.BoolFlag{
Name: "tls-listener-only",
Usage: "",
Name: "tls-listener-only",
Usage: "",
EnvVar: "TLS_LISTENER_ONLY",
},
cli.StringFlag{
Name: "tls-cert-file",
Value: "",
Name: "tls-cert-file",
Value: "",
EnvVar: "TLS_CERT_FILE",
},
cli.StringFlag{
Name: "tls-private-key",
Value: "",
Name: "tls-private-key",
Value: "",
EnvVar: "TLS_PRIVATE_KEY",
},
cli.StringFlag{
Name: "temp-path",
Usage: "path to temp files",
Value: os.TempDir(),
Name: "temp-path",
Usage: "path to temp files",
Value: os.TempDir(),
EnvVar: "TEMP_PATH",
},
cli.StringFlag{
Name: "web-path",
Usage: "path to static web files",
Value: "",
Name: "web-path",
Usage: "path to static web files",
Value: "",
EnvVar: "WEB_PATH",
},
cli.StringFlag{
Name: "ga-key",
Usage: "key for google analytics (front end)",
Value: "",
Name: "proxy-path",
Usage: "path prefix when service is run behind a proxy",
Value: "",
EnvVar: "PROXY_PATH",
},
cli.StringFlag{
Name: "uservoice-key",
Usage: "key for user voice (front end)",
Value: "",
Name: "proxy-port",
Usage: "port of the proxy when the service is run behind a proxy",
Value: "",
EnvVar: "PROXY_PORT",
},
cli.StringFlag{
Name: "provider",
Usage: "s3|gdrive|local",
Value: "",
Name: "ga-key",
Usage: "key for google analytics (front end)",
Value: "",
EnvVar: "GA_KEY",
},
cli.StringFlag{
Name: "uservoice-key",
Usage: "key for user voice (front end)",
Value: "",
EnvVar: "USERVOICE_KEY",
},
cli.StringFlag{
Name: "provider",
Usage: "s3|gdrive|local",
Value: "",
EnvVar: "PROVIDER",
},
cli.StringFlag{
Name: "s3-endpoint",
Usage: "",
Value: "http://s3-eu-west-1.amazonaws.com",
Value: "",
EnvVar: "S3_ENDPOINT",
},
cli.StringFlag{
Name: "s3-region",
Usage: "",
Value: "eu-west-1",
EnvVar: "S3_REGION",
},
cli.StringFlag{
Name: "aws-access-key",
Usage: "",
@@ -116,29 +146,68 @@ var globalFlags = []cli.Flag{
EnvVar: "BUCKET",
},
cli.BoolFlag{
Name: "s3-no-multipart",
Usage: "Disables S3 Multipart Puts",
Name: "s3-no-multipart",
Usage: "Disables S3 Multipart Puts",
EnvVar: "S3_NO_MULTIPART",
},
cli.BoolFlag{
Name: "s3-path-style",
Usage: "Forces path style URLs, required for Minio.",
EnvVar: "S3_PATH_STYLE",
},
cli.StringFlag{
Name: "gdrive-client-json-filepath",
Usage: "",
Value: "",
Name: "gdrive-client-json-filepath",
Usage: "",
Value: "",
EnvVar: "GDRIVE_CLIENT_JSON_FILEPATH",
},
cli.StringFlag{
Name: "gdrive-local-config-path",
Usage: "",
Value: "",
Name: "gdrive-local-config-path",
Usage: "",
Value: "",
EnvVar: "GDRIVE_LOCAL_CONFIG_PATH",
},
cli.IntFlag{
Name: "gdrive-chunk-size",
Usage: "",
Value: googleapi.DefaultUploadChunkSize / 1024 / 1024,
Name: "gdrive-chunk-size",
Usage: "",
Value: googleapi.DefaultUploadChunkSize / 1024 / 1024,
EnvVar: "GDRIVE_CHUNK_SIZE",
},
cli.StringFlag{
Name: "storj-access",
Usage: "Access for the project",
Value: "",
EnvVar: "STORJ_ACCESS",
},
cli.StringFlag{
Name: "storj-bucket",
Usage: "Bucket to use within the project",
Value: "",
EnvVar: "STORJ_BUCKET",
},
cli.IntFlag{
Name: "rate-limit",
Usage: "requests per minute",
Value: 0,
EnvVar: "",
EnvVar: "RATE_LIMIT",
},
cli.IntFlag{
Name: "purge-days",
Usage: "number of days after uploads are purged automatically",
Value: 0,
EnvVar: "PURGE_DAYS",
},
cli.IntFlag{
Name: "purge-interval",
Usage: "interval in hours to run the automatic purge for",
Value: 0,
EnvVar: "PURGE_INTERVAL",
},
cli.Int64Flag{
Name: "max-upload-size",
Usage: "max limit for upload, in kilobytes",
Value: 0,
EnvVar: "MAX_UPLOAD_SIZE",
},
cli.StringFlag{
Name: "lets-encrypt-hosts",
@@ -147,14 +216,16 @@ var globalFlags = []cli.Flag{
EnvVar: "HOSTS",
},
cli.StringFlag{
Name: "log",
Usage: "/var/log/transfersh.log",
Value: "",
Name: "log",
Usage: "/var/log/transfersh.log",
Value: "",
EnvVar: "LOG",
},
cli.StringFlag{
Name: "basedir",
Usage: "path to storage",
Value: "",
Name: "basedir",
Usage: "path to storage",
Value: "",
EnvVar: "BASEDIR",
},
cli.StringFlag{
Name: "clamav-host",
@@ -169,18 +240,39 @@ var globalFlags = []cli.Flag{
EnvVar: "VIRUSTOTAL_KEY",
},
cli.BoolFlag{
Name: "profiler",
Usage: "enable profiling",
Name: "profiler",
Usage: "enable profiling",
EnvVar: "PROFILER",
},
cli.StringFlag{
Name: "http-auth-user",
Usage: "user for http basic auth",
Value: "",
Name: "http-auth-user",
Usage: "user for http basic auth",
Value: "",
EnvVar: "HTTP_AUTH_USER",
},
cli.StringFlag{
Name: "http-auth-pass",
Usage: "pass for http basic auth",
Value: "",
Name: "http-auth-pass",
Usage: "pass for http basic auth",
Value: "",
EnvVar: "HTTP_AUTH_PASS",
},
cli.StringFlag{
Name: "ip-whitelist",
Usage: "comma separated list of ips allowed to connect to the service",
Value: "",
EnvVar: "IP_WHITELIST",
},
cli.StringFlag{
Name: "ip-blacklist",
Usage: "comma separated list of ips not allowed to connect to the service",
Value: "",
EnvVar: "IP_BLACKLIST",
},
cli.StringFlag{
Name: "cors-domains",
Usage: "comma separated list of domains allowed for CORS requests",
Value: "",
EnvVar: "CORS_DOMAINS",
},
}

@@ -189,7 +281,7 @@ type Cmd struct {
}

func VersionAction(c *cli.Context) {
fmt.Println(color.YellowString(fmt.Sprintf("transfer.sh: Easy file sharing from the command line")))
fmt.Println(color.YellowString(fmt.Sprintf("transfer.sh %s: Easy file sharing from the command line", Version)))
}

func New() *Cmd {
@@ -200,6 +292,7 @@ func New() *Cmd {
app.Author = ""
app.Usage = "transfer.sh"
app.Description = `Easy file sharing from the command line`
app.Version = Version
app.Flags = globalFlags
app.CustomAppHelpTemplate = helpTemplate
app.Commands = []cli.Command{
@@ -219,6 +312,10 @@ func New() *Cmd {
options = append(options, server.Listener(v))
}

if v := c.String("cors-domains"); v != "" {
options = append(options, server.CorsDomains(v))
}

if v := c.String("tls-listener"); v == "" {
} else if c.Bool("tls-listener-only") {
options = append(options, server.TLSListener(v, true))
@@ -234,6 +331,14 @@ func New() *Cmd {
options = append(options, server.WebPath(v))
}

if v := c.String("proxy-path"); v != "" {
options = append(options, server.ProxyPath(v))
}

if v := c.String("proxy-port"); v != "" {
options = append(options, server.ProxyPort(v))
}

if v := c.String("ga-key"); v != "" {
options = append(options, server.GoogleAnalytics(v))
}
@@ -264,10 +369,20 @@ func New() *Cmd {
options = append(options, server.ClamavHost(v))
}

if v := c.Int64("max-upload-size"); v > 0 {
options = append(options, server.MaxUploadSize(v))
}

if v := c.Int("rate-limit"); v > 0 {
options = append(options, server.RateLimit(v))
}

purgeDays := c.Int("purge-days")
purgeInterval := c.Int("purge-interval")
if purgeDays > 0 && purgeInterval > 0 {
options = append(options, server.Purge(purgeDays, purgeInterval))
}

if cert := c.String("tls-cert-file"); cert == "" {
} else if pk := c.String("tls-private-key"); pk == "" {
} else {
@@ -288,6 +403,23 @@ func New() *Cmd {
options = append(options, server.HttpAuthCredentials(httpAuthUser, httpAuthPass))
}

applyIPFilter := false
ipFilterOptions := server.IPFilterOptions{}
if ipWhitelist := c.String("ip-whitelist"); ipWhitelist != "" {
applyIPFilter = true
ipFilterOptions.AllowedIPs = strings.Split(ipWhitelist, ",")
ipFilterOptions.BlockByDefault = true
}

if ipBlacklist := c.String("ip-blacklist"); ipBlacklist != "" {
applyIPFilter = true
ipFilterOptions.BlockedIPs = strings.Split(ipBlacklist, ",")
}

if applyIPFilter {
options = append(options, server.FilterOptions(ipFilterOptions))
}

switch provider := c.String("provider"); provider {
case "s3":
if accessKey := c.String("aws-access-key"); accessKey == "" {
@@ -296,7 +428,7 @@ func New() *Cmd {
panic("secret-key not set.")
} else if bucket := c.String("bucket"); bucket == "" {
panic("bucket not set.")
} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, c.String("s3-endpoint"), logger, c.Bool("s3-no-multipart")); err != nil {
} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, purgeDays, c.String("s3-region"), c.String("s3-endpoint"), c.Bool("s3-no-multipart"), c.Bool("s3-path-style"), logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
@@ -315,6 +447,16 @@ func New() *Cmd {
} else {
options = append(options, server.UseStorage(storage))
}
case "storj":
if access := c.String("storj-access"); access == "" {
panic("storj-access not set.")
} else if bucket := c.String("storj-bucket"); bucket == "" {
panic("storj-bucket not set.")
} else if storage, err := server.NewStorjStorage(access, bucket, purgeDays, logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
}
case "local":
if v := c.String("basedir"); v == "" {
panic("basedir not set.")


examples.md (+176 -0)

@@ -0,0 +1,176 @@
# Table of Contents

* [Aliases](#aliases)
* [Uploading and downloading](#uploading-and-downloading)
* [Archiving and backups](#archiving-and-backups)
* [Encrypting and decrypting](#encrypting-and-decrypting)
* [Scanning for viruses](#scanning-for-viruses)

## Aliases
<a name="aliases"/>

## Add alias to .bashrc or .zshrc

### Using curl
```bash
transfer() {
    curl --progress-bar --upload-file "$1" https://transfer.sh/$(basename "$1") | tee /dev/null;
    echo
}

alias transfer=transfer
```

### Using wget
```bash
transfer() {
    wget -t 1 -qO - --method=PUT --body-file="$1" --header="Content-Type: $(file -b --mime-type "$1")" https://transfer.sh/$(basename "$1");
    echo
}

alias transfer=transfer
```

## Add alias for fish-shell

### Using curl
```fish
function transfer --description 'Upload a file to transfer.sh'
    if [ $argv[1] ]
        # write to output to tmpfile because of progress bar
        set -l tmpfile ( mktemp -t transferXXXXXX )
        curl --progress-bar --upload-file "$argv[1]" https://transfer.sh/(basename $argv[1]) >> $tmpfile
        cat $tmpfile
        command rm -f $tmpfile
    else
        echo 'usage: transfer FILE_TO_TRANSFER'
    end
end

funcsave transfer
```

### Using wget
```fish
function transfer --description 'Upload a file to transfer.sh'
    if [ $argv[1] ]
        wget -t 1 -qO - --method=PUT --body-file="$argv[1]" --header="Content-Type: (file -b --mime-type $argv[1])" https://transfer.sh/(basename $argv[1])
    else
        echo 'usage: transfer FILE_TO_TRANSFER'
    end
end

funcsave transfer
```

Now run it like this:
```bash
$ transfer test.txt
```

## Add alias on Windows

Put a file called `transfer.cmd` somewhere in your PATH with this inside it:
```cmd
@echo off
setlocal
:: use env vars to pass names to PS, to avoid escaping issues
set FN=%~nx1
set FULL=%1
powershell -noprofile -command "$(Invoke-Webrequest -Method put -Infile $Env:FULL https://transfer.sh/$Env:FN).Content"
```

## Uploading and Downloading
<a name="uploading-and-downloading"/>

### Uploading with wget
```bash
$ wget --method PUT --body-file=/tmp/file.tar https://transfer.sh/file.tar -O - -nv
```

### Uploading with PowerShell
```posh
PS H:\> invoke-webrequest -method put -infile .\file.txt https://transfer.sh/file.txt
```

### Upload using HTTPie
```bash
$ http https://transfer.sh/ -vv < /tmp/test.log
```

### Uploading a filtered text file
```bash
$ grep 'pound' /var/log/syslog | curl --upload-file - https://transfer.sh/pound.log
```

### Downloading with curl
```bash
$ curl https://transfer.sh/1lDau/test.txt -o test.txt
```

### Downloading with wget
```bash
$ wget https://transfer.sh/1lDau/test.txt
```

## Archiving and backups
<a name="archiving-and-backups"/>

### Backup, encrypt and transfer a MySQL dump
```bash
$ mysqldump --all-databases | gzip | gpg -ac -o- | curl -X PUT --upload-file "-" https://transfer.sh/test.txt
```

### Archive and upload directory
```bash
$ tar -czf - /var/log/journal | curl --upload-file - https://transfer.sh/journal.tar.gz
```

### Uploading multiple files at once
```bash
$ curl -i -F filedata=@/tmp/hello.txt -F filedata=@/tmp/hello2.txt https://transfer.sh/
```

### Combining downloads as zip or tar.gz archive
```bash
$ curl https://transfer.sh/(15HKz/hello.txt,15HKz/hello.txt).tar.gz
$ curl https://transfer.sh/(15HKz/hello.txt,15HKz/hello.txt).zip
```

### Transfer and send email with link (using an alias)
```bash
$ transfer /tmp/hello.txt | mail -s "Hello World" user@yourmaildomain.com
```
## Encrypting and decrypting
<a name="encrypting-and-decrypting"/>

### Encrypting files with password using gpg
```bash
$ cat /tmp/hello.txt | gpg -ac -o- | curl -X PUT --upload-file "-" https://transfer.sh/test.txt
```

### Downloading and decrypting
```bash
$ curl https://transfer.sh/1lDau/test.txt | gpg -o- > /tmp/hello.txt
```

### Import keys from [keybase](https://keybase.io/)
```bash
$ keybase track [them] # Encrypt for recipient(s)
$ cat somebackupfile.tar.gz | keybase encrypt [them] | curl --upload-file '-' https://transfer.sh/test.txt
$ curl https://transfer.sh/sqUFi/test.md | keybase decrypt # Decrypt
```

## Scanning for viruses
<a name="scanning-for-viruses"/>

### Scan for malware or viruses using Clamav
```bash
$ wget http://www.eicar.org/download/eicar.com
$ curl -X PUT --upload-file ./eicar.com https://transfer.sh/eicar.com/scan
```

### Upload malware to VirusTotal, get a permalink in return
```bash
$ curl -X PUT --upload-file nhgbhhj https://transfer.sh/test.txt/virustotal
```

go.mod (+35 -0)

@@ -0,0 +1,35 @@
module git.kiska.pw/fionera/transfer.sh

go 1.15

replace github.com/dutchcoders/transfer.sh => ./

replace github.com/dutchcoders/transfer.sh-web => git.kiska.pw/kiska/transfer.sh-web v0.0.0-20210208004239-696c724b9327

require (
	github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14
	github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2
	github.com/aws/aws-sdk-go v1.29.24
	github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
	github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329
	github.com/dutchcoders/transfer.sh v1.2.1
	github.com/dutchcoders/transfer.sh-web v0.0.0-20210130180835-bc7d8b891391
	github.com/elazarl/go-bindata-assetfs v1.0.0
	github.com/fatih/color v1.9.0
	github.com/golang/gddo v0.0.0-20200310004957-95ce5a452273
	github.com/gorilla/handlers v1.4.2
	github.com/gorilla/mux v1.7.4
	github.com/klauspost/compress v1.11.7
	github.com/microcosm-cc/bluemonday v1.0.2
	github.com/russross/blackfriday/v2 v2.0.1
	github.com/skip2/go-qrcode v0.0.0-20191027152451-9434209cb086
	github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
	github.com/urfave/cli v1.22.3
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
	golang.org/x/net v0.0.0-20200301022130-244492dfa37a
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
	google.golang.org/api v0.20.0
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
	storj.io/common v0.0.0-20201207172416-78f4e59925c3
	storj.io/uplink v1.4.2
)

go.sum (+280 -0)

@@ -0,0 +1,280 @@
cloud.google.com/go v0.16.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
git.kiska.pw/kiska/transfer.sh-web v0.0.0-20210208004239-696c724b9327 h1:i/m42TzRsnhdPkI0n2ny/dyXi2fQfY4j4f5GAx3y+KI=
git.kiska.pw/kiska/transfer.sh-web v0.0.0-20210208004239-696c724b9327/go.mod h1:jTzXZabwihvQgvmySgD4f4GNszimkXK3o8x1ucH1z5Q=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14 h1:3zOOc7WdrATDXof+h/rBgMsg0sAmZIEVHft1UbWHh94=
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14/go.mod h1:+VFiaivV54Sa94ijzA/ZHQLoHuoUIS9hIqCK6f/76Zw=
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2 h1:sIvihcW4qpN5qGSjmrsDDAbLpEq5tuHjJJfWY0Hud5Y=
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2/go.mod h1:3YwJE8rEisS9eraee0hygGG4G3gqX8H8Nyu+nPTUnGU=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/aws/aws-sdk-go v1.29.24 h1:KOnds/LwADMDBaALL4UB98ZR+TUR1A1mYmAYbdLixLA=
github.com/aws/aws-sdk-go v1.29.24/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded h1:WcPFZzCIqGt/TdFJHsOiX5dIlB/MUzrftltMhpjzfA8=
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439 h1:fqGdSbbWVbQqNCQXd/dyZ7Bl+u8R2X7QCiNzwgyUa/M=
github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e h1:rcHHSQqzCgvlwP0I/fQ8rQMn/MpHE5gWSLdtpxtP6KQ=
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e/go.mod h1:Byz7q8MSzSPkouskHJhX0er2mZY/m0Vj5bMeMCkkyY4=
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329 h1:ERqCkG/uSyT74P1m/j9yR+so+7ynY4fbTvLY/Mr1ZMg=
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329/go.mod h1:G5qOfE5bQZ5scycLpB7fYWgN4y3xdfXo+pYWM8z2epY=
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.3-0.20170329110642-4da3e2cfbabc/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f h1:Sk0u0gIncQaQD23zAoAZs2DNi2u2l5UTLi4CmCBL5v8=
github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/gddo v0.0.0-20200310004957-95ce5a452273 h1:fHbEOLXdSMRJrzJwQkKixEg3Uq2RIcNQF0Y7iiQ+gRk=
github.com/golang/gddo v0.0.0-20200310004957-95ce5a452273/go.mod h1:sam69Hju0uq+5uvLJUMDlsKlQ21Vrs1Kd/1YFPNYdOU=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20170918230701-e5d664eb928e/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.1.1-0.20171103154506-982329095285/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gregjones/httpcache v0.0.0-20170920190843-316c5e0ff04e/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/log15 v0.0.0-20170622235902-74a0988b5f80/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.7.4-0.20170902060319-8d7837e64d3c/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.0.1-0.20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/skip2/go-qrcode v0.0.0-20191027152451-9434209cb086 h1:RYiqpb2ii2Z6J4x0wxK46kvPBbFuZcdhS+CIztmYgZs=
github.com/skip2/go-qrcode v0.0.0-20191027152451-9434209cb086/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo=
github.com/spacemonkeygo/monkit/v3 v3.0.4 h1:Ay+PZirv+qfd4sqcT+X/U3BnC7AcIaqp/IXh0oV36k8=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.7 h1:LsGdIXl8mccqJrYEh4Uf4sLVGu/g0tjhNqQzdn9MzVk=
github.com/spacemonkeygo/monkit/v3 v3.0.7/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
github.com/spf13/afero v0.0.0-20170901052352-ee1bd8ee15a1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1-0.20170901120850-7aff26db30c1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU=
github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/zeebo/admission/v3 v3.0.2/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da h1:bGb80FudwxpeucJUjPYJXuJ8Hk91vNtfvrymzwiei38=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.0.0-20170921000349-586095a6e407/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170918111702-1e559d0a00ee/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.2.1-0.20170921194603-d4b75ebd4f9f/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
storj.io/common v0.0.0-20201207172416-78f4e59925c3 h1:D+rAQBzjl0Mw3VQ+1Sjv5/53I7JaIymMrkDW5DYBgRE=
storj.io/common v0.0.0-20201207172416-78f4e59925c3/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=
storj.io/uplink v1.4.2 h1:Kj4ubuSQfoLRC0n1cJXt1Tgns0WF1DRNMs8Luh6+rY8=
storj.io/uplink v1.4.2/go.mod h1:VoxYTP5AzJ+gnzsqptuB5Ra8Old+fVVbwLCmi4jr5y4=
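
The module sums above pin a fork of the bundled web assets, git.kiska.pw/kiska/transfer.sh-web, in place of the canonical github.com/dutchcoders/transfer.sh-web import used in server/handlers.go. That substitution is normally expressed with a replace directive in go.mod; a minimal sketch of the assumed form (the go.mod file itself is not part of this excerpt, only the pinned pseudo-version is taken from go.sum above):

// go.mod, assumed excerpt: route the canonical web-assets module to the fork.
// The pseudo-version matches the git.kiska.pw lines in go.sum.
replace github.com/dutchcoders/transfer.sh-web => git.kiska.pw/kiska/transfer.sh-web v0.0.0-20210208004239-696c724b9327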

+ 0
- 203
lock.json

@@ -1,203 +0,0 @@
{
"memo": "5b27aecb0272e40f3b8b8f9a5deeb7c9f5dbf06c53b1134de2b84eac466d27e0",
"projects": [
{
"name": "github.com/PuerkitoBio/ghost",
"branch": "master",
"revision": "206e6e460e14a42d1d811c970b30248db058e9b2",
"packages": [
".",
"handlers"
]
},
{
"name": "github.com/VojtechVitek/ratelimit",
"branch": "master",
"revision": "dc172bc0f6d241e980010dbc63957ef1a2c8ca33",
"packages": [
".",
"memory"
]
},
{
"name": "github.com/dutchcoders/go-clamd",
"branch": "master",
"revision": "a9a81beaffff0392094052913ec45fa140eb8511",
"packages": [
"."
]
},
{
"name": "github.com/dutchcoders/go-virustotal",
"branch": "master",
"revision": "24cc8e6fa329f020c70a3b32330b5743f1ba7971",
"packages": [
"."
]
},
{
"name": "github.com/dutchcoders/transfer.sh-web",
"branch": "master",
"revision": "cdb97c4b64a33dd1eec7e94b2c200cd9b06296a1",
"packages": [
"."
]
},
{
"name": "github.com/elazarl/go-bindata-assetfs",
"branch": "master",
"revision": "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43",
"packages": [
"."
]
},
{
"name": "github.com/fatih/color",
"version": "v1.4.1",
"revision": "9131ab34cf20d2f6d83fdc67168a5430d1c7dc23",
"packages": [
"."
]
},
{
"name": "github.com/garyburd/redigo",
"version": "v1.0.0",
"revision": "8873b2f1995f59d4bcdd2b0dc9858e2cb9bf0c13",
"packages": [
"internal",
"redis"
]
},
{
"name": "github.com/goamz/goamz",
"branch": "master",
"revision": "c35091c30f44b7f151ec9028b895465a191d1ea7",
"packages": [
"aws",
"s3"
]
},
{
"name": "github.com/golang/gddo",
"branch": "master",
"revision": "72302b972abba39585150723aea3cf343e99437c",
"packages": [
"httputil/header"
]
},
{
"name": "github.com/gorilla/context",
"version": "v1.1",
"revision": "1ea25387ff6f684839d82767c1733ff4d4d15d0a",
"packages": [
"."
]
},
{
"name": "github.com/gorilla/mux",
"version": "v1.3.0",
"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
"packages": [
"."
]
},
{
"name": "github.com/gorilla/securecookie",
"version": "v1.1",
"revision": "667fe4e3466a040b780561fe9b51a83a3753eefc",
"packages": [
"."
]
},
{
"name": "github.com/mattn/go-colorable",
"version": "v0.0.7",
"revision": "d228849504861217f796da67fae4f6e347643f15",
"packages": [
"."
]
},
{
"name": "github.com/mattn/go-isatty",
"version": "v0.0.1",
"revision": "3a115632dcd687f9c8cd01679c83a06a0e21c1f3",
"packages": [
"."
]
},
{
"name": "github.com/minio/cli",
"version": "v1.3.0",
"revision": "8683fa7fef37cc8cb092f47bdb6b403e0049f9ee",
"packages": [
"."
]
},
{
"name": "github.com/nu7hatch/gouuid",
"branch": "master",
"revision": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3",
"packages": [
"."
]
},
{
"name": "github.com/russross/blackfriday",
"version": "v1.4",
"revision": "0b647d0506a698cca42caca173e55559b12a69f2",
"packages": [
"."
]
},
{
"name": "github.com/shurcooL/sanitized_anchor_name",
"branch": "master",
"revision": "1dba4b3954bc059efc3991ec364f9f9a35f597d2",
"packages": [
"."
]
},
{
"name": "github.com/vaughan0/go-ini",
"branch": "master",
"revision": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1",
"packages": [
"."
]
},
{
"name": "golang.org/x/crypto",
"branch": "master",
"revision": "459e26527287adbc2adcc5d0d49abff9a5f315a7",
"packages": [
"acme",
"acme/autocert"
]
},
{
"name": "golang.org/x/net",
"branch": "master",
"revision": "a6577fac2d73be281a500b310739095313165611",
"packages": [
"context",
"context/ctxhttp"
]
},
{
"name": "golang.org/x/sys",
"branch": "master",
"revision": "99f16d856c9836c42d24e7ab64ea72916925fa97",
"packages": [
"unix"
]
},
{
"name": "gopkg.in/check.v1",
"branch": "v1",
"revision": "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec",
"packages": [
"."
]
}
]
}

+ 261
- 91
server/handlers.go

@@ -35,6 +35,7 @@ import (
"encoding/json"
"errors"
"fmt"
blackfriday "github.com/russross/blackfriday/v2"
"html"
html_template "html/template"
"io"
@@ -55,14 +56,17 @@ import (

"net"

"encoding/base64"
web "github.com/dutchcoders/transfer.sh-web"
"github.com/gorilla/mux"
"github.com/russross/blackfriday"
"github.com/microcosm-cc/bluemonday"
"github.com/skip2/go-qrcode"

"encoding/base64"
qrcode "github.com/skip2/go-qrcode"
"github.com/klauspost/compress/zstd"
)

const getPathPart = "get"

var (
htmlTemplates = initHTMLTemplates()
textTemplates = initTextTemplates()
@@ -100,7 +104,16 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
token := vars["token"]
filename := vars["filename"]

contentType, contentLength, err := s.storage.Head(token, filename)
metadata, err := s.CheckMetadata(token, filename, false)

if err != nil {
log.Printf("Error metadata: %s", err.Error())
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}

contentType := metadata.ContentType
contentLength, err := s.storage.Head(token, filename)
if err != nil {
http.Error(w, http.StatusText(404), 404)
return
@@ -120,7 +133,7 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
templatePath = "download.markdown.html"

var reader io.ReadCloser
if reader, _, _, err = s.storage.Get(token, filename); err != nil {
if reader, _, err = s.storage.Get(token, filename); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -133,8 +146,8 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
}

if strings.HasPrefix(contentType, "text/x-markdown") || strings.HasPrefix(contentType, "text/markdown") {
escapedData := html.EscapeString(string(data))
output := blackfriday.MarkdownCommon([]byte(escapedData))
unsafe := blackfriday.Run(data)
output := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
content = html_template.HTML(output)
} else if strings.HasPrefix(contentType, "text/plain") {
content = html_template.HTML(fmt.Sprintf("<pre>%s</pre>", html.EscapeString(string(data))))
@@ -146,9 +159,12 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
templatePath = "download.html"
}

resolvedUrl := resolveUrl(r, getURL(r).ResolveReference(r.URL), true)
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
resolvedURL := resolveURL(r, relativeURL, s.proxyPort)
relativeURLGet, _ := url.Parse(path.Join(s.proxyPath, getPathPart, token, filename))
resolvedURLGet := resolveURL(r, relativeURLGet, s.proxyPort)
var png []byte
png, err = qrcode.Encode(resolvedUrl, qrcode.High, 150)
png, err = qrcode.Encode(resolvedURL, qrcode.High, 150)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -156,14 +172,15 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {

qrCode := base64.StdEncoding.EncodeToString(png)

hostname := getURL(r).Host
webAddress := resolveWebAddress(r)
hostname := getURL(r, s.proxyPort).Host
webAddress := resolveWebAddress(r, s.proxyPath, s.proxyPort)

data := struct {
ContentType string
Content html_template.HTML
Filename string
Url string
UrlGet string
Hostname string
WebAddress string
ContentLength uint64
@@ -174,7 +191,8 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
contentType,
content,
filename,
resolvedUrl,
resolvedURL,
resolvedURLGet,
hostname,
webAddress,
contentLength,
@@ -196,8 +214,8 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
func (s *Server) viewHandler(w http.ResponseWriter, r *http.Request) {
// vars := mux.Vars(r)

hostname := getURL(r).Host
webAddress := resolveWebAddress(r)
hostname := getURL(r, s.proxyPort).Host
webAddress := resolveWebAddress(r, s.proxyPath, s.proxyPort)

data := struct {
Hostname string
@@ -295,6 +313,12 @@ func (s *Server) postHandler(w http.ResponseWriter, r *http.Request) {

contentLength := n

if s.maxUploadSize > 0 && contentLength > s.maxUploadSize {
log.Print("Entity too large")
http.Error(w, http.StatusText(http.StatusRequestEntityTooLarge), http.StatusRequestEntityTooLarge)
return
}

metadata := MetadataForRequest(contentType, r)

buffer := &bytes.Buffer{}
@@ -321,9 +345,9 @@ func (s *Server) postHandler(w http.ResponseWriter, r *http.Request) {

}

filename = url.QueryEscape(filename)
relativeURL, _ := url.Parse(path.Join(token, filename))
fmt.Fprintln(w, getURL(r).ResolveReference(relativeURL).String())
filename = url.PathEscape(filename)
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
fmt.Fprintln(w, getURL(r, s.proxyPort).ResolveReference(relativeURL).String())

cleanTmpFile(file)
}
@@ -362,9 +386,9 @@ type Metadata struct {
func MetadataForRequest(contentType string, r *http.Request) Metadata {
metadata := Metadata{
ContentType: contentType,
MaxDate: time.Now().Add(time.Hour * 24 * 365 * 10),
MaxDate: time.Time{},
Downloads: 0,
MaxDownloads: 99999999,
MaxDownloads: -1,
DeletionToken: Encode(10000000+int64(rand.Intn(1000000000))) + Encode(10000000+int64(rand.Intn(1000000000))),
}

@@ -439,6 +463,12 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
contentLength = n
}

if s.maxUploadSize > 0 && contentLength > s.maxUploadSize {
log.Print("Entity too large")
http.Error(w, http.StatusText(http.StatusRequestEntityTooLarge), http.StatusRequestEntityTooLarge)
return
}

if contentLength == 0 {
log.Print("Empty content-length")
http.Error(w, errors.New("Could not upload empty file").Error(), 400)
@@ -480,31 +510,70 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {

w.Header().Set("Content-Type", "text/plain")

filename = url.QueryEscape(filename)
relativeURL, _ := url.Parse(path.Join(token, filename))
deleteUrl, _ := url.Parse(path.Join(token, filename, metadata.DeletionToken))
filename = url.PathEscape(filename)
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
deleteURL, _ := url.Parse(path.Join(s.proxyPath, token, filename, metadata.DeletionToken))

w.Header().Set("X-Url-Delete", resolveURL(r, deleteURL, s.proxyPort))

fmt.Fprint(w, resolveURL(r, relativeURL, s.proxyPort))
}

func resolveURL(r *http.Request, u *url.URL, proxyPort string) string {
r.URL.Path = ""

return getURL(r, proxyPort).ResolveReference(u).String()
}

func resolveKey(key, proxyPath string) string {
if strings.HasPrefix(key, "/") {
key = key[1:]
}

if strings.HasPrefix(key, proxyPath) {
key = key[len(proxyPath):]
}

w.Header().Set("X-Url-Delete", resolveUrl(r, deleteUrl, true))
key = strings.Replace(key, "\\", "/", -1)

fmt.Fprint(w, resolveUrl(r, relativeURL, false))
return key
}

func resolveUrl(r *http.Request, u *url.URL, absolutePath bool) string {
if absolutePath {
r.URL.Path = ""
func resolveWebAddress(r *http.Request, proxyPath string, proxyPort string) string {
url := getURL(r, proxyPort)

var webAddress string

if len(proxyPath) == 0 {
webAddress = fmt.Sprintf("%s://%s/",
url.ResolveReference(url).Scheme,
url.ResolveReference(url).Host)
} else {
webAddress = fmt.Sprintf("%s://%s/%s",
url.ResolveReference(url).Scheme,
url.ResolveReference(url).Host,
proxyPath)
}

return getURL(r).ResolveReference(u).String()
return webAddress
}

func resolveWebAddress(r *http.Request) string {
url := getURL(r)
// Similar to the logic found here:
// https://github.com/golang/go/blob/release-branch.go1.14/src/net/http/clone.go#L22-L33
func cloneURL(u *url.URL) *url.URL {
c := &url.URL{}
*c = *u

return fmt.Sprintf("%s://%s", url.ResolveReference(url).Scheme, url.ResolveReference(url).Host)
if u.User != nil {
c.User = &url.Userinfo{}
*c.User = *u.User
}

return c
}

func getURL(r *http.Request) *url.URL {
u, _ := url.Parse(r.URL.String())
func getURL(r *http.Request, proxyPort string) *url.URL {
u := cloneURL(r.URL)

if r.TLS != nil {
u.Scheme = "https"
@@ -514,22 +583,48 @@ func getURL(r *http.Request) *url.URL {
u.Scheme = "http"
}

if u.Host != "" {
} else if host, port, err := net.SplitHostPort(r.Host); err != nil {
u.Host = r.Host
} else {
if port == "80" && u.Scheme == "http" {
u.Host = host
} else if port == "443" && u.Scheme == "https" {
if u.Host == "" {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
host = r.Host
port = ""
}
if len(proxyPort) != 0 {
port = proxyPort
}
if len(port) == 0 {
u.Host = host
} else {
u.Host = net.JoinHostPort(host, port)
if port == "80" && u.Scheme == "http" {
u.Host = host
} else if port == "443" && u.Scheme == "https" {
u.Host = host
} else {
u.Host = net.JoinHostPort(host, port)
}
}
}

return u
}

func (metadata Metadata) remainingLimitHeaderValues() (remainingDownloads, remainingDays string) {
if metadata.MaxDate.IsZero() {
remainingDays = "n/a"
} else {
timeDifference := metadata.MaxDate.Sub(time.Now())
remainingDays = strconv.Itoa(int(timeDifference.Hours()/24) + 1)
}

if metadata.MaxDownloads == -1 {
remainingDownloads = "n/a"
} else {
remainingDownloads = strconv.Itoa(metadata.MaxDownloads - metadata.Downloads)
}

return remainingDownloads, remainingDays
}

func (s *Server) Lock(token, filename string) error {
key := path.Join(token, filename)

@@ -549,42 +644,42 @@ func (s *Server) Unlock(token, filename string) error {
return nil
}

func (s *Server) CheckMetadata(token, filename string) error {
func (s *Server) CheckMetadata(token, filename string, increaseDownload bool) (Metadata, error) {
s.Lock(token, filename)
defer s.Unlock(token, filename)

var metadata Metadata

r, _, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
if s.storage.IsNotExist(err) {
return nil
} else if err != nil {
return err
r, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
if err != nil {
return metadata, err
}

defer r.Close()

if err := json.NewDecoder(r).Decode(&metadata); err != nil {
return err
} else if metadata.Downloads >= metadata.MaxDownloads {
return errors.New("MaxDownloads expired.")
} else if time.Now().After(metadata.MaxDate) {
return errors.New("MaxDate expired.")
return metadata, err
} else if metadata.MaxDownloads != -1 && metadata.Downloads >= metadata.MaxDownloads {
return metadata, errors.New("MaxDownloads expired.")
} else if !metadata.MaxDate.IsZero() && time.Now().After(metadata.MaxDate) {
return metadata, errors.New("MaxDate expired.")
} else {
// todo(nl5887): mutex?

// update number of downloads
metadata.Downloads++
if increaseDownload {
metadata.Downloads++
}

buffer := &bytes.Buffer{}
if err := json.NewEncoder(buffer).Encode(metadata); err != nil {
return errors.New("Could not encode metadata")
return metadata, errors.New("Could not encode metadata")
} else if err := s.storage.Put(token, fmt.Sprintf("%s.metadata", filename), buffer, "text/json", uint64(buffer.Len())); err != nil {
return errors.New("Could not save metadata")
return metadata, errors.New("Could not save metadata")
}
}

return nil
return metadata, nil
}

func (s *Server) CheckDeletionToken(deletionToken, token, filename string) error {
@@ -593,7 +688,7 @@ func (s *Server) CheckDeletionToken(deletionToken, token, filename string) error

var metadata Metadata

r, _, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
r, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
if s.storage.IsNotExist(err) {
return nil
} else if err != nil {
@@ -611,6 +706,19 @@ func (s *Server) CheckDeletionToken(deletionToken, token, filename string) error
return nil
}

func (s *Server) purgeHandler() {
ticker := time.NewTicker(s.purgeInterval)
go func() {
for {
select {
case <-ticker.C:
err := s.storage.Purge(s.purgeDays)
log.Printf("error cleaning up expired files: %v", err)
}
}
}()
}

func (s *Server) deleteHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)

@@ -649,21 +757,17 @@ func (s *Server) zipHandler(w http.ResponseWriter, r *http.Request) {
zw := zip.NewWriter(w)

for _, key := range strings.Split(files, ",") {
if strings.HasPrefix(key, "/") {
key = key[1:]
}

key = strings.Replace(key, "\\", "/", -1)
key = resolveKey(key, s.proxyPath)

token := strings.Split(key, "/")[0]
filename := sanitize(strings.Split(key, "/")[1])

if err := s.CheckMetadata(token, filename); err != nil {
if _, err := s.CheckMetadata(token, filename, true); err != nil {
log.Printf("Error metadata: %s", err.Error())
continue
}

reader, _, _, err := s.storage.Get(token, filename)
reader, _, err := s.storage.Get(token, filename)

if err != nil {
if s.storage.IsNotExist(err) {
@@ -725,21 +829,17 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
defer zw.Close()

for _, key := range strings.Split(files, ",") {
if strings.HasPrefix(key, "/") {
key = key[1:]
}

key = strings.Replace(key, "\\", "/", -1)
key = resolveKey(key, s.proxyPath)

token := strings.Split(key, "/")[0]
filename := sanitize(strings.Split(key, "/")[1])

if err := s.CheckMetadata(token, filename); err != nil {
if _, err := s.CheckMetadata(token, filename, true); err != nil {
log.Printf("Error metadata: %s", err.Error())
continue
}

reader, _, contentLength, err := s.storage.Get(token, filename)
reader, contentLength, err := s.storage.Get(token, filename)
if err != nil {
if s.storage.IsNotExist(err) {
http.Error(w, "File not found", 404)
@@ -788,15 +888,17 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
defer zw.Close()

for _, key := range strings.Split(files, ",") {
key = resolveKey(key, s.proxyPath)

token := strings.Split(key, "/")[0]
filename := strings.Split(key, "/")[1]

if err := s.CheckMetadata(token, filename); err != nil {
if _, err := s.CheckMetadata(token, filename, true); err != nil {
log.Printf("Error metadata: %s", err.Error())
continue
}

reader, _, contentLength, err := s.storage.Get(token, filename)
reader, contentLength, err := s.storage.Get(token, filename)
if err != nil {
if s.storage.IsNotExist(err) {
http.Error(w, "File not found", 404)
@@ -835,14 +937,28 @@ func (s *Server) headHandler(w http.ResponseWriter, r *http.Request) {

token := vars["token"]
filename := vars["filename"]
realFilename := filename
useZstd := false

if err := s.CheckMetadata(token, filename); err != nil {
log.Printf("Error metadata: %s", err.Error())
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
metadata, err := s.CheckMetadata(token, filename, false)

var contentType string
if err != nil {
log.Printf("Error metadata: %s; trying .zst", err.Error())
useZstd = true
filename += ".zst"
metadata, err = s.CheckMetadata(token, filename, false)
if err != nil {
log.Printf("Error metadata .zst: %s", err.Error())
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
contentType = mime.TypeByExtension(filepath.Ext(realFilename))
} else {
contentType = metadata.ContentType
}

contentType, contentLength, err := s.storage.Head(token, filename)
contentLength, err := s.storage.Head(token, filename)
if s.storage.IsNotExist(err) {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
@@ -852,9 +968,17 @@ func (s *Server) headHandler(w http.ResponseWriter, r *http.Request) {
return
}

w.Header().Set("Content-Type", contentType)
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
remainingDownloads, remainingDays := metadata.remainingLimitHeaderValues()

if contentType != "" {
w.Header().Set("Content-Type", contentType)
}
if !useZstd {
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
}
w.Header().Set("Connection", "close")
w.Header().Set("X-Remaining-Downloads", remainingDownloads)
w.Header().Set("X-Remaining-Days", remainingDays)
}

func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
@@ -863,14 +987,25 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
action := vars["action"]
token := vars["token"]
filename := vars["filename"]
realFilename := filename
useZstd := false

if err := s.CheckMetadata(token, filename); err != nil {
log.Printf("Error metadata: %s", err.Error())
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
metadata, err := s.CheckMetadata(token, filename, true)

if err != nil {
log.Printf("Error metadata: %s; trying .zst", err.Error())
useZstd = true
filename += ".zst"
metadata, err = s.CheckMetadata(token, filename, true)
if err != nil {
log.Printf("Error metadata .zst: %s", err.Error())
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
}

reader, contentType, contentLength, err := s.storage.Get(token, filename)
contentType := metadata.ContentType
reader, contentLength, err := s.storage.Get(token, filename)
if s.storage.IsNotExist(err) {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
@@ -882,18 +1017,42 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {

defer reader.Close()

var disposition string
if useZstd {
contentType = mime.TypeByExtension(filepath.Ext(realFilename))

d, err := zstd.NewReader(reader)
if err != nil {
log.Printf("Failed to create zstd reader: %s", err.Error())
http.Error(w, "Could not retrieve file.", 500)
return
}

defer d.Close()

reader = d.IOReadCloser()
}

var disposition string
if action == "inline" {
disposition = "inline"
} else {
disposition = "attachment"
}

w.Header().Set("Content-Type", contentType)
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", disposition, filename))
remainingDownloads, remainingDays := metadata.remainingLimitHeaderValues()

if contentType != "" {
w.Header().Set("Content-Type", contentType)
}
if !useZstd {
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
} else {
w.Header().Set("Transfer-Encoding", "chunked")
}
w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", disposition, realFilename))
w.Header().Set("Connection", "keep-alive")
w.Header().Set("X-Remaining-Downloads", remainingDownloads)
w.Header().Set("X-Remaining-Days", remainingDays)

if w.Header().Get("Range") == "" {
if _, err = io.Copy(w, reader); err != nil {
@@ -929,7 +1088,7 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
}
}

http.ServeContent(w, r, filename, time.Now(), file)
http.ServeContent(w, r, filename, time.Unix(0, 0), file)
}

func (s *Server) RedirectHandler(h http.Handler) http.HandlerFunc {
@@ -943,7 +1102,7 @@ func (s *Server) RedirectHandler(h http.Handler) http.HandlerFunc {
} else if r.Header.Get("X-Forwarded-Proto") == "https" {
} else if r.URL.Scheme == "https" {
} else {
u := getURL(r)
u := getURL(r, s.proxyPort)
u.Scheme = "https"

http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
@@ -964,6 +1123,17 @@ func LoveHandler(h http.Handler) http.HandlerFunc {
}
}

func IPFilterHandler(h http.Handler, ipFilterOptions *IPFilterOptions) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if ipFilterOptions == nil {
h.ServeHTTP(w, r)
} else {
WrapIPFilter(h, *ipFilterOptions).ServeHTTP(w, r)
}
return
}
}

func (s *Server) BasicAuthHandler(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if s.AuthUser == "" || s.AuthPass == "" {
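
previewHandler's markdown branch above now renders first and sanitizes second: blackfriday v2 produces HTML, and bluemonday's UGC policy strips anything dangerous before the result is embedded in the template. A minimal, self-contained sketch of that pattern (the sample input is illustrative):

package main

import (
	"fmt"

	"github.com/microcosm-cc/bluemonday"
	blackfriday "github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Hello\n\n<script>alert(1)</script>\n\n*markdown* survives")

	// blackfriday v2 converts markdown to HTML; raw HTML in the input
	// passes through, so this intermediate result is not safe to serve.
	unsafe := blackfriday.Run(input)

	// The UGC policy keeps ordinary formatting while dropping scripts
	// and other markup that could execute in the preview page.
	safe := bluemonday.UGCPolicy().SanitizeBytes(unsafe)

	fmt.Println(string(safe))
}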

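getHandler and headHandler above fall back to filename + ".zst" and stream the stored object through klauspost/compress on the fly. A short sketch of that decoder usage, with a local file standing in for storage.Get (path and contents illustrative):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Stand-in for the reader returned by storage.Get(token, filename+".zst").
	f, err := os.Open("file.zst")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	d, err := zstd.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer d.Close()

	// IOReadCloser adapts the decoder to io.ReadCloser, so the existing
	// io.Copy download path keeps working unchanged.
	if _, err := io.Copy(os.Stdout, d.IOReadCloser()); err != nil {
		panic(err)
	}
}

Since the decompressed size is unknown up front, the handler skips Content-Length for these responses and relies on chunked transfer instead.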

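MetadataForRequest above replaces the old pseudo-infinite defaults with sentinels: the zero time.Time means no expiry and MaxDownloads == -1 means unlimited, and remainingLimitHeaderValues maps both to "n/a" in the new X-Remaining-Downloads and X-Remaining-Days headers. A hypothetical helper inside the server package illustrating that mapping:

package server

import "fmt"

// exampleRemainingLimits is illustrative only and not part of the change set.
func exampleRemainingLimits() {
	m := Metadata{MaxDownloads: -1, Downloads: 3} // MaxDate stays the zero time.Time

	remainingDownloads, remainingDays := m.remainingLimitHeaderValues()
	fmt.Println(remainingDownloads, remainingDays) // prints: n/a n/a
}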
+ 212
- 0
server/ip_filter.go

@@ -0,0 +1,212 @@
/*
MIT License
Copyright © 2016 <dev@jpillora.com>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

package server

import (
"log"
"net"
"net/http"
"os"
"sync"

"github.com/tomasen/realip"
)

//IPFilterOptions for IPFilter. Allowed takes precedence over Blocked.
//IPs can be IPv4 or IPv6 and can optionally contain subnet
//masks (/24). Note however, determining if a given IP is
//included in a subnet requires a linear scan so is less performant
//than looking up single IPs.
//
//This could be improved with some algorithmic magic.
type IPFilterOptions struct {
//explicitly allowed IPs
AllowedIPs []string
//explicitly blocked IPs
BlockedIPs []string
//block by default (defaults to allow)
BlockByDefault bool
// TrustProxy enables resolving the request IP from proxy headers
TrustProxy bool

Logger interface {
Printf(format string, v ...interface{})
}
}

type IPFilter struct {
opts IPFilterOptions
//mut protects the below
//rw since writes are rare
mut sync.RWMutex
defaultAllowed bool
ips map[string]bool
subnets []*subnet
}

type subnet struct {
str string
ipnet *net.IPNet
allowed bool
}

//NewIPFilter constructs an IPFilter instance.
func NewIPFilter(opts IPFilterOptions) *IPFilter {
if opts.Logger == nil {
flags := log.LstdFlags
opts.Logger = log.New(os.Stdout, "", flags)
}
f := &IPFilter{
opts: opts,
ips: map[string]bool{},
defaultAllowed: !opts.BlockByDefault,
}
for _, ip := range opts.BlockedIPs {
f.BlockIP(ip)
}
for _, ip := range opts.AllowedIPs {
f.AllowIP(ip)
}
return f
}

func (f *IPFilter) AllowIP(ip string) bool {
return f.ToggleIP(ip, true)
}

func (f *IPFilter) BlockIP(ip string) bool {
return f.ToggleIP(ip, false)
}

func (f *IPFilter) ToggleIP(str string, allowed bool) bool {
//check whether the entry includes a subnet mask
if ip, net, err := net.ParseCIDR(str); err == nil {
// containing only one ip?
if n, total := net.Mask.Size(); n == total {
f.mut.Lock()
f.ips[ip.String()] = allowed
f.mut.Unlock()
return true
}
//check for existing
f.mut.Lock()
found := false
for _, subnet := range f.subnets {
if subnet.str == str {
found = true
subnet.allowed = allowed
break
}
}
if !found {
f.subnets = append(f.subnets, &subnet{
str: str,
ipnet: net,
allowed: allowed,
})
}
f.mut.Unlock()
return true
}
//check if plain ip
if ip := net.ParseIP(str); ip != nil {
f.mut.Lock()
f.ips[ip.String()] = allowed
f.mut.Unlock()
return true
}
return false
}

//ToggleDefault alters the default setting
func (f *IPFilter) ToggleDefault(allowed bool) {
f.mut.Lock()
f.defaultAllowed = allowed
f.mut.Unlock()
}

//Allowed returns if a given IP can pass through the filter
func (f *IPFilter) Allowed(ipstr string) bool {
return f.NetAllowed(net.ParseIP(ipstr))
}

//NetAllowed returns if a given net.IP can pass through the filter
func (f *IPFilter) NetAllowed(ip net.IP) bool {
//invalid ip
if ip == nil {
return false
}
//read lock entire function
//except for db access
f.mut.RLock()
defer f.mut.RUnlock()
//check single ips
allowed, ok := f.ips[ip.String()]
if ok {
return allowed
}
//scan subnets for any allow/block
blocked := false
for _, subnet := range f.subnets {
if subnet.ipnet.Contains(ip) {
if subnet.allowed {
return true
}
blocked = true
}
}
if blocked {
return false
}

//use default setting
return f.defaultAllowed
}

//Blocked returns if a given IP can NOT pass through the filter
func (f *IPFilter) Blocked(ip string) bool {
return !f.Allowed(ip)
}

//NetBlocked returns if a given net.IP can NOT pass through the filter
func (f *IPFilter) NetBlocked(ip net.IP) bool {
return !f.NetAllowed(ip)
}

//Wrap wraps the provided handler with simple IP blocking middleware
//using this IP filter and its configuration
func (f *IPFilter) Wrap(next http.Handler) http.Handler {
return &ipFilterMiddleware{IPFilter: f, next: next}
}

//WrapIPFilter is equivalent to NewIPFilter(opts) then Wrap(next)
func WrapIPFilter(next http.Handler, opts IPFilterOptions) http.Handler {
return NewIPFilter(opts).Wrap(next)
}

type ipFilterMiddleware struct {
*IPFilter
next http.Handler
}

func (m *ipFilterMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
remoteIP := realip.FromRequest(r)

if !m.IPFilter.Allowed(remoteIP) {
//show simple forbidden text
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
return
}

//success!
m.next.ServeHTTP(w, r)
}

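The filter above resolves single IPs through a map and CIDR ranges through a linear subnet scan, with allow entries taking precedence and unmatched addresses falling back to the default. A hypothetical helper in the server package showing the behavior (addresses are placeholders):

package server

import (
	"fmt"
	"net/http"
)

// exampleIPFilter is illustrative only and not part of the change set.
func exampleIPFilter() {
	f := NewIPFilter(IPFilterOptions{
		BlockByDefault: true, // deny anything not explicitly allowed
		AllowedIPs:     []string{"127.0.0.1", "10.0.0.0/8"},
	})

	fmt.Println(f.Allowed("10.1.2.3"))    // true: inside the allowed subnet
	fmt.Println(f.Blocked("203.0.113.7")) // true: hits the block-by-default

	// Server.Run wires the same check in front of every handler via
	// IPFilterHandler / WrapIPFilter.
	_ = f.Wrap(http.NotFoundHandler())
}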
+ 96
- 2
server/server.go

@@ -25,7 +25,10 @@ THE SOFTWARE.
package server

import (
crypto_rand "crypto/rand"
"encoding/binary"
"errors"
gorillaHandlers "github.com/gorilla/handlers"
"log"
"math/rand"
"mime"
@@ -85,6 +88,13 @@ func Listener(s string) OptionFn {

}

func CorsDomains(s string) OptionFn {
return func(srvr *Server) {
srvr.CorsDomains = s
}

}

func GoogleAnalytics(gaKey string) OptionFn {
return func(srvr *Server) {
srvr.gaKey = gaKey
@@ -121,6 +131,22 @@ func WebPath(s string) OptionFn {
}
}

func ProxyPath(s string) OptionFn {
return func(srvr *Server) {
if s[len(s)-1:] != "/" {
s = s + string(filepath.Separator)
}

srvr.proxyPath = s
}
}

func ProxyPort(s string) OptionFn {
return func(srvr *Server) {
srvr.proxyPort = s
}
}

func TempPath(s string) OptionFn {
return func(srvr *Server) {
if s[len(s)-1:] != "/" {
@@ -149,12 +175,25 @@ func Logger(logger *log.Logger) OptionFn {
}
}

func MaxUploadSize(kbytes int64) OptionFn {
return func(srvr *Server) {
srvr.maxUploadSize = kbytes * 1024
}

}
func RateLimit(requests int) OptionFn {
return func(srvr *Server) {
srvr.rateLimitRequests = requests
}
}

func Purge(days, interval int) OptionFn {
return func(srvr *Server) {
srvr.purgeDays = time.Duration(days) * time.Hour * 24
srvr.purgeInterval = time.Duration(interval) * time.Hour
}
}

func ForceHTTPs() OptionFn {
return func(srvr *Server) {
srvr.forceHTTPs = true
@@ -219,6 +258,20 @@ func HttpAuthCredentials(user string, pass string) OptionFn {
}
}

func FilterOptions(options IPFilterOptions) OptionFn {
for i, allowedIP := range options.AllowedIPs {
options.AllowedIPs[i] = strings.TrimSpace(allowedIP)
}

for i, blockedIP := range options.BlockedIPs {
options.BlockedIPs[i] = strings.TrimSpace(blockedIP)
}

return func(srvr *Server) {
srvr.ipFilterOptions = &options
}
}

type Server struct {
AuthUser string
AuthPass string
@@ -231,23 +284,32 @@ type Server struct {

locks map[string]*sync.Mutex

maxUploadSize int64
rateLimitRequests int

purgeDays time.Duration
purgeInterval time.Duration

storage Storage

forceHTTPs bool

ipFilterOptions *IPFilterOptions

VirusTotalKey string
ClamAVDaemonHost string

tempPath string

webPath string
proxyPath string
proxyPort string
gaKey string
userVoiceKey string

TLSListenerOnly bool

CorsDomains string
ListenerString string
TLSListenerString string
ProfileListenerString string
@@ -270,7 +332,11 @@ func New(options ...OptionFn) (*Server, error) {
}

func init() {
rand.Seed(time.Now().UTC().UnixNano())
var seedBytes [8]byte
if _, err := crypto_rand.Read(seedBytes[:]); err != nil {
panic("cannot obtain cryptographically secure seed")
}
rand.Seed(int64(binary.LittleEndian.Uint64(seedBytes[:])))
}

func (s *Server) Run() {
@@ -341,6 +407,7 @@ func (s *Server) Run() {
r.HandleFunc("/{action:(?:download|get|inline)}/{token}/{filename}", s.headHandler).Methods("HEAD")

r.HandleFunc("/{token}/{filename}", s.previewHandler).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) (match bool) {
return false
match = false

// The file will show a preview page when opening the link in browser directly or
@@ -386,7 +453,30 @@ func (s *Server) Run() {

s.logger.Printf("Transfer.sh server started.\nusing temp folder: %s\nusing storage provider: %s", s.tempPath, s.storage.Type())

h := handlers.PanicHandler(handlers.LogHandler(LoveHandler(s.RedirectHandler(r)), handlers.NewLogOptions(s.logger.Printf, "_default_")), nil)
var cors func(http.Handler) http.Handler
if len(s.CorsDomains) > 0 {
cors = gorillaHandlers.CORS(
gorillaHandlers.AllowedHeaders([]string{"*"}),
gorillaHandlers.AllowedOrigins(strings.Split(s.CorsDomains, ",")),
gorillaHandlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}),
)
} else {
cors = func(h http.Handler) http.Handler {
return h
}
}

h := handlers.PanicHandler(
IPFilterHandler(
handlers.LogHandler(
LoveHandler(
s.RedirectHandler(cors(r))),
handlers.NewLogOptions(s.logger.Printf, "_default_"),
),
s.ipFilterOptions,
),
nil,
)

if !s.TLSListenerOnly {
srvr := &http.Server{
@@ -421,6 +511,10 @@ func (s *Server) Run() {

s.logger.Printf("---------------------------")

if s.purgeDays > 0 {
go s.purgeHandler()
}

term := make(chan os.Signal, 1)
signal.Notify(term, os.Interrupt)
signal.Notify(term, syscall.SIGTERM)


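All of the new knobs flow through the same functional-options constructor. A hedged sketch of wiring them together: the import path is assumed from the repository layout, the values are illustrative, and a real configuration would also pick a storage backend and listener through options outside this hunk.

package main

import (
	"log"
	"os"

	"github.com/dutchcoders/transfer.sh/server"
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)

	s, err := server.New(
		server.ProxyPath("transfer/"),   // URL prefix when running behind a reverse proxy
		server.ProxyPort("443"),         // externally visible port used in generated URLs
		server.MaxUploadSize(10*1024),   // kilobytes; larger uploads get 413
		server.Purge(7, 1),              // remove files older than 7 days, checking hourly
		server.CorsDomains("https://example.com"),
		server.Logger(logger),
	)
	if err != nil {
		logger.Fatal(err)
	}

	s.Run()
}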
+ 266
- 170
server/storage.go

@@ -1,34 +1,39 @@
package server

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/goamz/goamz/s3"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"storj.io/common/storj"
"storj.io/uplink"
)

type Storage interface {
Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error)
Head(token string, filename string) (contentType string, contentLength uint64, err error)
Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error)
Head(token string, filename string) (contentLength uint64, err error)
Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
Delete(token string, filename string) error
IsNotExist(err error) bool
Purge(days time.Duration) error

Type() string
}
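
Since Get and Head no longer return a content type, deriving it becomes the caller's job. An illustrative consumer of the narrowed interface (serveStored is a made-up helper, not code from this diff; it assumes the surrounding package imports io, io/ioutil, log, mime, and path/filepath):

```go
// Illustrative only.
func serveStored(s Storage, token, filename string) error {
	reader, contentLength, err := s.Get(token, filename)
	if err != nil {
		return err
	}
	defer reader.Close()

	// Content type is now derived by the caller, e.g. from the extension.
	contentType := mime.TypeByExtension(filepath.Ext(filename))
	log.Printf("%s/%s: %s, %d bytes", token, filename, contentType, contentLength)

	_, err = io.Copy(ioutil.Discard, reader)
	return err
}
```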
@@ -47,7 +52,7 @@ func (s *LocalStorage) Type() string {
return "local"
}

func (s *LocalStorage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
func (s *LocalStorage) Head(token string, filename string) (contentLength uint64, err error) {
path := filepath.Join(s.basedir, token, filename)

var fi os.FileInfo
@@ -57,12 +62,10 @@ func (s *LocalStorage) Head(token string, filename string) (contentType string,

contentLength = uint64(fi.Size())

contentType = mime.TypeByExtension(filepath.Ext(filename))

return
}

func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
path := filepath.Join(s.basedir, token, filename)

// content length
@@ -77,8 +80,6 @@ func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser,

contentLength = uint64(fi.Size())

contentType = mime.TypeByExtension(filepath.Ext(filename))

return
}

@@ -91,6 +92,27 @@ func (s *LocalStorage) Delete(token string, filename string) (err error) {
return
}

func (s *LocalStorage) Purge(days time.Duration) (err error) {
err = filepath.Walk(s.basedir,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}

if info.ModTime().Before(time.Now().Add(-1 * days)) {
err = os.Remove(path)
return err
}

return nil
})

return
}

func (s *LocalStorage) IsNotExist(err error) bool {
if err == nil {
return false
@@ -105,7 +127,7 @@ func (s *LocalStorage) Put(token string, filename string, reader io.Reader, cont

path := filepath.Join(s.basedir, token)

if err = os.Mkdir(path, 0700); err != nil && !os.IsExist(err) {
if err = os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
return err
}

@@ -124,192 +146,112 @@ func (s *LocalStorage) Put(token string, filename string, reader io.Reader, cont

type S3Storage struct {
Storage
bucket *s3.Bucket
bucket string
session *session.Session
s3 *s3.S3
logger *log.Logger
purgeDays time.Duration
noMultipart bool
}

func NewS3Storage(accessKey, secretKey, bucketName, endpoint string, logger *log.Logger, disableMultipart bool) (*S3Storage, error) {
bucket, err := getBucket(accessKey, secretKey, bucketName, endpoint)
if err != nil {
return nil, err
}
func NewS3Storage(accessKey, secretKey, bucketName string, purgeDays int, region, endpoint string, disableMultipart bool, forcePathStyle bool, logger *log.Logger) (*S3Storage, error) {
sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)

return &S3Storage{bucket: bucket, logger: logger, noMultipart: disableMultipart}, nil
return &S3Storage{
bucket: bucketName,
s3: s3.New(sess),
session: sess,
logger: logger,
noMultipart: disableMultipart,
purgeDays: time.Duration(purgeDays*24) * time.Hour,
}, nil
}
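
A hedged construction example for the rewritten backend, matching the signature above; every value is a placeholder:

```go
// Illustrative values only.
logger := log.New(os.Stdout, "", log.LstdFlags)
store, err := NewS3Storage(
	"ACCESS_KEY", "SECRET_KEY", // static credentials
	"my-bucket",                // bucket name
	7,                          // purge after 7 days (applied as Expires at upload time)
	"us-east-1",                // region
	"",                         // empty endpoint selects the default AWS endpoint
	false,                      // keep multipart uploads enabled
	false,                      // virtual-hosted-style bucket addressing
	logger,
)
if err != nil {
	log.Fatal(err)
}
_ = store
```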

func (s *S3Storage) Type() string {
return "s3"
}

func (s *S3Storage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
func (s *S3Storage) Head(token string, filename string) (contentLength uint64, err error) {
key := fmt.Sprintf("%s/%s", token, filename)

headRequest := &s3.HeadObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}

// content length
response, err := s.bucket.Head(key, map[string][]string{})
response, err := s.s3.HeadObject(headRequest)
if err != nil {
return
}
contentType = response.Header.Get("Content-Type")

contentLength, err = strconv.ParseUint(response.Header.Get("Content-Length"), 10, 0)
if err != nil {
return
if response.ContentLength != nil {
contentLength = uint64(*response.ContentLength)
}

return
}

func (s *S3Storage) Purge(days time.Duration) (err error) {
// NOOP expiration is set at upload time
return nil
}

func (s *S3Storage) IsNotExist(err error) bool {
if err == nil {
return false
}

s.logger.Printf("IsNotExist: %s, %#v", err.Error(), err)
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchKey:
return true
}
}

b := (err.Error() == "The specified key does not exist.")
b = b || (err.Error() == "Access Denied")
return b
return false
}

func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
key := fmt.Sprintf("%s/%s", token, filename)

// content length
response, err := s.bucket.GetResponse(key)
if err != nil {
return
getRequest := &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}

contentType = response.Header.Get("Content-Type")
contentLength, err = strconv.ParseUint(response.Header.Get("Content-Length"), 10, 0)
response, err := s.s3.GetObject(getRequest)
if err != nil {
return
}

if response.ContentLength != nil {
contentLength = uint64(*response.ContentLength)
}

reader = response.Body
return
}

func (s *S3Storage) Delete(token string, filename string) (err error) {
metadata := fmt.Sprintf("%s/%s.metadata", token, filename)
s.bucket.Del(metadata)

key := fmt.Sprintf("%s/%s", token, filename)
err = s.bucket.Del(key)

return
}

func (s *S3Storage) putMulti(key string, reader io.Reader, contentType string, contentLength uint64) (err error) {

var (
multi *s3.Multi
parts []s3.Part
)

if multi, err = s.bucket.InitMulti(key, contentType, s3.Private); err != nil {
s.logger.Printf(err.Error())
return
deleteRequest := &s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(metadata),
}

// 20 mb parts
partsChan := make(chan interface{})
// partsChan := make(chan s3.Part)

go func() {
// maximize to 20 threads
sem := make(chan int, 20)
index := 1
var wg sync.WaitGroup

for {
// buffered in memory because goamz s3 multi needs seekable reader
var (
buffer []byte = make([]byte, (1<<20)*10)
count int
err error
)

// Amazon expects parts of at least 5MB, except for the last one
if count, err = io.ReadAtLeast(reader, buffer, (1<<20)*5); err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
s.logger.Printf(err.Error())
return
}

// always send minimal 1 part
if err == io.EOF && index > 1 {
s.logger.Printf("Waiting for all parts to finish uploading.")

// wait for all parts to be finished uploading
wg.Wait()

// and close the channel
close(partsChan)

return
}

wg.Add(1)

sem <- 1

// using goroutines because of retries when upload fails
go func(multi *s3.Multi, buffer []byte, index int) {
s.logger.Printf("Uploading part %d %d", index, len(buffer))

defer func() {
s.logger.Printf("Finished part %d %d", index, len(buffer))

wg.Done()

<-sem
}()

partReader := bytes.NewReader(buffer)

var part s3.Part

if part, err = multi.PutPart(index, partReader); err != nil {
s.logger.Printf("Error while uploading part %d %d %s", index, len(buffer), err.Error())
partsChan <- err
return
}

s.logger.Printf("Finished uploading part %d %d", index, len(buffer))

partsChan <- part

}(multi, buffer[:count], index)

index++
}
}()

// wait for all parts to be uploaded
for part := range partsChan {
switch part.(type) {
case s3.Part:
parts = append(parts, part.(s3.Part))
case error:
// abort multi upload
s.logger.Printf("Error during upload, aborting %s.", part.(error).Error())
err = part.(error)

multi.Abort()
return
}

_, err = s.s3.DeleteObject(deleteRequest)
if err != nil {
return
}

s.logger.Printf("Completing upload %d parts", len(parts))

if err = multi.Complete(parts); err != nil {
s.logger.Printf("Error during completing upload %d parts %s", len(parts), err.Error())
return
key := fmt.Sprintf("%s/%s", token, filename)
deleteRequest = &s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}

s.logger.Printf("Completed uploading %d", len(parts))
_, err = s.s3.DeleteObject(deleteRequest)

return
}
@@ -318,17 +260,25 @@ func (s *S3Storage) Put(token string, filename string, reader io.Reader, content
key := fmt.Sprintf("%s/%s", token, filename)

s.logger.Printf("Uploading file %s to S3 Bucket", filename)
var concurrency int
if !s.noMultipart {
err = s.putMulti(key, reader, contentType, contentLength)
concurrency = 20
} else {
err = s.bucket.PutReader(key, reader, int64(contentLength), contentType, s3.Private, s3.Options{})
concurrency = 1
}

if err != nil {
return
}
// Create an uploader with the session and custom options
uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
u.Concurrency = concurrency // default is 5
u.LeavePartsOnError = false
})

s.logger.Printf("Completed uploading %s", filename)
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
Body: reader,
Expires: aws.Time(time.Now().Add(s.purgeDays)),
})

return
}
@@ -475,7 +425,7 @@ func (s *GDrive) Type() string {
return "gdrive"
}

func (s *GDrive) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
func (s *GDrive) Head(token string, filename string) (contentLength uint64, err error) {
var fileId string
fileId, err = s.findId(filename, token)
if err != nil {
@@ -483,18 +433,16 @@ func (s *GDrive) Head(token string, filename string) (contentType string, conten
}

var fi *drive.File
if fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size").Do(); err != nil {
if fi, err = s.service.Files.Get(fileId).Fields("size").Do(); err != nil {
return
}

contentLength = uint64(fi.Size)

contentType = fi.MimeType

return
}

func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
var fileId string
fileId, err = s.findId(filename, token)
if err != nil {
@@ -502,14 +450,13 @@ func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, conte
}

var fi *drive.File
fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size", "md5Checksum").Do()
fi, err = s.service.Files.Get(fileId).Fields("size", "md5Checksum").Do()
if err != nil {
return
}
if !s.hasChecksum(fi) {
err = fmt.Errorf("Cannot find file %s/%s", token, filename)
return
}

contentLength = uint64(fi.Size)
contentType = fi.MimeType

ctx := context.Background()
var res *http.Response
@@ -537,11 +484,35 @@ func (s *GDrive) Delete(token string, filename string) (err error) {
return
}

func (s *GDrive) IsNotExist(err error) bool {
if err == nil {
return false
func (s *GDrive) Purge(days time.Duration) (err error) {
nextPageToken := ""

expirationDate := time.Now().Add(-1 * days).Format(time.RFC3339)
q := fmt.Sprintf("'%s' in parents and modifiedTime < '%s' and mimeType!='%s' and trashed=false", s.rootId, expirationDate, GDriveDirectoryMimeType)
l, err := s.list(nextPageToken, q)
if err != nil {
return err
}

for 0 < len(l.Files) {
for _, fi := range l.Files {
err = s.service.Files.Delete(fi.Id).Do()
if err != nil {
return
}
}

if l.NextPageToken == "" {
break
}

l, err = s.list(l.NextPageToken, q)
if err != nil {
return
}
}

return
}

func (s *GDrive) IsNotExist(err error) bool {
if err != nil {
if e, ok := err.(*googleapi.Error); ok {
return e.Code == http.StatusNotFound
@@ -642,3 +613,128 @@ func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {

json.NewEncoder(f).Encode(token)
}

type StorjStorage struct {
Storage
project *uplink.Project
bucket *uplink.Bucket
purgeDays time.Duration
logger *log.Logger
}

func NewStorjStorage(access, bucket string, purgeDays int, logger *log.Logger) (*StorjStorage, error) {
var instance StorjStorage
var err error

ctx := context.TODO()

parsedAccess, err := uplink.ParseAccess(access)
if err != nil {
return nil, err
}

instance.project, err = uplink.OpenProject(ctx, parsedAccess)
if err != nil {
return nil, err
}

instance.bucket, err = instance.project.EnsureBucket(ctx, bucket)
if err != nil {
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = instance.project.Close()
return nil, err
}

instance.purgeDays = time.Duration(purgeDays*24) * time.Hour

instance.logger = logger

return &instance, nil
}

func (s *StorjStorage) Type() string {
return "storj"
}

func (s *StorjStorage) Head(token string, filename string) (contentLength uint64, err error) {
key := storj.JoinPaths(token, filename)

ctx := context.TODO()

obj, err := s.project.StatObject(ctx, s.bucket.Name, key)
if err != nil {
return 0, err
}

contentLength = uint64(obj.System.ContentLength)

return
}

func (s *StorjStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
key := storj.JoinPaths(token, filename)

s.logger.Printf("Getting file %s from Storj Bucket", filename)

ctx := context.TODO()

download, err := s.project.DownloadObject(ctx, s.bucket.Name, key, nil)
if err != nil {
return nil, 0, err
}

contentLength = uint64(download.Info().System.ContentLength)

reader = download
return
}

func (s *StorjStorage) Delete(token string, filename string) (err error) {
key := storj.JoinPaths(token, filename)

s.logger.Printf("Deleting file %s from Storj Bucket", filename)

ctx := context.TODO()

_, err = s.project.DeleteObject(ctx, s.bucket.Name, key)

return
}

func (s *StorjStorage) Purge(days time.Duration) (err error) {
// NOOP expiration is set at upload time
return nil
}

func (s *StorjStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
key := storj.JoinPaths(token, filename)

s.logger.Printf("Uploading file %s to Storj Bucket", filename)

ctx := context.TODO()

writer, err := s.project.UploadObject(ctx, s.bucket.Name, key, &uplink.UploadOptions{Expires: time.Now().Add(s.purgeDays)})
if err != nil {
return err
}

n, err := io.Copy(writer, reader)
if err != nil || uint64(n) != contentLength {
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = writer.Abort()
return err
}
err = writer.SetCustomMetadata(ctx, uplink.CustomMetadata{"content-type": contentType})
if err != nil {
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = writer.Abort()
return err
}

err = writer.Commit()
return err
}

func (s *StorjStorage) IsNotExist(err error) bool {
return errors.Is(err, uplink.ErrObjectNotFound)
}
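
And a matching construction sketch for the Storj backend (the access grant and bucket name are placeholders; a real access grant is a long serialized string issued by the Storj satellite):

```go
// Illustrative values only.
logger := log.New(os.Stdout, "", log.LstdFlags)
store, err := NewStorjStorage("ACCESS_GRANT", "transfersh", 7, logger)
if err != nil {
	log.Fatal(err)
}
// Objects written through store.Put carry a 7-day expiry set at upload time.
_ = store
```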

+ 10
- 41
server/utils.go View File

@@ -30,51 +30,20 @@ import (
"net/mail"
"strconv"
"strings"
"time"

"github.com/goamz/goamz/aws"
"github.com/goamz/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/golang/gddo/httputil/header"
)

func getBucket(accessKey, secretKey, bucket, endpoint string) (*s3.Bucket, error) {
auth, err := aws.GetAuth(accessKey, secretKey, "", time.Time{})
if err != nil {
return nil, err
}

var EUWestWithoutHTTPS = aws.Region{
Name: "eu-west-1",
EC2Endpoint: "https://ec2.eu-west-1.amazonaws.com",
S3Endpoint: endpoint,
S3BucketEndpoint: "",
S3LocationConstraint: true,
S3LowercaseBucket: true,
SDBEndpoint: "https://sdb.eu-west-1.amazonaws.com",
SESEndpoint: "https://email.eu-west-1.amazonaws.com",
SNSEndpoint: "https://sns.eu-west-1.amazonaws.com",
SQSEndpoint: "https://sqs.eu-west-1.amazonaws.com",
IAMEndpoint: "https://iam.amazonaws.com",
ELBEndpoint: "https://elasticloadbalancing.eu-west-1.amazonaws.com",
DynamoDBEndpoint: "https://dynamodb.eu-west-1.amazonaws.com",
CloudWatchServicepoint: aws.ServiceInfo{
Endpoint: "https://monitoring.eu-west-1.amazonaws.com",
Signer: aws.V2Signature,
},
AutoScalingEndpoint: "https://autoscaling.eu-west-1.amazonaws.com",
RDSEndpoint: aws.ServiceInfo{
Endpoint: "https://rds.eu-west-1.amazonaws.com",
Signer: aws.V2Signature,
},
STSEndpoint: "https://sts.amazonaws.com",
CloudFormationEndpoint: "https://cloudformation.eu-west-1.amazonaws.com",
ECSEndpoint: "https://ecs.eu-west-1.amazonaws.com",
DynamoDBStreamsEndpoint: "https://streams.dynamodb.eu-west-1.amazonaws.com",
}

conn := s3.New(auth, EUWestWithoutHTTPS)
b := conn.Bucket(bucket)
return b, nil
func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle bool) *session.Session {
return session.Must(session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
S3ForcePathStyle: aws.Bool(forcePathStyle),
}))
}
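
forcePathStyle matters mainly for self-hosted S3-compatible servers such as MinIO, which are usually addressed path-style rather than virtual-hosted-style. A hedged usage sketch with placeholder values:

```go
// Path-style addressing against a custom, S3-compatible endpoint.
sess := getAwsSession(
	"ACCESS_KEY", "SECRET_KEY",
	"us-east-1",                 // a region is still required by the SDK
	"https://minio.example.com", // placeholder endpoint
	true,                        // S3ForcePathStyle: bucket in the path, not the hostname
)
_ = sess
```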

func formatNumber(format string, s uint64) string {


+ 0
- 15
vendor/cloud.google.com/go/AUTHORS View File

@@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

+ 0
- 1122
vendor/cloud.google.com/go/CHANGES.md
File diff suppressed because it is too large
View File


+ 0
- 44
vendor/cloud.google.com/go/CODE_OF_CONDUCT.md View File

@@ -1,44 +0,0 @@
# Contributor Code of Conduct

As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)


+ 0
- 234
vendor/cloud.google.com/go/CONTRIBUTING.md View File

@@ -1,234 +0,0 @@
# Contributing

1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing
tool.
1. You will need to ensure that your `GOBIN` directory (by default
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
that all error messages will assume that you have set up these aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote
git origin is https://code.googlesource.com/gocloud:

```
git remote set-url origin https://code.googlesource.com/gocloud
```

1. Make sure your auth is configured correctly by visiting
https://code.googlesource.com, clicking "Generate Password", and following the
directions.
1. Make changes and create a change by running `git codereview change <name>`,
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as your
receive feedback. Each new mailed amendment will create a new patch set for
your change in Gerrit.

## Integration Tests

In addition to the unit tests, you may run the integration test suite. These
directions describe setting up your environment to run integration tests for
_all_ packages: note that many of these instructions may be redundant if you
intend only to run integration tests on a single package.

#### GCP Setup

To run the integration tests, creation and configuration of two projects in
the Google Developers Console is required: one specifically for Firestore
integration tests, and another for all other integration tests. We'll refer to
these projects as "general project" and "Firestore project".

After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
for each project. Ensure the project-level **Owner**
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) is added to
each service account. During the creation of the service account, you should
download the JSON credential file for use later.

Next, ensure the following APIs are enabled in the general project:

- BigQuery API
- BigQuery Data Transfer API
- Cloud Dataproc API
- Cloud Dataproc Control API Private
- Cloud Datastore API
- Cloud Firestore API
- Cloud Key Management Service (KMS) API
- Cloud Natural Language API
- Cloud OS Login API
- Cloud Pub/Sub API
- Cloud Resource Manager API
- Cloud Spanner API
- Cloud Speech API
- Cloud Translation API
- Cloud Video Intelligence API
- Cloud Vision API
- Compute Engine API
- Compute Engine Instance Group Manager API
- Container Registry API
- Firebase Rules API
- Google Cloud APIs
- Google Cloud Deployment Manager V2 API
- Google Cloud SQL
- Google Cloud Storage
- Google Cloud Storage JSON API
- Google Compute Engine Instance Group Updater API
- Google Compute Engine Instance Groups API
- Kubernetes Engine API
- Stackdriver Error Reporting API

Next, create a Datastore database in the general project, and a Firestore
database in the Firestore project.

Finally, in the general project, create an API key for the translate API:

- Go to GCP Developer Console.
- Navigate to APIs & Services > Credentials.
- Click Create Credentials > API Key.
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.

#### Local Setup

Once the two projects are created and configured, set the following environment
variables:

- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
bamboo-shift-455) for the general project.
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
project's service account.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
(e.g. doorway-cliff-677) for the Firestore project.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
Firestore project's service account.
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
in the form
"projects/P/locations/L/keyRings/R". The creation of this is described below.
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.

Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
create some resources used in integration tests.

From the project's root directory:

``` sh
# Sets the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID

# Authenticates the gcloud tool with your account.
$ gcloud auth login

# Create the indexes used in the datastore integration tests.
$ gcloud datastore indexes create datastore/testdata/index.yaml

# Creates a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

# Creates a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
# as a publisher to that topic.

# Creates a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to
# delete the instance after testing with 'gcloud beta spanner instances delete'.

$ export MY_KEYRING=some-keyring-name
$ export MY_LOCATION=global
# Creates a KMS keyring, in the same location as the default location for your
# project's buckets.
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
# Creates two keys in the keyring, named key1 and key2.
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
```

#### Running

Once you've done the necessary setup, you can run the integration tests by
running:

``` sh
$ go test -v cloud.google.com/go/...
```

#### Replay

Some packages can record the RPCs during integration tests to a file for
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short`
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
environment variable must have a non-empty value.

## Contributor License Agreements

Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):

- **If you are an individual writing original source code** and **you own the
intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your
work**, then you'll need to sign a [corporate CLA][corpcla].

You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.

## Contributor Code of Conduct

As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)

[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate

+ 0
- 40
vendor/cloud.google.com/go/CONTRIBUTORS View File

@@ -1,40 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>

# Keep the list alphabetically sorted.

Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

+ 0
- 202
vendor/cloud.google.com/go/LICENSE View File

@@ -1,202 +0,0 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

+ 0
- 504
vendor/cloud.google.com/go/README.md View File

@@ -1,505 +0,0 @@
# Google Cloud Client Libraries for Go

[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)

Go packages for [Google Cloud Platform](https://cloud.google.com) services.

``` go
import "cloud.google.com/go"
```

To install the packages on your system, *do not clone the repo*. Instead use

```
$ go get -u cloud.google.com/go/...
```

**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.

**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

* [News](#news)
* [Supported APIs](#supported-apis)
* [Go Versions Supported](#go-versions-supported)
* [Authorization](#authorization)
* [Cloud Datastore](#cloud-datastore-)
* [Cloud Storage](#cloud-storage-)
* [Cloud Pub/Sub](#cloud-pub-sub-)
* [BigQuery](#cloud-bigquery-)
* [Stackdriver Logging](#stackdriver-logging-)
* [Cloud Spanner](#cloud-spanner-)


## News

_7 August 2018_

As of November 1, the code in the repo will no longer support Go versions 1.8
and earlier. No one other than AppEngine users should be on those old versions,
and AppEngine
[Standard](https://groups.google.com/forum/#!topic/google-appengine-go/e7oPNomd7ak)
and
[Flex](https://groups.google.com/forum/#!topic/google-appengine-go/wHsYtxvEbXI)
will stop supporting new deployments with those versions on that date.


Changes have been moved to [CHANGES](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CHANGES.md).


## Supported APIs

Google API | Status | Package
---------- | ------ | -------
[Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Cloudtasks][cloud-tasks] | beta | [`cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref]
[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref]
[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Memorystore][cloud-memorystore] | stable | [`cloud.google.com/go/redis/apiv1beta1`][cloud-memorystore-ref]
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]

> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.

Documentation and examples are available at
https://godoc.org/cloud.google.com/go

Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.

## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well.

## Authorization

By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

[snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
to the `NewClient` function of the desired package. For example:

[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```

## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)

- [About Cloud Datastore][cloud-datastore]
- [Activating the API for your project][cloud-datastore-activation]
- [API documentation][cloud-datastore-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)

### Example Usage

First create a `datastore.Client` to use throughout your application:

[snip]:# (datastore-1)
```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
log.Fatal(err)
}
```

Then use that client to interact with the API:

[snip]:# (datastore-2)
```go
type Post struct {
Title string
Body string `datastore:",noindex"`
PublishedAt time.Time
}
keys := []*datastore.Key{
datastore.NameKey("Post", "post1", nil),
datastore.NameKey("Post", "post2", nil),
}
posts := []*Post{
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
log.Fatal(err)
}
```

## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)

- [About Cloud Storage][cloud-storage]
- [API documentation][cloud-storage-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
```

[snip]:# (storage-2)
```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
log.Fatal(err)
}
```

## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)

- [About Cloud Pubsub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)

### Example Usage

First create a `pubsub.Client` to use throughout your application:

[snip]:# (pubsub-1)
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
```

Then use the client to publish and subscribe:

[snip]:# (pubsub-2)
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
res := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil {
log.Fatal(err)
}

// Use a callback to receive messages via subscription1.
sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
fmt.Println(m.Data)
m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil {
log.Println(err)
}
```

## BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)

- [About BigQuery][cloud-bigquery]
- [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)

### Example Usage

First create a `bigquery.Client` to use throughout your application:
[snip]:# (bq-1)
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
// TODO: Handle error.
}
```

Then use that client to interact with the API:
[snip]:# (bq-2)
```go
// Construct a query.
q := c.Query(`
SELECT year, SUM(number)
FROM [bigquery-public-data:usa_names.usa_1910_2013]
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Iterate through the results.
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(values)
}
```


## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)

- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)

### Example Usage

First create a `logging.Client` to use throughout your application:
[snip]:# (logging-1)
```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
// TODO: Handle error.
}
```

Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
[snip]:# (logging-2)
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```

Close your client before your program exits, to flush any buffered log entries.
[snip]:# (logging-3)
```go
err = client.Close()
if err != nil {
// TODO: Handle error.
}
```

## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)

- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)

### Example Usage

First create a `spanner.Client` to use throughout your application:

[snip]:# (spanner-1)
```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
log.Fatal(err)
}
```

[snip]:# (spanner-2)
```go
// Simple Reads And Writes
_, err = client.Apply(ctx, []*spanner.Mutation{
spanner.Insert("Users",
[]string{"name", "email"},
[]interface{}{"alice", "a@example.com"})})
if err != nil {
log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
spanner.Key{"alice"}, []string{"email"})
if err != nil {
log.Fatal(err)
}
```


## Contributing

Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3

[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1

[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1

[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs

[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation

[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1

[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting

[cloud-container]: https://cloud.google.com/containers/
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1

[cloud-debugger]: https://cloud.google.com/debugger/
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2

[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials

[cloud-dataproc]: https://cloud.google.com/dataproc/
[cloud-dataproc-docs]: https://cloud.google.com/dataproc/docs
[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1

[cloud-iam]: https://cloud.google.com/iam/
[cloud-iam-docs]: https://cloud.google.com/iam/docs
[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam

[cloud-kms]: https://cloud.google.com/kms/
[cloud-kms-docs]: https://cloud.google.com/kms/docs
[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1

[cloud-natural-language]: https://cloud.google.com/natural-language/
[cloud-natural-language-docs]: https://cloud.google.com/natural-language/docs
[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-memorystore]: https://cloud.google.com/memorystore/
[cloud-memorystore-docs]: https://cloud.google.com/memorystore/docs
[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1beta1

[cloud-texttospeech]: https://cloud.google.com/texttospeech/
[cloud-texttospeech-docs]: https://cloud.google.com/texttospeech/docs
[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1

[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-docs]: https://cloud.google.com/trace/docs
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2

[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
[cloud-dialogflow-docs]: https://cloud.google.com/dialogflow-enterprise/docs/
[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2

[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
[cloud-containeranalysis-docs]: https://cloud.google.com/container-analysis/api/reference/rest/
[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1

[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1

[cloud-tasks]: https://cloud.google.com/tasks/
[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3

+ 0  - 47   vendor/cloud.google.com/go/RELEASING.md

@@ -1,47 +0,0 @@
# How to Create a New Release

## Prerequisites

Install [releasetool](https://github.com/googleapis/releasetool).

## Create a release

1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go`
1. Check out the master branch and ensure a clean and up-to-date state.
```
git checkout master
git pull --tags origin master
```
1. Run releasetool to generate a changelog from the last version. Note that
releasetool will prompt you to choose whether the new version is a major,
minor, or patch version.
```
releasetool start --language go
```
1. Format the output to match CHANGES.md.
1. Submit a CL with the changes in CHANGES.md. The commit message should look
like this (replacing `v0.31.0` with the correct version number):
```
all: Release v0.31.0
```
1. Wait for approval from all reviewers and then submit the CL.
1. Return to the master branch and pull the release commit.
```
git checkout master
git pull origin master
```
1. Tag the current commit with the new version (e.g., `v0.31.0`):
```
releasetool tag --language go
```
1. Publish the tag to GoogleSource (i.e., origin):
```
git push origin $NEW_VERSION
```
1. Visit the [releases page][releases] on GitHub and click the "Draft a new
release" button. For tag version, enter the tag published in the previous
step. For the release title, use the version (e.g., `v0.31.0`). For the
description, copy the changes added to CHANGES.md.


[releases]: https://github.com/googleapis/google-cloud-go/releases

+ 0  - 249   vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go

@@ -1,249 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package asset

import (
"context"
"time"

"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
ExportAssets []gax.CallOption
BatchGetAssetsHistory []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("cloudasset.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}

func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
}
}

// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
client assetpb.AssetServiceClient

// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient

// The call options for this service.
CallOptions *CallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),

client: assetpb.NewAssetServiceClient(conn),
}
c.setGoogleClientInfo()

c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the
// [google.longrunning.Operation][google.longrunning.Operation] API allowing
// you to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}

// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with asset in both
// non-delete or deleted status.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
var resp *assetpb.BatchGetAssetsHistoryResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
lro *longrunning.Operation
}

// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
var meta assetpb.ExportAssetsRequest
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
return op.lro.Name()
}

+ 0  - 75   vendor/cloud.google.com/go/asset/apiv1beta1/asset_client_example_test.go

@@ -1,75 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package asset_test

import (
"context"

asset "cloud.google.com/go/asset/apiv1beta1"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)

func ExampleNewClient() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}

func ExampleClient_ExportAssets() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &assetpb.ExportAssetsRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportAssets(ctx, req)
if err != nil {
// TODO: Handle error.
}

resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_BatchGetAssetsHistory() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &assetpb.BatchGetAssetsHistoryRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchGetAssetsHistory(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

+ 0  - 89   vendor/cloud.google.com/go/asset/apiv1beta1/doc.go

@@ -1,89 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
// The cloud asset API manages the history and inventory of cloud resources.
package asset // import "cloud.google.com/go/asset/apiv1beta1"

import (
"context"
"runtime"
"strings"
"unicode"

"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
const develPrefix = "devel +"

s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}

const versionClient = "20190306"

+ 0  - 266   vendor/cloud.google.com/go/asset/apiv1beta1/mock_test.go

@@ -1,266 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package asset

import (
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"

"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockAssetServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
assetpb.AssetServiceServer

reqs []proto.Message

// If set, all calls return this error.
err error

// responses to return if err == nil
resps []proto.Message
}

func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
mockAsset mockAssetServer
)

func TestMain(m *testing.M) {
flag.Parse()

serv := grpc.NewServer()
assetpb.RegisterAssetServiceServer(serv, &mockAsset)

lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)

conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)

os.Exit(m.Run())
}

func TestAssetServiceExportAssets(t *testing.T) {
var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

mockAsset.err = nil
mockAsset.reqs = nil

any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())

if err != nil {
t.Fatal(err)
}

if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestAssetServiceExportAssetsError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = nil
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

mockAsset.err = nil
mockAsset.reqs = nil

mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchGetAssetsHistory(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = gstatus.Error(errCode, "test error")

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchGetAssetsHistory(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

+ 0  - 248   vendor/cloud.google.com/go/asset/v1beta1/asset_client.go

@@ -1,248 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset

import (
"context"
"time"

"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
ExportAssets []gax.CallOption
BatchGetAssetsHistory []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("cloudasset.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}

func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
}
}

// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
client assetpb.AssetServiceClient

// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient

// The call options for this service.
CallOptions *CallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),

client: assetpb.NewAssetServiceClient(conn),
}
c.setGoogleClientInfo()

c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
// to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}

// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with asset in both
// non-delete or deleted status.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
var resp *assetpb.BatchGetAssetsHistoryResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
lro *longrunning.Operation
}

// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
var meta assetpb.ExportAssetsRequest
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
return op.lro.Name()
}

+ 0  - 75   vendor/cloud.google.com/go/asset/v1beta1/asset_client_example_test.go

@@ -1,75 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset_test

import (
"context"

asset "cloud.google.com/go/asset/v1beta1"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)

func ExampleNewClient() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}

func ExampleClient_ExportAssets() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &assetpb.ExportAssetsRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportAssets(ctx, req)
if err != nil {
// TODO: Handle error.
}

resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_BatchGetAssetsHistory() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &assetpb.BatchGetAssetsHistoryRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchGetAssetsHistory(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

+ 0  - 89   vendor/cloud.google.com/go/asset/v1beta1/doc.go

@@ -1,89 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The cloud asset API manages the history and inventory of cloud resources.
package asset // import "cloud.google.com/go/asset/v1beta1"

import (
"context"
"runtime"
"strings"
"unicode"

"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
const develPrefix = "devel +"

s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}

const versionClient = "20181219"

+ 0  - 266   vendor/cloud.google.com/go/asset/v1beta1/mock_test.go

@@ -1,266 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset

import (
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"

"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockAssetServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
assetpb.AssetServiceServer

reqs []proto.Message

// If set, all calls return this error.
err error

// responses to return if err == nil
resps []proto.Message
}

func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
mockAsset mockAssetServer
)

func TestMain(m *testing.M) {
flag.Parse()

serv := grpc.NewServer()
assetpb.RegisterAssetServiceServer(serv, &mockAsset)

lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)

conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)

os.Exit(m.Run())
}

func TestAssetServiceExportAssets(t *testing.T) {
var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

mockAsset.err = nil
mockAsset.reqs = nil

any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())

if err != nil {
t.Fatal(err)
}

if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestAssetServiceExportAssetsError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = nil
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

mockAsset.err = nil
mockAsset.reqs = nil

mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchGetAssetsHistory(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = gstatus.Error(errCode, "test error")

var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}

c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchGetAssetsHistory(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

+ 0  - 73   vendor/cloud.google.com/go/authexample_test.go

@@ -1,73 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud_test

import (
"context"

"cloud.google.com/go/datastore"
"cloud.google.com/go/pubsub"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
)

// Google Application Default Credentials is the recommended way to authorize
// and authenticate clients.
//
// For information on how to create and obtain Application Default Credentials, see
// https://developers.google.com/identity/protocols/application-default-credentials.
func Example_applicationDefaultCredentials() {
client, err := datastore.NewClient(context.Background(), "project-id")
if err != nil {
// TODO: handle error.
}
_ = client // Use the client.
}

// You can use a file with credentials to authenticate and authorize, such as a JSON
// key file associated with a Google service account. Service Account keys can be
// created and downloaded from
// https://console.developers.google.com/permissions/serviceaccounts.
//
// This example uses the Datastore client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFile() {
client, err := datastore.NewClient(context.Background(),
"project-id", option.WithCredentialsFile("/path/to/service-account-key.json"))
if err != nil {
// TODO: handle error.
}
_ = client // Use the client.
}

// In some cases (for instance, you don't want to store secrets on disk), you can
// create credentials from in-memory JSON and use the WithCredentials option.
//
// The google package in this example is at golang.org/x/oauth2/google.
//
// This example uses the PubSub client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFromJSON() {
ctx := context.Background()
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), pubsub.ScopePubSub)
if err != nil {
// TODO: handle error.
}
client, err := pubsub.NewClient(ctx, "project-id", option.WithCredentials(creds))
if err != nil {
// TODO: handle error.
}
_ = client // Use the client.
}

+ 0  - 8   vendor/cloud.google.com/go/bigquery/benchmarks/README.md

@@ -1,8 +0,0 @@
# BigQuery Benchmark
This directory contains benchmarks for the BigQuery client.

## Usage
`go run bench.go -- <your project id> queries.json`

The BigQuery service caches requests, so the benchmark should be run at least
twice; disregard the first result.

+ 0  - 85   vendor/cloud.google.com/go/bigquery/benchmarks/bench.go

@@ -1,85 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//+build ignore

package main

import (
"context"
"encoding/json"
"flag"
"io/ioutil"
"log"
"time"

"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)

func main() {
flag.Parse()

ctx := context.Background()
c, err := bigquery.NewClient(ctx, flag.Arg(0))
if err != nil {
log.Fatal(err)
}

queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
if err != nil {
log.Fatal(err)
}

var queries []string
if err := json.Unmarshal(queriesJSON, &queries); err != nil {
log.Fatal(err)
}

for _, q := range queries {
doQuery(ctx, c, q)
}
}

func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
startTime := time.Now()
q := c.Query(qt)
it, err := q.Read(ctx)
if err != nil {
log.Fatal(err)
}

numRows, numCols := 0, 0
var firstByte time.Duration

for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
if numRows == 0 {
numCols = len(values)
firstByte = time.Since(startTime)
} else if numCols != len(values) {
log.Fatalf("got %d columns, want %d", len(values), numCols)
}
numRows++
}
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
}

+ 0  - 10   vendor/cloud.google.com/go/bigquery/benchmarks/queries.json

@@ -1,10 +0,0 @@
[
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
]

+ 0  - 162   vendor/cloud.google.com/go/bigquery/bigquery.go

@@ -1,162 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"fmt"
"io"
"net/http"
"time"

"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go/v2"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
)

const (
prodAddr = "https://www.googleapis.com/bigquery/v2/"
// Scope is the Oauth2 scope for the service.
Scope = "https://www.googleapis.com/auth/bigquery"
userAgent = "gcloud-golang-bigquery/20160429"
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
}

// Client may be used to perform BigQuery operations.
type Client struct {
// Location, if set, will be used as the default location for all subsequent
// dataset creation and job operations. A location specified directly in one of
// those operations will override this value.
Location string

projectID string
bqs *bq.Service
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
o := []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(Scope),
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("bigquery: dialing: %v", err)
}
bqs, err := bq.New(httpClient)
if err != nil {
return nil, fmt.Errorf("bigquery: constructing client: %v", err)
}
bqs.BasePath = endpoint
c := &Client{
projectID: projectID,
bqs: bqs,
}
return c, nil
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
return nil
}

// Calls the Jobs.Insert RPC and returns a Job.
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
setClientHeader(call.Header())
if media != nil {
call.Media(media)
}
var res *bq.Job
var err error
invoke := func() error {
res, err = call.Do()
return err
}
// A job with a client-generated ID can be retried; the presence of the
// ID makes the insert operation idempotent.
// We don't retry if there is media, because it is an io.Reader. We'd
// have to read the contents and keep it in memory, and that could be expensive.
// TODO(jba): Look into retrying if media != nil.
if job.JobReference != nil && media == nil {
err = runWithRetry(ctx, invoke)
} else {
err = invoke()
}
if err != nil {
return nil, err
}
return bqToJob(res, c)
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
if m == 0 {
return time.Time{}
}
return time.Unix(0, m*1e6)
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
backoff := gax.Backoff{
Initial: 1 * time.Second,
Max: 32 * time.Second,
Multiplier: 2,
}
return internal.Retry(ctx, backoff, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
return !retryableError(err), err
})
}

// retryableError reports whether err should be retried. This is the correct
// definition of retryable according to the BigQuery team. It also considers
// 502 ("Bad Gateway") and 503 ("Service Unavailable") errors retryable; these
// are returned by systems between the client and the BigQuery service.
func retryableError(err error) bool {
e, ok := err.(*googleapi.Error)
if !ok {
return false
}
var reason string
if len(e.Errors) > 0 {
reason = e.Errors[0].Reason
}
return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
}

vendor/cloud.google.com/go/bigquery/bigquery.replay (+0, -26493): file diff suppressed because it is too large


vendor/cloud.google.com/go/bigquery/copy.go (+0, -107)

@@ -1,107 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"

bq "google.golang.org/api/bigquery/v2"
)

// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
// Srcs are the tables from which data will be copied.
Srcs []*Table

// Dst is the table into which the data will be copied.
Dst *Table

// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition

// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition

// The labels associated with this job.
Labels map[string]string

// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
}

func (c *CopyConfig) toBQ() *bq.JobConfiguration {
var ts []*bq.TableReference
for _, t := range c.Srcs {
ts = append(ts, t.toBQ())
}
return &bq.JobConfiguration{
Labels: c.Labels,
Copy: &bq.JobConfigurationTableCopy{
CreateDisposition: string(c.CreateDisposition),
WriteDisposition: string(c.WriteDisposition),
DestinationTable: c.Dst.toBQ(),
DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
SourceTables: ts,
},
}
}

func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
cc := &CopyConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition),
Dst: bqToTable(q.Copy.DestinationTable, c),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
}
for _, t := range q.Copy.SourceTables {
cc.Srcs = append(cc.Srcs, bqToTable(t, c))
}
return cc
}

// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
JobIDConfig
CopyConfig
c *Client
}

// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
return &Copier{
c: t.c,
CopyConfig: CopyConfig{
Srcs: srcs,
Dst: t,
},
}
}

// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
return c.c.insertJob(ctx, c.newJob(), nil)
}

func (c *Copier) newJob() *bq.Job {
return &bq.Job{
JobReference: c.JobIDConfig.createJobRef(c.c),
Configuration: c.CopyConfig.toBQ(),
}
}
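
A minimal usage sketch of the Copier API above, assuming a configured *Client named client and a context ctx; all dataset and table identifiers are placeholders:

    // Copy src into dst, truncating any rows already in dst.
    src := client.Dataset("src_dataset").Table("src_table")
    dst := client.Dataset("dst_dataset").Table("dst_table")
    copier := dst.CopierFrom(src)
    copier.WriteDisposition = WriteTruncate
    job, err := copier.Run(ctx)
    if err != nil {
        // handle the error
    }
    _ = job // job.Wait(ctx) blocks until the copy finishes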

vendor/cloud.google.com/go/bigquery/copy_test.go (+0, -163)

@@ -1,163 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Copy: &bq.JobConfigurationTableCopy{
DestinationTable: &bq.TableReference{
ProjectId: "d-project-id",
DatasetId: "d-dataset-id",
TableId: "d-table-id",
},
SourceTables: []*bq.TableReference{
{
ProjectId: "s-project-id",
DatasetId: "s-dataset-id",
TableId: "s-table-id",
},
},
},
},
}
}

func TestCopy(t *testing.T) {
defer fixRandomID("RANDOM")()
testCases := []struct {
dst *Table
srcs []*Table
jobID string
location string
config CopyConfig
want *bq.Job
}{
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
want: defaultCopyJob(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
config: CopyConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
Labels: map[string]string{"a": "b"},
},
want: func() *bq.Job {
j := defaultCopyJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
return j
}(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
jobID: "job-id",
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference.JobId = "job-id"
return j
}(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
location: "asia-northeast1",
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference.Location = "asia-northeast1"
return j
}(),
},
}
c := &Client{projectID: "client-project-id"}
for i, tc := range testCases {
tc.dst.c = c
copier := tc.dst.CopierFrom(tc.srcs...)
copier.JobID = tc.jobID
copier.Location = tc.location
tc.config.Srcs = tc.srcs
tc.config.Dst = tc.dst
copier.CopyConfig = tc.config
got := copier.newJob()
checkJob(t, i, got, tc.want)

jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
cmpopts.IgnoreUnexported(Table{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}

vendor/cloud.google.com/go/bigquery/dataset.go (+0, -536)

@@ -1,536 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"fmt"
"time"

"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
ProjectID string
DatasetID string
c *Client
}

// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
// These fields can be set when creating a dataset.
Name string // The user-friendly name for this dataset.
Description string // The user-friendly description of this dataset.
Location string // The geo location of the dataset.
DefaultTableExpiration time.Duration // The default expiration time for new tables.
Labels map[string]string // User-provided labels.
Access []*AccessEntry // Access permissions.

// These fields are read-only.
CreationTime time.Time
LastModifiedTime time.Time // When the dataset or any of its tables were modified.
FullID string // The full dataset ID in the form projectID:datasetID.

// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}

// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this dataset.
Name optional.String // The user-friendly name for this dataset.

// DefaultTableExpiration is the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration

// The entire access list. It is not possible to replace individual entries.
Access []*AccessEntry

labelUpdater
}

// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
}

// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
return &Dataset{
ProjectID: projectID,
DatasetID: datasetID,
c: c,
}
}

// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
defer func() { trace.EndSpan(ctx, err) }()

ds, err := md.toBQ()
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
// Use Client.Location as a default.
if ds.Location == "" {
ds.Location = d.c.Location
}
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
setClientHeader(call.Header())
_, err = call.Do()
return err
}

func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
if dm == nil {
return ds, nil
}
ds.FriendlyName = dm.Name
ds.Description = dm.Description
ds.Location = dm.Location
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
ds.Labels = dm.Labels
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if !dm.CreationTime.IsZero() {
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
}
if !dm.LastModifiedTime.IsZero() {
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
}
if dm.FullID != "" {
return nil, errors.New("bigquery: Dataset.FullID is not writable")
}
if dm.ETag != "" {
return nil, errors.New("bigquery: Dataset.ETag is not writable")
}
return ds, nil
}

func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
var q []*bq.DatasetAccess
for _, e := range a {
a, err := e.toBQ()
if err != nil {
return nil, err
}
q = append(q, a)
}
return q, nil
}

// Delete deletes the dataset. Delete will fail if the dataset is not empty.
func (d *Dataset) Delete(ctx context.Context) (err error) {
return d.deleteInternal(ctx, false)
}

// DeleteWithContents deletes the dataset, as well as contained resources.
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
return d.deleteInternal(ctx, true)
}

func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
defer func() { trace.EndSpan(ctx, err) }()

call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
setClientHeader(call.Header())
return call.Do()
}

// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
defer func() { trace.EndSpan(ctx, err) }()

call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds)
}

func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
dm := &DatasetMetadata{
CreationTime: unixMillisToTime(d.CreationTime),
LastModifiedTime: unixMillisToTime(d.LastModifiedTime),
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
for _, a := range d.Access {
e, err := bqToAccessEntry(a, nil)
if err != nil {
return nil, err
}
dm.Access = append(dm.Access, e)
}
return dm, nil
}

// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
defer func() { trace.EndSpan(ctx, err) }()

ds, err := dm.toBQ()
if err != nil {
return nil, err
}
call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var ds2 *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds2, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds2)
}
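// A minimal read-modify-write sketch that uses the ETag to guard against
// intervening writes; the dataset ID is a placeholder:
//
//    d := client.Dataset("my_dataset")
//    md, err := d.Metadata(ctx)
//    if err != nil {
//        // handle the error
//    }
//    // The update fails if the metadata changed since it was read.
//    _, err = d.Update(ctx, DatasetMetadataToUpdate{Description: "new description"}, md.ETag)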

func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}

if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
}
}
if dm.Access != nil {
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if len(ds.Access) == 0 {
ds.NullFields = append(ds.NullFields, "Access")
}
}
labels, forces, nulls := dm.update()
ds.Labels = labels
ds.ForceSendFields = append(ds.ForceSendFields, forces...)
ds.NullFields = append(ds.NullFields, nulls...)
return ds, nil
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}

// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
it := &TableIterator{
ctx: ctx,
dataset: d,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.tables) },
func() interface{} { b := it.tables; it.tables = nil; return b })
return it
}

// A TableIterator is an iterator over Tables.
type TableIterator struct {
ctx context.Context
dataset *Dataset
tables []*Table
pageInfo *iterator.PageInfo
nextFunc func() error
}

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
t := it.tables[0]
it.tables = it.tables[1:]
return t, nil
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// for testing
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
PageToken(pageToken).
Context(it.ctx)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
var res *bq.TableList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}

func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listTables(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, t := range res.Tables {
it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
}
return res.NextPageToken, nil
}

func bqToTable(tr *bq.TableReference, c *Client) *Table {
if tr == nil {
return nil
}
return &Table{
ProjectID: tr.ProjectId,
DatasetID: tr.DatasetId,
TableID: tr.TableId,
c: c,
}
}

// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
return c.DatasetsInProject(ctx, c.projectID)
}

// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
it := &DatasetIterator{
ctx: ctx,
c: c,
ProjectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}

// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
// ListHidden causes hidden datasets to be listed when set to true.
// Set before the first call to Next.
ListHidden bool

// Filter restricts the datasets returned by label. The filter syntax is described in
// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
// Set before the first call to Next.
Filter string

// The project ID of the listed datasets.
// Set before the first call to Next.
ProjectID string

ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next Dataset. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *DatasetIterator) Next() (*Dataset, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}

// for testing
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
call := it.c.bqs.Datasets.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
All(it.ListHidden)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
if it.Filter != "" {
call.Filter(it.Filter)
}
var res *bq.DatasetList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}

func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listDatasets(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, d := range res.Datasets {
it.items = append(it.items, &Dataset{
ProjectID: d.DatasetReference.ProjectId,
DatasetID: d.DatasetReference.DatasetId,
c: it.c,
})
}
return res.NextPageToken, nil
}

// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
Role AccessRole // The role of the entity
EntityType EntityType // The type of entity
Entity string // The entity (individual or group) granted access
View *Table // The view granted access (EntityType must be ViewEntity)
}

// AccessRole is the level of access to grant to a dataset.
type AccessRole string

const (
// OwnerRole is the OWNER AccessRole.
OwnerRole AccessRole = "OWNER"
// ReaderRole is the READER AccessRole.
ReaderRole AccessRole = "READER"
// WriterRole is the WRITER AccessRole.
WriterRole AccessRole = "WRITER"
)

// EntityType is the type of entity in an AccessEntry.
type EntityType int

const (
// DomainEntity is a domain (e.g. "example.com").
DomainEntity EntityType = iota + 1

// GroupEmailEntity is an email address of a Google Group.
GroupEmailEntity

// UserEmailEntity is an email address of an individual user.
UserEmailEntity

// SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
// allAuthenticatedUsers.
SpecialGroupEntity

// ViewEntity is a BigQuery view.
ViewEntity
)

func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
q := &bq.DatasetAccess{Role: string(e.Role)}
switch e.EntityType {
case DomainEntity:
q.Domain = e.Entity
case GroupEmailEntity:
q.GroupByEmail = e.Entity
case UserEmailEntity:
q.UserByEmail = e.Entity
case SpecialGroupEntity:
q.SpecialGroup = e.Entity
case ViewEntity:
q.View = e.View.toBQ()
default:
return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
}
return q, nil
}

func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
e := &AccessEntry{Role: AccessRole(q.Role)}
switch {
case q.Domain != "":
e.Entity = q.Domain
e.EntityType = DomainEntity
case q.GroupByEmail != "":
e.Entity = q.GroupByEmail
e.EntityType = GroupEmailEntity
case q.UserByEmail != "":
e.Entity = q.UserByEmail
e.EntityType = UserEmailEntity
case q.SpecialGroup != "":
e.Entity = q.SpecialGroup
e.EntityType = SpecialGroupEntity
case q.View != nil:
e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
e.EntityType = ViewEntity
default:
return nil, errors.New("bigquery: invalid access value")
}
return e, nil
}
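
A minimal sketch of the AccessEntry API above, creating a dataset with an explicit access list; it assumes a configured *Client named client and a context ctx, and all identifiers are placeholders:

    md := &DatasetMetadata{
        Location: "US",
        Access: []*AccessEntry{
            {Role: ReaderRole, EntityType: UserEmailEntity, Entity: "joe@example.com"},
        },
    }
    if err := client.Dataset("my_dataset").Create(ctx, md); err != nil {
        // handle the error
    }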

vendor/cloud.google.com/go/bigquery/dataset_test.go (+0, -326)

@@ -1,326 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"strconv"
"testing"
"time"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
itest "google.golang.org/api/iterator/testing"
)

// listTablesStub serves table-listing requests by returning data from an in-memory list of values.
type listTablesStub struct {
expectedProject, expectedDataset string
tables []*bq.TableListTables
}

func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
if it.dataset.ProjectID != s.expectedProject {
return nil, errors.New("wrong project id")
}
if it.dataset.DatasetID != s.expectedDataset {
return nil, errors.New("wrong dataset id")
}
const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize
}
start := 0
if pageToken != "" {
var err error
start, err = strconv.Atoi(pageToken)
if err != nil {
return nil, err
}
}
end := start + pageSize
if end > len(s.tables) {
end = len(s.tables)
}
nextPageToken := ""
if end < len(s.tables) {
nextPageToken = strconv.Itoa(end)
}
return &bq.TableList{
Tables: s.tables[start:end],
NextPageToken: nextPageToken,
}, nil
}

func TestTables(t *testing.T) {
c := &Client{projectID: "p1"}
inTables := []*bq.TableListTables{
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
}
outTables := []*Table{
{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
}

lts := &listTablesStub{
expectedProject: "p1",
expectedDataset: "d1",
tables: inTables,
}
old := listTables
listTables = lts.listTables // cannot use t.Parallel with this test
defer func() { listTables = old }()

msg, ok := itest.TestIterator(outTables,
func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
if !ok {
t.Error(msg)
}
}

type listDatasetsStub struct {
expectedProject string
datasets []*bq.DatasetListDatasets
hidden map[*bq.DatasetListDatasets]bool
}

func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize
}
if it.Filter != "" {
return nil, errors.New("filter not supported")
}
if it.ProjectID != s.expectedProject {
return nil, errors.New("bad project ID")
}
start := 0
if pageToken != "" {
var err error
start, err = strconv.Atoi(pageToken)
if err != nil {
return nil, err
}
}
var (
i int
result []*bq.DatasetListDatasets
nextPageToken string
)
for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
if s.hidden[s.datasets[i]] && !it.ListHidden {
continue
}
result = append(result, s.datasets[i])
}
if i < len(s.datasets) {
nextPageToken = strconv.Itoa(i)
}
return &bq.DatasetList{
Datasets: result,
NextPageToken: nextPageToken,
}, nil
}

func TestDatasets(t *testing.T) {
client := &Client{projectID: "p"}
inDatasets := []*bq.DatasetListDatasets{
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
}
outDatasets := []*Dataset{
{"p", "a", client},
{"p", "b", client},
{"p", "hidden", client},
{"p", "c", client},
}
lds := &listDatasetsStub{
expectedProject: "p",
datasets: inDatasets,
hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
}
old := listDatasets
listDatasets = lds.listDatasets // cannot use t.Parallel with this test
defer func() { listDatasets = old }()

msg, ok := itest.TestIterator(outDatasets,
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=true: %s", msg)
}

msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=false: %s", msg)
}
}

func TestDatasetToBQ(t *testing.T) {
for _, test := range []struct {
in *DatasetMetadata
want *bq.Dataset
}{
{nil, &bq.Dataset{}},
{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
{&DatasetMetadata{
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
}, &bq.Dataset{
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
}},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
}
}

// Check that non-writeable fields are unset.
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, dm := range []*DatasetMetadata{
{CreationTime: aTime},
{LastModifiedTime: aTime},
{FullID: "x"},
{ETag: "e"},
} {
if _, err := dm.toBQ(); err == nil {
t.Errorf("%+v: got nil, want error", dm)
}
}
}

func TestBQToDatasetMetadata(t *testing.T) {
cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
cMillis := cTime.UnixNano() / 1e6
mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
mMillis := mTime.UnixNano() / 1e6
q := &bq.Dataset{
CreationTime: cMillis,
LastModifiedTime: mMillis,
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{
{Role: "READER", UserByEmail: "joe@example.com"},
{Role: "WRITER", GroupByEmail: "users@example.com"},
},
Etag: "etag",
}
want := &DatasetMetadata{
CreationTime: cTime,
LastModifiedTime: mTime,
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{
{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
},
ETag: "etag",
}
got, err := bqToDatasetMetadata(q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}

func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")

got, err := dm.toBQ()
if err != nil {
t.Fatal(err)
}
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}

func TestConvertAccessEntry(t *testing.T) {
c := &Client{projectID: "pid"}
for _, e := range []*AccessEntry{
{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
{Role: ReaderRole, EntityType: ViewEntity,
View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
} {
q, err := e.toBQ()
if err != nil {
t.Fatal(err)
}
got, err := bqToAccessEntry(q, c)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}

e := &AccessEntry{Role: ReaderRole, Entity: "e"}
if _, err := e.toBQ(); err == nil {
t.Error("got nil, want error")
}
if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
t.Error("got nil, want error")
}
}

vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go (+0, -67)

@@ -1,67 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package datatransfer

import (
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

import (
"context"
"fmt"
"strconv"
"testing"
"time"

"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestDataTransferServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}

projectId := testutil.ProjID()
_ = projectId

c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}

var formattedParent string = fmt.Sprintf("projects/%s", projectId)
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}

iter := c.ListDataSources(ctx, request)
if _, err := iter.Next(); err != nil && err != iterator.Done {
t.Error(err)
}
}

vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go (+0, -625)

@@ -1,625 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package datatransfer

import (
"context"
"fmt"
"math"
"time"

"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
GetDataSource []gax.CallOption
ListDataSources []gax.CallOption
CreateTransferConfig []gax.CallOption
UpdateTransferConfig []gax.CallOption
DeleteTransferConfig []gax.CallOption
GetTransferConfig []gax.CallOption
ListTransferConfigs []gax.CallOption
ScheduleTransferRuns []gax.CallOption
GetTransferRun []gax.CallOption
DeleteTransferRun []gax.CallOption
ListTransferRuns []gax.CallOption
ListTransferLogs []gax.CallOption
CheckValidCreds []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}

func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
GetDataSource: retry[[2]string{"default", "idempotent"}],
ListDataSources: retry[[2]string{"default", "idempotent"}],
CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
GetTransferConfig: retry[[2]string{"default", "idempotent"}],
ListTransferConfigs: retry[[2]string{"default", "idempotent"}],
ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
GetTransferRun: retry[[2]string{"default", "idempotent"}],
DeleteTransferRun: retry[[2]string{"default", "idempotent"}],
ListTransferRuns: retry[[2]string{"default", "idempotent"}],
ListTransferLogs: retry[[2]string{"default", "idempotent"}],
CheckValidCreds: retry[[2]string{"default", "idempotent"}],
}
}
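// A minimal sketch of overriding these defaults on a single call; per-call
// options are applied after the defaults, so they take precedence:
//
//    resp, err := c.GetDataSource(ctx, req, gax.WithRetry(func() gax.Retryer {
//        return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//            Initial: 100 * time.Millisecond,
//            Max:     10 * time.Second,
//        })
//    }))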

// Client is a client for interacting with BigQuery Data Transfer API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
client datatransferpb.DataTransferServiceClient

// The call options for this service.
CallOptions *CallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google products into
// BigQuery. This service contains the end-user-exposed methods that back
// the frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),

client: datatransferpb.NewDataTransferServiceClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
var resp *datatransferpb.DataSource
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
it := &DataSourceIterator{}
req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
var resp *datatransferpb.ListDataSourcesResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.DataSources, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}

// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
it := &TransferConfigIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
var resp *datatransferpb.ListTransferConfigsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferConfigs, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}

// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
var resp *datatransferpb.ScheduleTransferRunsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// GetTransferRun returns information about a particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
var resp *datatransferpb.TransferRun
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
it := &TransferRunIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
var resp *datatransferpb.ListTransferRunsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferRuns, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}

// ListTransferLogs returns user-facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
it := &TransferMessageIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
var resp *datatransferpb.ListTransferLogsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferMessages, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}

// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an
// OAuth token for the particular user, which is a prerequisite before the
// user can create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
var resp *datatransferpb.CheckValidCredsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
items []*datatransferpb.DataSource
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
var item *datatransferpb.DataSource
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *DataSourceIterator) bufLen() int {
return len(it.items)
}

func (it *DataSourceIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
items []*datatransferpb.TransferConfig
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
var item *datatransferpb.TransferConfig
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *TransferConfigIterator) bufLen() int {
return len(it.items)
}

func (it *TransferConfigIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
items []*datatransferpb.TransferMessage
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
var item *datatransferpb.TransferMessage
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *TransferMessageIterator) bufLen() int {
return len(it.items)
}

func (it *TransferMessageIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
items []*datatransferpb.TransferRun
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
var item *datatransferpb.TransferRun
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *TransferRunIterator) bufLen() int {
return len(it.items)
}

func (it *TransferRunIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
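
// All four iterators above share the same consumption pattern. A minimal
// caller-side sketch (the names c, ctx, and req are assumptions, not part of
// this file):
//
//	it := c.ListTransferRuns(ctx, req)
//	for {
//		run, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		_ = run // TODO: Use run.
//	}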

+ 0
- 289
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go

@@ -1,289 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package datatransfer_test

import (
"context"

datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1"
"google.golang.org/api/iterator"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

func ExampleNewClient() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}

func ExampleClient_GetDataSource() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.GetDataSourceRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetDataSource(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_ListDataSources() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.ListDataSourcesRequest{
// TODO: Fill request struct fields.
}
it := c.ListDataSources(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}

func ExampleClient_CreateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.CreateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_UpdateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.UpdateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_DeleteTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.DeleteTransferConfigRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
}

func ExampleClient_GetTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.GetTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_ListTransferConfigs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.ListTransferConfigsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferConfigs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}

func ExampleClient_ScheduleTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.ScheduleTransferRunsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ScheduleTransferRuns(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_GetTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.GetTransferRunRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleClient_DeleteTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.DeleteTransferRunRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
}

func ExampleClient_ListTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.ListTransferRunsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferRuns(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}

func ExampleClient_ListTransferLogs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.ListTransferLogsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferLogs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}

func ExampleClient_CheckValidCreds() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &datatransferpb.CheckValidCredsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CheckValidCreds(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

+ 0
- 90
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go

@@ -1,90 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"

import (
"context"
"runtime"
"strings"
"unicode"

"google.golang.org/grpc/metadata"
)

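// insertMetadata returns a new outgoing context with the given metadata
// merged into any metadata already attached to ctx.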
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
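
// Caller-side sketch (assumes the google.golang.org/api/option package is
// imported): explicitly passing the default scopes when constructing a client.
//
//	c, err := datatransfer.NewClient(ctx,
//		option.WithScopes(datatransfer.DefaultAuthScopes()...))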

// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
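// For example, "go1.12.5" is reported as "1.12.5", "go1.13" as "1.13.0",
// and "go1.13beta1" as "1.13.0-beta1".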
func versionGo() string {
const develPrefix = "devel +"

s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}

const versionClient = "20190306"

+ 0
- 1146
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go
File diff suppressed because it is too large


+ 0
- 135
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go

@@ -1,135 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datatransfer

// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}

// LocationPath returns the path for the location resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
""
}

// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/dataSources/" +
dataSource +
""
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
""
}

// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}

// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
return "" +
"projects/" +
project +
"/dataSources/" +
dataSource +
""
}

// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
""
}

// RunPath returns the path for the run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}
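
// All of the helpers above are deprecated in favor of plain fmt.Sprintf
// calls; an equivalent caller-side sketch (the values are placeholders):
//
//	path := fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s",
//		"my-project", "my-config", "my-run")
//	// "projects/my-project/transferConfigs/my-config/runs/my-run"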

+ 0
- 310
vendor/cloud.google.com/go/bigquery/doc.go

@@ -1,310 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package bigquery provides a client for the BigQuery service.

Note: This package is in beta. Some backwards-incompatible changes may occur.

The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.

See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.


Creating a Client

To start working with this package, create a client:

ctx := context.Background()
client, err := bigquery.NewClient(ctx, projectID)
if err != nil {
// TODO: Handle error.
}

Querying

To query existing tables, create a Query and call its Read method:

q := client.Query(`
SELECT year, SUM(number) as num
FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + `
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}

Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:

for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(values)
}

You can also use a struct whose exported fields match the query:

type Count struct {
Year int
Num int
}
for {
var c Count
err := it.Next(&c)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(c)
}

You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.

job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}

Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.

jobID := job.ID()
fmt.Printf("The job ID is %s\n", jobID)

To retrieve the job's results from the ID, first look up the Job:

job, err = client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}

Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.

it, err = job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Proceed with iteration as above.

Datasets and Tables

You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:

myDataset := client.Dataset("my_dataset")
yourDataset := client.DatasetInProject("your-project-id", "your_dataset")

These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

if err := myDataset.Create(ctx, nil); err != nil {
// TODO: Handle error.
}

You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.

table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
For instance, you could create a temporary table with:

err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
ExpirationTime: time.Now().Add(1*time.Hour)})
if err != nil {
// TODO: Handle error.
}

We'll see how to create a table with a schema in the next section.

Schemas

There are two ways to construct schemas with this package.
You can build a schema by hand, like so:

schema1 := bigquery.Schema{
{Name: "Name", Required: true, Type: bigquery.StringFieldType},
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
}

Or you can infer the schema from a struct:

type student struct {
Name string
Grades []int
Optional bigquery.NullInt64
}
schema2, err := bigquery.InferSchema(student{})
if err != nil {
// TODO: Handle error.
}
// schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package, so you can
change names, ignore fields, or mark a field as nullable (non-required). Fields
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
NullTimestamp, NullDate, NullTime, NullDateTime, and NullGeography) are
automatically inferred as nullable, so the "nullable" tag is only needed for []byte,
*big.Rat and pointer-to-struct fields.

type student2 struct {
Name string `bigquery:"full_name"`
Grades []int
Secret string `bigquery:"-"`
Optional []byte `bigquery:",nullable"`
}
schema3, err := bigquery.InferSchema(student2{})
if err != nil {
// TODO: Handle error.
}
// schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".

Having constructed a schema, you can create a table with it like so:

if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
// TODO: Handle error.
}

Copying

You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:

copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
copier.WriteDisposition = bigquery.WriteTruncate
job, err = copier.Run(ctx)
if err != nil {
// TODO: Handle error.
}

You can chain the call to Run if you don't want to set options:

job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
if err != nil {
// TODO: Handle error.
}

You can wait for your job to complete:

status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}

Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:

for {
status, err := job.Status(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Done() {
if status.Err() != nil {
log.Fatalf("Job failed with error %v", status.Err())
}
break
}
time.Sleep(pollInterval)
}

Loading and Uploading

There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.

For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.

gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
loader := myDataset.Table("dest").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
job, err = loader.Run(ctx)
// Poll the job for completion if desired, as above.

To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.

u := table.Uploader()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items); err != nil {
// TODO: Handle error.
}

You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:

type Item2 struct {
Name string
Size float64
Count int
}
// Item2 does not implement ValueSaver; its schema is inferred from the struct.
items2 := []*Item2{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items2); err != nil {
// TODO: Handle error.
}
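
For finer control over the schema and insert IDs, wrap each struct in a
StructSaver. A minimal sketch, assuming schema holds the table's schema:

	savers := []*bigquery.StructSaver{
		{Struct: Item2{Name: "n4", Size: 12, Count: 3}, Schema: schema, InsertID: "id-n4"},
	}
	if err := u.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}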

Extracting

If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.

extractor := table.ExtractorTo(gcsRef)
extractor.DisableHeader = true
job, err = extractor.Run(ctx)
// Poll the job for completion if desired, as above.

Errors

Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:

if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package bigquery // import "cloud.google.com/go/bigquery"

+ 0
- 83
vendor/cloud.google.com/go/bigquery/error.go

@@ -1,83 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"fmt"

bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
// A detailed description of possible Reasons can be found at: https://cloud.google.com/bigquery/troubleshooting-errors.
type Error struct {
// Mirrors bq.ErrorProto, but drops DebugInfo
Location, Message, Reason string
}

func (e Error) Error() string {
return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func bqToError(ep *bq.ErrorProto) *Error {
if ep == nil {
return nil
}
return &Error{
Location: ep.Location,
Message: ep.Message,
Reason: ep.Reason,
}
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
switch len(m) {
case 0:
return "(0 errors)"
case 1:
return m[0].Error()
case 2:
return m[0].Error() + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
InsertID string // The InsertID associated with the affected row.
RowIndex int // The 0-based index of the affected row in the batch of rows being inserted.
Errors MultiError
}

func (e *RowInsertionError) Error() string {
errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
plural := "s"
if len(pme) == 1 {
plural = ""
}

return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
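
// Caller-side sketch (not part of this package; the names ins and items are
// assumptions): unpacking the error returned by Inserter.Put.
//
//	if err := ins.Put(ctx, items); err != nil {
//		if pme, ok := err.(bigquery.PutMultiError); ok {
//			for _, rie := range pme {
//				log.Printf("row %d (insert ID %q): %v",
//					rie.RowIndex, rie.InsertID, rie.Errors)
//			}
//		}
//	}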

+ 0
- 109
vendor/cloud.google.com/go/bigquery/error_test.go

@@ -1,109 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"errors"
"strings"
"testing"

"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
testCases := []struct {
errs PutMultiError
want string
}{
{
errs: PutMultiError{},
want: "0 row insertions failed",
},
{
errs: PutMultiError{rowInsertionError("a")},
want: "1 row insertion failed",
},
{
errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
want: "2 row insertions failed",
},
}

for _, tc := range testCases {
if tc.errs.Error() != tc.want {
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
}
}
}

func TestMultiErrorString(t *testing.T) {
testCases := []struct {
errs MultiError
want string
}{
{
errs: MultiError{},
want: "(0 errors)",
},
{
errs: MultiError{errors.New("a")},
want: "a",
},
{
errs: MultiError{errors.New("a"), errors.New("b")},
want: "a (and 1 other error)",
},
{
errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
want: "a (and 2 other errors)",
},
}

for _, tc := range testCases {
if tc.errs.Error() != tc.want {
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
}
}
}

func TestErrorFromErrorProto(t *testing.T) {
for _, test := range []struct {
in *bq.ErrorProto
want *Error
}{
{nil, nil},
{
in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
want: &Error{Location: "L", Message: "M", Reason: "R"},
},
} {
if got := bqToError(test.in); !testutil.Equal(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
}
}
}

func TestErrorString(t *testing.T) {
e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
got := e.Error()
if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
}
}

+ 0
- 829
vendor/cloud.google.com/go/bigquery/examples_test.go

@@ -1,829 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery_test

import (
"context"
"fmt"
"os"
"time"

"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)

func ExampleNewClient() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
_ = client // TODO: Use client.
}

func ExampleClient_Dataset() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
fmt.Println(ds)
}

func ExampleClient_DatasetInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.DatasetInProject("their-project-id", "their-dataset")
fmt.Println(ds)
}

func ExampleClient_Datasets() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleClient_DatasetsInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.DatasetsInProject(ctx, "their-project-id")
_ = it // TODO: iterate using Next or iterator.Pager.
}

func getJobID() string { return "" }

func ExampleClient_JobFromID() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
job, err := client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}
fmt.Println(job.LastStatus()) // Display the job's status.
}

func ExampleClient_Jobs() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Jobs(ctx)
it.State = bigquery.Running // list only running jobs.
_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleNewGCSReference() {
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
fmt.Println(gcsRef)
}

func ExampleClient_Query() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
q.DefaultProjectID = "project-id"
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}

func ExampleClient_Query_parameters() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select num from t1 where name = @user")
q.Parameters = []bigquery.QueryParameter{
{Name: "user", Value: "Elizabeth"},
}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}

// This example demonstrates how to run a query job on a table
// with a customer-managed encryption key. The same
// applies to load and copy jobs as well.
func ExampleClient_Query_encryptionKey() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}

func ExampleQuery_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleRowIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var row []bigquery.Value
err := it.Next(&row)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(row)
}
}

func ExampleRowIterator_Next_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}

type score struct {
Name string
Num int
}

q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var s score
err := it.Next(&s)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(s)
}
}

func ExampleJob_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// Call Query.Run to get a Job, then call Read on the job.
// Note: Query.Read is a shorthand for this.
job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}
it, err := job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleJob_Wait() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}

func ExampleJob_Config() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
jc, err := job.Config()
if err != nil {
// TODO: Handle error.
}
copyConfig := jc.(*bigquery.CopyConfig)
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}

func ExampleDataset_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
// TODO: Handle error.
}
}

func ExampleDataset_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
// TODO: Handle error.
}
}

func ExampleDataset_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}

// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}

// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}

func ExampleDataset_Table() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// Table creates a reference to the table. It does not create the actual
// table in BigQuery; to do so, use Table.Create.
t := client.Dataset("my_dataset").Table("my_table")
fmt.Println(t)
}

func ExampleDataset_Tables() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleDatasetIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
for {
ds, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(ds)
}
}

func ExampleInferSchema() {
type Item struct {
Name string
Size float64
Count int
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type)
}
// Output:
// Name STRING
// Size FLOAT
// Count INTEGER
}

func ExampleInferSchema_tags() {
type Item struct {
Name string
Size float64
Count int `bigquery:"number"`
Secret []byte `bigquery:"-"`
Optional bigquery.NullBool
OptBytes []byte `bigquery:",nullable"`
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type, fs.Required)
}
// Output:
// Name STRING true
// Size FLOAT true
// number INTEGER true
// Optional BOOLEAN false
// OptBytes BYTES false
}

func ExampleTable_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
}

// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
ExpirationTime: time.Now().Add(24 * time.Hour),
}); err != nil {
// TODO: Handle error.
}
}

// This example demonstrates how to create a table with
// a customer-managed encryption key.
func ExampleTable_Create_encryptionKey() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")

// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
}); err != nil {
// TODO: Handle error.
}
}

func ExampleTable_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
// TODO: Handle error.
}
}

func ExampleTable_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}

func ExampleTable_Inserter() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
_ = ins // TODO: Use ins.
}

func ExampleTable_Inserter_options() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
ins.SkipInvalidRows = true
ins.IgnoreUnknownValues = true
_ = ins // TODO: Use ins.
}

func ExampleTable_CopierFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
c.WriteDisposition = bigquery.WriteTruncate
// TODO: set other options on the Copier.
job, err := c.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}

func ExampleTable_ExtractorTo() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.FieldDelimiter = ":"
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
extractor := ds.Table("my_table").ExtractorTo(gcsRef)
extractor.DisableHeader = true
// TODO: set other options on the Extractor.
job, err := extractor.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}

func ExampleTable_LoaderFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}

func ExampleTable_LoaderFrom_reader() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
f, err := os.Open("data.csv")
if err != nil {
// TODO: Handle error.
}
rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the ReaderSource.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}

func ExampleTable_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}

// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
md, err := t.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := t.Update(ctx,
bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}

// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "my favorite table",
}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(tm)
}

func ExampleTableIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
for {
t, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(t)
}
}

type Item struct {
Name string
Size float64
Count int
}

// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
return map[string]bigquery.Value{
"Name": i.Name,
"Size": i.Size,
"Count": i.Count,
}, "", nil
}

func ExampleInserter_Put() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := ins.Put(ctx, items); err != nil {
// TODO: Handle error.
}
}

var schema bigquery.Schema

func ExampleInserter_Put_structSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()

type score struct {
Name string
Num int
}

// Assume schema holds the table's schema.
savers := []*bigquery.StructSaver{
{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
}
if err := ins.Put(ctx, savers); err != nil {
// TODO: Handle error.
}
}

func ExampleInserter_Put_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()

type score struct {
Name string
Num int
}
scores := []score{
{Name: "n1", Num: 12},
{Name: "n2", Num: 31},
{Name: "n3", Num: 7},
}
// Schema is inferred from the score type.
if err := ins.Put(ctx, scores); err != nil {
// TODO: Handle error.
}
}

func ExampleInserter_Put_valuesSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}

ins := client.Dataset("my_dataset").Table("my_table").Inserter()

var vss []*bigquery.ValuesSaver
for i, name := range []string{"n1", "n2", "n3"} {
// Assume schema holds the table's schema.
vss = append(vss, &bigquery.ValuesSaver{
Schema: schema,
InsertID: name,
Row: []bigquery.Value{name, int64(i)},
})
}

if err := ins.Put(ctx, vss); err != nil {
// TODO: Handle error.
}
}

+ 0
- 400
vendor/cloud.google.com/go/bigquery/external.go

@@ -1,400 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"encoding/base64"
"unicode/utf8"

bq "google.golang.org/api/bigquery/v2"
)

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
GoogleSheets DataFormat = "GOOGLE_SHEETS"
Bigtable DataFormat = "BIGTABLE"
Parquet DataFormat = "PARQUET"
ORC DataFormat = "ORC"
)

// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
toBQ() bq.ExternalDataConfiguration
}

// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
// The format of the data. Required.
SourceFormat DataFormat

// The fully-qualified URIs that point to your
// data in Google Cloud. Required.
//
// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs
// apply to external data sources.
//
// For Google Cloud Bigtable URIs, exactly one URI can be specified, and it has
// to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
//
// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
// the '*' wildcard character is not allowed.
SourceURIs []string

// The schema of the data. Required for CSV and JSON; disallowed for the
// other formats.
Schema Schema

// Try to detect schema and format options automatically.
// Any option specified explicitly will be honored.
AutoDetect bool

// The compression type of the data.
Compression Compression

// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool

// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64

// Additional options for CSV, GoogleSheets and Bigtable formats.
Options ExternalDataConfigOptions
}
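
// Construction sketch (illustrative only; the bucket and option values are
// assumptions): describing a CSV file in GCS with one header row.
//
//	edc := &ExternalDataConfig{
//		SourceFormat: CSV,
//		SourceURIs:   []string{"gs://my-bucket/data*.csv"},
//		AutoDetect:   true,
//		Options:      &CSVOptions{SkipLeadingRows: 1},
//	}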

func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
q := bq.ExternalDataConfiguration{
SourceFormat: string(e.SourceFormat),
SourceUris: e.SourceURIs,
Autodetect: e.AutoDetect,
Compression: string(e.Compression),
IgnoreUnknownValues: e.IgnoreUnknownValues,
MaxBadRecords: e.MaxBadRecords,
}
if e.Schema != nil {
q.Schema = e.Schema.toBQ()
}
if e.Options != nil {
e.Options.populateExternalDataConfig(&q)
}
return q
}

func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
e := &ExternalDataConfig{
SourceFormat: DataFormat(q.SourceFormat),
SourceURIs: q.SourceUris,
AutoDetect: q.Autodetect,
Compression: Compression(q.Compression),
IgnoreUnknownValues: q.IgnoreUnknownValues,
MaxBadRecords: q.MaxBadRecords,
Schema: bqToSchema(q.Schema),
}
switch {
case q.CsvOptions != nil:
e.Options = bqToCSVOptions(q.CsvOptions)
case q.GoogleSheetsOptions != nil:
e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
case q.BigtableOptions != nil:
var err error
e.Options, err = bqToBigtableOptions(q.BigtableOptions)
if err != nil {
return nil, err
}
}
return e, nil
}

// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
populateExternalDataConfig(*bq.ExternalDataConfiguration)
}

// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
// AllowJaggedRows causes missing trailing optional columns to be tolerated
// when reading CSV data. Missing values are treated as nulls.
AllowJaggedRows bool

// AllowQuotedNewlines sets whether quoted data sections containing
// newlines are allowed when reading CSV data.
AllowQuotedNewlines bool

// Encoding is the character encoding of data to be read.
Encoding Encoding

// FieldDelimiter is the separator for fields in a CSV file, used when
// reading or exporting data. The default is ",".
FieldDelimiter string

// Quote is the value used to quote data sections in a CSV file. The
// default quotation character is the double quote ("), which is used if
// both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation
// character, set ForceZeroQuote to true.
// Only used when reading data.
Quote string
ForceZeroQuote bool

// The number of rows at the top of a CSV file that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}

func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.CsvOptions = &bq.CsvOptions{
AllowJaggedRows: o.AllowJaggedRows,
AllowQuotedNewlines: o.AllowQuotedNewlines,
Encoding: string(o.Encoding),
FieldDelimiter: o.FieldDelimiter,
Quote: o.quote(),
SkipLeadingRows: o.SkipLeadingRows,
}
}

// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
if o.ForceZeroQuote {
quote := ""
return &quote
}
if o.Quote == "" {
return nil
}
return &o.Quote
}
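
// Caller-side sketch: to read CSV data with no quote character at all, set
// ForceZeroQuote rather than leaving Quote empty (an empty Quote falls back
// to the server default, the double quote):
//
//	opts := &CSVOptions{ForceZeroQuote: true} // quote() yields a pointer to ""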

func (o *CSVOptions) setQuote(ps *string) {
if ps != nil {
o.Quote = *ps
if o.Quote == "" {
o.ForceZeroQuote = true
}
}
}

func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
o := &CSVOptions{
AllowJaggedRows: q.AllowJaggedRows,
AllowQuotedNewlines: q.AllowQuotedNewlines,
Encoding: Encoding(q.Encoding),
FieldDelimiter: q.FieldDelimiter,
SkipLeadingRows: q.SkipLeadingRows,
}
o.setQuote(q.Quote)
return o
}

// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
// The number of rows at the top of a sheet that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}

func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
SkipLeadingRows: o.SkipLeadingRows,
}
}

func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
return &GoogleSheetsOptions{
SkipLeadingRows: q.SkipLeadingRows,
}
}

// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
// A list of column families to expose in the table schema along with their
// types. If omitted, all column families are present in the table schema and
// their values are read as BYTES.
ColumnFamilies []*BigtableColumnFamily

// If true, then the column families that are not specified in columnFamilies
// list are not exposed in the table schema. Otherwise, they are read with BYTES
// type values. The default is false.
IgnoreUnspecifiedColumnFamilies bool

// If true, then the rowkey column families will be read and converted to string.
// Otherwise they are read with BYTES type values and users need to manually cast
// them with CAST if necessary. The default is false.
ReadRowkeyAsString bool
}

func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
q := &bq.BigtableOptions{
IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: o.ReadRowkeyAsString,
}
for _, f := range o.ColumnFamilies {
q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
}
c.BigtableOptions = q
}

func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
b := &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: q.ReadRowkeyAsString,
}
for _, f := range q.ColumnFamilies {
f2, err := bqToBigtableColumnFamily(f)
if err != nil {
return nil, err
}
b.ColumnFamilies = append(b.ColumnFamilies, f2)
}
return b, nil
}

// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
// Identifier of the column family.
FamilyID string

// Lists of columns that should be exposed as individual fields as opposed to a
// list of (column name, value) pairs. All columns whose qualifier matches a
// qualifier in this list can be accessed as <family>.<column>. Other columns
// can be accessed as a list through the <family>.Column field.
Columns []*BigtableColumn

// The encoding of the values when the type is not STRING. Acceptable encoding values are:
// - TEXT - indicates values are alphanumeric text strings.
// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
// This can be overridden for a specific column by listing that column in 'columns' and
// specifying an encoding for it.
Encoding string

// If true, only the latest version of values are exposed for all columns in this
// column family. This can be overridden for a specific column by listing that
// column in 'columns' and specifying a different setting for that column.
OnlyReadLatest bool

// The type to convert the value in cells of this
// column family. The values are expected to be encoded using HBase
// Bytes.toBytes function when using the BINARY encoding value.
// Following BigQuery types are allowed (case-sensitive):
// BYTES STRING INTEGER FLOAT BOOLEAN.
// The default type is BYTES. This can be overridden for a specific column by
// listing that column in 'columns' and specifying a type for it.
Type string
}

func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
q := &bq.BigtableColumnFamily{
FamilyId: b.FamilyID,
Encoding: b.Encoding,
OnlyReadLatest: b.OnlyReadLatest,
Type: b.Type,
}
for _, col := range b.Columns {
q.Columns = append(q.Columns, col.toBQ())
}
return q
}

func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
b := &BigtableColumnFamily{
FamilyID: q.FamilyId,
Encoding: q.Encoding,
OnlyReadLatest: q.OnlyReadLatest,
Type: q.Type,
}
for _, col := range q.Columns {
c, err := bqToBigtableColumn(col)
if err != nil {
return nil, err
}
b.Columns = append(b.Columns, c)
}
return b, nil
}

// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
// Qualifier of the column. Columns in the parent column family that have this
// exact qualifier are exposed as <family field name>.<column field name> fields.
// The column field name is the same as the column qualifier.
Qualifier string

// If the qualifier is not a valid BigQuery field identifier i.e. does not match
// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
// name and is used as field name in queries.
FieldName string

// If true, only the latest version of values are exposed for this column.
// See BigtableColumnFamily.OnlyReadLatest.
OnlyReadLatest bool

// The encoding of the values when the type is not STRING.
// See BigtableColumnFamily.Encoding
Encoding string

// The type to convert the value in cells of this column.
// See BigtableColumnFamily.Type
Type string
}

func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
q := &bq.BigtableColumn{
FieldName: b.FieldName,
OnlyReadLatest: b.OnlyReadLatest,
Encoding: b.Encoding,
Type: b.Type,
}
if utf8.ValidString(b.Qualifier) {
q.QualifierString = b.Qualifier
} else {
q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
}
return q
}

func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
b := &BigtableColumn{
FieldName: q.FieldName,
OnlyReadLatest: q.OnlyReadLatest,
Encoding: q.Encoding,
Type: q.Type,
}
if q.QualifierString != "" {
b.Qualifier = q.QualifierString
} else {
bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
if err != nil {
return nil, err
}
b.Qualifier = string(bytes)
}
return b, nil
}
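
// Hedged usage sketch (illustrative, not part of the vendored file): wiring
// BigtableOptions into an ExternalDataConfig and creating an external table
// from it. The client, dataset/table IDs and source URI are assumptions.
func exampleBigtableExternal(ctx context.Context, client *Client) error {
	edc := &ExternalDataConfig{
		SourceFormat: Bigtable,
		SourceURIs:   []string{"https://googleapis.com/bigtable/projects/p/instances/i/tables/t"},
		Options: &BigtableOptions{
			ReadRowkeyAsString: true,
			ColumnFamilies: []*BigtableColumnFamily{
				{FamilyID: "f1", Encoding: "TEXT", Type: "STRING"},
			},
		},
	}
	t := client.Dataset("dataset-id").Table("external-table")
	return t.Create(ctx, &TableMetadata{ExternalDataConfig: edc})
}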

+ 0 - 143  vendor/cloud.google.com/go/bigquery/external_test.go

@@ -1,143 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"

"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
)

func TestExternalDataConfig(t *testing.T) {
// Round-trip of ExternalDataConfig to underlying representation.
for i, want := range []*ExternalDataConfig{
{
SourceFormat: CSV,
SourceURIs: []string{"uri"},
Schema: Schema{{Name: "n", Type: IntegerFieldType}},
AutoDetect: true,
Compression: Gzip,
IgnoreUnknownValues: true,
MaxBadRecords: 17,
Options: &CSVOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
FieldDelimiter: "f",
Quote: "q",
SkipLeadingRows: 3,
},
},
{
SourceFormat: GoogleSheets,
Options: &GoogleSheetsOptions{SkipLeadingRows: 4},
},
{
SourceFormat: Bigtable,
Options: &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: true,
ReadRowkeyAsString: true,
ColumnFamilies: []*BigtableColumnFamily{
{
FamilyID: "f1",
Encoding: "TEXT",
OnlyReadLatest: true,
Type: "FLOAT",
Columns: []*BigtableColumn{
{
Qualifier: "valid-utf-8",
FieldName: "fn",
OnlyReadLatest: true,
Encoding: "BINARY",
Type: "STRING",
},
},
},
},
},
},
} {
q := want.toBQ()
got, err := bqToExternalDataConfig(&q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
}
}
}

func TestQuote(t *testing.T) {
ptr := func(s string) *string { return &s }

for _, test := range []struct {
quote string
force bool
want *string
}{
{"", false, nil},
{"", true, ptr("")},
{"-", false, ptr("-")},
{"-", true, ptr("")},
} {
o := CSVOptions{
Quote: test.quote,
ForceZeroQuote: test.force,
}
got := o.quote()
if (got == nil) != (test.want == nil) {
t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
}
if got != nil && test.want != nil && *got != *test.want {
t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
}
}
}

func TestQualifier(t *testing.T) {
b := BigtableColumn{Qualifier: "a"}
q := b.toBQ()
if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
}
b2, err := bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}

const (
invalidUTF8 = "\xDF\xFF"
invalidEncoded = "3/8"
)
b = BigtableColumn{Qualifier: invalidUTF8}
q = b.toBQ()
if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, "", b.Qualifier, invalidEncoded)
}
b2, err = bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}
}

+ 0 - 110  vendor/cloud.google.com/go/bigquery/extract.go

@@ -1,110 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"

"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
// Src is the table from which data will be extracted.
Src *Table

// Dst is the destination into which the data will be extracted.
Dst *GCSReference

// DisableHeader disables the printing of a header row in exported data.
DisableHeader bool

// The labels associated with this job.
Labels map[string]string
}

func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
var printHeader *bool
if e.DisableHeader {
f := false
printHeader = &f
}
return &bq.JobConfiguration{
Labels: e.Labels,
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.URIs...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.toBQ(),
PrintHeader: printHeader,
},
}
}

func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
qe := q.Extract
return &ExtractConfig{
Labels: q.Labels,
Dst: &GCSReference{
URIs: qe.DestinationUris,
Compression: Compression(qe.Compression),
DestinationFormat: DataFormat(qe.DestinationFormat),
FileConfig: FileConfig{
CSVOptions: CSVOptions{
FieldDelimiter: qe.FieldDelimiter,
},
},
},
DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
Src: bqToTable(qe.SourceTable, c),
}
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
JobIDConfig
ExtractConfig
c *Client
}

// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
return &Extractor{
c: t.c,
ExtractConfig: ExtractConfig{
Src: t,
Dst: dst,
},
}
}

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
defer func() { trace.EndSpan(ctx, err) }()

return e.c.insertJob(ctx, e.newJob(), nil)
}

func (e *Extractor) newJob() *bq.Job {
return &bq.Job{
JobReference: e.JobIDConfig.createJobRef(e.c),
Configuration: e.ExtractConfig.toBQ(),
}
}
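
// Hedged usage sketch (not part of the vendored file): driving an extract job
// end to end. The bucket URI and dataset/table IDs are assumptions.
func exampleExtract(ctx context.Context, client *Client) error {
	gcs := NewGCSReference("gs://bucket/output-*.csv")
	gcs.Compression = Gzip
	ext := client.Dataset("dataset-id").Table("table-id").ExtractorTo(gcs)
	ext.DisableHeader = true
	job, err := ext.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	return status.Err() // Wait can return nil even when the job itself failed.
}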

+ 0 - 116  vendor/cloud.google.com/go/bigquery/extract_test.go

@@ -1,116 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)

func defaultExtractJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
DestinationUris: []string{"uri"},
},
},
}
}

func defaultGCS() *GCSReference {
return &GCSReference{
URIs: []string{"uri"},
}
}

func TestExtract(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{
projectID: "client-project-id",
}

testCases := []struct {
dst *GCSReference
src *Table
config ExtractConfig
want *bq.Job
}{
{
dst: defaultGCS(),
src: c.Dataset("dataset-id").Table("table-id"),
want: defaultExtractJob(),
},
{
dst: defaultGCS(),
src: c.Dataset("dataset-id").Table("table-id"),
config: ExtractConfig{
DisableHeader: true,
Labels: map[string]string{"a": "b"},
},
want: func() *bq.Job {
j := defaultExtractJob()
j.Configuration.Labels = map[string]string{"a": "b"}
f := false
j.Configuration.Extract.PrintHeader = &f
return j
}(),
},
{
dst: func() *GCSReference {
g := NewGCSReference("uri")
g.Compression = Gzip
g.DestinationFormat = JSON
g.FieldDelimiter = "\t"
return g
}(),
src: c.Dataset("dataset-id").Table("table-id"),
want: func() *bq.Job {
j := defaultExtractJob()
j.Configuration.Extract.Compression = "GZIP"
j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Extract.FieldDelimiter = "\t"
return j
}(),
},
}

for i, tc := range testCases {
ext := tc.src.ExtractorTo(tc.dst)
tc.config.Src = ext.Src
tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config
got := ext.newJob()
checkJob(t, i, got, tc.want)

jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc, &ext.ExtractConfig,
cmp.AllowUnexported(Table{}, Client{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}

+ 0 - 137  vendor/cloud.google.com/go/bigquery/file.go

@@ -1,137 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"io"

bq "google.golang.org/api/bigquery/v2"
)

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
r io.Reader
FileConfig
}

// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
return &ReaderSource{r: r}
}

func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
r.FileConfig.populateLoadConfig(lc)
return r.r
}

// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via a ReaderSource passed to Table.LoaderFrom.
type FileConfig struct {
// SourceFormat is the format of the data to be read.
// Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet. The default is CSV.
SourceFormat DataFormat

// Indicates if we should automatically infer the options and
// schema for CSV and JSON sources.
AutoDetect bool

// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64

// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool

// Schema describes the data. It is required when reading CSV or JSON data,
// unless the data is being loaded into a table that already exists.
Schema Schema

// Additional options for CSV files.
CSVOptions
}

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
conf.SkipLeadingRows = fc.SkipLeadingRows
conf.SourceFormat = string(fc.SourceFormat)
conf.Autodetect = fc.AutoDetect
conf.AllowJaggedRows = fc.AllowJaggedRows
conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
conf.Encoding = string(fc.Encoding)
conf.FieldDelimiter = fc.FieldDelimiter
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
conf.Quote = fc.quote()
}

func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
fc.SourceFormat = DataFormat(conf.SourceFormat)
fc.AutoDetect = conf.Autodetect
fc.MaxBadRecords = conf.MaxBadRecords
fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
fc.Schema = bqToSchema(conf.Schema)
fc.SkipLeadingRows = conf.SkipLeadingRows
fc.AllowJaggedRows = conf.AllowJaggedRows
fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
fc.Encoding = Encoding(conf.Encoding)
fc.FieldDelimiter = conf.FieldDelimiter
fc.CSVOptions.setQuote(conf.Quote)
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
format := fc.SourceFormat
if format == "" {
// Format must be explicitly set for external data sources.
format = CSV
}
conf.Autodetect = fc.AutoDetect
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
conf.SourceFormat = string(format)
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
if format == CSV {
fc.CSVOptions.populateExternalDataConfig(conf)
}
}

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
// UTF_8 specifies the UTF-8 encoding type.
UTF_8 Encoding = "UTF-8"
// ISO_8859_1 specifies the ISO-8859-1 encoding type.
ISO_8859_1 Encoding = "ISO-8859-1"
)
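
// Hedged usage sketch (not part of the vendored file): loading CSV data from
// an io.Reader through a ReaderSource. The dataset and table IDs are
// assumptions; Table.LoaderFrom is the entry point named by NewReaderSource above.
func exampleLoadFromReader(ctx context.Context, client *Client, r io.Reader) (*Job, error) {
	src := NewReaderSource(r)
	src.SourceFormat = CSV
	src.SkipLeadingRows = 1 // skip a header row
	loader := client.Dataset("dataset-id").Table("table-id").LoaderFrom(src)
	return loader.Run(ctx)
}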

+ 0 - 98  vendor/cloud.google.com/go/bigquery/file_test.go

@@ -1,98 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"

"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)

var (
hyphen = "-"
fc = FileConfig{
SourceFormat: CSV,
AutoDetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: Schema{
stringFieldSchema(),
nestedFieldSchema(),
},
CSVOptions: CSVOptions{
Quote: hyphen,
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
},
}
)

func TestFileConfigPopulateLoadConfig(t *testing.T) {
want := &bq.JobConfigurationLoad{
SourceFormat: "CSV",
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Autodetect: true,
Encoding: "UTF-8",
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
Quote: &hyphen,
}
got := &bq.JobConfigurationLoad{}
fc.populateLoadConfig(got)
if !testutil.Equal(got, want) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
}
}

func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
got := &bq.ExternalDataConfiguration{}
fc.populateExternalDataConfig(got)

want := &bq.ExternalDataConfiguration{
SourceFormat: "CSV",
Autodetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: "\t",
Quote: &hyphen,
SkipLeadingRows: 8,
},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}

+ 0 - 75  vendor/cloud.google.com/go/bigquery/gcs.go

@@ -1,75 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"io"

bq "google.golang.org/api/bigquery/v2"
)

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
// URIs refer to Google Cloud Storage objects.
URIs []string

FileConfig

// DestinationFormat is the format to use when writing exported files.
// Allowed values are: CSV, Avro, JSON. The default is CSV.
// CSV is not supported for tables with nested or repeated fields.
DestinationFormat DataFormat

// Compression specifies the type of compression to apply when writing data
// to Google Cloud Storage, or using this GCSReference as an ExternalData
// source with CSV or JSON SourceFormat. Default is None.
Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
return &GCSReference{URIs: uri}
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
// None specifies no compression.
None Compression = "NONE"
// Gzip specifies gzip compression.
Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
lc.SourceUris = gcs.URIs
gcs.FileConfig.populateLoadConfig(lc)
return nil
}

func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
conf := bq.ExternalDataConfiguration{
Compression: string(gcs.Compression),
SourceUris: append([]string{}, gcs.URIs...),
}
gcs.FileConfig.populateExternalDataConfig(&conf)
return conf
}
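
// Hedged usage sketch (not part of the vendored file): a GCSReference with a
// wildcard URI used as a load source; names other than those defined above
// are assumptions.
func exampleLoadFromGCS(ctx context.Context, client *Client) (*Job, error) {
	gcs := NewGCSReference("gs://bucket/data-*.json") // one '*' after the bucket name
	gcs.SourceFormat = JSON
	return client.Dataset("dataset-id").Table("table-id").LoaderFrom(gcs).Run(ctx)
}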

+ 0 - 238  vendor/cloud.google.com/go/bigquery/inserter.go

@@ -1,238 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"fmt"
"reflect"

"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)

// An Inserter does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Inserter struct {
t *Table

// SkipInvalidRows causes rows containing invalid data to be silently
// ignored. The default value is false, which causes the entire request to
// fail if there is an attempt to insert an invalid row.
SkipInvalidRows bool

// IgnoreUnknownValues causes values not matching the schema to be ignored.
// The default value is false, which causes records containing such values
// to be treated as invalid records.
IgnoreUnknownValues bool

// A TableTemplateSuffix allows Inserters to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
TableTemplateSuffix string
}

// Inserter returns an Inserter that can be used to append rows to t.
// The returned Inserter may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Inserter() *Inserter {
return &Inserter{t: t}
}

// Uploader calls Inserter.
// Deprecated: use Table.Inserter instead.
func (t *Table) Uploader() *Inserter { return t.Inserter() }

// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
defer func() { trace.EndSpan(ctx, err) }()

savers, err := valueSavers(src)
if err != nil {
return err
}
return u.putMulti(ctx, savers)
}

func valueSavers(src interface{}) ([]ValueSaver, error) {
saver, ok, err := toValueSaver(src)
if err != nil {
return nil, err
}
if ok {
return []ValueSaver{saver}, nil
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)

}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok, err := toValueSaver(s)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
}
savers = append(savers, saver)
}
return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
if _, ok := x.(StructSaver); ok {
return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
}
var insertID string
// Handle StructSavers specially so we can infer the schema if necessary.
if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
x = ss.Struct
insertID = ss.InsertID
// Fall through so we can infer the schema.
}
if saver, ok := x.(ValueSaver); ok {
return saver, ok, nil
}
v := reflect.ValueOf(x)
// Support Put with []interface{}
if v.Kind() == reflect.Interface {
v = v.Elem()
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
return nil, false, nil
}
schema, err := inferSchemaReflectCached(v.Type())
if err != nil {
return nil, false, err
}
return &StructSaver{
Struct: x,
InsertID: insertID,
Schema: schema,
}, true, nil
}

func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
req, err := u.newInsertRequest(src)
if err != nil {
return err
}
if req == nil {
return nil
}
call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
call = call.Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err = runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
return err
}
return handleInsertErrors(res.InsertErrors, req.Rows)
}

func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
if savers == nil { // If there are no rows, do nothing.
return nil, nil
}
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: u.TableTemplateSuffix,
IgnoreUnknownValues: u.IgnoreUnknownValues,
SkipInvalidRows: u.SkipInvalidRows,
}
for _, saver := range savers {
row, insertID, err := saver.Save()
if err != nil {
return nil, err
}
if insertID == "" {
insertID = randomIDFn()
}
m := make(map[string]bq.JsonValue)
for k, v := range row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: insertID,
Json: m,
})
}
return req, nil
}

func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
if len(ierrs) == 0 {
return nil
}
var errs PutMultiError
for _, e := range ierrs {
if int(e.Index) >= len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertId,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, bqToError(errp))
}
errs = append(errs, rie)
}
return errs
}

// Uploader is an obsolete name for Inserter.
type Uploader = Inserter
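
// Hedged usage sketch (not part of the vendored file): streaming rows with
// Inserter.Put. The row type and table IDs are assumptions; Put infers a
// schema from the struct as described above.
type exampleRow struct {
	Name  string
	Count int
}

func exampleInsert(ctx context.Context, client *Client) error {
	ins := client.Dataset("dataset-id").Table("table-id").Inserter()
	ins.SkipInvalidRows = true
	rows := []*exampleRow{{Name: "a", Count: 1}, {Name: "b", Count: 2}}
	return ins.Put(ctx, rows) // a PutMultiError reports any rows that failed
}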

+ 0 - 210  vendor/cloud.google.com/go/bigquery/inserter_test.go

@@ -1,210 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"errors"
"strconv"
"testing"

"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)

type testSaver struct {
row map[string]Value
insertID string
err error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.row, ts.insertID, ts.err
}

func TestNewInsertRequest(t *testing.T) {
prev := randomIDFn
n := 0
randomIDFn = func() string { n++; return strconv.Itoa(n) }
defer func() { randomIDFn = prev }()

tests := []struct {
ul *Uploader
savers []ValueSaver
req *bq.TableDataInsertAllRequest
}{
{
ul: &Uploader{},
savers: nil,
req: nil,
},
{
ul: &Uploader{},
savers: []ValueSaver{
testSaver{row: map[string]Value{"one": 1}},
testSaver{row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
},
},
},
{
ul: &Uploader{
TableTemplateSuffix: "suffix",
IgnoreUnknownValues: true,
SkipInvalidRows: true,
},
savers: []ValueSaver{
testSaver{insertID: "a", row: map[string]Value{"one": 1}},
testSaver{insertID: "", row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
},
TemplateSuffix: "suffix",
SkipInvalidRows: true,
IgnoreUnknownValues: true,
},
},
}
for i, tc := range tests {
got, err := tc.ul.newInsertRequest(tc.savers)
if err != nil {
t.Fatal(err)
}
want := tc.req
if !testutil.Equal(got, want) {
t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
}
}
}

func TestNewInsertRequestErrors(t *testing.T) {
var u Uploader
_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("bang")}})
if err == nil {
t.Error("got nil, want error")
}
}

func TestHandleInsertErrors(t *testing.T) {
rows := []*bq.TableDataInsertAllRequestRows{
{InsertId: "a"},
{InsertId: "b"},
}
for _, test := range []struct {
in []*bq.TableDataInsertAllResponseInsertErrors
want error
}{
{
in: nil,
want: nil,
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{
{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
},
want: PutMultiError{
RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
},
},
} {
got := handleInsertErrors(test.in, rows)
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
}
}
}

func TestValueSavers(t *testing.T) {
ts := &testSaver{}
type T struct{ I int }
schema, err := InferSchema(T{})
if err != nil {
t.Fatal(err)
}
for _, test := range []struct {
in interface{}
want []ValueSaver
}{
{[]interface{}(nil), nil},
{[]interface{}{}, nil},
{ts, []ValueSaver{ts}},
{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
{[]T{{I: 1}, {I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: T{I: 2}},
}},
{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: &T{I: 2}},
}},
{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
[]ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
}},
} {
got, err := valueSavers(test.in)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
}
// Make sure Save is successful.
for i, vs := range got {
_, _, err := vs.Save()
if err != nil {
t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
}
}
}
}

func TestValueSaversErrors(t *testing.T) {
inputs := []interface{}{
nil,
1,
[]int{1, 2},
[]interface{}{
testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
1,
},
StructSaver{},
}
for _, in := range inputs {
if _, err := valueSavers(in); err == nil {
t.Errorf("%#v: got nil, want error", in)
}
}
}

+ 0 - 2217  vendor/cloud.google.com/go/bigquery/integration_test.go (file diff suppressed because it is too large)


+ 0 - 222  vendor/cloud.google.com/go/bigquery/iterator.go

@@ -1,222 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"fmt"
"reflect"

bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)

// Construct a RowIterator.
// If pf is nil, there are no rows in the result set.
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
it := &RowIterator{
ctx: ctx,
table: t,
pf: pf,
}
if pf != nil {
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.rows) },
func() interface{} { r := it.rows; it.rows = nil; return r })
}
return it
}

// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
ctx context.Context
table *Table
pf pageFetcher
pageInfo *iterator.PageInfo
nextFunc func() error

// StartIndex can be set before the first call to Next. If PageInfo().Token
// is also set, StartIndex is ignored.
StartIndex uint64

// The schema of the table. Available after the first call to Next.
Schema Schema

// The total number of rows in the result. Available after the first call to Next.
// May be zero just after rows were inserted.
TotalRows uint64

rows [][]Value
structLoader structLoader // used to populate a pointer to a struct
}

// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
// STRING string
// BOOL bool
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
// FLOAT float32, float64
// BYTES []byte
// TIMESTAMP time.Time
// DATE civil.Date
// TIME civil.Time
// DATETIME civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
if it.pf == nil { // There are no rows in the result set.
return iterator.Done
}
var vl ValueLoader
switch dst := dst.(type) {
case ValueLoader:
vl = dst
case *[]Value:
vl = (*valueList)(dst)
case *map[string]Value:
vl = (*valueMap)(dst)
default:
if !isStructPtr(dst) {
return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
}
}
if err := it.nextFunc(); err != nil {
return err
}
row := it.rows[0]
it.rows = it.rows[1:]

if vl == nil {
// This can only happen if dst is a pointer to a struct. We couldn't
// set vl above because we need the schema.
if err := it.structLoader.set(dst, it.Schema); err != nil {
return err
}
vl = &it.structLoader
}
return vl.Load(row, it.Schema)
}

func isStructPtr(x interface{}) bool {
t := reflect.TypeOf(x)
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
if err != nil {
return "", err
}
it.rows = append(it.rows, res.rows...)
it.Schema = res.schema
it.TotalRows = res.totalRows
return res.pageToken, nil
}

// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

type fetchPageResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}

// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
// Fetch the table schema in the background, if necessary.
errc := make(chan error, 1)
if schema != nil {
errc <- nil
} else {
go func() {
var bqt *bq.Table
err := runWithRetry(ctx, func() (err error) {
bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && bqt.Schema != nil {
schema = bqToSchema(bqt.Schema)
}
errc <- err
}()
}
call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
setClientHeader(call.Header())
if pageToken != "" {
call.PageToken(pageToken)
} else {
call.StartIndex(startIndex)
}
if pageSize > 0 {
call.MaxResults(pageSize)
}
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
err = <-errc
if err != nil {
return nil, err
}
rows, err := convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return &fetchPageResult{
pageToken: res.PageToken,
rows: rows,
totalRows: uint64(res.TotalRows),
schema: schema,
}, nil
}
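
// Hedged usage sketch (not part of the vendored file): draining a RowIterator.
// iterator.Done marks the end of the result set; any other error is fatal.
// consumeRowIterator in iterator_test.go below follows the same pattern.
func exampleDrain(it *RowIterator) ([][]Value, error) {
	var all [][]Value
	for {
		var row []Value
		err := it.Next(&row)
		if err == iterator.Done {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		all = append(all, row)
	}
}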

+ 0 - 362  vendor/cloud.google.com/go/bigquery/iterator_test.go

@@ -1,362 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"fmt"
"testing"

"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
)

type fetchResponse struct {
result *fetchPageResult // The result to return.
err error // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
fetchResponses map[string]fetchResponse
err error
}

func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
call, ok := pf.fetchResponses[pageToken]
if !ok {
pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
}
return call.result, call.err
}

func TestIterator(t *testing.T) {
var (
iiSchema = Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
}
siSchema = Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
}
)
fetchFailure := errors.New("fetch failure")

testCases := []struct {
desc string
pageToken string
fetchResponses map[string]fetchResponse
want [][]Value
wantErr error
wantSchema Schema
wantTotalRows uint64
}{
{
desc: "Iteration over single empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
schema: Schema{},
},
},
},
want: [][]Value{},
wantSchema: Schema{},
},
{
desc: "Iteration over single page",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
totalRows: 4,
},
},
},
want: [][]Value{{1, 2}, {11, 12}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Iteration over single page with different schema",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{"1", 2}, {"11", 12}},
schema: siSchema,
},
},
},
want: [][]Value{{"1", 2}, {"11", 12}},
wantSchema: siSchema,
},
{
desc: "Iteration over two pages",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
totalRows: 4,
},
},
"a": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
totalRows: 4,
},
},
},
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Server response includes empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{},
schema: iiSchema,
},
},
"b": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
},
},
},
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
},
{
desc: "Fetch error",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
// We return some data from this fetch, but also an error.
// So the end result should include only data from the previous fetch.
err: fetchFailure,
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
},
},
},
want: [][]Value{{1, 2}, {11, 12}},
wantErr: fetchFailure,
wantSchema: iiSchema,
},

{
desc: "Skip over an entire page",
pageToken: "a",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
},
},
},
want: [][]Value{{101, 102}, {111, 112}},
wantSchema: iiSchema,
},

{
desc: "Skip beyond all data",
pageToken: "b",
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
},
},
"b": {
result: &fetchPageResult{},
},
},
// In this test case, Next will return iterator.Done on its first call,
// so no values are ever read.
want: [][]Value{},
wantSchema: Schema{},
},
}

for _, tc := range testCases {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newRowIterator(context.Background(), nil, pf.fetchPage)
it.PageInfo().Token = tc.pageToken
values, schema, totalRows, err := consumeRowIterator(it)
if err != tc.wantErr {
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
}
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
}
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
if totalRows != tc.wantTotalRows {
t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
}
}
}

// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
var (
got [][]Value
schema Schema
totalRows uint64
)
for {
var vls []Value
err := it.Next(&vls)
if err == iterator.Done {
return got, schema, totalRows, nil
}
if err != nil {
return got, schema, totalRows, err
}
got = append(got, vls)
schema = it.Schema
totalRows = it.TotalRows
}
}

func TestNextDuringErrorState(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {err: errors.New("bang")},
},
}
it := newRowIterator(context.Background(), nil, pf.fetchPage)
var vals []Value
if err := it.Next(&vals); err == nil {
t.Errorf("Expected error after calling Next")
}
if err := it.Next(&vals); err == nil {
t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
}
}

func TestNextAfterFinished(t *testing.T) {
testCases := []struct {
fetchResponses map[string]fetchResponse
want [][]Value
}{
{
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
want: [][]Value{{1, 2}, {11, 12}},
},
{
fetchResponses: map[string]fetchResponse{
"": {
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
},
},
},
want: [][]Value{},
},
}

for _, tc := range testCases {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newRowIterator(context.Background(), nil, pf.fetchPage)

values, _, _, err := consumeRowIterator(it)
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
}
// Try calling Next again.
var vals []Value
if err := it.Next(&vals); err != iterator.Done {
t.Errorf("Expected Done calling Next when there are no more values")
}
}
}

func TestIteratorNextTypes(t *testing.T) {
it := newRowIterator(context.Background(), nil, nil)
for _, v := range []interface{}{3, "s", []int{}, &[]int{},
map[string]Value{}, &map[string]interface{}{},
struct{}{},
} {
if err := it.Next(v); err == nil {
t.Errorf("%v: want error, got nil", v)
}
}
}

+ 0 - 830  vendor/cloud.google.com/go/bigquery/job.go

@@ -1,830 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"fmt"
"time"

"cloud.google.com/go/internal"
"cloud.google.com/go/internal/trace"
gax "github.com/googleapis/gax-go/v2"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
)

// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
c *Client
projectID string
jobID string
location string
email string
config *bq.JobConfiguration
lastStatus *JobStatus
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
return c.JobFromIDLocation(ctx, id, c.Location)
}

// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
defer func() { trace.EndSpan(ctx, err) }()

bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
if err != nil {
return nil, err
}
return bqToJob(bqjob, c)
}

// ID returns the job's ID.
func (j *Job) ID() string {
return j.jobID
}

// Location returns the job's location.
func (j *Job) Location() string {
return j.location
}

// Email returns the email of the job's creator.
func (j *Job) Email() string {
return j.email
}

// State is one of a sequence of states that a Job progresses through as it is processed.
type State int

const (
// StateUnspecified is the default JobIterator state.
StateUnspecified State = iota
// Pending is a state that describes that the job is pending.
Pending
// Running is a state that describes that the job is running.
Running
// Done is a state that describes that the job is done.
Done
)

// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
State State

err error

// All errors encountered during the running of the job.
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
Errors []*Error

// Statistics about the job.
Statistics *JobStatistics
}

// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
isJobConfig()
}

func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}

// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
return bqToJobConfig(j.config, j.c)
}

func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
switch {
case q == nil:
return nil, nil
case q.Copy != nil:
return bqToCopyConfig(q, c), nil
case q.Extract != nil:
return bqToExtractConfig(q, c), nil
case q.Load != nil:
return bqToLoadConfig(q, c), nil
case q.Query != nil:
return bqToQueryConfig(q, c)
default:
return nil, nil
}
}

// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string

// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool

// Location is the location for the job.
Location string
}

// createJobRef creates a JobReference.
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
loc := j.Location
if loc == "" { // Use Client.Location as a default.
loc = c.Location
}
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
if j.JobID == "" {
jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
jr.JobId = j.JobID + "-" + randomIDFn()
} else {
jr.JobId = j.JobID
}
return jr
}

// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
return s.State == Done
}

// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
return s.err
}

// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
defer func() { trace.EndSpan(ctx, err) }()

bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
if err != nil {
return nil, err
}
if err := j.setStatus(bqjob.Status); err != nil {
return nil, err
}
j.setStatistics(bqjob.Statistics, j.c)
return j.lastStatus, nil
}

// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
return j.lastStatus
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Location(j.location).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
}

// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
defer func() { trace.EndSpan(ctx, err) }()

if j.isQuery() {
// We can avoid polling for query jobs.
if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
return nil, err
}
// Note: extra RPC even if you just want to wait for the query to finish.
js, err := j.Status(ctx)
if err != nil {
return nil, err
}
return js, nil
}
// Non-query jobs must poll.
err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
js, err = j.Status(ctx)
if err != nil {
return true, err
}
if js.Done() {
return true, nil
}
return false, nil
})
if err != nil {
return nil, err
}
return js, nil
}
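
// Hedged sketch of the example referred to above (not part of the vendored
// file): Wait's error and JobStatus.Err must both be checked.
func exampleWait(ctx context.Context, j *Job) error {
	status, err := j.Wait(ctx)
	if err != nil {
		return err // the status could not be retrieved
	}
	if err := status.Err(); err != nil {
		return err // the job completed, but unsuccessfully
	}
	return nil
}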

// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
defer func() { trace.EndSpan(ctx, err) }()

return j.read(ctx, j.waitForQuery, fetchPage)
}
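// Usage sketch (editorial addition): results are consumed through the
// returned RowIterator, stopping on iterator.Done from
// google.golang.org/api/iterator:
//
//	it, err := job.Read(ctx)
//	if err != nil {
//		return err
//	}
//	for {
//		var row []Value
//		err := it.Next(&row)
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(row)
//	}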

func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
if !j.isQuery() {
return nil, errors.New("bigquery: cannot read from a non-query job")
}
destTable := j.config.Query.DestinationTable
// The destination table should only be nil if there was a query error.
projectID := j.projectID
if destTable != nil && projectID != destTable.ProjectId {
return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
}
schema, totalRows, err := waitForQuery(ctx, projectID)
if err != nil {
return nil, err
}
if destTable == nil {
return nil, errors.New("bigquery: query job missing destination table")
}
dt := bqToTable(destTable, j.c)
if totalRows == 0 {
pf = nil
}
it := newRowIterator(ctx, dt, pf)
it.Schema = schema
it.TotalRows = totalRows
return it, nil
}

// waitForQuery waits for the query job to complete and returns its schema. It also
// returns the total number of rows in the result set.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
Multiplier: 2,
Max: 60 * time.Second,
}
var res *bq.GetQueryResultsResponse
err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
res, err = call.Do()
if err != nil {
return !retryableError(err), err
}
if !res.JobComplete { // GetQueryResults may return early without error; retry.
return false, nil
}
return true, nil
})
if err != nil {
return nil, 0, err
}
return bqToSchema(res.Schema), res.TotalRows, nil
}

// JobStatistics contains statistics about a job.
type JobStatistics struct {
CreationTime time.Time
StartTime time.Time
EndTime time.Time
TotalBytesProcessed int64

Details Statistics
}

// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
implementsStatistics()
}

// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
// The number of files per destination URI or URI pattern specified in the
// extract configuration. These values will be in the same order as the
// URIs specified in the 'destinationUris' field.
DestinationURIFileCounts []int64
}

// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
// The number of bytes of source data in a load job.
InputFileBytes int64

// The number of source files in a load job.
InputFiles int64

// Size of the loaded data in bytes. Note that while a load job is in the
// running state, this value may change.
OutputBytes int64

// The number of rows imported in a load job. Note that while a load job is
// in the running state, this value may change.
OutputRows int64
}

// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
// Billing tier for the job.
BillingTier int64

// Whether the query result was fetched from the query cache.
CacheHit bool

// The type of query statement, if valid.
StatementType string

// Total bytes billed for the job.
TotalBytesBilled int64

// Total bytes processed for the job.
TotalBytesProcessed int64

// For dry run queries, indicates how accurate the TotalBytesProcessed value is.
// Possible values include:
// UNKNOWN: accuracy of the estimate is unknown.
// PRECISE: estimate is precise.
// LOWER_BOUND: estimate is a lower bound of what the query would cost.
// UPPER_BOUND: estimate is an upper bound of what the query would cost.
TotalBytesProcessedAccuracy string

// Describes execution plan for the query.
QueryPlan []*ExplainQueryStage

// The number of rows affected by a DML statement. Present only for DML
// statements INSERT, UPDATE or DELETE.
NumDMLAffectedRows int64

// Describes a timeline of job execution.
Timeline []*QueryTimelineSample

// ReferencedTables: [Output-only, Experimental] Referenced tables for
// the job. Queries that reference more than 50 tables will not have a
// complete list.
ReferencedTables []*Table

// The schema of the results. Present only for successful dry run of
// non-legacy SQL queries.
Schema Schema

// Slot-milliseconds consumed by this query job.
SlotMillis int64

// Standard SQL: list of undeclared query parameter names detected during a
// dry run validation.
UndeclaredQueryParameterNames []string

// DDL target table.
DDLTargetTable *Table

// DDL Operation performed on the target table. Used to report how the
// query impacted the DDL target table.
DDLOperationPerformed string
}
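// Access sketch (editorial addition): after a job is done, its query
// statistics are reached via the Details field of JobStatus.Statistics:
//
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err
//	}
//	if qs, ok := status.Statistics.Details.(*QueryStatistics); ok {
//		fmt.Println(qs.TotalBytesBilled, qs.CacheHit)
//	}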

// ExplainQueryStage describes one stage of a query.
type ExplainQueryStage struct {
// CompletedParallelInputs: Number of parallel input segments completed.
CompletedParallelInputs int64

// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
ComputeAvg time.Duration

// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
ComputeMax time.Duration

// Relative amount of the total time the average shard spent on CPU-bound tasks.
ComputeRatioAvg float64

// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
ComputeRatioMax float64

// EndTime: Stage end time.
EndTime time.Time

// Unique ID for stage within plan.
ID int64

// InputStages: IDs for stages that are inputs to this stage.
InputStages []int64

// Human-readable name for stage.
Name string

// ParallelInputs: Number of parallel input segments to be processed.
ParallelInputs int64

// ReadAvg: Duration the average shard spent reading input.
ReadAvg time.Duration

// ReadMax: Duration the slowest shard spent reading input.
ReadMax time.Duration

// Relative amount of the total time the average shard spent reading input.
ReadRatioAvg float64

// Relative amount of the total time the slowest shard spent reading input.
ReadRatioMax float64

// Number of records read into the stage.
RecordsRead int64

// Number of records written by the stage.
RecordsWritten int64

// ShuffleOutputBytes: Total number of bytes written to shuffle.
ShuffleOutputBytes int64

// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
// and spilled to disk.
ShuffleOutputBytesSpilled int64

// StartTime: Stage start time.
StartTime time.Time

// Current status for the stage.
Status string

// List of operations within the stage in dependency order (approximately
// chronological).
Steps []*ExplainQueryStep

// WaitAvg: Duration the average shard spent waiting to be scheduled.
WaitAvg time.Duration

// WaitMax: Duration the slowest shard spent waiting to be scheduled.
WaitMax time.Duration

// Relative amount of the total time the average shard spent waiting to be scheduled.
WaitRatioAvg float64

// Relative amount of the total time the slowest shard spent waiting to be scheduled.
WaitRatioMax float64

// WriteAvg: Duration the average shard spent on writing output.
WriteAvg time.Duration

// WriteMax: Duration the slowest shard spent on writing output.
WriteMax time.Duration

// Relative amount of the total time the average shard spent on writing output.
WriteRatioAvg float64

// Relative amount of the total time the slowest shard spent on writing output.
WriteRatioMax float64
}

// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
// Machine-readable operation type.
Kind string

// Human-readable stage descriptions.
Substeps []string
}

// QueryTimelineSample represents a sample of execution statistics at a point in time.
type QueryTimelineSample struct {

// Total number of units currently being processed by workers, represented as the largest value since the last sample.
ActiveUnits int64

// Total parallel units of work completed by this query.
CompletedUnits int64

// Time elapsed since start of query execution.
Elapsed time.Duration

// Total parallel units of work remaining for the active stages.
PendingUnits int64

// Cumulative slot-milliseconds consumed by the query.
SlotMillis int64
}

func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}

// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
it := &JobIterator{
ctx: ctx,
c: c,
ProjectID: c.projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}

// JobIterator iterates over jobs in a project.
type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project.
AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller.
State State // List only jobs in the given state. Defaults to all states.
MinCreationTime time.Time // List only jobs created after this time.
MaxCreationTime time.Time // List only jobs created before this time.

ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Job
}

// PageInfo is a getter for the JobIterator's PageInfo.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next Job. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *JobIterator) Next() (*Job, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
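// Usage sketch (editorial addition): filters are set on the iterator before
// the first Next call; iteration follows the standard iterator.Done pattern.
// (job.ID() is the ID accessor defined earlier in job.go.)
//
//	it := client.Jobs(ctx)
//	it.State = Running
//	for {
//		job, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(job.ID())
//	}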

func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
var st string
switch it.State {
case StateUnspecified:
st = ""
case Pending:
st = "pending"
case Running:
st = "running"
case Done:
st = "done"
default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
}

req := it.c.bqs.Jobs.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
Projection("full").
AllUsers(it.AllUsers)
if st != "" {
req.StateFilter(st)
}
if !it.MinCreationTime.IsZero() {
req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
}
if !it.MaxCreationTime.IsZero() {
req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
}
setClientHeader(req.Header())
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
if err != nil {
return "", err
}
for _, j := range res.Jobs {
job, err := convertListedJob(j, it.c)
if err != nil {
return "", err
}
it.items = append(it.items, job)
}
return res.NextPageToken, nil
}

func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
}

func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if location != "" {
call = call.Location(location)
}
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}

func bqToJob(q *bq.Job, c *Client) (*Job, error) {
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
}

func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
location: qr.Location,
c: c,
email: email,
}
j.setConfig(qc)
if err := j.setStatus(qs); err != nil {
return nil, err
}
j.setStatistics(qt, c)
return j, nil
}

func (j *Job) setConfig(config *bq.JobConfiguration) {
if config == nil {
return
}
j.config = config
}

func (j *Job) isQuery() bool {
return j.config != nil && j.config.Query != nil
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func (j *Job) setStatus(qs *bq.JobStatus) error {
if qs == nil {
return nil
}
state, ok := stateMap[qs.State]
if !ok {
return fmt.Errorf("unexpected job state: %v", qs.State)
}
j.lastStatus = &JobStatus{
State: state,
err: nil,
}
if err := bqToError(qs.ErrorResult); state == Done && err != nil {
j.lastStatus.err = err
}
for _, ep := range qs.Errors {
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
}
return nil
}

func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
if s == nil || j.lastStatus == nil {
return
}
js := &JobStatistics{
CreationTime: unixMillisToTime(s.CreationTime),
StartTime: unixMillisToTime(s.StartTime),
EndTime: unixMillisToTime(s.EndTime),
TotalBytesProcessed: s.TotalBytesProcessed,
}
switch {
case s.Extract != nil:
js.Details = &ExtractStatistics{
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
}
case s.Load != nil:
js.Details = &LoadStatistics{
InputFileBytes: s.Load.InputFileBytes,
InputFiles: s.Load.InputFiles,
OutputBytes: s.Load.OutputBytes,
OutputRows: s.Load.OutputRows,
}
case s.Query != nil:
var names []string
for _, qp := range s.Query.UndeclaredQueryParameters {
names = append(names, qp.Name)
}
var tables []*Table
for _, tr := range s.Query.ReferencedTables {
tables = append(tables, bqToTable(tr, c))
}
js.Details = &QueryStatistics{
BillingTier: s.Query.BillingTier,
CacheHit: s.Query.CacheHit,
DDLTargetTable: bqToTable(s.Query.DdlTargetTable, c),
DDLOperationPerformed: s.Query.DdlOperationPerformed,
StatementType: s.Query.StatementType,
TotalBytesBilled: s.Query.TotalBytesBilled,
TotalBytesProcessed: s.Query.TotalBytesProcessed,
TotalBytesProcessedAccuracy: s.Query.TotalBytesProcessedAccuracy,
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
Schema: bqToSchema(s.Query.Schema),
SlotMillis: s.Query.TotalSlotMs,
Timeline: timelineFromProto(s.Query.Timeline),
ReferencedTables: tables,
UndeclaredQueryParameterNames: names,
}
}
j.lastStatus.Statistics = js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
var res []*ExplainQueryStage
for _, s := range stages {
var steps []*ExplainQueryStep
for _, p := range s.Steps {
steps = append(steps, &ExplainQueryStep{
Kind: p.Kind,
Substeps: p.Substeps,
})
}
res = append(res, &ExplainQueryStage{
CompletedParallelInputs: s.CompletedParallelInputs,
ComputeAvg: time.Duration(s.ComputeMsAvg) * time.Millisecond,
ComputeMax: time.Duration(s.ComputeMsMax) * time.Millisecond,
ComputeRatioAvg: s.ComputeRatioAvg,
ComputeRatioMax: s.ComputeRatioMax,
EndTime: time.Unix(0, s.EndMs*1e6),
ID: s.Id,
InputStages: s.InputStages,
Name: s.Name,
ParallelInputs: s.ParallelInputs,
ReadAvg: time.Duration(s.ReadMsAvg) * time.Millisecond,
ReadMax: time.Duration(s.ReadMsMax) * time.Millisecond,
ReadRatioAvg: s.ReadRatioAvg,
ReadRatioMax: s.ReadRatioMax,
RecordsRead: s.RecordsRead,
RecordsWritten: s.RecordsWritten,
ShuffleOutputBytes: s.ShuffleOutputBytes,
ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
StartTime: time.Unix(0, s.StartMs*1e6),
Status: s.Status,
Steps: steps,
WaitAvg: time.Duration(s.WaitMsAvg) * time.Millisecond,
WaitMax: time.Duration(s.WaitMsMax) * time.Millisecond,
WaitRatioAvg: s.WaitRatioAvg,
WaitRatioMax: s.WaitRatioMax,
WriteAvg: time.Duration(s.WriteMsAvg) * time.Millisecond,
WriteMax: time.Duration(s.WriteMsMax) * time.Millisecond,
WriteRatioAvg: s.WriteRatioAvg,
WriteRatioMax: s.WriteRatioMax,
})
}
return res
}

func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample {
var res []*QueryTimelineSample
for _, s := range timeline {
res = append(res, &QueryTimelineSample{
ActiveUnits: s.ActiveUnits,
CompletedUnits: s.CompletedUnits,
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond,
PendingUnits: s.PendingUnits,
SlotMillis: s.TotalSlotMs,
})
}
return res
}

+ 0
- 95
vendor/cloud.google.com/go/bigquery/job_test.go View File

@@ -1,95 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"

"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)

func TestCreateJobRef(t *testing.T) {
defer fixRandomID("RANDOM")()
cNoLoc := &Client{projectID: "projectID"}
cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
for _, test := range []struct {
in JobIDConfig
client *Client
want *bq.JobReference
}{
{
in: JobIDConfig{JobID: "foo"},
want: &bq.JobReference{JobId: "foo"},
},
{
in: JobIDConfig{},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "foo-RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
{
in: JobIDConfig{JobID: "foo"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
} {
client := test.client
if client == nil {
client = cNoLoc
}
got := test.in.createJobRef(client)
test.want.ProjectId = "projectID"
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
}
}
}

func fixRandomID(s string) func() {
prev := randomIDFn
randomIDFn = func() string { return s }
return func() { randomIDFn = prev }
}

func checkJob(t *testing.T, i int, got, want *bq.Job) {
if got.JobReference == nil {
t.Errorf("#%d: empty job reference", i)
return
}
if got.JobReference.JobId == "" {
t.Errorf("#%d: empty job ID", i)
return
}
d := testutil.Diff(got, want)
if d != "" {
t.Errorf("#%d: (got=-, want=+) %s", i, d)
}
}

+ 0
- 153
vendor/cloud.google.com/go/bigquery/load.go View File

@@ -1,153 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"io"

"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// Src is the source from which data will be loaded.
Src LoadSource

// Dst is the table into which the data will be loaded.
Dst *Table

// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition

// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend.
WriteDisposition TableWriteDisposition

// The labels associated with this job.
Labels map[string]string

// If non-nil, the destination table is partitioned by time.
TimePartitioning *TimePartitioning

// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering

// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig

// Allows the schema of the destination table to be updated as a side effect of
// the load job.
SchemaUpdateOptions []string

// For Avro-based loads, controls whether logical type annotations are used.
// See https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
// for additional information.
UseAvroLogicalTypes bool
}

func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
config := &bq.JobConfiguration{
Labels: l.Labels,
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
Clustering: l.Clustering.toBQ(),
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: l.SchemaUpdateOptions,
UseAvroLogicalTypes: l.UseAvroLogicalTypes,
},
}
media := l.Src.populateLoadConfig(config.Load)
return config, media
}

func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
lc := &LoadConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition),
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
Clustering: bqToClustering(q.Load.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
SchemaUpdateOptions: q.Load.SchemaUpdateOptions,
UseAvroLogicalTypes: q.Load.UseAvroLogicalTypes,
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
s := NewReaderSource(nil)
fc = &s.FileConfig
lc.Src = s
} else {
s := NewGCSReference(q.Load.SourceUris...)
fc = &s.FileConfig
lc.Src = s
}
bqPopulateFileConfig(q.Load, fc)
return lc
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
JobIDConfig
LoadConfig
c *Client
}

// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
// populates config, returns media
populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
return &Loader{
c: t.c,
LoadConfig: LoadConfig{
Src: src,
Dst: t,
},
}
}
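// Usage sketch (editorial addition; "gs://bucket/data.csv" is a placeholder
// URI): configure the Loader between LoaderFrom and Run:
//
//	gcsRef := NewGCSReference("gs://bucket/data.csv")
//	loader := table.LoaderFrom(gcsRef)
//	loader.WriteDisposition = WriteTruncate
//	job, err := loader.Run(ctx)
//	if err != nil {
//		return err
//	}
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err
//	}
//	if status.Err() != nil {
//		return status.Err()
//	}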

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
defer func() { trace.EndSpan(ctx, err) }()

job, media := l.newJob()
return l.c.insertJob(ctx, job, media)
}

func (l *Loader) newJob() (*bq.Job, io.Reader) {
config, media := l.LoadConfig.toBQ()
return &bq.Job{
JobReference: l.JobIDConfig.createJobRef(l.c),
Configuration: config,
}, media
}

+ 0
- 298
vendor/cloud.google.com/go/bigquery/load_test.go View File

@@ -1,298 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"strings"
"testing"
"time"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2"
)

func defaultLoadJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
SourceUris: []string{"uri"},
},
},
}
}

func stringFieldSchema() *FieldSchema {
return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}

func nestedFieldSchema() *FieldSchema {
return &FieldSchema{
Name: "nested",
Type: RecordFieldType,
Schema: Schema{stringFieldSchema()},
}
}

func bqStringFieldSchema() *bq.TableFieldSchema {
return &bq.TableFieldSchema{
Name: "fieldname",
Type: "STRING",
}
}

func bqNestedFieldSchema() *bq.TableFieldSchema {
return &bq.TableFieldSchema{
Name: "nested",
Type: "RECORD",
Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
}
}

func TestLoad(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{projectID: "client-project-id"}

testCases := []struct {
dst *Table
src LoadSource
jobID string
location string
config LoadConfig
want *bq.Job
}{
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: defaultLoadJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
location: "loc",
want: func() *bq.Job {
j := defaultLoadJob()
j.JobReference.Location = "loc"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
config: LoadConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
Clustering: &Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1234,
}
j.Configuration.Load.Clustering = &bq.Clustering{
Fields: []string{"cfield1"},
}
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "client-project-id",
}
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.MaxBadRecords = 1
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.IgnoreUnknownValues = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.MaxBadRecords = 1
j.Configuration.Load.AllowJaggedRows = true
j.Configuration.Load.AllowQuotedNewlines = true
j.Configuration.Load.IgnoreUnknownValues = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.Schema = Schema{
stringFieldSchema(),
nestedFieldSchema(),
}
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.Schema = &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}}
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SkipLeadingRows = 1
g.SourceFormat = JSON
g.Encoding = UTF_8
g.FieldDelimiter = "\t"
g.Quote = "-"
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SkipLeadingRows = 1
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Load.Encoding = "UTF-8"
j.Configuration.Load.FieldDelimiter = "\t"
hyphen := "-"
j.Configuration.Load.Quote = &hyphen
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
// Quote is left unset in GCSReference, so should be nil here.
j.Configuration.Load.Quote = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.ForceZeroQuote = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
empty := ""
j.Configuration.Load.Quote = &empty
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SkipLeadingRows = 1
r.SourceFormat = JSON
r.Encoding = UTF_8
r.FieldDelimiter = "\t"
r.Quote = "-"
return r
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SkipLeadingRows = 1
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Load.Encoding = "UTF-8"
j.Configuration.Load.FieldDelimiter = "\t"
hyphen := "-"
j.Configuration.Load.Quote = &hyphen
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SourceFormat = Avro
return g
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SourceFormat = Avro
return r
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
}

for i, tc := range testCases {
loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
loader.Location = tc.location
tc.config.Src = tc.src
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
got, _ := loader.newJob()
checkJob(t, i, got, tc.want)

jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
cmp.AllowUnexported(Table{}, Client{}),
cmpopts.IgnoreUnexported(ReaderSource{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}

+ 0
- 348
vendor/cloud.google.com/go/bigquery/nulls.go View File

@@ -1,348 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"time"

"cloud.google.com/go/civil"
)

// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
Int64 int64
Valid bool // Valid is true if Int64 is not NULL.
}

func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }

// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
StringVal string
Valid bool // Valid is true if StringVal is not NULL.
}

func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }

// NullGeography represents a BigQuery GEOGRAPHY string that may be NULL.
type NullGeography struct {
GeographyVal string
Valid bool // Valid is true if GeographyVal is not NULL.
}

func (n NullGeography) String() string { return nullstr(n.Valid, n.GeographyVal) }

// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
Float64 float64
Valid bool // Valid is true if Float64 is not NULL.
}

func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }

// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
Bool bool
Valid bool // Valid is true if Bool is not NULL.
}

func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }

// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
Timestamp time.Time
Valid bool // Valid is true if Timestamp is not NULL.
}

func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }

// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
Date civil.Date
Valid bool // Valid is true if Date is not NULL.
}

func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }

// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
Time civil.Time
Valid bool // Valid is true if Time is not NULL.
}

func (n NullTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilTimeString(n.Time)
}

// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
DateTime civil.DateTime
Valid bool // Valid is true if DateTime is not NULL.
}

func (n NullDateTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilDateTimeString(n.DateTime)
}

// MarshalJSON converts the NullInt64 to JSON.
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }

// MarshalJSON converts the NullFloat64 to JSON.
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }

// MarshalJSON converts the NullBool to JSON.
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }

// MarshalJSON converts the NullString to JSON.
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }

// MarshalJSON converts the NullGeography to JSON.
func (n NullGeography) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.GeographyVal) }

// MarshalJSON converts the NullTimestamp to JSON.
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }

// MarshalJSON converts the NullDate to JSON.
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }

// MarshalJSON converts the NullTime to JSON.
func (n NullTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}

// MarshalJSON converts the NullDateTime to JSON.
func (n NullDateTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}

func nullstr(valid bool, v interface{}) string {
if !valid {
return "NULL"
}
return fmt.Sprint(v)
}

var jsonNull = []byte("null")

func nulljson(valid bool, v interface{}) ([]byte, error) {
if !valid {
return jsonNull, nil
}
return json.Marshal(v)
}
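// Illustration (editorial, mirroring TestNullsJSON later in this diff):
//
//	b, _ := json.Marshal(NullInt64{Valid: true, Int64: 3}) // `3`
//	b, _ = json.Marshal(NullInt64{})                       // `null`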

// UnmarshalJSON converts JSON into a NullInt64.
func (n *NullInt64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Int64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.Int64); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullFloat64.
func (n *NullFloat64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Float64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.Float64); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullBool.
func (n *NullBool) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Bool = false
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.Bool); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullString.
func (n *NullString) UnmarshalJSON(b []byte) error {
n.Valid = false
n.StringVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.StringVal); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullGeography.
func (n *NullGeography) UnmarshalJSON(b []byte) error {
n.Valid = false
n.GeographyVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.GeographyVal); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullTimestamp.
func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Timestamp = time.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.Timestamp); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullDate.
func (n *NullDate) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Date = civil.Date{}
if bytes.Equal(b, jsonNull) {
return nil
}

if err := json.Unmarshal(b, &n.Date); err != nil {
return err
}
n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullTime.
func (n *NullTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Time = civil.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}

s, err := strconv.Unquote(string(b))
if err != nil {
return err
}

t, err := civil.ParseTime(s)
if err != nil {
return err
}
n.Time = t

n.Valid = true
return nil
}

// UnmarshalJSON converts JSON into a NullDateTime.
func (n *NullDateTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.DateTime = civil.DateTime{}
if bytes.Equal(b, jsonNull) {
return nil
}

s, err := strconv.Unquote(string(b))
if err != nil {
return err
}

dt, err := parseCivilDateTime(s)
if err != nil {
return err
}
n.DateTime = dt

n.Valid = true
return nil
}

var (
typeOfNullInt64 = reflect.TypeOf(NullInt64{})
typeOfNullFloat64 = reflect.TypeOf(NullFloat64{})
typeOfNullBool = reflect.TypeOf(NullBool{})
typeOfNullString = reflect.TypeOf(NullString{})
typeOfNullGeography = reflect.TypeOf(NullGeography{})
typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
typeOfNullDate = reflect.TypeOf(NullDate{})
typeOfNullTime = reflect.TypeOf(NullTime{})
typeOfNullDateTime = reflect.TypeOf(NullDateTime{})
)

func nullableFieldType(t reflect.Type) FieldType {
switch t {
case typeOfNullInt64:
return IntegerFieldType
case typeOfNullFloat64:
return FloatFieldType
case typeOfNullBool:
return BooleanFieldType
case typeOfNullString:
return StringFieldType
case typeOfNullGeography:
return GeographyFieldType
case typeOfNullTimestamp:
return TimestampFieldType
case typeOfNullDate:
return DateFieldType
case typeOfNullTime:
return TimeFieldType
case typeOfNullDateTime:
return DateTimeFieldType
default:
return ""
}
}

+ 0
- 75
vendor/cloud.google.com/go/bigquery/nulls_test.go View File

@@ -1,75 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"encoding/json"
"reflect"
"testing"

"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
)

var (
nullsTestTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)

func TestNullsJSON(t *testing.T) {
for _, test := range []struct {
in interface{}
want string
}{
{&NullInt64{Valid: true, Int64: 3}, `3`},
{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
{&NullBool{Valid: true, Bool: true}, `true`},
{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
{&NullGeography{Valid: true, GeographyVal: "ST_GEOPOINT(47.649154, -122.350220)"}, `"ST_GEOPOINT(47.649154, -122.350220)"`},
{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},

{&NullInt64{}, `null`},
{&NullFloat64{}, `null`},
{&NullBool{}, `null`},
{&NullString{}, `null`},
{&NullGeography{}, `null`},
{&NullTimestamp{}, `null`},
{&NullDate{}, `null`},
{&NullTime{}, `null`},
{&NullDateTime{}, `null`},
} {
bytes, err := json.Marshal(test.in)
if err != nil {
t.Fatal(err)
}
if got, want := string(bytes), test.want; got != want {
t.Errorf("%#v: got %s, want %s", test.in, got, want)
}

typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
value := reflect.New(typ).Interface()
err = json.Unmarshal(bytes, value)
if err != nil {
t.Fatal(err)
}

if !testutil.Equal(value, test.in) {
t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
}
}
}

+ 0
- 38
vendor/cloud.google.com/go/bigquery/oc_test.go View File

@@ -1,38 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"testing"

"cloud.google.com/go/internal/testutil"
)

func TestOCTracing(t *testing.T) {
ctx := context.Background()
client := getClient(t)
defer client.Close()

te := testutil.NewTestExporter()
defer te.Unregister()

q := client.Query("select *")
q.Run(ctx) // Doesn't matter if we get an error; span should be created either way

if len(te.Spans) == 0 {
t.Fatalf("Expected some spans to be created, but got %d", 0)
}
}

+ 0
- 370
vendor/cloud.google.com/go/bigquery/params.go View File

@@ -1,370 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"encoding/base64"
"errors"
"fmt"
"math/big"
"reflect"
"regexp"
"time"

"cloud.google.com/go/civil"
"cloud.google.com/go/internal/fields"
bq "google.golang.org/api/bigquery/v2"
)

var (
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
timestampFormat = "2006-01-02 15:04:05.999999-07:00"

// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

const nullableTagOption = "nullable"

func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
if err != nil {
return "", false, nil, err
}
if name != "" && !validFieldName.MatchString(name) {
return "", false, nil, invalidFieldNameError(name)
}
for _, opt := range opts {
if opt != nullableTagOption {
return "", false, nil, fmt.Errorf(
"bigquery: invalid tag option %q. The only valid option is %q",
opt, nullableTagOption)
}
}
return name, keep, opts, nil
}

type invalidFieldNameError string

func (e invalidFieldNameError) Error() string {
return fmt.Sprintf("bigquery: invalid name %q of field in struct", string(e))
}

var fieldCache = fields.NewCache(bqTagParser, nil, nil)

var (
int64ParamType = &bq.QueryParameterType{Type: "INT64"}
float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"}
boolParamType = &bq.QueryParameterType{Type: "BOOL"}
stringParamType = &bq.QueryParameterType{Type: "STRING"}
bytesParamType = &bq.QueryParameterType{Type: "BYTES"}
dateParamType = &bq.QueryParameterType{Type: "DATE"}
timeParamType = &bq.QueryParameterType{Type: "TIME"}
dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"}
timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
numericParamType = &bq.QueryParameterType{Type: "NUMERIC"}
)

var (
typeOfDate = reflect.TypeOf(civil.Date{})
typeOfTime = reflect.TypeOf(civil.Time{})
typeOfDateTime = reflect.TypeOf(civil.DateTime{})
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfRat = reflect.TypeOf(&big.Rat{})
)

// A QueryParameter is a parameter to a query.
type QueryParameter struct {
// Name is used for named parameter mode.
// It must match the name in the query case-insensitively.
Name string

// Value is the value of the parameter.
//
// When you create a QueryParameter to send to BigQuery, the following Go types
// are supported, with their corresponding Bigquery types:
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
// Note that uint, uint64 and uintptr are not supported, because
// they may contain values that cannot fit into a 64-bit signed integer.
// float32, float64: FLOAT64
// bool: BOOL
// string: STRING
// []byte: BYTES
// time.Time: TIMESTAMP
// *big.Rat: NUMERIC
// Arrays and slices of the above.
// Structs of the above. Only the exported fields are used.
//
// BigQuery does not support params of type GEOGRAPHY. For users wishing
// to parameterize Geography values, use string parameters and cast in the
// SQL query, e.g. `SELECT ST_GeogFromText(@string_param) as geo`
//
// When a QueryParameter is returned inside a QueryConfig from a call to
// Job.Config:
// Integers are of type int64.
// Floating-point values are of type float64.
// Arrays are of type []interface{}, regardless of the array element type.
// Structs are of type map[string]interface{}.
Value interface{}
}
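// Usage sketch (editorial addition; assumes a *Client named client):
//
//	q := client.Query("SELECT word FROM corpus WHERE count > @min")
//	q.Parameters = []QueryParameter{{Name: "min", Value: 100}}
//	job, err := q.Run(ctx)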

func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
pv, err := paramValue(reflect.ValueOf(p.Value))
if err != nil {
return nil, err
}
pt, err := paramType(reflect.TypeOf(p.Value))
if err != nil {
return nil, err
}
return &bq.QueryParameter{
Name: p.Name,
ParameterValue: &pv,
ParameterType: pt,
}, nil
}

func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
if t == nil {
return nil, errors.New("bigquery: nil parameter")
}
switch t {
case typeOfDate:
return dateParamType, nil
case typeOfTime:
return timeParamType, nil
case typeOfDateTime:
return dateTimeParamType, nil
case typeOfGoTime:
return timestampParamType, nil
case typeOfRat:
return numericParamType, nil
}
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64ParamType, nil

case reflect.Float32, reflect.Float64:
return float64ParamType, nil

case reflect.Bool:
return boolParamType, nil

case reflect.String:
return stringParamType, nil

case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return bytesParamType, nil
}
fallthrough

case reflect.Array:
et, err := paramType(t.Elem())
if err != nil {
return nil, err
}
return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil

case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
break
}
t = t.Elem()
fallthrough

case reflect.Struct:
var fts []*bq.QueryParameterTypeStructTypes
fields, err := fieldCache.Fields(t)
if err != nil {
return nil, err
}
for _, f := range fields {
pt, err := paramType(f.Type)
if err != nil {
return nil, err
}
fts = append(fts, &bq.QueryParameterTypeStructTypes{
Name: f.Name,
Type: pt,
})
}
return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
}
return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}

func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
var res bq.QueryParameterValue
if !v.IsValid() {
return res, errors.New("bigquery: nil parameter")
}
t := v.Type()
switch t {
case typeOfDate:
res.Value = v.Interface().(civil.Date).String()
return res, nil

case typeOfTime:
// civil.Time has nanosecond resolution, but BigQuery TIME has only
// microsecond resolution. (If we send nanoseconds, then when we try to read
// the result we get "query job missing destination table".)
res.Value = CivilTimeString(v.Interface().(civil.Time))
return res, nil

case typeOfDateTime:
res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
return res, nil

case typeOfGoTime:
res.Value = v.Interface().(time.Time).Format(timestampFormat)
return res, nil

case typeOfRat:
res.Value = NumericString(v.Interface().(*big.Rat))
return res, nil
}
switch t.Kind() {
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
return res, nil
}
fallthrough

case reflect.Array:
var vals []*bq.QueryParameterValue
for i := 0; i < v.Len(); i++ {
val, err := paramValue(v.Index(i))
if err != nil {
return bq.QueryParameterValue{}, err
}
vals = append(vals, &val)
}
return bq.QueryParameterValue{ArrayValues: vals}, nil

case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
}
t = t.Elem()
v = v.Elem()
if !v.IsValid() {
// nil pointer becomes empty value
return res, nil
}
fallthrough

case reflect.Struct:
fields, err := fieldCache.Fields(t)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues = map[string]bq.QueryParameterValue{}
for _, f := range fields {
fv := v.FieldByIndex(f.Index)
fp, err := paramValue(fv)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues[f.Name] = fp
}
return res, nil
}
// None of the above: assume a scalar type. (If it's not a valid type,
// paramType will catch the error.)
res.Value = fmt.Sprint(v.Interface())
// Ensure empty string values are sent.
if res.Value == "" {
res.ForceSendFields = append(res.ForceSendFields, "Value")
}
return res, nil
}

func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
p := QueryParameter{Name: q.Name}
val, err := convertParamValue(q.ParameterValue, q.ParameterType)
if err != nil {
return QueryParameter{}, err
}
p.Value = val
return p, nil
}

var paramTypeToFieldType = map[string]FieldType{
int64ParamType.Type: IntegerFieldType,
float64ParamType.Type: FloatFieldType,
boolParamType.Type: BooleanFieldType,
stringParamType.Type: StringFieldType,
bytesParamType.Type: BytesFieldType,
dateParamType.Type: DateFieldType,
timeParamType.Type: TimeFieldType,
numericParamType.Type: NumericFieldType,
}

// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
switch qtype.Type {
case "ARRAY":
if qval == nil {
return []interface{}(nil), nil
}
return convertParamArray(qval.ArrayValues, qtype.ArrayType)
case "STRUCT":
if qval == nil {
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
case "TIMESTAMP":
return time.Parse(timestampFormat, qval.Value)
case "DATETIME":
return parseCivilDateTime(qval.Value)
default:
return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
}
}

// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
var vals []interface{}
for _, el := range elVals {
val, err := convertParamValue(el, elType)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}

// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
vals := map[string]interface{}{}
for _, st := range sTypes {
if sv, ok := sVals[st.Name]; ok {
val, err := convertParamValue(&sv, st.Type)
if err != nil {
return nil, err
}
vals[st.Name] = val
} else {
vals[st.Name] = nil
}
}
return vals, nil
}

+ 0
- 385
vendor/cloud.google.com/go/bigquery/params_test.go View File

@@ -1,385 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"math"
"math/big"
"reflect"
"testing"
"time"

"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)

var scalarTests = []struct {
val interface{} // The Go value
wantVal string // paramValue's desired output
wantType *bq.QueryParameterType // paramType's desired output
}{
{int64(0), "0", int64ParamType},
{3.14, "3.14", float64ParamType},
{3.14159e-87, "3.14159e-87", float64ParamType},
{true, "true", boolParamType},
{"string", "string", stringParamType},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
{math.NaN(), "NaN", float64ParamType},
{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
"2016-03-20 04:22:09.000005-01:02",
timestampParamType},
{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
"2016-03-20 04:05:06.789000",
dateTimeParamType},
{big.NewRat(12345, 1000), "12.345000000", numericParamType},
}

type (
S1 struct {
A int
B *S2
C bool
}
S2 struct {
D string
}
)

var (
s1 = S1{
A: 1,
B: &S2{D: "s"},
C: true,
}

s1ParamType = &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}

s1ParamValue = bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": {
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}

s1ParamReturnValue = map[string]interface{}{
"A": int64(1),
"B": map[string]interface{}{"D": "s"},
"C": true,
}
)

func sval(s string) bq.QueryParameterValue {
return bq.QueryParameterValue{Value: s}
}

func TestParamValueScalar(t *testing.T) {
for _, test := range scalarTests {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Errorf("%v: got %v, want nil", test.val, err)
continue
}
want := sval(test.wantVal)
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}
}
}

func TestParamValueArray(t *testing.T) {
qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
{Value: "1"},
{Value: "2"},
},
}
for _, test := range []struct {
val interface{}
want bq.QueryParameterValue
}{
{[]int(nil), bq.QueryParameterValue{}},
{[]int{}, bq.QueryParameterValue{}},
{[]int{1, 2}, qpv},
{[2]int{1, 2}, qpv},
} {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
}
}
}

func TestParamValueStruct(t *testing.T) {
got, err := paramValue(reflect.ValueOf(s1))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamValue) {
t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
}
}

func TestParamValueErrors(t *testing.T) {
// paramValue lets a few invalid types through, but paramType catches them.
// Since we never call one without the other, that's fine.
for _, val := range []interface{}{nil, new([]int)} {
_, err := paramValue(reflect.ValueOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}

func TestParamType(t *testing.T) {
for _, test := range scalarTests {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.wantType) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
}
}
for _, test := range []struct {
val interface{}
want *bq.QueryParameterType
}{
{uint32(32767), int64ParamType},
{[]byte("foo"), bytesParamType},
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
{S1{}, s1ParamType},
} {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
}
}
}

func TestParamTypeErrors(t *testing.T) {
for _, val := range []interface{}{
nil, uint(0), new([]int), make(chan int),
} {
_, err := paramType(reflect.TypeOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}

func TestConvertParamValue(t *testing.T) {
// Scalars.
for _, test := range scalarTests {
pval, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
ptype, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
got, err := convertParamValue(&pval, ptype)
if err != nil {
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
}
if !testutil.Equal(got, test.val) {
t.Errorf("%#v: got %#v", test.val, got)
}
}
// Arrays.
for _, test := range []struct {
pval *bq.QueryParameterValue
want []interface{}
}{
{
&bq.QueryParameterValue{},
nil,
},
{
&bq.QueryParameterValue{
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
},
[]interface{}{int64(1), int64(2)},
},
} {
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
got, err := convertParamValue(test.pval, ptype)
if err != nil {
t.Fatalf("%+v: %v", test.pval, err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
}
}
// Structs.
got, err := convertParamValue(&s1ParamValue, s1ParamType)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamReturnValue) {
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
}
}

func TestIntegration_ScalarParam(t *testing.T) {
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
}
}
}

func TestIntegration_OtherParam(t *testing.T) {
c := getClient(t)
for _, test := range []struct {
val interface{}
wantData interface{}
wantParam interface{}
}{
{[]int(nil), []Value(nil), []interface{}(nil)},
{[]int{}, []Value(nil), []interface{}(nil)},
{
[]int{1, 2},
[]Value{int64(1), int64(2)},
[]interface{}{int64(1), int64(2)},
},
{
[3]int{1, 2, 3},
[]Value{int64(1), int64(2), int64(3)},
[]interface{}{int64(1), int64(2), int64(3)},
},
{
S1{},
[]Value{int64(0), nil, false},
map[string]interface{}{
"A": int64(0),
"B": nil,
"C": false,
},
},
{
s1,
[]Value{int64(1), []Value{"s"}, true},
s1ParamReturnValue,
},
} {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.wantData) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotData, gotData, test.wantData, test.wantData)
}
if !testutil.Equal(gotParam, test.wantParam) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotParam, gotParam, test.wantParam, test.wantParam)
}
}
}

// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
ctx := context.Background()
q := c.Query("select ?")
q.Parameters = []QueryParameter{{Value: x}}
job, err := q.Run(ctx)
if err != nil {
return nil, nil, err
}
it, err := job.Read(ctx)
if err != nil {
return nil, nil, err
}
var val []Value
err = it.Next(&val)
if err != nil {
return nil, nil, err
}
if len(val) != 1 {
return nil, nil, errors.New("wrong number of values")
}
conf, err := job.Config()
if err != nil {
return nil, nil, err
}
return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}

func TestQueryParameter_toBQ(t *testing.T) {
tests := []struct {
in QueryParameter
want []string
}{
{
in: QueryParameter{Name: "name", Value: ""},
want: []string{"Value"},
},
}

for _, test := range tests {
q, err := test.in.toBQ()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}

got := q.ParameterValue.ForceSendFields
if !cmp.Equal(test.want, got) {
t.Fatalf("want %v, got %v", test.want, got)
}
}
}

+ 0 - 328 vendor/cloud.google.com/go/bigquery/query.go

@@ -1,328 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"

"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
// Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created.
Dst *Table

// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
Q string

// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
// If DefaultProjectID is set, DefaultDatasetID must also be set.
DefaultProjectID string
DefaultDatasetID string

// TableDefinitions describes data sources outside of BigQuery.
// The map keys may be used as table names in the query string.
//
// When a QueryConfig is returned from Job.Config, the map values
// are always of type *ExternalDataConfig.
TableDefinitions map[string]ExternalData

// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition

// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition

// DisableQueryCache prevents results from being fetched from the query cache.
// If this field is false, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
DisableQueryCache bool

// DisableFlattenedResults prevents results from being flattened.
// If this field is false, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults.
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
DisableFlattenedResults bool

// AllowLargeResults allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
AllowLargeResults bool

// Priority specifies the priority with which to schedule the query.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
Priority QueryPriority

// MaxBillingTier sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this field is zero, the project default will be used.
MaxBillingTier int

// MaxBytesBilled limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this field is less than 1, the project default will be
// used.
MaxBytesBilled int64

// UseStandardSQL causes the query to use standard SQL. The default.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool

// UseLegacySQL causes the query to use legacy SQL.
UseLegacySQL bool

// Parameters is a list of query parameters. The presence of parameters
// implies the use of standard SQL.
// If the query uses positional syntax ("?"), then no parameter may have a name.
// If the query uses named syntax ("@p"), then all parameters must have names.
// It is illegal to mix positional and named syntax.
Parameters []QueryParameter

// TimePartitioning specifies time-based partitioning
// for the destination table.
TimePartitioning *TimePartitioning

// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering

// The labels associated with this job.
Labels map[string]string

// If true, don't actually run this job. A valid query will return a mostly
// empty response with some processing statistics, while an invalid query will
// return the same error it would if it wasn't a dry run.
//
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
// call LastStatus on the returned job to get statistics. Calling Status on a
// dry-run job will fail.
DryRun bool

// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig

// Allows the schema of the destination table to be updated as a side effect of
// the query job.
SchemaUpdateOptions []string
}

func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
qconf := &bq.JobConfigurationQuery{
Query: qc.Q,
CreateDisposition: string(qc.CreateDisposition),
WriteDisposition: string(qc.WriteDisposition),
AllowLargeResults: qc.AllowLargeResults,
Priority: string(qc.Priority),
MaximumBytesBilled: qc.MaxBytesBilled,
TimePartitioning: qc.TimePartitioning.toBQ(),
Clustering: qc.Clustering.toBQ(),
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: qc.SchemaUpdateOptions,
}
if len(qc.TableDefinitions) > 0 {
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
}
for name, data := range qc.TableDefinitions {
qconf.TableDefinitions[name] = data.toBQ()
}
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
qconf.DefaultDataset = &bq.DatasetReference{
DatasetId: qc.DefaultDatasetID,
ProjectId: qc.DefaultProjectID,
}
}
if tier := int64(qc.MaxBillingTier); tier > 0 {
qconf.MaximumBillingTier = &tier
}
f := false
if qc.DisableQueryCache {
qconf.UseQueryCache = &f
}
if qc.DisableFlattenedResults {
qconf.FlattenResults = &f
// DisableFlattenedResults implies AllowLargeResults.
qconf.AllowLargeResults = true
}
if qc.UseStandardSQL && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
ptrue := true
pfalse := false
if qc.UseLegacySQL {
qconf.UseLegacySql = &ptrue
} else {
qconf.UseLegacySql = &pfalse
}
if qc.Dst != nil && !qc.Dst.implicitTable() {
qconf.DestinationTable = qc.Dst.toBQ()
}
for _, p := range qc.Parameters {
qp, err := p.toBQ()
if err != nil {
return nil, err
}
qconf.QueryParameters = append(qconf.QueryParameters, qp)
}
return &bq.JobConfiguration{
Labels: qc.Labels,
DryRun: qc.DryRun,
Query: qconf,
}, nil
}

func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
qq := q.Query
qc := &QueryConfig{
Labels: q.Labels,
DryRun: q.DryRun,
Q: qq.Query,
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
AllowLargeResults: qq.AllowLargeResults,
Priority: QueryPriority(qq.Priority),
MaxBytesBilled: qq.MaximumBytesBilled,
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql,
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
Clustering: bqToClustering(qq.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
SchemaUpdateOptions: qq.SchemaUpdateOptions,
}
qc.UseStandardSQL = !qc.UseLegacySQL

if len(qq.TableDefinitions) > 0 {
qc.TableDefinitions = make(map[string]ExternalData)
}
for name, qedc := range qq.TableDefinitions {
edc, err := bqToExternalDataConfig(&qedc)
if err != nil {
return nil, err
}
qc.TableDefinitions[name] = edc
}
if qq.DefaultDataset != nil {
qc.DefaultProjectID = qq.DefaultDataset.ProjectId
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
}
if qq.MaximumBillingTier != nil {
qc.MaxBillingTier = int(*qq.MaximumBillingTier)
}
if qq.UseQueryCache != nil && !*qq.UseQueryCache {
qc.DisableQueryCache = true
}
if qq.FlattenResults != nil && !*qq.FlattenResults {
qc.DisableFlattenedResults = true
}
if qq.DestinationTable != nil {
qc.Dst = bqToTable(qq.DestinationTable, c)
}
for _, qp := range qq.QueryParameters {
p, err := bqToQueryParameter(qp)
if err != nil {
return nil, err
}
qc.Parameters = append(qc.Parameters, p)
}
return qc, nil
}

// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
// BatchPriority specifies that the query should be scheduled with the
// batch priority. BigQuery queues each batch query on your behalf, and
// starts the query as soon as idle resources are available, usually within
// a few minutes. If BigQuery hasn't started the query within 24 hours,
// BigQuery changes the job priority to interactive. Batch queries don't
// count towards your concurrent rate limit, which can make it easier to
// start many queries at once.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
BatchPriority QueryPriority = "BATCH"
// InteractivePriority specifies that the query should be scheduled with
// interactive priority, which means that the query is executed as soon as
// possible. Interactive queries count towards your concurrent rate limit
// and your daily limit. It is the default priority with which queries get
// executed.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
InteractivePriority QueryPriority = "INTERACTIVE"
)

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
JobIDConfig
QueryConfig
client *Client
}

// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
return &Query{
client: c,
QueryConfig: QueryConfig{Q: q},
}
}

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run")
defer func() { trace.EndSpan(ctx, err) }()

job, err := q.newJob()
if err != nil {
return nil, err
}
j, err = q.client.insertJob(ctx, job, nil)
if err != nil {
return nil, err
}
return j, nil
}

func (q *Query) newJob() (*bq.Job, error) {
config, err := q.QueryConfig.toBQ()
if err != nil {
return nil, err
}
return &bq.Job{
JobReference: q.JobIDConfig.createJobRef(q.client),
Configuration: config,
}, nil
}

// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
job, err := q.Run(ctx)
if err != nil {
return nil, err
}
return job.Read(ctx)
}
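
A minimal usage sketch of the Query API in this file, assuming a reachable BigQuery project; the project ID, table name, and parameter values are illustrative, not from the original:

	package main

	import (
		"context"
		"fmt"
		"log"

		"cloud.google.com/go/bigquery"
		"google.golang.org/api/iterator"
	)

	func main() {
		ctx := context.Background()
		// "my-project" is a hypothetical project ID.
		client, err := bigquery.NewClient(ctx, "my-project")
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Named-syntax parameters; the presence of Parameters implies standard SQL.
		q := client.Query("SELECT word FROM mydataset.words WHERE word_count > @min")
		q.Parameters = []bigquery.QueryParameter{{Name: "min", Value: 100}}

		// Read is shorthand for Run followed by Job.Read.
		it, err := q.Read(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for {
			var row []bigquery.Value
			err := it.Next(&row)
			if err == iterator.Done {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(row)
		}
	}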

+ 0 - 408 vendor/cloud.google.com/go/bigquery/query_test.go

@@ -1,408 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"
"time"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)

func defaultQueryJob() *bq.Job {
pfalse := false
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
Query: "query string",
DefaultDataset: &bq.DatasetReference{
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: &pfalse,
},
},
}
}

var defaultQuery = &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}

func TestQuery(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{
projectID: "client-project-id",
}
testCases := []struct {
dst *Table
src *QueryConfig
jobIDConfig JobIDConfig
want *bq.Job
}{
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: defaultQuery,
want: defaultQueryJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
Labels: map[string]string{"a": "b"},
DryRun: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.DryRun = true
j.Configuration.Query.DefaultDataset = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
src: &QueryConfig{Q: "query string"},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
j.JobReference.JobId = "jobID-RANDOM"
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
TableDefinitions: map[string]ExternalData{
"atable": func() *GCSReference {
g := NewGCSReference("uri")
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.Compression = Gzip
g.Encoding = UTF_8
g.FieldDelimiter = ";"
g.IgnoreUnknownValues = true
g.MaxBadRecords = 1
g.Quote = "'"
g.SkipLeadingRows = 2
g.Schema = Schema{{Name: "name", Type: StringFieldType}}
return g
}(),
},
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
td := make(map[string]bq.ExternalDataConfiguration)
quote := "'"
td["atable"] = bq.ExternalDataConfiguration{
Compression: "GZIP",
IgnoreUnknownValues: true,
MaxBadRecords: 1,
SourceFormat: "CSV", // must be explicitly set.
SourceUris: []string{"uri"},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: ";",
SkipLeadingRows: 2,
Quote: &quote,
},
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
{Name: "name", Type: "STRING"},
},
},
}
j.Configuration.Query.TableDefinitions = td
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
DisableQueryCache: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
j.Configuration.Query.UseQueryCache = &f
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
AllowLargeResults: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.AllowLargeResults = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
DisableFlattenedResults: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
j.Configuration.Query.FlattenResults = &f
j.Configuration.Query.AllowLargeResults = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
Priority: QueryPriority("low"),
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.Priority = "low"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
MaxBillingTier: 3,
MaxBytesBilled: 5,
},
want: func() *bq.Job {
j := defaultQueryJob()
tier := int64(3)
j.Configuration.Query.MaximumBillingTier = &tier
j.Configuration.Query.MaximumBytesBilled = 5
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseStandardSQL: true,
},
want: defaultQueryJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseLegacySQL: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
ptrue := true
j.Configuration.Query.UseLegacySql = &ptrue
j.Configuration.Query.ForceSendFields = nil
return j
}(),
},
}
for i, tc := range testCases {
query := c.Query("")
query.JobIDConfig = tc.jobIDConfig
query.QueryConfig = *tc.src
query.Dst = tc.dst
got, err := query.newJob()
if err != nil {
t.Errorf("#%d: err calling query: %v", i, err)
continue
}
checkJob(t, i, got, tc.want)

// Round-trip.
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
wantConfig := query.QueryConfig
// We set AllowLargeResults to true when DisableFlattenedResults is true.
if wantConfig.DisableFlattenedResults {
wantConfig.AllowLargeResults = true
}
// A QueryConfig with neither UseXXXSQL field set is equivalent
// to one where UseStandardSQL = true.
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
wantConfig.UseStandardSQL = true
}
// Treat nil and empty tables the same, and ignore the client.
tableEqual := func(t1, t2 *Table) bool {
if t1 == nil {
t1 = &Table{}
}
if t2 == nil {
t2 = &Table{}
}
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
}
// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
// TODO(jba): see if there is a way to express this with a transformer.
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
q := g.toBQ()
e, _ := bqToExternalDataConfig(&q)
return e
}
externalDataEqual := func(e1, e2 ExternalData) bool {
if r, ok := e1.(*GCSReference); ok {
e1 = gcsRefToEDC(r)
}
if r, ok := e2.(*GCSReference); ok {
e2 = gcsRefToEDC(r)
}
return cmp.Equal(e1, e2)
}
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
cmp.Comparer(tableEqual),
cmp.Comparer(externalDataEqual),
)
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}

func TestConfiguringQuery(t *testing.T) {
c := &Client{
projectID: "project-id",
}

query := c.Query("q")
query.JobID = "ajob"
query.DefaultProjectID = "def-project-id"
query.DefaultDatasetID = "def-dataset-id"
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
query.Clustering = &Clustering{
Fields: []string{"cfield1"},
}
query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}

// Note: Other configuration fields are tested in other tests above.
// A lot of that can be consolidated once Client.Copy is gone.

pfalse := false
want := &bq.Job{
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
Query: "q",
DefaultDataset: &bq.DatasetReference{
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: &pfalse,
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
Clustering: &bq.Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
},
JobReference: &bq.JobReference{
JobId: "ajob",
ProjectId: "project-id",
},
}

got, err := query.newJob()
if err != nil {
t.Fatalf("err calling Query.newJob: %v", err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff)
}
}

func TestQueryLegacySQL(t *testing.T) {
c := &Client{projectID: "project-id"}
q := c.Query("q")
q.UseStandardSQL = true
q.UseLegacySQL = true
_, err := q.newJob()
if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
}
q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true
_, err = q.newJob()
if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error")
}
}

+ 0 - 56 vendor/cloud.google.com/go/bigquery/random.go

@@ -1,56 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"math/rand"
"os"
"sync"
"time"
)

// Support for random values (typically job IDs and insert IDs).

const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var (
rngMu sync.Mutex
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)

// For testing.
var randomIDFn = randomID

// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27

func randomID() string {
// This is used for both job IDs and insert IDs.
var b [randomIDLen]byte
rngMu.Lock()
for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))]
}
rngMu.Unlock()
return string(b[:])
}

// Seed seeds this package's random number generator, used for generating job and
// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery
// clients. Seed should be called before any clients are created.
func Seed(s int64) {
rng = rand.New(rand.NewSource(s))
}
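
A short sketch of the repeatable behavior Seed provides; the seed value is arbitrary:

	// Call before creating any clients; two runs of this program will then
	// generate the same sequence of random job/insert ID suffixes.
	bigquery.Seed(42)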

+ 0 - 233 vendor/cloud.google.com/go/bigquery/read_test.go

@@ -1,233 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"testing"

"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)

type pageFetcherArgs struct {
table *Table
schema Schema
startIndex uint64
pageSize int64
pageToken string
}

// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
values [][][]Value // contains pages / rows / columns.
pageTokens map[string]string // maps incoming page token to returned page token.

// arguments are recorded for later inspection.
calls []pageFetcherArgs
}

func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
s.calls = append(s.calls,
pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
result := &fetchPageResult{
pageToken: s.pageTokens[pageToken],
rows: s.values[0],
}
s.values = s.values[1:]
return result, nil
}

func waitForQueryStub(context.Context, string) (Schema, uint64, error) {
return nil, 1, nil
}

func TestRead(t *testing.T) {
// The data for the service stub to return is populated for each test case in the testCases for loop.
ctx := context.Background()
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
c: c,
config: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
},
},
}

for _, readFunc := range []func() *RowIterator{
func() *RowIterator {
return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
},
func() *RowIterator {
it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatal(err)
}
return it
},
} {
testCases := []struct {
data [][][]Value
pageTokens map[string]string
want [][]Value
}{
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": "a", "a": ""},
want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
},
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
want: [][]Value{{1, 2}, {11, 12}},
},
}
for _, tc := range testCases {
pf.values = tc.data
pf.pageTokens = tc.pageTokens
if got, ok := collectValues(t, readFunc()); ok {
if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
}
}
}

func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
var got [][]Value
for {
var vals []Value
err := it.Next(&vals)
if err == iterator.Done {
break
}
if err != nil {
t.Errorf("err calling Next: %v", err)
return nil, false
}
got = append(got, vals)
}
return got, true
}

func TestNoMoreValues(t *testing.T) {
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}, {11, 12}}},
}
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
var vals []Value
// We expect to retrieve two values and then receive iterator.Done on the next attempt.
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}
if err := it.Next(&vals); err != iterator.Done {
t.Fatalf("Next: got: %v: want: iterator.Done", err)
}
}

var errBang = errors.New("bang")

func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
return nil, errBang
}

func TestReadError(t *testing.T) {
// test that service read errors are propagated back to the caller.
c := &Client{projectID: "project-id"}
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
var vals []Value
if err := it.Next(&vals); err != errBang {
t.Fatalf("Get: got: %v: want: %v", err, errBang)
}
}

func TestReadTabledataOptions(t *testing.T) {
// test that read options are propagated.
s := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}},
}
c := &Client{projectID: "project-id"}
tr := c.Dataset("dataset-id").Table("table-id")
it := tr.read(context.Background(), s.fetchPage)
it.PageInfo().MaxSize = 5
var vals []Value
if err := it.Next(&vals); err != nil {
t.Fatal(err)
}
want := []pageFetcherArgs{{
table: tr,
pageSize: 5,
pageToken: "",
}}
if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
t.Errorf("reading (got=-, want=+):\n%s", diff)
}
}

func TestReadQueryOptions(t *testing.T) {
// test that read options are propagated.
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}},
}
tr := &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
c: c,
config: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{DestinationTable: tr},
},
}
it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
it.PageInfo().MaxSize = 5
var vals []Value
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}

want := []pageFetcherArgs{{
table: bqToTable(tr, c),
pageSize: 5,
pageToken: "",
}}
if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
}
}

+ 0 - 518 vendor/cloud.google.com/go/bigquery/schema.go

@@ -1,518 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sync"

bq "google.golang.org/api/bigquery/v2"
)

// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

// FieldSchema describes a single field.
type FieldSchema struct {
// The field name.
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
// and must start with a letter or underscore.
// The maximum length is 128 characters.
Name string

// A description of the field. The maximum length is 16,384 characters.
Description string

// Whether the field may contain multiple values.
Repeated bool
// Whether the field is required. Ignored if Repeated is true.
Required bool

// The field data type. If Type is Record, then this field contains a nested schema,
// which is described by Schema.
Type FieldType
// Describes the nested schema if Type is set to Record.
Schema Schema
}

func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
tfs := &bq.TableFieldSchema{
Description: fs.Description,
Name: fs.Name,
Type: string(fs.Type),
}

if fs.Repeated {
tfs.Mode = "REPEATED"
} else if fs.Required {
tfs.Mode = "REQUIRED"
} // else leave as default, which is interpreted as NULLABLE.

for _, f := range fs.Schema {
tfs.Fields = append(tfs.Fields, f.toBQ())
}

return tfs
}

func (s Schema) toBQ() *bq.TableSchema {
var fields []*bq.TableFieldSchema
for _, f := range s {
fields = append(fields, f.toBQ())
}
return &bq.TableSchema{Fields: fields}
}

func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{
Description: tfs.Description,
Name: tfs.Name,
Repeated: tfs.Mode == "REPEATED",
Required: tfs.Mode == "REQUIRED",
Type: FieldType(tfs.Type),
}

for _, f := range tfs.Fields {
fs.Schema = append(fs.Schema, bqToFieldSchema(f))
}
return fs
}

func bqToSchema(ts *bq.TableSchema) Schema {
if ts == nil {
return nil
}
var s Schema
for _, f := range ts.Fields {
s = append(s, bqToFieldSchema(f))
}
return s
}

// FieldType is the type of field.
type FieldType string

const (
// StringFieldType is a string field type.
StringFieldType FieldType = "STRING"
// BytesFieldType is a bytes field type.
BytesFieldType FieldType = "BYTES"
// IntegerFieldType is an integer field type.
IntegerFieldType FieldType = "INTEGER"
// FloatFieldType is a float field type.
FloatFieldType FieldType = "FLOAT"
// BooleanFieldType is a boolean field type.
BooleanFieldType FieldType = "BOOLEAN"
// TimestampFieldType is a timestamp field type.
TimestampFieldType FieldType = "TIMESTAMP"
// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
RecordFieldType FieldType = "RECORD"
// DateFieldType is a date field type.
DateFieldType FieldType = "DATE"
// TimeFieldType is a time field type.
TimeFieldType FieldType = "TIME"
// DateTimeFieldType is a datetime field type.
DateTimeFieldType FieldType = "DATETIME"
// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
// NUMERIC data type.
NumericFieldType FieldType = "NUMERIC"
// GeographyFieldType is a geography field type. Geography values represent a set of points
// on the Earth's surface, encoded in Well Known Text (WKT) format.
GeographyFieldType FieldType = "GEOGRAPHY"
)

var (
errEmptyJSONSchema = errors.New("bigquery: empty JSON schema")
fieldTypes = map[FieldType]bool{
StringFieldType: true,
BytesFieldType: true,
IntegerFieldType: true,
FloatFieldType: true,
BooleanFieldType: true,
TimestampFieldType: true,
RecordFieldType: true,
DateFieldType: true,
TimeFieldType: true,
DateTimeFieldType: true,
NumericFieldType: true,
GeographyFieldType: true,
}
)

var typeOfByteSlice = reflect.TypeOf([]byte{})

// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// Each exported struct field is mapped to a field in the schema.
//
// The following BigQuery types are inferred from the corresponding Go types.
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
// from these types are marked required (non-nullable).
//
// STRING string
// BOOL bool
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
// FLOAT float32, float64
// BYTES []byte
// TIMESTAMP time.Time
// DATE civil.Date
// TIME civil.Time
// DATETIME civil.DateTime
// NUMERIC *big.Rat
//
// The big.Rat type supports numbers of arbitrary size and precision. Values
// will be rounded to 9 digits after the decimal point before being transmitted
// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
// for more on NUMERIC.
//
// A Go slice or array type is inferred to be a BigQuery repeated field of the
// element type. The element type must be one of the above listed types.
//
// Due to the lack of a unique native Go type for GEOGRAPHY, there is no schema
// inference to GEOGRAPHY at this time.
//
// Nullable fields are inferred from the NullXXX types, declared in this package:
//
// STRING NullString
// BOOL NullBool
// INTEGER NullInt64
// FLOAT NullFloat64
// TIMESTAMP NullTimestamp
// DATE NullDate
// TIME NullTime
// DATETIME NullDateTime
// GEOGRAPHY NullGeography
//
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable".
//
// A struct field that is of struct type is inferred to be a required field of type
// RECORD with a schema inferred recursively. For backwards compatibility, a field of
// type pointer to struct is also inferred to be required. To get a nullable RECORD
// field, use the "nullable" tag (see below).
//
// InferSchema returns an error if any of the examined fields is of type uint,
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
// versions may handle these cases without error.
//
// Recursively defined structs are also disallowed.
//
// Struct fields may be tagged in a way similar to the encoding/json package.
// A tag of the form
// bigquery:"name"
// uses "name" instead of the struct field name as the BigQuery field name.
// A tag of the form
// bigquery:"-"
// omits the field from the inferred schema.
// The "nullable" option marks the field as nullable (not required). It is only
// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other
// fields. In this example, the Go name of the field is retained:
// bigquery:",nullable"
func InferSchema(st interface{}) (Schema, error) {
return inferSchemaReflectCached(reflect.TypeOf(st))
}
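
// A minimal sketch of the tag rules described above; the type and field
// names are illustrative:
//
//	type Item struct {
//		Name   string   `bigquery:"name"`      // schema field named "name"
//		Secret string   `bigquery:"-"`         // omitted from the schema
//		Score  *big.Rat `bigquery:",nullable"` // nullable NUMERIC, Go name kept
//		Tags   []string                        // repeated STRING, inferred
//	}
//
//	schema, err := InferSchema(Item{})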

var schemaCache sync.Map

type cacheVal struct {
schema Schema
err error
}

func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
var cv cacheVal
v, ok := schemaCache.Load(t)
if ok {
cv = v.(cacheVal)
} else {
s, err := inferSchemaReflect(t)
cv = cacheVal{s, err}
schemaCache.Store(t, cv)
}
return cv.schema, cv.err
}

func inferSchemaReflect(t reflect.Type) (Schema, error) {
rec, err := hasRecursiveType(t, nil)
if err != nil {
return nil, err
}
if rec {
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
}
return inferStruct(t)
}

func inferStruct(t reflect.Type) (Schema, error) {
switch t.Kind() {
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
return nil, noStructError{t}
}
t = t.Elem()
fallthrough

case reflect.Struct:
return inferFields(t)
default:
return nil, noStructError{t}
}
}

// inferFieldSchema infers the FieldSchema for a Go type.
func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
// Only []byte and struct pointers can be tagged nullable.
if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
return nil, badNullableError{fieldName, rt}
}
switch rt {
case typeOfByteSlice:
return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
case typeOfGoTime:
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
case typeOfDate:
return &FieldSchema{Required: true, Type: DateFieldType}, nil
case typeOfTime:
return &FieldSchema{Required: true, Type: TimeFieldType}, nil
case typeOfDateTime:
return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
case typeOfRat:
return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
}
if ft := nullableFieldType(rt); ft != "" {
return &FieldSchema{Required: false, Type: ft}, nil
}
if isSupportedIntType(rt) || isSupportedUintType(rt) {
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
}
switch rt.Kind() {
case reflect.Slice, reflect.Array:
et := rt.Elem()
if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
// Multi dimensional slices/arrays are not supported by BigQuery
return nil, unsupportedFieldTypeError{fieldName, rt}
}
if nullableFieldType(et) != "" {
// Repeated nullable types are not supported by BigQuery.
return nil, unsupportedFieldTypeError{fieldName, rt}
}
f, err := inferFieldSchema(fieldName, et, false)
if err != nil {
return nil, err
}
f.Repeated = true
f.Required = false
return f, nil
case reflect.Ptr:
if rt.Elem().Kind() != reflect.Struct {
return nil, unsupportedFieldTypeError{fieldName, rt}
}
fallthrough
case reflect.Struct:
nested, err := inferStruct(rt)
if err != nil {
return nil, err
}
return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
case reflect.String:
return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
case reflect.Bool:
return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
default:
return nil, unsupportedFieldTypeError{fieldName, rt}
}
}

// inferFields extracts all exported field types from struct type.
func inferFields(rt reflect.Type) (Schema, error) {
var s Schema
fields, err := fieldCache.Fields(rt)
if err != nil {
return nil, err
}
for _, field := range fields {
var nullable bool
for _, opt := range field.ParsedTag.([]string) {
if opt == nullableTagOption {
nullable = true
break
}
}
f, err := inferFieldSchema(field.Name, field.Type, nullable)
if err != nil {
return nil, err
}
f.Name = field.Name
s = append(s, f)
}
return s, nil
}

// isSupportedIntType reports whether t is an int type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
default:
return false
}
}

// isSupportedUintType reports whether t is a uint type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedUintType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
return true
default:
return false
}
}

// typeList is a linked list of reflect.Types.
type typeList struct {
t reflect.Type
next *typeList
}

func (l *typeList) has(t reflect.Type) bool {
for l != nil {
if l.t == t {
return true
}
l = l.next
}
return false
}

// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return false, nil
}
if seen.has(t) {
return true, nil
}
fields, err := fieldCache.Fields(t)
if err != nil {
return false, err
}
seen = &typeList{t, seen}
// Because seen is a linked list, additions to it from one field's
// recursive call will not affect the value for subsequent fields' calls.
for _, field := range fields {
ok, err := hasRecursiveType(field.Type, seen)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
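
// For example, a self-referential type such as
//
//	type Node struct {
//		Next *Node
//	}
//
// refers to itself via an exported field, so inferSchemaReflect rejects it.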

// bigQuerySchemaJSONField is an individual field in a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema).
type bigQueryJSONField struct {
Description string `json:"description"`
Fields []bigQueryJSONField `json:"fields"`
Mode string `json:"mode"`
Name string `json:"name"`
Type string `json:"type"`
}

// convertSchemaFromJSON generates a Schema from a slice of bigQueryJSONField values.
func convertSchemaFromJSON(fs []bigQueryJSONField) (Schema, error) {
convertedSchema := Schema{}
for _, f := range fs {
convertedFieldSchema := &FieldSchema{
Description: f.Description,
Name: f.Name,
Required: f.Mode == "REQUIRED",
Repeated: f.Mode == "REPEATED",
}
if len(f.Fields) > 0 {
convertedNestedFieldSchema, err := convertSchemaFromJSON(f.Fields)
if err != nil {
return nil, err
}
convertedFieldSchema.Schema = convertedNestedFieldSchema
}

// Check that the field-type (string) maps to a known FieldType:
if _, ok := fieldTypes[FieldType(f.Type)]; !ok {
return nil, fmt.Errorf("unknown field type (%v)", f.Type)
}
convertedFieldSchema.Type = FieldType(f.Type)

convertedSchema = append(convertedSchema, convertedFieldSchema)
}
return convertedSchema, nil
}

// SchemaFromJSON takes a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema)
// and returns a fully-populated Schema.
func SchemaFromJSON(schemaJSON []byte) (Schema, error) {

var bigQuerySchema []bigQueryJSONField

// Make sure we actually have some content:
if len(schemaJSON) == 0 {
return nil, errEmptyJSONSchema
}

if err := json.Unmarshal(schemaJSON, &bigQuerySchema); err != nil {
return nil, err
}

return convertSchemaFromJSON(bigQuerySchema)
}
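
// A minimal sketch of the accepted input, a JSON array of field objects;
// the field names below are illustrative:
//
//	schema, err := SchemaFromJSON([]byte(`[
//		{"name": "id",   "type": "INTEGER", "mode": "REQUIRED"},
//		{"name": "tags", "type": "STRING",  "mode": "REPEATED"}
//	]`))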

type noStructError struct {
typ reflect.Type
}

func (e noStructError) Error() string {
return fmt.Sprintf("bigquery: can only infer schema from struct or pointer to struct, not %s", e.typ)
}

type badNullableError struct {
name string
typ reflect.Type
}

func (e badNullableError) Error() string {
return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ)
}

type unsupportedFieldTypeError struct {
name string
typ reflect.Type
}

func (e unsupportedFieldTypeError) Error() string {
return fmt.Sprintf("bigquery: field %q: type %s is not supported", e.name, e.typ)
}

+ 0 - 1044 vendor/cloud.google.com/go/bigquery/schema_test.go (diff suppressed: file too large)


+ 0 - 255 vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client.go

@@ -1,255 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage

import (
"context"
"fmt"
"time"

gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
type BigQueryStorageCallOptions struct {
CreateReadSession []gax.CallOption
ReadRows []gax.CallOption
BatchCreateReadSessionStreams []gax.CallOption
FinalizeStream []gax.CallOption
SplitReadStream []gax.CallOption
}

func defaultBigQueryStorageClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerystorage.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}

func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &BigQueryStorageCallOptions{
CreateReadSession: retry[[2]string{"default", "idempotent"}],
ReadRows: retry[[2]string{"default", "idempotent"}],
BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
FinalizeStream: retry[[2]string{"default", "idempotent"}],
SplitReadStream: retry[[2]string{"default", "idempotent"}],
}
}

// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
bigQueryStorageClient storagepb.BigQueryStorageClient

// The call options for this service.
CallOptions *BigQueryStorageCallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &BigQueryStorageClient{
conn: conn,
CallOptions: defaultBigQueryStorageCallOptions(),

bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
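
// A minimal sketch of constructing and releasing a client, assuming default
// application credentials are available in the environment:
//
//	ctx := context.Background()
//	c, err := storage.NewBigQueryStorageClient(ctx)
//	if err != nil {
//		// TODO: handle error
//	}
//	defer c.Close()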

// Connection returns the client's connection to the API service.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryStorageClient) Close() error {
return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
var resp *storagepb.ReadSession
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
var resp storagepb.BigQueryStorage_ReadRowsClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
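
// A sketch of draining the returned gRPC stream; req is assumed to reference
// a stream from a previously created read session:
//
//	stream, err := c.ReadRows(ctx, req)
//	if err != nil {
//		// TODO: handle error
//	}
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break // end of stream
//		}
//		if err != nil {
//			// TODO: handle error
//		}
//		_ = resp // process the rows in this response
//	}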

// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
var resp *storagepb.BatchCreateReadSessionStreamsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
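
// Illustrative sketch, not part of the generated file: scaling a batch job
// up by requesting extra streams on an existing session. Session and
// RequestedStreams match the tests below; GetStreams on the response is an
// assumption about the generated message.
func exampleAddStreams(ctx context.Context, c *BigQueryStorageClient, session *storagepb.ReadSession) ([]*storagepb.Stream, error) {
	resp, err := c.BatchCreateReadSessionStreams(ctx, &storagepb.BatchCreateReadSessionStreamsRequest{
		Session:          session,
		RequestedStreams: 2, // e.g. hand the new streams to two extra workers
	})
	if err != nil {
		return nil, err
	}
	return resp.GetStreams(), nil // assumed accessor
}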

// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
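
// Illustrative sketch, not part of the generated file: retiring one stream
// when scaling down. Per the doc comment above, the caller must still read
// the stream to its end so rows already assigned to it are processed.
func exampleRetireStream(ctx context.Context, c *BigQueryStorageClient, stream *storagepb.Stream) error {
	return c.FinalizeStream(ctx, &storagepb.FinalizeStreamRequest{Stream: stream})
}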

// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
var resp *storagepb.SplitReadStreamResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
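
// Illustrative sketch, not part of the generated file: splitting a stream so
// its unread tail can be handed to another worker. Only OriginalStream is
// shown in the tests below; the primary/residual streams would be read from
// fields of the returned response.
func exampleSplit(ctx context.Context, c *BigQueryStorageClient, stream *storagepb.Stream) (*storagepb.SplitReadStreamResponse, error) {
	return c.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{OriginalStream: stream})
}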

vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client_example_test.go (+0 -132)

@@ -1,132 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage_test

import (
"context"
"io"

storage "cloud.google.com/go/bigquery/storage/apiv1beta1"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func ExampleNewBigQueryStorageClient() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}

func ExampleBigQueryStorageClient_CreateReadSession() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &storagepb.CreateReadSessionRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateReadSession(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleBigQueryStorageClient_ReadRows() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &storagepb.ReadRowsRequest{
// TODO: Fill request struct fields.
}
stream, err := c.ReadRows(ctx, req)
if err != nil {
// TODO: Handle error.
}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}

func ExampleBigQueryStorageClient_BatchCreateReadSessionStreams() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &storagepb.BatchCreateReadSessionStreamsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchCreateReadSessionStreams(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

func ExampleBigQueryStorageClient_FinalizeStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &storagepb.FinalizeStreamRequest{
// TODO: Fill request struct fields.
}
err = c.FinalizeStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
}

func ExampleBigQueryStorageClient_SplitReadStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}

req := &storagepb.SplitReadStreamRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SplitReadStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/doc.go (+0 -89)

@@ -1,89 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package storage is an auto-generated package for the
// BigQuery Storage API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to change.
//
package storage // import "cloud.google.com/go/bigquery/storage/apiv1beta1"

import (
"context"
"runtime"
"strings"
"unicode"

"google.golang.org/grpc/metadata"
)

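// insertMetadata returns a new outgoing context with the given metadata
// merged into any metadata already attached to ctx.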
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
}
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
func versionGo() string {
const develPrefix = "devel +"

s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
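
// For reference, a few inputs and the normalized outputs produced above:
//
//	"go1.12"            -> "1.12.0" (padded to three components)
//	"go1.12.5"          -> "1.12.5"
//	"go1.13beta1"       -> "1.13.0-beta1" (prerelease preserved)
//	"devel +abc123 ..." -> "abc123" (devel builds report the commit)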

const versionClient = "20190306"

vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/mock_test.go (+0 -452)

@@ -1,452 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage

import (
emptypb "github.com/golang/protobuf/ptypes/empty"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"

"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockBigQueryStorageServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
storagepb.BigQueryStorageServer

reqs []proto.Message

// If set, all calls return this error.
err error

// responses to return if err == nil
resps []proto.Message
}

func (s *mockBigQueryStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.ReadSession), nil
}

func (s *mockBigQueryStorageServer) ReadRows(req *storagepb.ReadRowsRequest, stream storagepb.BigQueryStorage_ReadRowsServer) error {
md, _ := metadata.FromIncomingContext(stream.Context())
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return s.err
}
for _, v := range s.resps {
if err := stream.Send(v.(*storagepb.ReadRowsResponse)); err != nil {
return err
}
}
return nil
}

func (s *mockBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.BatchCreateReadSessionStreamsResponse), nil
}

func (s *mockBigQueryStorageServer) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*emptypb.Empty), nil
}

func (s *mockBigQueryStorageServer) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.SplitReadStreamResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
mockBigQueryStorage mockBigQueryStorageServer
)

func TestMain(m *testing.M) {
flag.Parse()

serv := grpc.NewServer()
storagepb.RegisterBigQueryStorageServer(serv, &mockBigQueryStorage)

lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)

conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)

os.Exit(m.Run())
}

func TestBigQueryStorageCreateReadSession(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &storagepb.ReadSession{
Name: name,
}

mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil

mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.CreateReadSession(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestBigQueryStorageCreateReadSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.CreateReadSession(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageReadRows(t *testing.T) {
var expectedResponse *storagepb.ReadRowsResponse = &storagepb.ReadRowsResponse{}

mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil

mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()

if err != nil {
t.Fatal(err)
}

if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestBigQueryStorageReadRowsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageBatchCreateReadSessionStreams(t *testing.T) {
var expectedResponse *storagepb.BatchCreateReadSessionStreamsResponse = &storagepb.BatchCreateReadSessionStreamsResponse{}

mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil

mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestBigQueryStorageBatchCreateReadSessionStreamsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageFinalizeStream(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{}

mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil

mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

err = c.FinalizeStream(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

}

func TestBigQueryStorageFinalizeStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

err = c.FinalizeStream(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
func TestBigQueryStorageSplitReadStream(t *testing.T) {
var expectedResponse *storagepb.SplitReadStreamResponse = &storagepb.SplitReadStreamResponse{}

mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil

mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.SplitReadStream(context.Background(), request)

if err != nil {
t.Fatal(err)
}

if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}

if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}

func TestBigQueryStorageSplitReadStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}

c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}

resp, err := c.SplitReadStream(context.Background(), request)

if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

vendor/cloud.google.com/go/bigquery/table.go (+0 -629)

@@ -1,629 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"context"
"errors"
"fmt"
"time"

"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)

// A Table is a reference to a BigQuery table.
type Table struct {
// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
// In this case the result will be stored in an ephemeral table.
ProjectID string
DatasetID string
// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
// The maximum length is 1,024 characters.
TableID string

c *Client
}

// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
// The following fields can be set when creating a table.

// The user-friendly name for the table.
Name string

// The user-friendly description of the table.
Description string

// The table schema. If provided on create, ViewQuery must be empty.
Schema Schema

// The query to use for a view. If provided on create, Schema must be nil.
ViewQuery string

// Use Legacy SQL for the view query.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseLegacySQL bool

// Use Standard SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool

// If non-nil, the table is partitioned by time.
TimePartitioning *TimePartitioning

// Clustering specifies the data clustering configuration for the table.
Clustering *Clustering

// The time when this table expires. If set, this table will expire at the
// specified time. Expired tables will be deleted and their storage
// reclaimed. The zero value is ignored.
ExpirationTime time.Time

// User-provided labels.
Labels map[string]string

// Information about a table stored outside of BigQuery.
ExternalDataConfig *ExternalDataConfig

// Custom encryption configuration (e.g., Cloud KMS keys).
EncryptionConfig *EncryptionConfig

// All the fields below are read-only.

FullID string // An opaque ID uniquely identifying the table.
Type TableType
CreationTime time.Time
LastModifiedTime time.Time

// The size of the table in bytes.
// This does not include data that is being buffered during a streaming insert.
NumBytes int64

// The number of bytes in the table considered "long-term storage" for reduced
// billing purposes. See https://cloud.google.com/bigquery/pricing#long-term-storage
// for more information.
NumLongTermBytes int64

// The number of rows of data in this table.
// This does not include data that is being buffered during a streaming insert.
NumRows uint64

// Contains information regarding this table's streaming buffer, if one is
// present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer

// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}

// TableCreateDisposition specifies the circumstances under which the destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
// CreateIfNeeded will create the table if it does not already exist.
// Tables are created atomically on successful completion of a job.
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"

// CreateNever ensures the table must already exist and will not be
// automatically created.
CreateNever TableCreateDisposition = "CREATE_NEVER"
)

// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
// WriteAppend will append to any existing data in the destination table.
// Data is appended atomically on successful completion of a job.
WriteAppend TableWriteDisposition = "WRITE_APPEND"

// WriteTruncate overwrites the existing data in the destination table.
// Data is overwritten atomically on successful completion of a job.
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"

// WriteEmpty fails writes if the destination table already contains data.
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)

// TableType is the type of table.
type TableType string

const (
// RegularTable is a regular table.
RegularTable TableType = "TABLE"
// ViewTable is a table type describing that the table is a view. See more
// information at https://cloud.google.com/bigquery/docs/views.
ViewTable TableType = "VIEW"
// ExternalTable is a table type describing that the table is an external
// table (also known as a federated data source). See more information at
// https://cloud.google.com/bigquery/external-data-sources.
ExternalTable TableType = "EXTERNAL"
)

// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions does not expire.
Expiration time.Duration

// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
// table is partitioned by this field. The field must be a top-level TIMESTAMP or
// DATE field. Its mode must be NULLABLE or REQUIRED.
Field string

// If true, queries that reference this table must include a filter (e.g. a WHERE predicate)
// that can be used for partition elimination.
RequirePartitionFilter bool
}

func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
if p == nil {
return nil
}
return &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(p.Expiration / time.Millisecond),
Field: p.Field,
RequirePartitionFilter: p.RequirePartitionFilter,
}
}

func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
if q == nil {
return nil
}
return &TimePartitioning{
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
Field: q.Field,
RequirePartitionFilter: q.RequirePartitionFilter,
}
}

// Clustering governs the organization of data within a partitioned table.
// For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
type Clustering struct {
Fields []string
}

func (c *Clustering) toBQ() *bq.Clustering {
if c == nil {
return nil
}
return &bq.Clustering{
Fields: c.Fields,
}
}

func bqToClustering(q *bq.Clustering) *Clustering {
if q == nil {
return nil
}
return &Clustering{
Fields: q.Fields,
}
}

// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
// Describes the Cloud KMS encryption key that will be used to protect
// destination BigQuery table. The BigQuery Service Account associated with your
// project requires access to this encryption key.
KMSKeyName string
}

func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
if e == nil {
return nil
}
return &bq.EncryptionConfiguration{
KmsKeyName: e.KMSKeyName,
}
}

func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
if q == nil {
return nil
}
return &EncryptionConfig{
KMSKeyName: q.KmsKeyName,
}
}

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
// A lower-bound estimate of the number of bytes currently in the streaming
// buffer.
EstimatedBytes uint64

// A lower-bound estimate of the number of rows currently in the streaming
// buffer.
EstimatedRows uint64

// The time of the oldest entry in the streaming buffer.
OldestEntryTime time.Time
}

func (t *Table) toBQ() *bq.TableReference {
return &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}

// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}

// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the table.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// If no ExpirationTime is specified, the table will never expire.
// After table creation, the view query can be modified only if the table was
// initially created as a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create")
defer func() { trace.EndSpan(ctx, err) }()

table, err := tm.toBQ()
if err != nil {
return err
}
table.TableReference = &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
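
// Illustrative sketch, not part of the original file: creating a
// day-partitioned, clustered table that expires after 90 days. Obtaining the
// Table via client.Dataset(...).Table(...) is assumed from the package's
// public API; the metadata field names come from TableMetadata above.
func exampleCreatePartitionedTable(ctx context.Context, client *Client) error {
	t := client.Dataset("my_dataset").Table("my_table") // assumed accessors
	return t.Create(ctx, &TableMetadata{
		Name: "My table",
		TimePartitioning: &TimePartitioning{
			Field:      "created_at",        // a top-level TIMESTAMP or DATE field
			Expiration: 90 * 24 * time.Hour, // drop partition storage after 90 days
		},
		Clustering:     &Clustering{Fields: []string{"customer_id"}},
		ExpirationTime: time.Now().Add(90 * 24 * time.Hour),
	})
}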

func (tm *TableMetadata) toBQ() (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
t.Labels = tm.Labels
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.Clustering = tm.Clustering.toBQ()

if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
if tm.ExternalDataConfig != nil {
edc := tm.ExternalDataConfig.toBQ()
t.ExternalDataConfiguration = &edc
}
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumLongTermBytes != 0 {
return nil, errors.New("cannot set NumLongTermBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata")
defer func() { trace.EndSpan(ctx, err) }()

req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
var table *bq.Table
err = runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return bqToTableMetadata(table)
}

func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
FullID: t.Id,
Labels: t.Labels,
NumBytes: t.NumBytes,
NumLongTermBytes: t.NumLongTermBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
}
if t.Schema != nil {
md.Schema = bqToSchema(t.Schema)
}
if t.View != nil {
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
md.Clustering = bqToClustering(t.Clustering)
if t.StreamingBuffer != nil {
md.StreamingBuffer = &StreamingBuffer{
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
EstimatedRows: t.StreamingBuffer.EstimatedRows,
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
}
}
if t.ExternalDataConfiguration != nil {
edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
if err != nil {
return nil, err
}
md.ExternalDataConfig = edc
}
return md, nil
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete")
defer func() { trace.EndSpan(ctx, err) }()

req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
}

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
return t.read(ctx, fetchPage)
}

func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
return newRowIterator(ctx, t, pf)
}

// NeverExpire is a sentinel value used to remove a table's expiration time.
var NeverExpire = time.Time{}.Add(-1)

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
defer func() { trace.EndSpan(ctx, err) }()

bqt, err := tm.toBQ()
if err != nil {
return nil, err
}
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var res *bq.Table
if err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToTableMetadata(res)
}
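
// Illustrative sketch, not part of the original file: a read-modify-write
// metadata update guarded by the ETag, plus a label change. To remove an
// expiration instead, set ExpirationTime to NeverExpire.
func exampleUpdateDescription(ctx context.Context, t *Table) (*TableMetadata, error) {
	md, err := t.Metadata(ctx)
	if err != nil {
		return nil, err
	}
	var tm TableMetadataToUpdate
	tm.Description = "nightly snapshot" // optional.String accepts a plain string
	tm.SetLabel("team", "data-eng")
	// Passing md.ETag makes the patch fail if the metadata changed since the read.
	return t.Update(ctx, tm, md.ETag)
}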

func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}

if tm.Description != nil {
t.Description = optional.ToString(tm.Description)
forceSend("Description")
}
if tm.Name != nil {
t.FriendlyName = optional.ToString(tm.Name)
forceSend("FriendlyName")
}
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
forceSend("Schema")
}
if tm.EncryptionConfig != nil {
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
}

if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if tm.ExpirationTime == NeverExpire {
t.NullFields = append(t.NullFields, "ExpirationTime")
} else if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
if tm.TimePartitioning != nil {
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
if tm.TimePartitioning.Expiration == 0 {
t.TimePartitioning.NullFields = []string{"ExpirationMs"}
}
}
if tm.ViewQuery != nil {
t.View = &bq.ViewDefinition{
Query: optional.ToString(tm.ViewQuery),
ForceSendFields: []string{"Query"},
}
}
if tm.UseLegacySQL != nil {
if t.View == nil {
t.View = &bq.ViewDefinition{}
}
t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
labels, forces, nulls := tm.update()
t.Labels = labels
t.ForceSendFields = append(t.ForceSendFields, forces...)
t.NullFields = append(t.NullFields, nulls...)
return t, nil
}

// validExpiration reports whether a specified time is either the sentinel
// NeverExpire, the zero value, or within the range representable by
// UnixNano. Internal representations of expiration times are based upon
// Time.UnixNano. Any time before 1678 or after 2262 cannot be represented
// by an int64 and is therefore undefined and invalid.
// See https://godoc.org/time#Time.UnixNano.
func validExpiration(t time.Time) bool {
return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
// The user-friendly description of this table.
Description optional.String

// The user-friendly name for this table.
Name optional.String

// The table's schema.
// When updating a schema, you can add columns but not remove them.
Schema Schema

// The table's encryption configuration. When calling Update, ensure that
// all mutable fields of EncryptionConfig are populated.
EncryptionConfig *EncryptionConfig

// The time when this table expires. To remove a table's expiration,
// set ExpirationTime to NeverExpire. The zero value is ignored.
ExpirationTime time.Time

// The query to use for a view.
ViewQuery optional.String

// Use Legacy SQL for the view query.
UseLegacySQL optional.Bool

// TimePartitioning allows modification of certain aspects of partition
// configuration such as partition expiration and whether partition
// filtration is required at query time. When calling Update, ensure
// that all mutable fields of TimePartitioning are populated.
TimePartitioning *TimePartitioning

labelUpdater
}

// labelUpdater contains common code for updating labels.
type labelUpdater struct {
setLabels map[string]string
deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
if u.setLabels == nil {
u.setLabels = map[string]string{}
}
u.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
if u.deleteLabels == nil {
u.deleteLabels = map[string]bool{}
}
u.deleteLabels[name] = true
}

func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
if u.setLabels == nil && u.deleteLabels == nil {
return nil, nil, nil
}
labels = map[string]string{}
for k, v := range u.setLabels {
labels[k] = v
}
if len(labels) == 0 && len(u.deleteLabels) > 0 {
forces = []string{"Labels"}
}
for l := range u.deleteLabels {
nulls = append(nulls, "Labels."+l)
}
return labels, forces, nulls
}

vendor/cloud.google.com/go/bigquery/table_test.go (+0 -372)

@@ -1,372 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"testing"
"time"

"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)

func TestBQToTableMetadata(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
for _, test := range []struct {
in *bq.Table
want *TableMetadata
}{
{&bq.Table{}, &TableMetadata{}}, // test minimal case
{
&bq.Table{
CreationTime: aTimeMillis,
Description: "desc",
Etag: "etag",
ExpirationTime: aTimeMillis,
FriendlyName: "fname",
Id: "id",
LastModifiedTime: uint64(aTimeMillis),
Location: "loc",
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
StreamingBuffer: &bq.Streamingbuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: uint64(aTimeMillis),
},
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 7890,
Type: "DAY",
Field: "pfield",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1", "cfield2"},
},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{
SourceFormat: "GOOGLE_SHEETS",
},
},
&TableMetadata{
Description: "desc",
Name: "fname",
ViewQuery: "view-query",
FullID: "id",
Type: ExternalTable,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
ExpirationTime: aTime.Truncate(time.Millisecond),
CreationTime: aTime.Truncate(time.Millisecond),
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
TimePartitioning: &TimePartitioning{
Expiration: 7890 * time.Millisecond,
Field: "pfield",
},
Clustering: &Clustering{
Fields: []string{"cfield1", "cfield2"},
},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: aTime,
},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
ETag: "etag",
},
},
} {
got, err := bqToTableMetadata(test.in)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
}
}
}

func TestTableMetadataToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}

for _, test := range []struct {
in *TableMetadata
want *bq.Table
}{
{nil, &bq.Table{}},
{&TableMetadata{}, &bq.Table{}},
{
&TableMetadata{
Name: "n",
Description: "d",
Schema: sc,
ExpirationTime: aTime,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
},
&bq.Table{
FriendlyName: "n",
Description: "d",
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTimeMillis,
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
},
},
{
&TableMetadata{ViewQuery: "q"},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseLegacySQL: true,
TimePartitioning: &TimePartitioning{},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 0,
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{
Expiration: time.Second,
Field: "ofDreams",
},
Clustering: &Clustering{
Fields: []string{"cfield1"},
},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1000,
Field: "ofDreams",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1"},
},
},
},
{
&TableMetadata{ExpirationTime: NeverExpire},
&bq.Table{ExpirationTime: 0},
},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatalf("%+v: %v", test.in, err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
}
}

// Errors
for _, in := range []*TableMetadata{
{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
{UseLegacySQL: true}, // UseLegacySQL without query
{UseStandardSQL: true}, // UseStandardSQL without query
// read-only fields
{FullID: "x"},
{Type: "x"},
{CreationTime: aTime},
{LastModifiedTime: aTime},
{NumBytes: 1},
{NumLongTermBytes: 1},
{NumRows: 1},
{StreamingBuffer: &StreamingBuffer{}},
{ETag: "x"},
// expiration time outside allowable range is invalid
// See https://godoc.org/time#Time.UnixNano
{ExpirationTime: time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC).Add(-1)},
{ExpirationTime: time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC).Add(1)},
} {
_, err := in.toBQ()
if err == nil {
t.Errorf("%+v: got nil, want error", in)
}
}
}

func TestTableMetadataToUpdateToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, test := range []struct {
tm TableMetadataToUpdate
want *bq.Table
}{
{
tm: TableMetadataToUpdate{},
want: &bq.Table{},
},
{
tm: TableMetadataToUpdate{
Description: "d",
Name: "n",
},
want: &bq.Table{
Description: "d",
FriendlyName: "n",
ForceSendFields: []string{"Description", "FriendlyName"},
},
},
{
tm: TableMetadataToUpdate{
Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)},
ExpirationTime: aTime,
},
want: &bq.Table{
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTime.UnixNano() / 1e6,
ForceSendFields: []string{"Schema", "ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q"},
want: &bq.Table{
View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
},
},
{
tm: TableMetadataToUpdate{UseLegacySQL: false},
want: &bq.Table{
View: &bq.ViewDefinition{
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
want: &bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
ForceSendFields: []string{"Query", "UseLegacySql"},
},
},
},
{
tm: func() (tm TableMetadataToUpdate) {
tm.SetLabel("L", "V")
tm.DeleteLabel("D")
return tm
}(),
want: &bq.Table{
Labels: map[string]string{"L": "V"},
NullFields: []string{"Labels.D"},
},
},
{
tm: TableMetadataToUpdate{ExpirationTime: NeverExpire},
want: &bq.Table{
NullFields: []string{"ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: 0}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
NullFields: []string{"ExpirationMs"},
},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: time.Duration(time.Hour)}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 3600000,
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
},
},
},
} {
got, _ := test.tm.toBQ()
if !testutil.Equal(got, test.want) {
t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
}
}
}

func TestTableMetadataToUpdateToBQErrors(t *testing.T) {
// See https://godoc.org/time#Time.UnixNano
start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC)
end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC)

for _, test := range []struct {
desc string
aTime time.Time
wantErr bool
}{
{desc: "ignored zero value", aTime: time.Time{}, wantErr: false},
{desc: "earliest valid time", aTime: start, wantErr: false},
{desc: "latested valid time", aTime: end, wantErr: false},
{desc: "invalid times before 1678", aTime: start.Add(-1), wantErr: true},
{desc: "invalid times after 2262", aTime: end.Add(1), wantErr: true},
{desc: "valid times after 1678", aTime: start.Add(1), wantErr: false},
{desc: "valid times before 2262", aTime: end.Add(-1), wantErr: false},
} {
tm := &TableMetadataToUpdate{ExpirationTime: test.aTime}
_, err := tm.toBQ()
if test.wantErr && err == nil {
t.Errorf("[%s] got no error, want error", test.desc)
}
if !test.wantErr && err != nil {
t.Errorf("[%s] got error, want no error", test.desc)
}
}
}

vendor/cloud.google.com/go/bigquery/value.go (+0 -892)

@@ -1,892 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
"encoding/base64"
"errors"
"fmt"
"math"
"math/big"
"reflect"
"strconv"
"strings"
"time"

"cloud.google.com/go/civil"
bq "google.golang.org/api/bigquery/v2"
)

// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
Load(v []Value, s Schema) error
}

// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
*vs = append((*vs)[:0], v...)
return nil
}

// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
if *vm == nil {
*vm = map[string]Value{}
}
loadMap(*vm, v, s)
return nil
}
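
// Illustrative sketch, not part of the original file: a custom ValueLoader
// that keeps only the first column of each row, e.g. for sampling a single
// field without materializing whole rows.
type firstColumn struct {
	val Value
}

func (f *firstColumn) Load(v []Value, _ Schema) error {
	if len(v) > 0 {
		f.val = v[0]
	}
	return nil
}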

func loadMap(m map[string]Value, vals []Value, s Schema) {
for i, f := range s {
val := vals[i]
var v interface{}
switch {
case val == nil:
v = val
case f.Schema == nil:
v = val
case !f.Repeated:
m2 := map[string]Value{}
loadMap(m2, val.([]Value), f.Schema)
v = m2
default: // repeated and nested
sval := val.([]Value)
vs := make([]Value, len(sval))
for j, e := range sval {
m2 := map[string]Value{}
loadMap(m2, e.([]Value), f.Schema)
vs[j] = m2
}
v = vs
}

m[f.Name] = v
}
}

type structLoader struct {
typ reflect.Type // type of struct
err error
ops []structLoaderOp

vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
fieldIndex []int
valueIndex int
setFunc setFunc
repeated bool
}

var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")

func setAny(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.Set(reflect.ValueOf(x))
return nil
}

func setInt(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if v.OverflowInt(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetInt(xx)
return nil
}

func setUint(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if xx < 0 || v.OverflowUint(uint64(xx)) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetUint(uint64(xx))
return nil
}

func setFloat(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(float64)
if v.OverflowFloat(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetFloat(xx)
return nil
}

func setBool(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetBool(x.(bool))
return nil
}

func setString(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}

func setGeography(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}

func setBytes(v reflect.Value, x interface{}) error {
if x == nil {
v.SetBytes(nil)
} else {
v.SetBytes(x.([]byte))
}
return nil
}

func setNull(v reflect.Value, x interface{}, build func() interface{}) error {
if x == nil {
v.Set(reflect.Zero(v.Type()))
} else {
n := build()
v.Set(reflect.ValueOf(n))
}
return nil
}

// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
if sl.err != nil {
return sl.err
}
sl.vstructp = reflect.ValueOf(structp)
typ := sl.vstructp.Type().Elem()
if sl.typ == nil {
// First call: remember the type and compile the schema.
sl.typ = typ
ops, err := compileToOps(typ, schema)
if err != nil {
sl.err = err
return err
}
sl.ops = ops
} else if sl.typ != typ {
return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
}
return nil
}

// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
var ops []structLoaderOp
fields, err := fieldCache.Fields(structType)
if err != nil {
return nil, err
}
for i, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case (BigQuery column names are case-insensitive,
// and we want to act like encoding/json anyway).
structField := fields.Match(schemaField.Name)
if structField == nil {
// Ignore schema fields with no corresponding struct field.
continue
}
op := structLoaderOp{
fieldIndex: structField.Index,
valueIndex: i,
}
t := structField.Type
if schemaField.Repeated {
if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
schemaField.Name, structField.Name, t)
}
t = t.Elem()
op.repeated = true
}
if schemaField.Type == RecordFieldType {
// Field can be a struct or a pointer to a struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
structField.Name, structField.Type)
}
nested, err := compileToOps(t, schemaField.Schema)
if err != nil {
return nil, err
}
op.setFunc = func(v reflect.Value, val interface{}) error {
return setNested(nested, v, val)
}
} else {
op.setFunc = determineSetFunc(t, schemaField.Type)
if op.setFunc == nil {
return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
schemaField.Name, schemaField.Type, structField.Name, t)
}
}
ops = append(ops, op)
}
return ops, nil
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
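//
// In brief, the basic mappings handled below are:
//
//	STRING    -> string, NullString
//	GEOGRAPHY -> string, NullGeography
//	BYTES     -> []byte
//	INTEGER   -> int/uint kinds, NullInt64
//	FLOAT     -> float32, float64, NullFloat64
//	BOOLEAN   -> bool, NullBool
//	TIMESTAMP -> time.Time, NullTimestamp
//	DATE      -> civil.Date, NullDate
//	TIME      -> civil.Time, NullTime
//	DATETIME  -> civil.DateTime, NullDateTime
//	NUMERIC   -> *big.Rat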
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
switch stype {
case StringFieldType:
if ftype.Kind() == reflect.String {
return setString
}
if ftype == typeOfNullString {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullString{StringVal: x.(string), Valid: true}
})
}
}

case GeographyFieldType:
if ftype.Kind() == reflect.String {
return setGeography
}
if ftype == typeOfNullGeography {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullGeography{GeographyVal: x.(string), Valid: true}
})
}
}

case BytesFieldType:
if ftype == typeOfByteSlice {
return setBytes
}

case IntegerFieldType:
if isSupportedUintType(ftype) {
return setUint
} else if isSupportedIntType(ftype) {
return setInt
}
if ftype == typeOfNullInt64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullInt64{Int64: x.(int64), Valid: true}
})
}
}

case FloatFieldType:
switch ftype.Kind() {
case reflect.Float32, reflect.Float64:
return setFloat
}
if ftype == typeOfNullFloat64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullFloat64{Float64: x.(float64), Valid: true}
})
}
}

case BooleanFieldType:
if ftype.Kind() == reflect.Bool {
return setBool
}
if ftype == typeOfNullBool {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullBool{Bool: x.(bool), Valid: true}
})
}
}

case TimestampFieldType:
if ftype == typeOfGoTime {
return setAny
}
if ftype == typeOfNullTimestamp {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
})
}
}

case DateFieldType:
if ftype == typeOfDate {
return setAny
}
if ftype == typeOfNullDate {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDate{Date: x.(civil.Date), Valid: true}
})
}
}

case TimeFieldType:
if ftype == typeOfTime {
return setAny
}
if ftype == typeOfNullTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTime{Time: x.(civil.Time), Valid: true}
})
}
}

case DateTimeFieldType:
if ftype == typeOfDateTime {
return setAny
}
if ftype == typeOfNullDateTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
})
}
}

case NumericFieldType:
if ftype == typeOfRat {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} { return x.(*big.Rat) })
}
}
}
return nil
}
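
// What determineSetFunc implements, seen from the caller's side: each schema
// field type may target either a plain Go type or its Null* wrapper, which
// survives NULL cells. A hypothetical sketch, not part of the API:
type sketchRow struct {
    Name  string        // STRING -> string; a NULL cell yields errNoNulls
    Nick  NullString    // STRING -> NullString; NULL becomes Valid == false
    Score NullFloat64   // FLOAT -> NullFloat64
    Born  civil.Date    // DATE -> civil.Date
    Seen  NullTimestamp // TIMESTAMP -> NullTimestamp
}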

func (sl *structLoader) Load(values []Value, _ Schema) error {
if sl.err != nil {
return sl.err
}
return runOps(sl.ops, sl.vstructp.Elem(), values)
}

// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
for _, op := range ops {
field := vstruct.FieldByIndex(op.fieldIndex)
var err error
if op.repeated {
err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
} else {
err = op.setFunc(field, values[op.valueIndex])
}
if err != nil {
return err
}
}
return nil
}

func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
// v is either a struct or a pointer to a struct.
if v.Kind() == reflect.Ptr {
// If the value is nil, set the pointer to nil.
if val == nil {
v.Set(reflect.Zero(v.Type()))
return nil
}
// If the pointer is nil, set it to a zero struct value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return runOps(ops, v, val.([]Value))
}

func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
vlen := len(vslice)
var flen int
switch field.Type().Kind() {
case reflect.Slice:
// Make a slice of the right size, avoiding allocation if possible.
switch {
case field.Len() < vlen:
field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
case field.Len() > vlen:
field.SetLen(vlen)
}
flen = vlen

case reflect.Array:
flen = field.Len()
if flen > vlen {
// Set extra elements to their zero value.
z := reflect.Zero(field.Type().Elem())
for i := vlen; i < flen; i++ {
field.Index(i).Set(z)
}
}
default:
return fmt.Errorf("bigquery: impossible field type %s", field.Type())
}
for i, val := range vslice {
if i < flen { // avoid writing past the end of a short array
if err := setElem(field.Index(i), val); err != nil {
return err
}
}
}
return nil
}

// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
// Save returns a row to be inserted into a BigQuery table, represented
// as a map from field name to Value.
// If insertID is non-empty, BigQuery will use it to de-duplicate
// insertions of this row on a best-effort basis.
Save() (row map[string]Value, insertID string, err error)
}

// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
Schema Schema

// If non-empty, BigQuery will use InsertID to de-duplicate insertions
// of this row on a best-effort basis.
InsertID string

Row []Value
}

// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
m, err := valuesToMap(vls.Row, vls.Schema)
return m, vls.InsertID, err
}
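
// A minimal sketch of constructing a ValuesSaver; the schema and values here
// are hypothetical and must agree position-by-position, as valuesToMap below
// enforces.
func exampleValuesSaver() ValueSaver {
    return &ValuesSaver{
        Schema: Schema{
            {Name: "name", Type: StringFieldType},
            {Name: "age", Type: IntegerFieldType},
        },
        InsertID: "row-1", // optional best-effort de-duplication key
        Row:      []Value{"gopher", int64(9)},
    }
}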

func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
if len(vs) != len(schema) {
return nil, errors.New("schema does not match length of row to be inserted")
}

m := make(map[string]Value)
for i, fieldSchema := range schema {
if vs[i] == nil {
m[fieldSchema.Name] = nil
continue
}
if fieldSchema.Type != RecordFieldType {
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
continue
}
// Nested record, possibly repeated.
vals, ok := vs[i].([]Value)
if !ok {
return nil, errors.New("nested record is not a []Value")
}
if !fieldSchema.Repeated {
value, err := valuesToMap(vals, fieldSchema.Schema)
if err != nil {
return nil, err
}
m[fieldSchema.Name] = value
continue
}
// A repeated nested field is converted into a slice of maps.
var maps []Value
for _, v := range vals {
sv, ok := v.([]Value)
if !ok {
return nil, errors.New("nested record in slice is not a []Value")
}
value, err := valuesToMap(sv, fieldSchema.Schema)
if err != nil {
return nil, err
}
maps = append(maps, value)
}
m[fieldSchema.Name] = maps
}
return m, nil
}

// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
// Schema determines what fields of the struct are uploaded. It should
// match the table's schema.
// Schema is optional for StructSavers that are passed to Uploader.Put.
Schema Schema

// If non-empty, BigQuery will use InsertID to de-duplicate insertions
// of this row on a best-effort basis.
InsertID string

// Struct should be a struct or a pointer to a struct.
Struct interface{}
}

// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
vstruct := reflect.ValueOf(ss.Struct)
row, err = structToMap(vstruct, ss.Schema)
if err != nil {
return nil, "", err
}
return row, ss.InsertID, nil
}
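
// A minimal sketch of StructSaver usage, assuming the schema matches the
// destination table; the type and values are hypothetical.
func exampleStructSaver() ValueSaver {
    type score struct {
        Name string
        Num  int64
    }
    return &StructSaver{
        Schema: Schema{
            {Name: "Name", Type: StringFieldType},
            {Name: "Num", Type: IntegerFieldType},
        },
        InsertID: "score-1",
        Struct:   score{Name: "pat", Num: 12},
    }
}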

func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
if vstruct.Kind() == reflect.Ptr {
vstruct = vstruct.Elem()
}
if !vstruct.IsValid() {
return nil, nil
}
m := map[string]Value{}
if vstruct.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
}
fields, err := fieldCache.Fields(vstruct.Type())
if err != nil {
return nil, err
}
for _, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case.
structField := fields.Match(schemaField.Name)
if structField == nil {
continue
}
val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
if err != nil {
return nil, err
}
// Add the value to the map, unless it is nil.
if val != nil {
m[schemaField.Name] = val
}
}
return m, nil
}

// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
schemaField.Name, vfield.Type())
}

// A non-nested field can be represented by its Go value, except for some types.
if schemaField.Type != RecordFieldType {
return toUploadValueReflect(vfield, schemaField), nil
}
// A non-repeated nested field is converted into a map[string]Value.
if !schemaField.Repeated {
m, err := structToMap(vfield, schemaField.Schema)
if err != nil {
return nil, err
}
if m == nil {
return nil, nil
}
return m, nil
}
// A repeated nested field is converted into a slice of maps.
// If the field is zero-length (but not nil), we return a zero-length []Value.
if vfield.IsNil() {
return nil, nil
}
vals := []Value{}
for i := 0; i < vfield.Len(); i++ {
m, err := structToMap(vfield.Index(i), schemaField.Schema)
if err != nil {
return nil, err
}
vals = append(vals, m)
}
return vals, nil
}

func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType || fs.Type == NumericFieldType {
return toUploadValueReflect(reflect.ValueOf(val), fs)
}
return val
}

func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
switch fs.Type {
case TimeFieldType:
if v.Type() == typeOfNullTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilTimeString(v.Interface().(civil.Time))
})
case DateTimeFieldType:
if v.Type() == typeOfNullDateTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilDateTimeString(v.Interface().(civil.DateTime))
})
case NumericFieldType:
if r, ok := v.Interface().(*big.Rat); ok && r == nil {
return nil
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return NumericString(v.Interface().(*big.Rat))
})
default:
if !fs.Repeated || v.Len() > 0 {
return v.Interface()
}
// The service treats a null repeated field as an error. Return
// nil to omit the field entirely.
return nil
}
}

func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
if !fs.Repeated {
return cvt(v)
}
if v.Len() == 0 {
return nil
}
s := make([]string, v.Len())
for i := 0; i < v.Len(); i++ {
s[i] = cvt(v.Index(i))
}
return s
}

// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
if t.Nanosecond == 0 {
return t.String()
}
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
t.Nanosecond = 0
return t.String() + fmt.Sprintf(".%06d", micro)
}

// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separates the date and time with a space and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
return dt.Date.String() + " " + CivilTimeString(dt.Time)
}
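
// A sketch of embedding a civil.DateTime in DML; the statement text is
// illustrative, not a recommended way to build queries.
func exampleCivilDateTimeDML() string {
    dt := civil.DateTime{
        Date: civil.Date{Year: 2019, Month: time.March, Day: 5},
        Time: civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 1500},
    }
    // CivilTimeString rounds 1500ns up to 2µs, so this yields
    // "INSERT t (dt) VALUES ('2019-03-05 12:30:00.000002')".
    return "INSERT t (dt) VALUES ('" + CivilDateTimeString(dt) + "')"
}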

// parseCivilDateTime parses a date-time represented in a BigQuery SQL
// compatible format and returns a civil.DateTime.
func parseCivilDateTime(s string) (civil.DateTime, error) {
parts := strings.Fields(s)
if len(parts) != 2 {
return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s)
}
return civil.ParseDateTime(parts[0] + "T" + parts[1])
}

const (
// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value.
NumericPrecisionDigits = 38

// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value.
NumericScaleDigits = 9
)

// NumericString returns a string representing a *big.Rat in a format compatible
// with BigQuery SQL. It returns a floating-point literal with 9 digits
// after the decimal point.
func NumericString(r *big.Rat) string {
return r.FloatString(NumericScaleDigits)
}
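
// For example (a sketch; the *big.Rat value is arbitrary): 1/3 is rendered
// with exactly NumericScaleDigits digits after the decimal point.
func exampleNumericString() string {
    return NumericString(big.NewRat(1, 3)) // "0.333333333"
}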

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
var rs [][]Value
for _, r := range rows {
row, err := convertRow(r, schema)
if err != nil {
return nil, err
}
rs = append(rs, row)
}
return rs, nil
}

func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
if len(schema) != len(r.F) {
return nil, errors.New("schema length does not match row length")
}
var values []Value
for i, cell := range r.F {
fs := schema[i]
v, err := convertValue(cell.V, fs.Type, fs.Schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}

func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
switch val := val.(type) {
case nil:
return nil, nil
case []interface{}:
return convertRepeatedRecord(val, typ, schema)
case map[string]interface{}:
return convertNestedRecord(val, schema)
case string:
return convertBasicType(val, typ)
default:
return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
}
}

func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
var values []Value
for _, cell := range vals {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
v, err := convertValue(val, typ, schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}

func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.

// Nested records are wrapped in a map with a single key, "f".
record := val["f"].([]interface{})
if len(record) != len(schema) {
return nil, errors.New("schema length does not match record length")
}

var values []Value
for i, cell := range record {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
fs := schema[i]
v, err := convertValue(val, fs.Type, fs.Schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}

// convertBasicType returns val as an interface with a concrete type specified by typ.
func convertBasicType(val string, typ FieldType) (Value, error) {
switch typ {
case StringFieldType:
return val, nil
case BytesFieldType:
return base64.StdEncoding.DecodeString(val)
case IntegerFieldType:
return strconv.ParseInt(val, 10, 64)
case FloatFieldType:
return strconv.ParseFloat(val, 64)
case BooleanFieldType:
return strconv.ParseBool(val)
case TimestampFieldType:
f, err := strconv.ParseFloat(val, 64)
if err != nil {
return nil, err
}
secs := math.Trunc(f)
nanos := (f - secs) * 1e9
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil
case DateFieldType:
return civil.ParseDate(val)
case TimeFieldType:
return civil.ParseTime(val)
case DateTimeFieldType:
return civil.ParseDateTime(val)
case NumericFieldType:
r, ok := (&big.Rat{}).SetString(val)
if !ok {
return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val)
}
return Value(r), nil
case GeographyFieldType:
return val, nil
default:
return nil, fmt.Errorf("unrecognized type: %s", typ)
}
}
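
// A sketch of the TIMESTAMP arithmetic above: the service returns seconds
// since the Unix epoch as a float, whose integer and fractional parts become
// the seconds and nanoseconds passed to time.Unix. The value is hypothetical.
func exampleTimestampDecode() time.Time {
    f := 1571068536.25
    secs := math.Trunc(f)     // 1571068536
    nanos := (f - secs) * 1e9 // 250000000
    return time.Unix(int64(secs), int64(nanos)).UTC()
}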

+ 0 - 1240 vendor/cloud.google.com/go/bigquery/value_test.go (diff suppressed because it is too large)


+ 0 - 1133 vendor/cloud.google.com/go/bigtable/admin.go (diff suppressed because it is too large)


+ 0 - 578 vendor/cloud.google.com/go/bigtable/admin_test.go

@@ -1,578 +0,0 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigtable

import (
"context"
"fmt"
"math"
"sort"
"strings"
"testing"
"time"

"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"google.golang.org/api/iterator"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

func TestAdminIntegration(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()

timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()

iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}
if iAdminClient != nil {
defer iAdminClient.Close()

iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
if err != nil {
t.Errorf("InstanceInfo: %v", err)
}
if iInfo.Name != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}
}

list := func() []string {
tbls, err := adminClient.Tables(ctx)
if err != nil {
t.Fatalf("Fetching list of tables: %v", err)
}
sort.Strings(tbls)
return tbls
}
containsAll := func(got, want []string) bool {
gotSet := make(map[string]bool)

for _, s := range got {
gotSet[s] = true
}
for _, s := range want {
if !gotSet[s] {
return false
}
}
return true
}

defer adminClient.DeleteTable(ctx, "mytable")

if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
t.Fatalf("Creating table: %v", err)
}

defer adminClient.DeleteTable(ctx, "myothertable")

if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
t.Fatalf("Creating table: %v", err)
}

if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}

must(adminClient.WaitForReplication(ctx, "mytable"))

if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
t.Fatalf("Deleting table: %v", err)
}
tables := list()
if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
t.Errorf("adminClient.Tables returned %#v, unwanted %#v", got, unwanted)
}

tblConf := TableConf{
TableID: "conftable",
Families: map[string]GCPolicy{
"fam1": MaxVersionsPolicy(1),
"fam2": MaxVersionsPolicy(2),
},
}
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
t.Fatalf("Creating table from TableConf: %v", err)
}
defer adminClient.DeleteTable(ctx, tblConf.TableID)

tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
if err != nil {
t.Fatalf("Getting table info: %v", err)
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !testutil.Equal(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}

// Populate mytable and drop row ranges
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
t.Fatalf("Creating column family: %v", err)
}

client, err := testEnv.NewClient()
if err != nil {
t.Fatalf("NewClient: %v", err)
}
defer client.Close()

tbl := client.Open("mytable")

prefixes := []string{"a", "b", "c"}
for _, prefix := range prefixes {
for i := 0; i < 5; i++ {
mut := NewMutation()
mut.Set("cf", "col", 1000, []byte("1"))
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
}
}

if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
t.Errorf("DropRowRange a: %v", err)
}
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
t.Errorf("DropRowRange c: %v", err)
}
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
t.Errorf("DropRowRange x: %v", err)
}

var gotRowCount int
must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
gotRowCount++
if !strings.HasPrefix(row.Key(), "b") {
t.Errorf("Invalid row after dropping range: %v", row)
}
return true
}))
if gotRowCount != 5 {
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
}
}

func TestInstanceUpdate(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()

timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}

defer adminClient.Close()

iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}

if iAdminClient == nil {
return
}

defer iAdminClient.Close()

iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
if err != nil {
t.Errorf("InstanceInfo: %v", err)
}

if iInfo.Name != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}

if iInfo.DisplayName != adminClient.instance {
t.Errorf("InstanceInfo returned DisplayName %#v, want %#v", iInfo.DisplayName, adminClient.instance)
}

const numNodes = 4
// update cluster nodes
if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil {
t.Errorf("UpdateCluster: %v", err)
}

// get cluster after updating
cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster)
if err != nil {
t.Errorf("GetCluster %v", err)
}
if cis.ServeNodes != int(numNodes) {
t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes))
}
}

func TestAdminSnapshotIntegration(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()

if !testEnv.Config().UseProd {
t.Skip("emulator doesn't support snapshots")
}

timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()

table := testEnv.Config().Table
cluster := testEnv.Config().Cluster

list := func(cluster string) ([]*SnapshotInfo, error) {
infos := []*SnapshotInfo(nil)

it := adminClient.Snapshots(ctx, cluster)
for {
s, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
infos = append(infos, s)
}
return infos, nil
}

// Delete the table at the end of the test. Schedule ahead of time
// in case the client fails
defer adminClient.DeleteTable(ctx, table)

if err := adminClient.CreateTable(ctx, table); err != nil {
t.Fatalf("Creating table: %v", err)
}

// Precondition: no snapshots
snapshots, err := list(cluster)
if err != nil {
t.Fatalf("Initial snapshot list: %v", err)
}
if got, want := len(snapshots), 0; got != want {
t.Fatalf("Initial snapshot list len: %d, want: %d", got, want)
}

// Create snapshot
defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot")

if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil {
t.Fatalf("Creating snapshot: %v", err)
}

// List snapshot
snapshots, err = list(cluster)
if err != nil {
t.Fatalf("Listing snapshots: %v", err)
}
if got, want := len(snapshots), 1; got != want {
t.Fatalf("Listing snapshot count: %d, want: %d", got, want)
}
if got, want := snapshots[0].Name, "mysnapshot"; got != want {
t.Fatalf("Snapshot name: %s, want: %s", got, want)
}
if got, want := snapshots[0].SourceTable, table; got != want {
t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want)
}
if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 {
t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want)
}

// Get snapshot
snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot")
if err != nil {
t.Fatalf("SnapshotInfo: %v", err)
}
if got, want := *snapshot, *snapshots[0]; got != want {
t.Fatalf("SnapshotInfo: %v, want: %v", got, want)
}

// Restore
restoredTable := table + "-restored"
defer adminClient.DeleteTable(ctx, restoredTable)
if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil {
t.Fatalf("CreateTableFromSnapshot: %v", err)
}
if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil {
t.Fatalf("Restored TableInfo: %v", err)
}

// Delete snapshot
if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil {
t.Fatalf("DeleteSnapshot: %v", err)
}
snapshots, err = list(cluster)
if err != nil {
t.Fatalf("List after Delete: %v", err)
}
if got, want := len(snapshots), 0; got != want {
t.Fatalf("List after delete len: %d, want: %d", got, want)
}
}

func TestGranularity(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()

timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()

list := func() []string {
tbls, err := adminClient.Tables(ctx)
if err != nil {
t.Fatalf("Fetching list of tables: %v", err)
}
sort.Strings(tbls)
return tbls
}
containsAll := func(got, want []string) bool {
gotSet := make(map[string]bool)

for _, s := range got {
gotSet[s] = true
}
for _, s := range want {
if !gotSet[s] {
return false
}
}
return true
}

defer adminClient.DeleteTable(ctx, "mytable")

if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
t.Fatalf("Creating table: %v", err)
}

tables := list()
if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}

// calling ModifyColumnFamilies to check the granularity of table
prefix := adminClient.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + "mytable",
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
Id: "cf",
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
}},
}
table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req)
if err != nil {
t.Fatalf("Creating column family: %v", err)
}
if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) {
t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS))
}
}

func TestInstanceAdminClient_AppProfile(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()

timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()

iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}

if iAdminClient == nil {
return
}

defer iAdminClient.Close()
profile := ProfileConf{
ProfileID: "app_profile1",
InstanceID: adminClient.instance,
ClusterID: testEnv.Config().Cluster,
Description: "creating new app profile 1",
RoutingPolicy: SingleClusterRouting,
}

createdProfile, err := iAdminClient.CreateAppProfile(ctx, profile)
if err != nil {
t.Fatalf("Creating app profile: %v", err)
}

gotProfile, err := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1")
if err != nil {
t.Fatalf("Get app profile: %v", err)
}

if !proto.Equal(createdProfile, gotProfile) {
t.Fatalf("created profile: %s, got profile: %s", createdProfile.Name, gotProfile.Name)
}

list := func(instanceID string) ([]*btapb.AppProfile, error) {
profiles := []*btapb.AppProfile(nil)

it := iAdminClient.ListAppProfiles(ctx, instanceID)
for {
s, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
profiles = append(profiles, s)
}
return profiles, nil
}

profiles, err := list(adminClient.instance)
if err != nil {
t.Fatalf("List app profile: %v", err)
}

if got, want := len(profiles), 1; got != want {
t.Fatalf("Initial app profile list len: %d, want: %d", got, want)
}

for _, test := range []struct {
desc string
uattrs ProfileAttrsToUpdate
want *btapb.AppProfile // nil means error
}{
{
desc: "empty update",
uattrs: ProfileAttrsToUpdate{},
want: nil,
},

{
desc: "empty description update",
uattrs: ProfileAttrsToUpdate{Description: ""},
want: &btapb.AppProfile{
Name: gotProfile.Name,
Description: "",
RoutingPolicy: gotProfile.RoutingPolicy,
Etag: gotProfile.Etag},
},
{
desc: "routing update",
uattrs: ProfileAttrsToUpdate{
RoutingPolicy: SingleClusterRouting,
ClusterID: testEnv.Config().Cluster,
},
want: &btapb.AppProfile{
Name: gotProfile.Name,
Description: "",
Etag: gotProfile.Etag,
RoutingPolicy: &btapb.AppProfile_SingleClusterRouting_{
SingleClusterRouting: &btapb.AppProfile_SingleClusterRouting{
ClusterId: testEnv.Config().Cluster,
}},
},
},
} {
err = iAdminClient.UpdateAppProfile(ctx, adminClient.instance, "app_profile1", test.uattrs)
if err != nil {
if test.want != nil {
t.Errorf("%s: %v", test.desc, err)
}
continue
}
if err == nil && test.want == nil {
t.Errorf("%s: got nil, want error", test.desc)
continue
}

got, _ := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1")

if !proto.Equal(got, test.want) {
t.Fatalf("%s: got profile: %v, want profile: %v", test.desc, got, test.want)
}
}

err = iAdminClient.DeleteAppProfile(ctx, adminClient.instance, "app_profile1")
if err != nil {
t.Fatalf("Delete app profile: %v", err)
}
}

+ 0
- 914
vendor/cloud.google.com/go/bigtable/bigtable.go View File

@@ -1,914 +0,0 @@
/*
Copyright 2015 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable // import "cloud.google.com/go/bigtable"

import (
"context"
"errors"
"fmt"
"io"
"strconv"
"time"

"cloud.google.com/go/bigtable/internal/gax"
btopt "cloud.google.com/go/bigtable/internal/option"
"github.com/golang/protobuf/proto"
"google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)

const prodAddr = "bigtable.googleapis.com:443"

// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
conn *grpc.ClientConn
client btpb.BigtableClient
project, instance string
appProfile string
}

// ClientConfig has configurations for the client.
type ClientConfig struct {
// The ID of the app profile to associate with all data operations sent from this client.
// If unspecified, the default app profile for the instance will be used.
AppProfile string
}

// NewClient creates a new Client for a given project and instance.
// The default ClientConfig will be used.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...)
}

// NewClientWithConfig creates a new client with the given config.
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
if err != nil {
return nil, err
}
// Default to a small connection pool that can be overridden.
o = append(o,
option.WithGRPCConnectionPool(4),
// Set the max size to correspond to server-side limits.
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1<<28), grpc.MaxCallRecvMsgSize(1<<28))),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
o = append(o, opts...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}

return &Client{
conn: conn,
client: btpb.NewBigtableClient(conn),
project: project,
instance: instance,
appProfile: config.AppProfile,
}, nil
}

// Close closes the Client.
func (c *Client) Close() error {
return c.conn.Close()
}

var (
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
isIdempotentRetryCode = make(map[codes.Code]bool)
retryOptions = []gax.CallOption{
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
gax.WithRetryCodes(idempotentRetryCodes),
}
)

func init() {
for _, code := range idempotentRetryCodes {
isIdempotentRetryCode[code] = true
}
}

func (c *Client) fullTableName(table string) string {
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
}

// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
c *Client
table string

// Metadata to be sent with each request.
md metadata.MD
}

// Open opens a table.
func (c *Client) Open(table string) *Table {
return &Table{
c: c,
table: table,
md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
}
}

// TODO(dsymonds): Read method that returns a sequence of ReadItems.

// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
ctx = mergeOutgoingMetadata(ctx, t.md)

var prevRowKey string
var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
defer func() { traceEndSpan(ctx, err) }()
attrMap := make(map[string]interface{})
err = gax.Invoke(ctx, func(ctx context.Context) error {
if !arg.valid() {
// Empty row set, no need to make an API call.
// NOTE: we must return early if arg == RowList{} because reading
// an empty RowList from bigtable returns all rows from that table.
return nil
}
req := &btpb.ReadRowsRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
Rows: arg.proto(),
}
for _, opt := range opts {
opt.set(req)
}
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
defer cancel()

startTime := time.Now()
stream, err := t.c.client.ReadRows(ctx, req)
if err != nil {
return err
}
cr := newChunkReader()
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// Reset arg for next Invoke call.
arg = arg.retainRowsAfter(prevRowKey)
attrMap["rowKey"] = prevRowKey
attrMap["error"] = err.Error()
attrMap["time_secs"] = time.Since(startTime).Seconds()
tracePrintf(ctx, attrMap, "Retry details in ReadRows")
return err
}
attrMap["time_secs"] = time.Since(startTime).Seconds()
attrMap["rowCount"] = len(res.Chunks)
tracePrintf(ctx, attrMap, "Details in ReadRows")

for _, cc := range res.Chunks {
row, err := cr.Process(cc)
if err != nil {
// No need to prepare for a retry, this is an unretryable error.
return err
}
if row == nil {
continue
}
prevRowKey = row.Key()
if !f(row) {
// Cancel and drain stream.
cancel()
for {
if _, err := stream.Recv(); err != nil {
// The stream has ended. We don't return an error
// because the caller has intentionally interrupted the scan.
return nil
}
}
}
}
if err := cr.Close(); err != nil {
// No need to prepare for a retry, this is an unretryable error.
return err
}
}
return err
}, retryOptions...)

return err
}

// ReadRow is a convenience implementation of a single-row reader.
// A missing row will return a zero-length map and a nil error.
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
var r Row
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
r = rr
return true
}, opts...)
return r, err
}
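
// A hedged sketch of a bounded scan; the table handle, key range, and limit
// are hypothetical. Returning false from the callback stops the scan early.
func exampleReadRows(ctx context.Context, tbl *Table) error {
    return tbl.ReadRows(ctx, NewRange("a", "c"), func(r Row) bool {
        _ = r.Key() // inspect the row here
        return true // keep scanning
    }, LimitRows(100))
}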

// decodeFamilyProto adds the cell data from f to the given row.
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
fam := f.Name // does not have colon
for _, col := range f.Columns {
for _, cell := range col.Cells {
ri := ReadItem{
Row: row,
Column: fam + ":" + string(col.Qualifier),
Timestamp: Timestamp(cell.TimestampMicros),
Value: cell.Value,
}
r[fam] = append(r[fam], ri)
}
}
}

// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface {
proto() *btpb.RowSet

// retainRowsAfter returns a new RowSet that does not include the
// given row key or any row key lexicographically less than it.
retainRowsAfter(lastRowKey string) RowSet

// valid reports whether this set can cover at least one row.
valid() bool
}

// RowList is a sequence of row keys.
type RowList []string

func (r RowList) proto() *btpb.RowSet {
keys := make([][]byte, len(r))
for i, row := range r {
keys[i] = []byte(row)
}
return &btpb.RowSet{RowKeys: keys}
}

func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
var retryKeys RowList
for _, key := range r {
if key > lastRowKey {
retryKeys = append(retryKeys, key)
}
}
return retryKeys
}

func (r RowList) valid() bool {
return len(r) > 0
}

// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
start string
limit string
}

// NewRange returns the new RowRange [begin, end).
func NewRange(begin, end string) RowRange {
return RowRange{
start: begin,
limit: end,
}
}

// Unbounded tests whether a RowRange is unbounded.
func (r RowRange) Unbounded() bool {
return r.limit == ""
}

// Contains reports whether the RowRange contains the given row key.
func (r RowRange) Contains(row string) bool {
return r.start <= row && (r.limit == "" || r.limit > row)
}

// String provides a printable description of a RowRange.
func (r RowRange) String() string {
a := strconv.Quote(r.start)
if r.Unbounded() {
return fmt.Sprintf("[%s,∞)", a)
}
return fmt.Sprintf("[%s,%q)", a, r.limit)
}

func (r RowRange) proto() *btpb.RowSet {
rr := &btpb.RowRange{
StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)},
}
if !r.Unbounded() {
rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
}
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
}

func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
if lastRowKey == "" || lastRowKey < r.start {
return r
}
// Set the beginning of the range to the row after the last scanned.
start := lastRowKey + "\x00"
if r.Unbounded() {
return InfiniteRange(start)
}
return NewRange(start, r.limit)
}

func (r RowRange) valid() bool {
return r.Unbounded() || r.start < r.limit
}

// RowRangeList is a sequence of RowRanges representing the union of the ranges.
type RowRangeList []RowRange

func (r RowRangeList) proto() *btpb.RowSet {
ranges := make([]*btpb.RowRange, len(r))
for i, rr := range r {
// RowRange.proto() returns a RowSet with a single element RowRange array
ranges[i] = rr.proto().RowRanges[0]
}
return &btpb.RowSet{RowRanges: ranges}
}

func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
if lastRowKey == "" {
return r
}
// Return a list of any range that has not yet been completely processed
var ranges RowRangeList
for _, rr := range r {
retained := rr.retainRowsAfter(lastRowKey)
if retained.valid() {
ranges = append(ranges, retained.(RowRange))
}
}
return ranges
}

func (r RowRangeList) valid() bool {
for _, rr := range r {
if rr.valid() {
return true
}
}
return false
}

// SingleRow returns a RowSet for reading a single row.
func SingleRow(row string) RowSet {
return RowList{row}
}

// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
func PrefixRange(prefix string) RowRange {
return RowRange{
start: prefix,
limit: prefixSuccessor(prefix),
}
}

// InfiniteRange returns the RowRange consisting of all keys at least as
// large as start.
func InfiniteRange(start string) RowRange {
return RowRange{
start: start,
limit: "",
}
}

// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
if prefix == "" {
return "" // infinite range
}
n := len(prefix)
for n--; n >= 0 && prefix[n] == '\xff'; n-- {
}
if n == -1 {
return ""
}
ans := []byte(prefix[:n])
ans = append(ans, prefix[n]+1)
return string(ans)
}
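
// Worked examples of what prefixSuccessor produces:
//
//	prefixSuccessor("abc")    == "abd" // bump the final byte
//	prefixSuccessor("ab\xff") == "ac"  // skip trailing 0xff bytes, bump "b"
//	prefixSuccessor("\xff")   == ""    // every byte is 0xff: unbounded limit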

// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
set(req *btpb.ReadRowsRequest)
}

// RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }

type rowFilter struct{ f Filter }

func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }

// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }

type limitRows struct{ limit int64 }

func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }

// mutationsAreRetryable returns true if all mutations are idempotent
// and therefore retryable. A mutation is idempotent iff all cell timestamps
// have an explicit timestamp set and do not rely on the timestamp being set on the server.
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
serverTime := int64(ServerTime)
for _, mut := range muts {
setCell := mut.GetSetCell()
if setCell != nil && setCell.TimestampMicros == serverTime {
return false
}
}
return true
}

const maxMutations = 100000

// Apply mutates a row atomically. A mutation must contain at least one
// operation and at most 100000 operations.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
ctx = mergeOutgoingMetadata(ctx, t.md)
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}

var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
defer func() { traceEndSpan(ctx, err) }()
var callOptions []gax.CallOption
if m.cond == nil {
req := &btpb.MutateRowRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
Mutations: m.ops,
}
if mutationsAreRetryable(m.ops) {
callOptions = retryOptions
}
var res *btpb.MutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
res, err = t.c.client.MutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(res)
}
return err
}

req := &btpb.CheckAndMutateRowRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
PredicateFilter: m.cond.proto(),
}
if m.mtrue != nil {
if m.mtrue.cond != nil {
return errors.New("bigtable: conditional mutations cannot be nested")
}
req.TrueMutations = m.mtrue.ops
}
if m.mfalse != nil {
if m.mfalse.cond != nil {
return errors.New("bigtable: conditional mutations cannot be nested")
}
req.FalseMutations = m.mfalse.ops
}
if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
callOptions = retryOptions
}
var cmRes *btpb.CheckAndMutateRowResponse
err = gax.Invoke(ctx, func(ctx context.Context) error {
var err error
cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(cmRes)
}
return err
}

// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
after(res proto.Message)
}

type applyAfterFunc func(res proto.Message)

func (a applyAfterFunc) after(res proto.Message) { a(res) }

// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
return applyAfterFunc(func(res proto.Message) {
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
*matched = res.PredicateMatched
}
})
}

// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
ops []*btpb.Mutation

// for conditional mutations
cond Filter
mtrue, mfalse *Mutation
}

// NewMutation returns a new mutation.
func NewMutation() *Mutation {
return new(Mutation)
}

// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}

// Set sets a value in a specified column, with the given timestamp.
// The timestamp will be truncated to millisecond granularity.
// A timestamp of ServerTime means to use the server timestamp.
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
FamilyName: family,
ColumnQualifier: []byte(column),
TimestampMicros: int64(ts.TruncateToMilliseconds()),
Value: value,
}}})
}

// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
}}})
}

// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
// The timestamps will be truncated to millisecond granularity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
TimeRange: &btpb.TimestampRange{
StartTimestampMicros: int64(start.TruncateToMilliseconds()),
EndTimestampMicros: int64(end.TruncateToMilliseconds()),
},
}}})
}

// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{
FamilyName: family,
}}})
}

// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
}
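
// A sketch of building and applying a mutation; the family, columns, and row
// key are hypothetical, and the column family must already exist.
func exampleApply(ctx context.Context, tbl *Table) error {
    mut := NewMutation()
    mut.Set("cf", "col", Now(), []byte("value"))
    mut.DeleteCellsInColumn("cf", "stale")
    return tbl.Apply(ctx, "row-key", mut)
}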

// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
Entry *btpb.MutateRowsRequest_Entry
Err error
}

// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
if len(rowKeys) != len(muts) {
return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
}

origEntries := make([]*entryErr, len(rowKeys))
for i, key := range rowKeys {
mut := muts[i]
if mut.cond != nil {
return nil, errors.New("conditional mutations cannot be applied in bulk")
}
origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
}

var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
defer func() { traceEndSpan(ctx, err) }()

for _, group := range groupEntries(origEntries, maxMutations) {
attrMap := make(map[string]interface{})
err = gax.Invoke(ctx, func(ctx context.Context) error {
attrMap["rowCount"] = len(group)
tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
err := t.doApplyBulk(ctx, group, opts...)
if err != nil {
// We want to retry the entire request with the current group
return err
}
group = t.getApplyBulkRetries(group)
if len(group) > 0 && len(idempotentRetryCodes) > 0 {
// We have at least one mutation that needs to be retried.
// Return an arbitrary error that is retryable according to callOptions.
return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
}
return nil
}, retryOptions...)
if err != nil {
return nil, err
}
}

// Accumulate all of the errors into an array to return, interspersed with nils for successful
// entries. The absence of any errors means we should return nil.
var errs []error
var foundErr bool
for _, entry := range origEntries {
if entry.Err != nil {
foundErr = true
}
errs = append(errs, entry.Err)
}
if foundErr {
return errs, nil
}
return nil, nil
}
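
// A sketch of handling ApplyBulk's two failure modes (names hypothetical):
// a non-nil error means the whole request failed; otherwise a non-nil slice
// carries one error, or nil, per row.
func exampleApplyBulk(ctx context.Context, tbl *Table, keys []string, muts []*Mutation) error {
    errs, err := tbl.ApplyBulk(ctx, keys, muts)
    if err != nil {
        return err // entire request failed
    }
    for i, rowErr := range errs { // errs is nil when every row succeeded
        if rowErr != nil {
            fmt.Printf("row %s failed: %v\n", keys[i], rowErr)
        }
    }
    return nil
}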

// getApplyBulkRetries returns the entries that need to be retried
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
var retryEntries []*entryErr
for _, entry := range entries {
err := entry.Err
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
// There was an error and the entry is retryable.
retryEntries = append(retryEntries, entry)
}
}
return retryEntries
}

// doApplyBulk does the work of a single ApplyBulk invocation
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}

entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
for i, entryErr := range entryErrs {
entries[i] = entryErr.Entry
}
req := &btpb.MutateRowsRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
Entries: entries,
}
stream, err := t.c.client.MutateRows(ctx, req)
if err != nil {
return err
}
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}

for i, entry := range res.Entries {
s := entry.Status
if s.Code == int32(codes.OK) {
entryErrs[i].Err = nil
} else {
entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message)
}
}
after(res)
}
return nil
}

// groupEntries splits entries into batches of at most maxSize mutations each,
// never splitting a single entry's mutations across batches.
func groupEntries(entries []*entryErr, maxSize int) [][]*entryErr {
var (
res [][]*entryErr
start int
gmuts int
)
addGroup := func(end int) {
if end-start > 0 {
res = append(res, entries[start:end])
start = end
gmuts = 0
}
}
for i, e := range entries {
emuts := len(e.Entry.Mutations)
if gmuts+emuts > maxSize {
addGroup(i)
}
gmuts += emuts
}
addGroup(len(entries))
return res
}

// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64

// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1

// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }

// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }

// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }

// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
// which is currently the only granularity supported.
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
if ts == ServerTime {
return ts
}
return ts - ts%1000
}
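
// A sketch of the units: Timestamps count microseconds, and truncation drops
// the sub-millisecond remainder. The instant chosen is arbitrary.
func exampleTimestamp() Timestamp {
    ts := Time(time.Date(2019, time.March, 5, 12, 0, 0, 123456789, time.UTC))
    return ts.TruncateToMilliseconds() // ...123456µs becomes ...123000µs
}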

// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
req := &btpb.ReadModifyWriteRowRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
Rules: m.ops,
}
res, err := t.c.client.ReadModifyWriteRow(ctx, req)
if err != nil {
return nil, err
}
if res.Row == nil {
return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
}
r := make(Row)
for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family
decodeFamilyProto(r, row, fam)
}
return r, nil
}

// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
ops []*btpb.ReadModifyWriteRule
}

// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }

// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v},
})
}

// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta},
})
}
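
// A sketch of an atomic counter bump (names hypothetical): the target cell
// must be unset or hold an 8-byte big-endian value.
func exampleIncrement(ctx context.Context, tbl *Table) (Row, error) {
    rmw := NewReadModifyWrite()
    rmw.Increment("cf", "count", 1)
    return tbl.ApplyReadModifyWrite(ctx, "row-key", rmw)
}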

// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
// if any, joined with internal metadata.
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
mdCopy, _ := metadata.FromOutgoingContext(ctx)
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
}

// SampleRowKeys returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of
// the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces.
func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
var sampledRowKeys []string
err := gax.Invoke(ctx, func(ctx context.Context) error {
sampledRowKeys = nil
req := &btpb.SampleRowKeysRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
}
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
defer cancel()

stream, err := t.c.client.SampleRowKeys(ctx, req)
if err != nil {
return err
}
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}

key := string(res.RowKey)
if key == "" {
continue
}

sampledRowKeys = append(sampledRowKeys, key)
}
return nil
}, retryOptions...)
return sampledRowKeys, err
}
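
Since the sampled keys delimit roughly equal-sized sections, a common use is fanning a full-table scan out across goroutines, one per section. A sketch under the same placeholder names as above, using golang.org/x/sync/errgroup for the fan-out (the per-row callback is a stub):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigtable"
	"golang.org/x/sync/errgroup"
)

func main() {
	ctx := context.Background()
	client, err := bigtable.NewClient(ctx, "my-project", "my-instance") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := parallelScan(ctx, client.Open("mytable")); err != nil {
		log.Fatal(err)
	}
}

func parallelScan(ctx context.Context, tbl *bigtable.Table) error {
	keys, err := tbl.SampleRowKeys(ctx)
	if err != nil {
		return err
	}
	// Pad with "" on both ends so the ranges cover the whole table;
	// an empty end key means "unbounded", as in scantest.go further down.
	starts := append([]string{""}, keys...)
	ends := append(append([]string(nil), keys...), "")

	g, gctx := errgroup.WithContext(ctx)
	for i := range starts {
		start, end := starts[i], ends[i] // capture per-iteration copies
		g.Go(func() error {
			return tbl.ReadRows(gctx, bigtable.NewRange(start, end),
				func(r bigtable.Row) bool {
					// ... process r ...
					return true
				})
		})
	}
	return g.Wait()
}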

+ 0  - 1255  vendor/cloud.google.com/go/bigtable/bigtable_test.go  (file diff suppressed because it is too large)


+ 0  - 83  vendor/cloud.google.com/go/bigtable/bttest/example_test.go

@@ -1,83 +0,0 @@
/*
Copyright 2016 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bttest_test

import (
"context"
"fmt"
"log"

"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/bttest"
"google.golang.org/api/option"
"google.golang.org/grpc"
)

func ExampleNewServer() {
srv, err := bttest.NewServer("localhost:0")
if err != nil {
log.Fatalln(err)
}

ctx := context.Background()

conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
log.Fatalln(err)
}

proj, instance := "proj", "instance"

adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
if err != nil {
log.Fatalln(err)
}

if err = adminClient.CreateTable(ctx, "example"); err != nil {
log.Fatalln(err)
}

if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
log.Fatalln(err)
}

client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
if err != nil {
log.Fatalln(err)
}
tbl := client.Open("example")

mut := bigtable.NewMutation()
mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
log.Fatalln(err)
}

if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
log.Fatalln(err)
} else {
for _, column := range row["links"] {
fmt.Println(column.Column)
fmt.Println(string(column.Value))
}
}

// Output:
// links:golang.org
// Gophers!
}

+ 0  - 1406  vendor/cloud.google.com/go/bigtable/bttest/inmem.go  (file diff suppressed because it is too large)


+ 0  - 1193  vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go  (file diff suppressed because it is too large)


+ 0  - 1614  vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go  (file diff suppressed because it is too large)


+ 0  - 173  vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go

@@ -1,173 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
"testing"
"time"

"cloud.google.com/go/bigtable"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
)

func TestParseDuration(t *testing.T) {
tests := []struct {
in string
// out or fail are mutually exclusive
out time.Duration
fail bool
}{
{in: "10ms", out: 10 * time.Millisecond},
{in: "3s", out: 3 * time.Second},
{in: "60m", out: 60 * time.Minute},
{in: "12h", out: 12 * time.Hour},
{in: "7d", out: 168 * time.Hour},

{in: "", fail: true},
{in: "0", fail: true},
{in: "7ns", fail: true},
{in: "14mo", fail: true},
{in: "3.5h", fail: true},
{in: "106752d", fail: true}, // overflow
}
for _, tc := range tests {
got, err := parseDuration(tc.in)
if !tc.fail && err != nil {
t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err)
continue
}
if tc.fail && err == nil {
t.Errorf("parseDuration(%q) did not fail", tc.in)
continue
}
if tc.fail {
continue
}
if got != tc.out {
t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out)
}
}
}
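
parseDuration itself lives in cbt.go, whose diff is suppressed above. A plausible reconstruction consistent with this table (an integer count followed by one of ms, s, m, h, or d, with an overflow check) is:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

var unitMap = map[string]time.Duration{
	"ms": time.Millisecond,
	"s":  time.Second,
	"m":  time.Minute,
	"h":  time.Hour,
	"d":  24 * time.Hour,
}

// parseDuration accepts an integer followed by ms|s|m|h|d ("7d" is 168h).
// Unlike time.ParseDuration it rejects fractions ("3.5h") and finer units ("7ns").
func parseDuration(s string) (time.Duration, error) {
	i := strings.IndexFunc(s, func(r rune) bool { return r < '0' || r > '9' })
	if i <= 0 {
		return 0, fmt.Errorf("invalid duration %q", s) // covers "" and "0"
	}
	unit, ok := unitMap[s[i:]]
	if !ok {
		return 0, fmt.Errorf("unknown unit in %q", s)
	}
	n, err := strconv.ParseInt(s[:i], 10, 64)
	if err != nil {
		return 0, err
	}
	d := time.Duration(n) * unit
	if d/unit != time.Duration(n) { // the multiplication overflowed
		return 0, fmt.Errorf("duration %q out of range", s)
	}
	return d, nil
}

func main() {
	fmt.Println(parseDuration("7d"))      // 168h0m0s <nil>
	fmt.Println(parseDuration("106752d")) // out-of-range error
}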

func TestParseArgs(t *testing.T) {
got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"})
if err != nil {
t.Fatal(err)
}
want := map[string]string{"a": "1", "b": "2"}
if !testutil.Equal(got, want) {
t.Fatalf("got %v, want %v", got, want)
}

if _, err := parseArgs([]string{"a1"}, []string{"a1"}); err == nil {
t.Error("malformed: got nil, want error")
}
if _, err := parseArgs([]string{"a=1"}, []string{"b"}); err == nil {
t.Error("invalid: got nil, want error")
}
}

func TestParseColumnsFilter(t *testing.T) {
tests := []struct {
in string
out bigtable.Filter
fail bool
}{
{
in: "columnA",
out: bigtable.ColumnFilter("columnA"),
},
{
in: "familyA:columnA",
out: bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
},
{
in: "columnA,columnB",
out: bigtable.InterleaveFilters(bigtable.ColumnFilter("columnA"), bigtable.ColumnFilter("columnB")),
},
{
in: "familyA:columnA,columnB",
out: bigtable.InterleaveFilters(
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
bigtable.ColumnFilter("columnB"),
),
},
{
in: "columnA,familyB:columnB",
out: bigtable.InterleaveFilters(
bigtable.ColumnFilter("columnA"),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:columnA,familyB:columnB",
out: bigtable.InterleaveFilters(
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:",
out: bigtable.FamilyFilter("familyA"),
},
{
in: ":columnA",
out: bigtable.ColumnFilter("columnA"),
},
{
in: ",:columnA,,familyB:columnB,",
out: bigtable.InterleaveFilters(
bigtable.ColumnFilter("columnA"),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:columnA:cellA",
fail: true,
},
{
in: "familyA::columnA",
fail: true,
},
}

for _, tc := range tests {
got, err := parseColumnsFilter(tc.in)

if !tc.fail && err != nil {
t.Errorf("parseColumnsFilter(%q) unexpectedly failed: %v", tc.in, err)
continue
}
if tc.fail && err == nil {
t.Errorf("parseColumnsFilter(%q) did not fail", tc.in)
continue
}
if tc.fail {
continue
}

var cmpOpts cmp.Options
cmpOpts =
append(
cmpOpts,
cmp.AllowUnexported(bigtable.ChainFilters([]bigtable.Filter{}...)),
cmp.AllowUnexported(bigtable.InterleaveFilters([]bigtable.Filter{}...)))

if !cmp.Equal(got, tc.out, cmpOpts) {
t.Errorf("parseColumnsFilter(%q) = %v, want %v", tc.in, got, tc.out)
}
}
}
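
As with parseDuration, the parseColumnsFilter under test is in the suppressed cbt.go. A reconstruction that satisfies every case in this table, using only the public filter constructors, could look like:

package main

import (
	"errors"
	"fmt"
	"strings"

	"cloud.google.com/go/bigtable"
)

// parseColumnsFilter turns "family:qualifier,..." into a single Filter:
// one entry is returned directly, several are interleaved, and an entry
// with a family part is chained behind a FamilyFilter.
func parseColumnsFilter(s string) (bigtable.Filter, error) {
	var fs []bigtable.Filter
	for _, col := range strings.Split(s, ",") {
		if col == "" {
			continue // stray commas are tolerated, per the tests
		}
		parts := strings.Split(col, ":")
		switch {
		case len(parts) == 1: // "columnA"
			fs = append(fs, bigtable.ColumnFilter(parts[0]))
		case len(parts) != 2: // "a:b:c", "a::b"
			return nil, fmt.Errorf("bad column spec %q", col)
		case parts[0] == "": // ":columnA"
			fs = append(fs, bigtable.ColumnFilter(parts[1]))
		case parts[1] == "": // "familyA:"
			fs = append(fs, bigtable.FamilyFilter(parts[0]))
		default: // "familyA:columnA"
			fs = append(fs, bigtable.ChainFilters(
				bigtable.FamilyFilter(parts[0]), bigtable.ColumnFilter(parts[1])))
		}
	}
	if len(fs) == 0 {
		return nil, errors.New("no columns specified")
	}
	if len(fs) == 1 {
		return fs[0], nil
	}
	return bigtable.InterleaveFilters(fs...), nil
}

func main() {
	f, err := parseColumnsFilter("familyA:columnA,columnB")
	fmt.Println(f, err)
}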

+ 0  - 425  vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go

@@ -1,425 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go gcpolicy.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview).

Usage:

cbt [options] command [arguments]

The commands are:

count Count rows in a table
createinstance Create an instance with an initial cluster
createcluster Create a cluster in the configured instance
createfamily Create a column family
createtable Create a table
updatecluster Update a cluster in the configured instance
deleteinstance Delete an instance
deletecluster Delete a cluster from the configured instance
deletecolumn Delete all cells in a column
deletefamily Delete a column family
deleterow Delete a row
deletetable Delete a table
doc Print godoc-suitable documentation for cbt
help Print help text
listinstances List instances in a project
listclusters List clusters in an instance
lookup Read from a single row
ls List tables and column families
mddoc Print documentation for cbt in Markdown format
read Read rows
set Set value of a cell
setgcpolicy Set the GC policy for a column family
waitforreplication Block until all the completed writes have been replicated to all the clusters
createtablefromsnapshot Create a table from a snapshot (snapshots alpha)
createsnapshot Create a snapshot from a source table (snapshots alpha)
listsnapshots List snapshots in a cluster (snapshots alpha)
getsnapshot Get snapshot info (snapshots alpha)
deletesnapshot Delete snapshot in a cluster (snapshots alpha)
version Print the current cbt version
createappprofile Create an app profile for an instance
getappprofile Read an app profile for an instance
listappprofile List app profiles for an instance
updateappprofile Update an app profile for an instance
deleteappprofile Delete an app profile for an instance

Use "cbt help <command>" for more information about a command.

The options are:

-project string
project ID, if unset uses gcloud configured project
-instance string
Cloud Bigtable instance
-creds string
if set, use application credentials in this file


Alpha features are not currently available to most Cloud Bigtable customers. The
features might be changed in backward-incompatible ways and are not recommended
for production use. They are not subject to any SLA or deprecation policy.

Note: cbt does not support specifying arbitrary bytes on the command line for
any value that Bigtable otherwise supports (e.g., row key, column qualifier,
etc.).

For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
~/.cbtrc in this format:

project = my-project-123
instance = my-instance
creds = path-to-account-key.json
admin-endpoint = hostname:port
data-endpoint = hostname:port

All values are optional, and all will be overridden by flags.



Count rows in a table

Usage:
cbt count <table>




Create an instance with an initial cluster

Usage:
cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>
instance-id Permanent, unique id for the instance
display-name Description of the instance
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD





Create a cluster in the configured instance

Usage:
cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD





Create a column family

Usage:
cbt createfamily <table> <family>




Create a table

Usage:
cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]
families: Column families and their associated GC policies. For gcpolicy,
see "setgcpolicy".
Example: families=family1:maxage=1w,family2:maxversions=1
splits: Row keys at which to initially split the table




Update a cluster in the configured instance

Usage:
cbt updatecluster <cluster-id> [num-nodes=num-nodes]
cluster-id Permanent, unique id for the cluster in the instance
num-nodes The number of nodes to update to




Delete an instance

Usage:
cbt deleteinstance <instance>




Delete a cluster from the configured instance

Usage:
cbt deletecluster <cluster>




Delete all cells in a column

Usage:
cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request





Delete a column family

Usage:
cbt deletefamily <table> <family>




Delete a row

Usage:
cbt deleterow <table> <row> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request





Delete a table

Usage:
cbt deletetable <table>




Print godoc-suitable documentation for cbt

Usage:
cbt doc




Print help text

Usage:
cbt help [command]




List instances in a project

Usage:
cbt listinstances




List clusters in an instance

Usage:
cbt listclusters




Read from a single row

Usage:
cbt lookup <table> <row> [columns=[family]:[qualifier],...] [cells-per-column=<n>] [app-profile=<app profile id>]
columns=[family]:[qualifier],... Read only these columns, comma-separated
cells-per-column=<n> Read only this many cells per column
app-profile=<app profile id> The app profile id to use for the request





List tables and column families

Usage:
cbt ls List tables
cbt ls <table> List column families in <table>




Print documentation for cbt in Markdown format

Usage:
cbt mddoc




Read rows

Usage:
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [columns=[family]:[qualifier],...] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]
start=<row> Start reading at this row
end=<row> Stop reading before this row
prefix=<prefix> Read rows with this prefix
regex=<regex> Read rows with keys matching this regex
columns=[family]:[qualifier],... Read only these columns, comma-separated
count=<n> Read only this many rows
cells-per-column=<n> Read only this many cells per column
app-profile=<app profile id> The app profile id to use for the request





Set value of a cell

Usage:
cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...
app-profile=<app profile id> The app profile id to use for the request
family:column=val[@ts] may be repeated to set multiple cells.

ts is an optional integer timestamp.
If it cannot be parsed, the `@ts` part will be
interpreted as part of the value.




Set the GC policy for a column family

Usage:
cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never)

maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")
maxversions=<n> Maximum number of versions to preserve
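
The same policies can be set programmatically; through the Go admin API the syntax above corresponds to roughly this sketch (project, instance, table, and family names are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	admin, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Equivalent of: cbt setgcpolicy mytable myfamily maxage=4d or maxversions=3
	policy := bigtable.UnionPolicy(
		bigtable.MaxAgePolicy(4*24*time.Hour),
		bigtable.MaxVersionsPolicy(3),
	)
	if err := admin.SetGCPolicy(ctx, "mytable", "myfamily", policy); err != nil {
		log.Fatal(err)
	}
}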




Block until all the completed writes have been replicated to all the clusters

Usage:
cbt waitforreplication <table>




Create a table from a snapshot (snapshots alpha)

Usage:
cbt createtablefromsnapshot <table> <cluster> <snapshot>
table The name of the table to create
cluster The cluster where the snapshot is located
snapshot The snapshot to restore




Create a snapshot from a source table (snapshots alpha)

Usage:
cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]

[ttl=<d>] Lifespan of the snapshot (e.g. "1h", "4d")





List snapshots in a cluster (snapshots alpha)

Usage:
cbt listsnapshots [<cluster>]




Get snapshot info (snapshots alpha)

Usage:
cbt getsnapshot <cluster> <snapshot>




Delete snapshot in a cluster (snapshots alpha)

Usage:
cbt deletesnapshot <cluster> <snapshot>




Print the current cbt version

Usage:
cbt version




Create an app profile for an instance

Usage:
cbt createappprofile <instance-id> <profile-id> <description> (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
optional flags may be `force`




Read an app profile for an instance

Usage:
cbt getappprofile <instance-id> <profile-id>




List app profiles for an instance

Usage:
cbt listappprofile <instance-id>




Update an app profile for an instance

Usage:
cbt updateappprofile <instance-id> <profile-id> <description> (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
optional flags may be `force`




Delete an app profile for an instance

Usage:
cbt deleteappprofile <instance-id> <profile-id>




*/
package main

+ 0  - 215  vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy.go

@@ -1,215 +0,0 @@
/*
Copyright 2015 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"unicode"

"cloud.google.com/go/bigtable"
)

// Parse a GC policy. Valid policies include
// never
// maxage = 5d
// maxversions = 3
// maxage = 5d || maxversions = 3
// maxage=30d || (maxage=3d && maxversions=100)
func parseGCPolicy(s string) (bigtable.GCPolicy, error) {
if strings.TrimSpace(s) == "never" {
return bigtable.NoGcPolicy(), nil
}
r := strings.NewReader(s)
p, err := parsePolicyExpr(r)
if err != nil {
return nil, fmt.Errorf("invalid GC policy: %v", err)
}
tok, err := getToken(r)
if err != nil {
return nil, err
}
if tok != "" {
return nil, fmt.Errorf("invalid GC policy: want end of input, got %q", tok)
}
return p, nil
}

// expr ::= term (op term)*
// op ::= "and" | "or" | "&&" | "||"
func parsePolicyExpr(r io.RuneScanner) (bigtable.GCPolicy, error) {
policy, err := parsePolicyTerm(r)
if err != nil {
return nil, err
}
for {
tok, err := getToken(r)
if err != nil {
return nil, err
}
var f func(...bigtable.GCPolicy) bigtable.GCPolicy
switch tok {
case "and", "&&":
f = bigtable.IntersectionPolicy
case "or", "||":
f = bigtable.UnionPolicy
default:
ungetToken(tok)
return policy, nil
}
p2, err := parsePolicyTerm(r)
if err != nil {
return nil, err
}
policy = f(policy, p2)
}
}

// term ::= "maxage" "=" duration | "maxversions" "=" int | "(" policy ")"
func parsePolicyTerm(r io.RuneScanner) (bigtable.GCPolicy, error) {
tok, err := getToken(r)
if err != nil {
return nil, err
}
switch tok {
case "":
return nil, errors.New("empty GC policy term")

case "maxage", "maxversions":
if err := expectToken(r, "="); err != nil {
return nil, err
}
tok2, err := getToken(r)
if err != nil {
return nil, err
}
if tok2 == "" {
return nil, errors.New("expected a token after '='")
}
if tok == "maxage" {
dur, err := parseDuration(tok2)
if err != nil {
return nil, err
}
return bigtable.MaxAgePolicy(dur), nil
}
n, err := strconv.ParseUint(tok2, 10, 16)
if err != nil {
return nil, err
}
return bigtable.MaxVersionsPolicy(int(n)), nil

case "(":
p, err := parsePolicyExpr(r)
if err != nil {
return nil, err
}
if err := expectToken(r, ")"); err != nil {
return nil, err
}
return p, nil

default:
return nil, fmt.Errorf("unexpected token: %q", tok)
}

}

func expectToken(r io.RuneScanner, want string) error {
got, err := getToken(r)
if err != nil {
return err
}
if got != want {
return fmt.Errorf("expected %q, saw %q", want, got)
}
return nil
}

const noToken = "_" // empty token is valid, so use "_" instead

// If not noToken, getToken will return this instead of reading a new token
// from the input.
var ungotToken = noToken

// getToken extracts the first token from the input. Valid tokens include
// any sequence of letters and digits, and these symbols: &&, ||, =, ( and ).
// getToken returns ("", nil) at end of input.
func getToken(r io.RuneScanner) (string, error) {
if ungotToken != noToken {
t := ungotToken
ungotToken = noToken
return t, nil
}
var err error
// Skip leading whitespace.
c := ' '
for unicode.IsSpace(c) {
c, _, err = r.ReadRune()
if err == io.EOF {
return "", nil
}
if err != nil {
return "", err
}
}
switch {
case c == '=' || c == '(' || c == ')':
return string(c), nil

case c == '&' || c == '|':
c2, _, err := r.ReadRune()
if err != nil && err != io.EOF {
return "", err
}
if c != c2 {
return "", fmt.Errorf("expected %c%c", c, c)
}
return string([]rune{c, c}), nil

case unicode.IsLetter(c) || unicode.IsDigit(c):
// Collect an alphanumeric token.
var b bytes.Buffer
for unicode.IsLetter(c) || unicode.IsDigit(c) {
b.WriteRune(c)
c, _, err = r.ReadRune()
if err == io.EOF {
break
}
if err != nil {
return "", err
}
}
r.UnreadRune()
return b.String(), nil

default:
return "", fmt.Errorf("bad rune %q", c)
}
}

// "unget" a token so the next call to getToken will return it.
func ungetToken(tok string) {
if ungotToken != noToken {
panic("ungetToken called twice")
}
ungotToken = tok
}

+ 0  - 196  vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy_test.go

@@ -1,196 +0,0 @@
/*
Copyright 2015 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"strings"
"testing"
"time"

"cloud.google.com/go/bigtable"
"github.com/google/go-cmp/cmp"
)

func TestParseGCPolicy(t *testing.T) {
for _, test := range []struct {
in string
want bigtable.GCPolicy
}{
{
"never",
bigtable.NoGcPolicy(),
},
{
"maxage=3h",
bigtable.MaxAgePolicy(3 * time.Hour),
},
{
"maxversions=2",
bigtable.MaxVersionsPolicy(2),
},
{
"maxversions=2 and maxage=1h",
bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
},
{
"(((maxversions=2 and (maxage=1h))))",
bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
},
{
"maxversions=7 or maxage=8h",
bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
},
{
"maxversions = 7||maxage = 8h",
bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
},
{
"maxversions=7||maxage=8h",
bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
},
{
"maxage=30d || (maxage=3d && maxversions=100)",
bigtable.UnionPolicy(
bigtable.MaxAgePolicy(30*24*time.Hour),
bigtable.IntersectionPolicy(
bigtable.MaxAgePolicy(3*24*time.Hour),
bigtable.MaxVersionsPolicy(100))),
},
{
"maxage=30d || (maxage=3d && maxversions=100) || maxversions=7",
bigtable.UnionPolicy(
bigtable.UnionPolicy(
bigtable.MaxAgePolicy(30*24*time.Hour),
bigtable.IntersectionPolicy(
bigtable.MaxAgePolicy(3*24*time.Hour),
bigtable.MaxVersionsPolicy(100))),
bigtable.MaxVersionsPolicy(7)),
},
{
// && and || have same precedence, left associativity
"maxage=1h && maxage=2h || maxage=3h",
bigtable.UnionPolicy(
bigtable.IntersectionPolicy(
bigtable.MaxAgePolicy(1*time.Hour),
bigtable.MaxAgePolicy(2*time.Hour)),
bigtable.MaxAgePolicy(3*time.Hour)),
},
} {
got, err := parseGCPolicy(test.in)
if err != nil {
t.Errorf("%s: %v", test.in, err)
continue
}
if !cmp.Equal(got, test.want, cmp.AllowUnexported(bigtable.IntersectionPolicy(), bigtable.UnionPolicy())) {
t.Errorf("%s: got %+v, want %+v", test.in, got, test.want)
}
}
}

func TestParseGCPolicyErrors(t *testing.T) {
for _, in := range []string{
"",
"a",
"b = 1h",
"c = 1",
"maxage=1", // need duration
"maxversions=1h", // need int
"maxage",
"maxversions",
"never=never",
"maxversions=1 && never",
"(((maxage=1h))",
"((maxage=1h)))",
"maxage=30d || ((maxage=3d && maxversions=100)",
"maxversions = 3 and",
} {
_, err := parseGCPolicy(in)
if err == nil {
t.Errorf("%s: got nil, want error", in)
}
}
}

func TestTokenizeGCPolicy(t *testing.T) {
for _, test := range []struct {
in string
want []string
}{
{
"maxage=5d",
[]string{"maxage", "=", "5d"},
},
{
"maxage = 5d",
[]string{"maxage", "=", "5d"},
},
{
"maxage=5d or maxversions=5",
[]string{"maxage", "=", "5d", "or", "maxversions", "=", "5"},
},
{
"maxage=5d || (maxversions=5)",
[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
},
{
"maxage=5d||( maxversions=5 )",
[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
},
} {
got, err := tokenizeGCPolicy(test.in)
if err != nil {
t.Errorf("%s: %v", test.in, err)
continue
}
if diff := cmp.Diff(got, test.want); diff != "" {
t.Errorf("%s: %s", test.in, diff)
}
}
}

func TestTokenizeGCPolicyErrors(t *testing.T) {
for _, in := range []string{
"a &",
"a & b",
"a &x b",
"a |",
"a | b",
"a |& b",
"a % b",
} {
_, err := tokenizeGCPolicy(in)
if err == nil {
t.Errorf("%s: got nil, want error", in)
}
}
}

func tokenizeGCPolicy(s string) ([]string, error) {
var tokens []string
r := strings.NewReader(s)
for {
tok, err := getToken(r)
if err != nil {
return nil, err
}
if tok == "" {
break
}
tokens = append(tokens, tok)
}
return tokens, nil
}

+ 0  - 52  vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go

@@ -1,52 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
cbtemulator launches the in-memory Cloud Bigtable server on the given address.
*/
package main

import (
"flag"
"fmt"
"log"

"cloud.google.com/go/bigtable/bttest"
"google.golang.org/grpc"
)

var (
host = flag.String("host", "localhost", "the address to bind to on the local machine")
port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)

const (
maxMsgSize = 256 * 1024 * 1024 // 256 MiB
)

func main() {
grpc.EnableTracing = false
flag.Parse()
opts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxMsgSize),
grpc.MaxSendMsgSize(maxMsgSize),
}
srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port), opts...)
if err != nil {
log.Fatalf("failed to start emulator: %v", err)
}

fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
select {}
}
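
Once the emulator is up, the Go clients can be pointed at it via the BIGTABLE_EMULATOR_HOST environment variable, which the client library checks before dialing real Bigtable. A brief sketch (the address matches the default flags above; project and instance IDs are arbitrary under the emulator):

package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/bigtable"
)

func main() {
	// Route the client to a running cbtemulator instead of the real service.
	os.Setenv("BIGTABLE_EMULATOR_HOST", "localhost:9000")

	ctx := context.Background()
	admin, err := bigtable.NewAdminClient(ctx, "proj", "instance") // IDs are arbitrary here
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	if err := admin.CreateTable(ctx, "scratch"); err != nil {
		log.Fatal(err)
	}
	log.Println("created table against the emulator")
}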

+ 0  - 205  vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go

@@ -1,205 +0,0 @@
/*
Copyright 2015 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Loadtest does some load testing through the Go client library for Cloud Bigtable.
*/
package main

import (
"bytes"
"context"
"flag"
"fmt"
"log"
"math/rand"
"os"
"os/signal"
"sync"
"sync/atomic"
"time"

"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtconfig"
"cloud.google.com/go/bigtable/internal/stat"
"google.golang.org/api/option"
"google.golang.org/grpc"
)

var (
runFor = flag.Duration("run_for", 5*time.Second,
"how long to run the load test for; 0 to run forever until SIGTERM")
scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
csvOutput = flag.String("csv_output", "",
"output path for statistics in .csv format. If this file already exists it will be overwritten.")
poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
reqCount = flag.Int("req_count", 100, "number of concurrent requests")

config *cbtconfig.Config
client *bigtable.Client
adminClient *bigtable.AdminClient
)

func main() {
var err error
config, err = cbtconfig.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()

flag.Parse()
if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
log.Fatal(err)
}
if config.Creds != "" {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
}
if flag.NArg() != 0 {
flag.Usage()
os.Exit(1)
}

var options []option.ClientOption
if *poolSize > 1 {
options = append(options,
option.WithGRPCConnectionPool(*poolSize),

// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
}

var csvFile *os.File
if *csvOutput != "" {
csvFile, err = os.Create(*csvOutput)
if err != nil {
log.Fatalf("creating csv output file: %v", err)
}
defer csvFile.Close()
log.Printf("Writing statistics to %q ...", *csvOutput)
}

log.Printf("Dialing connections...")
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
defer client.Close()
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.AdminClient: %v", err)
}
defer adminClient.Close()

// Create a scratch table.
log.Printf("Setting up scratch table...")
tblConf := bigtable.TableConf{
TableID: *scratchTable,
Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)},
}
if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil {
log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
}
// Upon a successful run, delete the table. Don't bother checking for errors.
defer adminClient.DeleteTable(context.Background(), *scratchTable)

// Also delete the table on SIGTERM.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
s := <-c
log.Printf("Caught %v, cleaning scratch table.", s)
_ = adminClient.DeleteTable(context.Background(), *scratchTable)
os.Exit(1)
}()

log.Printf("Starting load test... (run for %v)", *runFor)
tbl := client.Open(*scratchTable)
sem := make(chan int, *reqCount) // limit the number of requests happening at once
var reads, writes stats
stopTime := time.Now().Add(*runFor)
var wg sync.WaitGroup
for time.Now().Before(stopTime) || *runFor == 0 {
sem <- 1
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sem }()

ok := true
opStart := time.Now()
var stats *stats
defer func() {
stats.Record(ok, time.Since(opStart))
}()

row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

switch rand.Intn(10) {
default:
// read
stats = &reads
_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
if err != nil {
log.Printf("Error doing read: %v", err)
ok = false
}
case 0, 1, 2, 3, 4:
// write
stats = &writes
mut := bigtable.NewMutation()
mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
if err := tbl.Apply(context.Background(), row, mut); err != nil {
log.Printf("Error doing mutation: %v", err)
ok = false
}
}
}()
}
wg.Wait()

readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)

if csvFile != nil {
stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
}
}

var allStats int64 // atomic

type stats struct {
mu sync.Mutex
tries, ok int
ds []time.Duration
}

func (s *stats) Record(ok bool, d time.Duration) {
s.mu.Lock()
s.tries++
if ok {
s.ok++
}
s.ds = append(s.ds, d)
s.mu.Unlock()

if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
log.Printf("Progress: done %d ops", n)
}
}
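
Both load tools here throttle concurrency the same way: a buffered channel acts as a counting semaphore (`sem <- 1` acquires a slot, `<-sem` releases it) while a WaitGroup waits for stragglers. The pattern in isolation, using struct{} in place of the int element above:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const limit = 3 // at most 3 ops in flight, like -req_count / -concurrent_scans
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		sem <- struct{}{} // blocks once `limit` workers are in flight
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }()          // release the slot
			time.Sleep(10 * time.Millisecond) // stand-in for a Bigtable op
			fmt.Println("op", i, "done")
		}(i)
	}
	wg.Wait()
}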

+ 0  - 155  vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go

@@ -1,155 +0,0 @@
/*
Copyright 2016 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Scantest does scan-related load testing against Cloud Bigtable. The logic here
mimics a similar test written using the Java client.
*/
package main

import (
"bytes"
"context"
"flag"
"fmt"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"text/tabwriter"
"time"

"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtconfig"
"cloud.google.com/go/bigtable/internal/stat"
)

var (
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")

config *cbtconfig.Config
client *bigtable.Client
)

func main() {
flag.Usage = func() {
fmt.Printf("Usage: scantest [options] <table_name>\n\n")
flag.PrintDefaults()
}

var err error
config, err = cbtconfig.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()

flag.Parse()
if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
log.Fatal(err)
}
if config.Creds != "" {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
}
if flag.NArg() != 1 {
flag.Usage()
os.Exit(1)
}

table := flag.Arg(0)

log.Printf("Dialing connections...")
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
defer client.Close()

log.Printf("Starting scan test... (run for %v)", *runFor)
tbl := client.Open(table)
sem := make(chan int, *numScans) // limit the number of requests happening at once
var scans stats

stopTime := time.Now().Add(*runFor)
var wg sync.WaitGroup
for time.Now().Before(stopTime) {
sem <- 1
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sem }()

ok := true
opStart := time.Now()
defer func() {
scans.Record(ok, time.Since(opStart))
}()

// Start at a random row key
key := fmt.Sprintf("user%d", rand.Int63())
limit := bigtable.LimitRows(int64(*rowLimit))
noop := func(bigtable.Row) bool { return true }
if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
log.Printf("Error during scan: %v", err)
ok = false
}
}()
}
wg.Wait()

agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
scans.ok, scans.tries, agg, throughputString(agg))
}

func throughputString(agg *stat.Aggregate) string {
var buf bytes.Buffer
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
rowLimitF := float64(*rowLimit)
fmt.Fprintf(
tw,
"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
rowLimitF/agg.Max.Seconds(),
rowLimitF/agg.Median.Seconds(),
rowLimitF/agg.Min.Seconds())
tw.Flush()
return buf.String()
}

var allStats int64 // atomic

type stats struct {
mu sync.Mutex
tries, ok int
ds []time.Duration
}

func (s *stats) Record(ok bool, d time.Duration) {
s.mu.Lock()
s.tries++
if ok {
s.ok++
}
s.ds = append(s.ds, d)
s.mu.Unlock()

if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
log.Printf("Progress: done %d ops", n)
}
}

Some files were not shown because too many files changed in this diff
