작성자 | SHA1 | 메시지 | 날짜 |
---|---|---|---|
Fionera | efbde3a3ff | refactor: Fix ZST Support | 3 년 전 |
Fionera | 63e84ec2f3 | refactor: Use a go.mod file and remove vendor folder | 3 년 전 |
@@ -6,10 +6,11 @@ import ( | |||
"os" | |||
"strings" | |||
"git.kiska.pw/kiska/transfer.sh/server" | |||
"github.com/fatih/color" | |||
"github.com/minio/cli" | |||
"google.golang.org/api/googleapi" | |||
"github.com/dutchcoders/transfer.sh/server" | |||
) | |||
var Version = "0.1" | |||
@@ -0,0 +1,27 @@ | |||
module github.com/dutchcoders/transfer.sh | |||
go 1.15 | |||
require ( | |||
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14 | |||
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2 | |||
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e | |||
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329 | |||
github.com/dutchcoders/transfer.sh-web v0.0.0-20210130180835-bc7d8b891391 | |||
github.com/elazarl/go-bindata-assetfs v1.0.1 | |||
github.com/fatih/color v1.10.0 | |||
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 | |||
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f | |||
github.com/gorilla/mux v1.8.0 | |||
github.com/gorilla/securecookie v1.1.1 // indirect | |||
github.com/klauspost/compress v1.11.7 | |||
github.com/minio/cli v1.22.0 | |||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect | |||
github.com/russross/blackfriday v1.6.0 | |||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e | |||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec // indirect | |||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad | |||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 | |||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c | |||
google.golang.org/api v0.39.0 | |||
) |
@@ -0,0 +1,496 @@ | |||
cloud.google.com/go v0.16.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | |||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | |||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= | |||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= | |||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= | |||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= | |||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= | |||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= | |||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= | |||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= | |||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= | |||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= | |||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= | |||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= | |||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= | |||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= | |||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= | |||
cloud.google.com/go v0.74.0 h1:kpgPA77kSSbjSs+fWHkPTxQ6J5Z2Qkruo5jfXEkHxNQ= | |||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= | |||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= | |||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= | |||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= | |||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= | |||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= | |||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= | |||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= | |||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= | |||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= | |||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= | |||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= | |||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= | |||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= | |||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= | |||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= | |||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= | |||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= | |||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= | |||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | |||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= | |||
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14 h1:3zOOc7WdrATDXof+h/rBgMsg0sAmZIEVHft1UbWHh94= | |||
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14/go.mod h1:+VFiaivV54Sa94ijzA/ZHQLoHuoUIS9hIqCK6f/76Zw= | |||
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2 h1:sIvihcW4qpN5qGSjmrsDDAbLpEq5tuHjJJfWY0Hud5Y= | |||
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2/go.mod h1:3YwJE8rEisS9eraee0hygGG4G3gqX8H8Nyu+nPTUnGU= | |||
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= | |||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= | |||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= | |||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= | |||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= | |||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= | |||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= | |||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= | |||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e h1:rcHHSQqzCgvlwP0I/fQ8rQMn/MpHE5gWSLdtpxtP6KQ= | |||
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e/go.mod h1:Byz7q8MSzSPkouskHJhX0er2mZY/m0Vj5bMeMCkkyY4= | |||
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329 h1:ERqCkG/uSyT74P1m/j9yR+so+7ynY4fbTvLY/Mr1ZMg= | |||
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329/go.mod h1:G5qOfE5bQZ5scycLpB7fYWgN4y3xdfXo+pYWM8z2epY= | |||
github.com/dutchcoders/transfer.sh-web v0.0.0-20210130180835-bc7d8b891391 h1:w/RxnpH7GsqihSbhXhDKYINManY/swjG9hV7Vqdr2MU= | |||
github.com/dutchcoders/transfer.sh-web v0.0.0-20210130180835-bc7d8b891391/go.mod h1:jTzXZabwihvQgvmySgD4f4GNszimkXK3o8x1ucH1z5Q= | |||
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= | |||
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= | |||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= | |||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= | |||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= | |||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= | |||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= | |||
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= | |||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= | |||
github.com/fsnotify/fsnotify v1.4.3-0.20170329110642-4da3e2cfbabc/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | |||
github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f h1:Sk0u0gIncQaQD23zAoAZs2DNi2u2l5UTLi4CmCBL5v8= | |||
github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= | |||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= | |||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= | |||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= | |||
github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= | |||
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 h1:G1U0vew/vA/1/hBmf1XNeyIzJJbPFVv+kb+HPl6rj6c= | |||
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8/go.mod h1:/Ya1YZsqLQp17bDgHdyE9/XBR1uIH1HKasTvLxcoM/A= | |||
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f h1:16RtHeWGkJMc80Etb8RPCcKevXGldr57+LOyZt8zOlg= | |||
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f/go.mod h1:ijRvpgDJDI262hYq/IQVYgf8hd8IHUs93Ol0kvMBAx4= | |||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= | |||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= | |||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= | |||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= | |||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= | |||
github.com/golang/lint v0.0.0-20170918230701-e5d664eb928e/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= | |||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | |||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | |||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= | |||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | |||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | |||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | |||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= | |||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | |||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | |||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | |||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= | |||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= | |||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= | |||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= | |||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= | |||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= | |||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= | |||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= | |||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= | |||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= | |||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= | |||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= | |||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= | |||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | |||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | |||
github.com/google/go-cmp v0.1.1-0.20171103154506-982329095285/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= | |||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= | |||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | |||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | |||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= | |||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= | |||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= | |||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= | |||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= | |||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= | |||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= | |||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= | |||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= | |||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= | |||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= | |||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= | |||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= | |||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | |||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= | |||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= | |||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= | |||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= | |||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= | |||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= | |||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= | |||
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= | |||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= | |||
github.com/gregjones/httpcache v0.0.0-20170920190843-316c5e0ff04e/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= | |||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | |||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | |||
github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= | |||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= | |||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= | |||
github.com/inconshreveable/log15 v0.0.0-20170622235902-74a0988b5f80/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= | |||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= | |||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= | |||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= | |||
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= | |||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= | |||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= | |||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= | |||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= | |||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | |||
github.com/magiconair/properties v1.7.4-0.20170902060319-8d7837e64d3c/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= | |||
github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= | |||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= | |||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= | |||
github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= | |||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= | |||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= | |||
github.com/minio/cli v1.22.0 h1:VTQm7lmXm3quxO917X3p+el1l0Ca5X3S4PM2ruUYO68= | |||
github.com/minio/cli v1.22.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= | |||
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | |||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= | |||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= | |||
github.com/pelletier/go-toml v1.0.1-0.20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= | |||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= | |||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= | |||
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= | |||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= | |||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= | |||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= | |||
github.com/spf13/afero v0.0.0-20170901052352-ee1bd8ee15a1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= | |||
github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= | |||
github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= | |||
github.com/spf13/pflag v1.0.1-0.20170901120850-7aff26db30c1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | |||
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= | |||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | |||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= | |||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= | |||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks= | |||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= | |||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= | |||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= | |||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= | |||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= | |||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= | |||
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= | |||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= | |||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | |||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | |||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= | |||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= | |||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | |||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | |||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= | |||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= | |||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= | |||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= | |||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= | |||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= | |||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= | |||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= | |||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= | |||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= | |||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | |||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= | |||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | |||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= | |||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= | |||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= | |||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= | |||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= | |||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= | |||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= | |||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= | |||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= | |||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= | |||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= | |||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= | |||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= | |||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= | |||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | |||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | |||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | |||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | |||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= | |||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= | |||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= | |||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= | |||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | |||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | |||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | |||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= | |||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | |||
golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | |||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | |||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | |||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | |||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | |||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | |||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= | |||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= | |||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= | |||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g= | |||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= | |||
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad h1:MCsdmFSdEd4UEa5TKS5JztCRHK/WtvNei1edOj5RSRo= | |||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= | |||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | |||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= | |||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= | |||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |||
golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | |||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | |||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | |||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | |||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | |||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | |||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= | |||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | |||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | |||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | |||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= | |||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= | |||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= | |||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= | |||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= | |||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= | |||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= | |||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= | |||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= | |||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | |||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | |||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | |||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | |||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | |||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | |||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= | |||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= | |||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
google.golang.org/api v0.0.0-20170921000349-586095a6e407/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= | |||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= | |||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= | |||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= | |||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= | |||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= | |||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= | |||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= | |||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= | |||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= | |||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= | |||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= | |||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= | |||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= | |||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= | |||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= | |||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= | |||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= | |||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= | |||
google.golang.org/api v0.39.0 h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM= | |||
google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= | |||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= | |||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= | |||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= | |||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= | |||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= | |||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= | |||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= | |||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= | |||
google.golang.org/genproto v0.0.0-20170918111702-1e559d0a00ee/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= | |||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= | |||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= | |||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= | |||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= | |||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= | |||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= | |||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= | |||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= | |||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= | |||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= | |||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= | |||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= | |||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= | |||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= | |||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= | |||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | |||
google.golang.org/grpc v1.2.1-0.20170921194603-d4b75ebd4f9f/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= | |||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= | |||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= | |||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= | |||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= | |||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= | |||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= | |||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= | |||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= | |||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= | |||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= | |||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= | |||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= | |||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= | |||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= | |||
google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= | |||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= | |||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= | |||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= | |||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= | |||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= | |||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= | |||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= | |||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= | |||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= | |||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= | |||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= | |||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= | |||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= | |||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= | |||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= | |||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= | |||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= | |||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= | |||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= | |||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= |
@@ -1,8 +1,14 @@ | |||
package main | |||
import "git.kiska.pw/kiska/transfer.sh/cmd" | |||
import ( | |||
"log" | |||
"github.com/dutchcoders/transfer.sh/cmd" | |||
) | |||
func main() { | |||
log.SetFlags(log.Lshortfile | log.LstdFlags) | |||
app := cmd.New() | |||
app.RunAndExitOnError() | |||
} |
@@ -32,6 +32,7 @@ import ( | |||
"archive/zip" | |||
"bytes" | |||
"compress/gzip" | |||
"encoding/base64" | |||
"encoding/json" | |||
"errors" | |||
"fmt" | |||
@@ -42,6 +43,7 @@ import ( | |||
"log" | |||
"math/rand" | |||
"mime" | |||
"net" | |||
"net/http" | |||
"net/url" | |||
"os" | |||
@@ -53,14 +55,11 @@ import ( | |||
text_template "text/template" | |||
"time" | |||
"net" | |||
web "github.com/dutchcoders/transfer.sh-web" | |||
"github.com/gorilla/mux" | |||
"github.com/russross/blackfriday" | |||
"encoding/base64" | |||
qrcode "github.com/skip2/go-qrcode" | |||
"github.com/skip2/go-qrcode" | |||
"github.com/klauspost/compress/zstd" | |||
) | |||
@@ -559,9 +558,9 @@ func (s *Server) CheckMetadata(token, filename string) error { | |||
r, _, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename)) | |||
//if s.storage.IsNotExist(err) { | |||
// return nil | |||
// return nil | |||
//} else if err != nil { | |||
if err != nil { | |||
if err != nil { | |||
return err | |||
} | |||
@@ -860,6 +859,62 @@ func (s *Server) headHandler(w http.ResponseWriter, r *http.Request) { | |||
w.Header().Set("Connection", "close") | |||
} | |||
func (s *Server) getHandlerZst(w http.ResponseWriter, r *http.Request) { | |||
vars := mux.Vars(r) | |||
action := vars["action"] | |||
token := vars["token"] | |||
filename := vars["filename"] | |||
if err := s.CheckMetadata(token, filename+".zst"); err != nil { | |||
log.Printf("Error metadata: %v", err) | |||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) | |||
return | |||
} | |||
reader, contentType, _, err := s.storage.Get(token, filename+".zst") | |||
if s.storage.IsNotExist(err) { | |||
log.Println(err) | |||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) | |||
return | |||
} | |||
if err != nil { | |||
log.Printf("Failed to get from storage: %s", err.Error()) | |||
http.Error(w, "Could not retrieve file.", 500) | |||
return | |||
} | |||
defer reader.Close() | |||
contentType = mime.TypeByExtension(filepath.Ext(filename)) | |||
d, err := zstd.NewReader(reader) | |||
if err != nil { | |||
log.Printf("Failed to create zstd reader: %s", err.Error()) | |||
http.Error(w, "Could not retrieve file.", 500) | |||
return | |||
} | |||
defer d.Close() | |||
var disposition string | |||
if action == "inline" { | |||
disposition = "inline" | |||
} else { | |||
disposition = "attachment" | |||
} | |||
w.Header().Set("Content-Type", contentType) | |||
w.Header().Set("Transfer-Encoding", "chunked") | |||
w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", disposition, filename)) | |||
w.Header().Set("Connection", "keep-alive") | |||
if w.Header().Get("Range") != "" { | |||
log.Printf("Range request with decompression") | |||
http.Error(w, "Range requests with decompression are not supported", 400) | |||
return | |||
} | |||
_, _ = io.Copy(w, d) | |||
} | |||
func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) { | |||
vars := mux.Vars(r) | |||
@@ -868,45 +923,23 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) { | |||
filename := vars["filename"] | |||
if err := s.CheckMetadata(token, filename); err != nil { | |||
if err2 := s.CheckMetadata(token, filename + ".zst"); err2 != nil { | |||
log.Printf("Error metadata: %s and %s", err.Error(), err2.Error()) | |||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) | |||
return | |||
} | |||
log.Printf("Error metadata: %v; trying with .zst", err) | |||
s.getHandlerZst(w, r) | |||
return | |||
} | |||
reader, contentType, contentLength, err := s.storage.Get(token, filename) | |||
isZstd := false | |||
var d zstd.Decoder | |||
_ = d // Only used when isZstd is true; silence compiler | |||
if s.storage.IsNotExist(err) { | |||
reader, _, _, err := s.storage.Get(token, filename + ".zst") | |||
if s.storage.IsNotExist(err) { | |||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) | |||
return | |||
} else if err != nil { | |||
log.Printf("Failed to get .zst from storage: %s", err.Error()) | |||
http.Error(w, "Could not retrieve file.", 500) | |||
return | |||
} | |||
defer reader.Close() | |||
d, err := zstd.NewReader(reader) | |||
if err != nil { | |||
log.Printf("Failed to create zstd reader: %s", err.Error()) | |||
http.Error(w, "Could not retrieve file.", 500) | |||
return | |||
} | |||
defer d.Close() | |||
isZstd = true | |||
contentType = mime.TypeByExtension(filepath.Ext(filename)) | |||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) | |||
return | |||
} else if err != nil { | |||
log.Printf("Failed to get from storage: %s", err.Error()) | |||
log.Printf("%s", err.Error()) | |||
http.Error(w, "Could not retrieve file.", 500) | |||
return | |||
} else { | |||
defer reader.Close() | |||
} | |||
defer reader.Close() | |||
var disposition string | |||
if action == "inline" { | |||
@@ -916,47 +949,17 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) { | |||
} | |||
w.Header().Set("Content-Type", contentType) | |||
if !isZstd { | |||
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10)) | |||
} else { | |||
w.Header().Set("Transfer-Encoding", "chunked") | |||
} | |||
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10)) | |||
w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", disposition, filename)) | |||
w.Header().Set("Connection", "keep-alive") | |||
if w.Header().Get("Range") == "" { | |||
if !isZstd { | |||
if _, err = io.Copy(w, reader); err != nil { | |||
log.Printf("%s", err.Error()) | |||
http.Error(w, "Error occurred copying to output stream", 500) | |||
return | |||
} | |||
} else { | |||
buffer := make([]byte, 1024) | |||
for { | |||
log.Printf("Reading from decoded stream") | |||
n, err := d.Read(buffer) | |||
log.Printf("Read from decoded stream") | |||
if err != nil && err != io.EOF { | |||
log.Printf("Failed to read from file: %s", err.Error()) | |||
panic("Error reading data") | |||
} | |||
log.Printf("Writing to HTTP") | |||
w.Write(buffer[0:n]) | |||
log.Printf("Trying to flush") | |||
if f, ok := w.(http.Flusher); ok { | |||
f.Flush() | |||
} | |||
if err == io.EOF { | |||
break | |||
} | |||
} | |||
if _, err = io.Copy(w, reader); err != nil { | |||
log.Printf("%s", err.Error()) | |||
http.Error(w, "Error occurred copying to output stream", 500) | |||
return | |||
} | |||
return | |||
} else if isZstd { | |||
log.Printf("Range request with decompression") | |||
http.Error(w, "Range requests with decompression are not supported", 400) | |||
return | |||
} | |||
@@ -1,15 +0,0 @@ | |||
# This is the official list of cloud authors for copyright purposes. | |||
# This file is distinct from the CONTRIBUTORS files. | |||
# See the latter for an explanation. | |||
# Names should be added to this file as: | |||
# Name or Organization <email address> | |||
# The email address is not required for organizations. | |||
Filippo Valsorda <hi@filippo.io> | |||
Google Inc. | |||
Ingo Oeser <nightlyone@googlemail.com> | |||
Palm Stone Games, Inc. | |||
Paweł Knap <pawelknap88@gmail.com> | |||
Péter Szilágyi <peterke@gmail.com> | |||
Tyler Treat <ttreat31@gmail.com> |
@@ -1,44 +0,0 @@ | |||
# Contributor Code of Conduct | |||
As contributors and maintainers of this project, | |||
and in the interest of fostering an open and welcoming community, | |||
we pledge to respect all people who contribute through reporting issues, | |||
posting feature requests, updating documentation, | |||
submitting pull requests or patches, and other activities. | |||
We are committed to making participation in this project | |||
a harassment-free experience for everyone, | |||
regardless of level of experience, gender, gender identity and expression, | |||
sexual orientation, disability, personal appearance, | |||
body size, race, ethnicity, age, religion, or nationality. | |||
Examples of unacceptable behavior by participants include: | |||
* The use of sexualized language or imagery | |||
* Personal attacks | |||
* Trolling or insulting/derogatory comments | |||
* Public or private harassment | |||
* Publishing others' private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct. | |||
Project maintainers have the right and responsibility to remove, edit, or reject | |||
comments, commits, code, wiki edits, issues, and other contributions | |||
that are not aligned to this Code of Conduct. | |||
By adopting this Code of Conduct, | |||
project maintainers commit themselves to fairly and consistently | |||
applying these principles to every aspect of managing this project. | |||
Project maintainers who do not follow or enforce the Code of Conduct | |||
may be permanently removed from the project team. | |||
This code of conduct applies both within project spaces and in public spaces | |||
when an individual is representing the project or its community. | |||
Instances of abusive, harassing, or otherwise unacceptable behavior | |||
may be reported by opening an issue | |||
or contacting one or more of the project maintainers. | |||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, | |||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) | |||
@@ -1,234 +0,0 @@ | |||
# Contributing | |||
1. Sign one of the contributor license agreements below. | |||
1. `go get golang.org/x/review/git-codereview` to install the code reviewing | |||
tool. | |||
1. You will need to ensure that your `GOBIN` directory (by default | |||
`$GOPATH/bin`) is in your `PATH` so that git can find the command. | |||
1. If you would like, you may want to set up aliases for git-codereview, | |||
such that `git codereview change` becomes `git change`. See the | |||
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. | |||
1. Should you run into issues with the git-codereview tool, please note | |||
that all error messages will assume that you have set up these aliases. | |||
1. Get the cloud package by running `go get -d cloud.google.com/go`. | |||
1. If you have already checked out the source, make sure that the remote | |||
git origin is https://code.googlesource.com/gocloud: | |||
``` | |||
git remote set-url origin https://code.googlesource.com/gocloud | |||
``` | |||
1. Make sure your auth is configured correctly by visiting | |||
https://code.googlesource.com, clicking "Generate Password", and following the | |||
directions. | |||
1. Make changes and create a change by running `git codereview change <name>`, | |||
provide a commit message, and use `git codereview mail` to create a Gerrit CL. | |||
1. Keep amending to the change with `git codereview change` and mail as you | |||
receive feedback. Each new mailed amendment will create a new patch set for | |||
your change in Gerrit. | |||
## Integration Tests | |||
In addition to the unit tests, you may run the integration test suite. These | |||
directions describe setting up your environment to run integration tests for | |||
_all_ packages: note that many of these instructions may be redundant if you | |||
intend only to run integration tests on a single package. | |||
#### GCP Setup | |||
To run the integration tests, creation and configuration of two projects in | |||
the Google Developers Console is required: one specifically for Firestore | |||
integration tests, and another for all other integration tests. We'll refer to | |||
these projects as "general project" and "Firestore project". | |||
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) | |||
for each project. Ensure the project-level **Owner** | |||
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) is added to | |||
each service account. During the creation of the service account, you should | |||
download the JSON credential file for use later. | |||
Next, ensure the following APIs are enabled in the general project: | |||
- BigQuery API | |||
- BigQuery Data Transfer API | |||
- Cloud Dataproc API | |||
- Cloud Dataproc Control API Private | |||
- Cloud Datastore API | |||
- Cloud Firestore API | |||
- Cloud Key Management Service (KMS) API | |||
- Cloud Natural Language API | |||
- Cloud OS Login API | |||
- Cloud Pub/Sub API | |||
- Cloud Resource Manager API | |||
- Cloud Spanner API | |||
- Cloud Speech API | |||
- Cloud Translation API | |||
- Cloud Video Intelligence API | |||
- Cloud Vision API | |||
- Compute Engine API | |||
- Compute Engine Instance Group Manager API | |||
- Container Registry API | |||
- Firebase Rules API | |||
- Google Cloud APIs | |||
- Google Cloud Deployment Manager V2 API | |||
- Google Cloud SQL | |||
- Google Cloud Storage | |||
- Google Cloud Storage JSON API | |||
- Google Compute Engine Instance Group Updater API | |||
- Google Compute Engine Instance Groups API | |||
- Kubernetes Engine API | |||
- Stackdriver Error Reporting API | |||
Next, create a Datastore database in the general project, and a Firestore | |||
database in the Firestore project. | |||
Finally, in the general project, create an API key for the translate API: | |||
- Go to GCP Developer Console. | |||
- Navigate to APIs & Services > Credentials. | |||
- Click Create Credentials > API Key. | |||
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below. | |||
#### Local Setup | |||
Once the two projects are created and configured, set the following environment | |||
variables: | |||
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g. | |||
bamboo-shift-455) for the general project. | |||
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general | |||
project's service account. | |||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID | |||
(e.g. doorway-cliff-677) for the Firestore project. | |||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the | |||
Firestore project's service account. | |||
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, | |||
in the form | |||
"projects/P/locations/L/keyRings/R". The creation of this is described below. | |||
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API. | |||
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. | |||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to | |||
create some resources used in integration tests. | |||
From the project's root directory: | |||
``` sh | |||
# Sets the default project in your env. | |||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID | |||
# Authenticates the gcloud tool with your account. | |||
$ gcloud auth login | |||
# Create the indexes used in the datastore integration tests. | |||
$ gcloud datastore indexes create datastore/testdata/index.yaml | |||
# Creates a Google Cloud storage bucket with the same name as your test project, | |||
# and with the Stackdriver Logging service account as owner, for the sink | |||
# integration tests in logging. | |||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID | |||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID | |||
# Creates a PubSub topic for integration tests of storage notifications. | |||
$ gcloud beta pubsub topics create go-storage-notification-test | |||
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user | |||
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com" | |||
# as a publisher to that topic. | |||
# Creates a Spanner instance for the spanner integration tests. | |||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' | |||
# NOTE: Spanner instances are priced by the node-hour, so you may want to | |||
# delete the instance after testing with 'gcloud beta spanner instances delete'. | |||
$ export MY_KEYRING=some-keyring-name | |||
$ export MY_LOCATION=global | |||
# Creates a KMS keyring, in the same location as the default location for your | |||
# project's buckets. | |||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION | |||
# Creates two keys in the keyring, named key1 and key2. | |||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption | |||
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption | |||
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. | |||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING | |||
# Authorizes Google Cloud Storage to encrypt and decrypt using key1. | |||
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 | |||
``` | |||
#### Running | |||
Once you've done the necessary setup, you can run the integration tests by | |||
running: | |||
``` sh | |||
$ go test -v cloud.google.com/go/... | |||
``` | |||
#### Replay | |||
Some packages can record the RPCs during integration tests to a file for | |||
subsequent replay. To record, pass the `-record` flag to `go test`. The | |||
recording will be saved to the _package_`.replay` file. To replay integration | |||
tests from a saved recording, the replay file must be present, the `-short` | |||
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` | |||
environment variable must have a non-empty value. | |||
## Contributor License Agreements | |||
Before we can accept your pull requests you'll need to sign a Contributor | |||
License Agreement (CLA): | |||
- **If you are an individual writing original source code** and **you own the | |||
intellectual property**, then you'll need to sign an [individual CLA][indvcla]. | |||
- **If you work for a company that wants to allow you to contribute your | |||
work**, then you'll need to sign a [corporate CLA][corpcla]. | |||
You can sign these electronically (just scroll to the bottom). After that, | |||
we'll be able to accept your pull requests. | |||
## Contributor Code of Conduct | |||
As contributors and maintainers of this project, | |||
and in the interest of fostering an open and welcoming community, | |||
we pledge to respect all people who contribute through reporting issues, | |||
posting feature requests, updating documentation, | |||
submitting pull requests or patches, and other activities. | |||
We are committed to making participation in this project | |||
a harassment-free experience for everyone, | |||
regardless of level of experience, gender, gender identity and expression, | |||
sexual orientation, disability, personal appearance, | |||
body size, race, ethnicity, age, religion, or nationality. | |||
Examples of unacceptable behavior by participants include: | |||
* The use of sexualized language or imagery | |||
* Personal attacks | |||
* Trolling or insulting/derogatory comments | |||
* Public or private harassment | |||
* Publishing other's private information, | |||
such as physical or electronic | |||
addresses, without explicit permission | |||
* Other unethical or unprofessional conduct. | |||
Project maintainers have the right and responsibility to remove, edit, or reject | |||
comments, commits, code, wiki edits, issues, and other contributions | |||
that are not aligned to this Code of Conduct. | |||
By adopting this Code of Conduct, | |||
project maintainers commit themselves to fairly and consistently | |||
applying these principles to every aspect of managing this project. | |||
Project maintainers who do not follow or enforce the Code of Conduct | |||
may be permanently removed from the project team. | |||
This code of conduct applies both within project spaces and in public spaces | |||
when an individual is representing the project or its community. | |||
Instances of abusive, harassing, or otherwise unacceptable behavior | |||
may be reported by opening an issue | |||
or contacting one or more of the project maintainers. | |||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, | |||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) | |||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ | |||
[indvcla]: https://developers.google.com/open-source/cla/individual | |||
[corpcla]: https://developers.google.com/open-source/cla/corporate |
@@ -1,40 +0,0 @@ | |||
# People who have agreed to one of the CLAs and can contribute patches. | |||
# The AUTHORS file lists the copyright holders; this file | |||
# lists people. For example, Google employees are listed here | |||
# but not in AUTHORS, because Google holds the copyright. | |||
# | |||
# https://developers.google.com/open-source/cla/individual | |||
# https://developers.google.com/open-source/cla/corporate | |||
# | |||
# Names should be added to this file as: | |||
# Name <email address> | |||
# Keep the list alphabetically sorted. | |||
Alexis Hunt <lexer@google.com> | |||
Andreas Litt <andreas.litt@gmail.com> | |||
Andrew Gerrand <adg@golang.org> | |||
Brad Fitzpatrick <bradfitz@golang.org> | |||
Burcu Dogan <jbd@google.com> | |||
Dave Day <djd@golang.org> | |||
David Sansome <me@davidsansome.com> | |||
David Symonds <dsymonds@golang.org> | |||
Filippo Valsorda <hi@filippo.io> | |||
Glenn Lewis <gmlewis@google.com> | |||
Ingo Oeser <nightlyone@googlemail.com> | |||
James Hall <james.hall@shopify.com> | |||
Johan Euphrosine <proppy@google.com> | |||
Jonathan Amsterdam <jba@google.com> | |||
Kunpei Sakai <namusyaka@gmail.com> | |||
Luna Duclos <luna.duclos@palmstonegames.com> | |||
Magnus Hiie <magnus.hiie@gmail.com> | |||
Mario Castro <mariocaster@gmail.com> | |||
Michael McGreevy <mcgreevy@golang.org> | |||
Omar Jarjur <ojarjur@google.com> | |||
Paweł Knap <pawelknap88@gmail.com> | |||
Péter Szilágyi <peterke@gmail.com> | |||
Sarah Adams <shadams@google.com> | |||
Thanatat Tamtan <acoshift@gmail.com> | |||
Toby Burress <kurin@google.com> | |||
Tuo Shan <shantuo@google.com> | |||
Tyler Treat <ttreat31@gmail.com> |
@@ -1,202 +0,0 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -1,505 +0,0 @@ | |||
# Google Cloud Client Libraries for Go | |||
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) | |||
Go packages for [Google Cloud Platform](https://cloud.google.com) services. | |||
``` go | |||
import "cloud.google.com/go" | |||
``` | |||
To install the packages on your system, *do not clone the repo*. Instead use | |||
``` | |||
$ go get -u cloud.google.com/go/... | |||
``` | |||
**NOTE:** Some of these packages are under development, and may occasionally | |||
make backwards-incompatible changes. | |||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). | |||
* [News](#news) | |||
* [Supported APIs](#supported-apis) | |||
* [Go Versions Supported](#go-versions-supported) | |||
* [Authorization](#authorization) | |||
* [Cloud Datastore](#cloud-datastore-) | |||
* [Cloud Storage](#cloud-storage-) | |||
* [Cloud Pub/Sub](#cloud-pub-sub-) | |||
* [BigQuery](#cloud-bigquery-) | |||
* [Stackdriver Logging](#stackdriver-logging-) | |||
* [Cloud Spanner](#cloud-spanner-) | |||
## News | |||
_7 August 2018_ | |||
As of November 1, the code in the repo will no longer support Go versions 1.8 | |||
and earlier. No one other than AppEngine users should be on those old versions, | |||
and AppEngine | |||
[Standard](https://groups.google.com/forum/#!topic/google-appengine-go/e7oPNomd7ak) | |||
and | |||
[Flex](https://groups.google.com/forum/#!topic/google-appengine-go/wHsYtxvEbXI) | |||
will stop supporting new deployments with those versions on that date. | |||
Changes have been moved to [CHANGES](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CHANGES.md). | |||
## Supported APIs | |||
Google API | Status | Package | |||
[Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref] | |||
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] | |||
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] | |||
[Cloudtasks][cloud-tasks] | beta | [`cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref] | |||
[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref] | |||
[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref] | |||
[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref] | |||
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] | |||
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref] | |||
[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref] | |||
[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref] | |||
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] | |||
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] | |||
[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref] | |||
[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref] | |||
[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref] | |||
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] | |||
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] | |||
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] | |||
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] | |||
[Memorystore][cloud-memorystore] | stable | [`cloud.google.com/go/redis/apiv1beta1`][cloud-memorystore-ref] | |||
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] | |||
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] | |||
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] | |||
[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref] | |||
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref] | |||
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] | |||
[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] | |||
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] | |||
> **Alpha status**: the API is still being actively developed. As a | |||
> result, it might change in backward-incompatible ways and is not recommended | |||
> for production use. | |||
> | |||
> **Beta status**: the API is largely complete, but still has outstanding | |||
> features and bugs to be addressed. There may be minor backwards-incompatible | |||
> changes where necessary. | |||
> | |||
> **Stable status**: the API is mature and ready for production use. We will | |||
> continue addressing bugs and feature requests. | |||
Documentation and examples are available at | |||
https://godoc.org/cloud.google.com/go | |||
Visit or join the | |||
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) | |||
for updates on these packages. | |||
## Go Versions Supported | |||
We support the two most recent major versions of Go. If Google App Engine uses | |||
an older version, we support that as well. | |||
## Authorization | |||
By default, each API will use [Google Application Default Credentials][default-creds] | |||
for authorization credentials used in calling the API endpoints. This will allow your | |||
application to run in many environments without requiring explicit configuration. | |||
[snip]:# (auth) | |||
```go | |||
client, err := storage.NewClient(ctx) | |||
``` | |||
To authorize using a | |||
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), | |||
pass | |||
[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile) | |||
to the `NewClient` function of the desired package. For example: | |||
[snip]:# (auth-JSON) | |||
```go | |||
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) | |||
``` | |||
You can exert more control over authorization by using the | |||
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to | |||
create an `oauth2.TokenSource`. Then pass | |||
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) | |||
to the `NewClient` function: | |||
[snip]:# (auth-ts) | |||
```go | |||
tokenSource := ... | |||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) | |||
``` | |||
## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) | |||
- [About Cloud Datastore][cloud-datastore] | |||
- [Activating the API for your project][cloud-datastore-activation] | |||
- [API documentation][cloud-datastore-docs] | |||
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) | |||
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) | |||
### Example Usage | |||
First create a `datastore.Client` to use throughout your application: | |||
[snip]:# (datastore-1) | |||
```go | |||
client, err := datastore.NewClient(ctx, "my-project-id") | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
Then use that client to interact with the API: | |||
[snip]:# (datastore-2) | |||
```go | |||
type Post struct { | |||
Title string | |||
Body string `datastore:",noindex"` | |||
PublishedAt time.Time | |||
} | |||
keys := []*datastore.Key{ | |||
datastore.NameKey("Post", "post1", nil), | |||
datastore.NameKey("Post", "post2", nil), | |||
} | |||
posts := []*Post{ | |||
{Title: "Post 1", Body: "...", PublishedAt: time.Now()}, | |||
{Title: "Post 2", Body: "...", PublishedAt: time.Now()}, | |||
} | |||
if _, err := client.PutMulti(ctx, keys, posts); err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) | |||
- [About Cloud Storage][cloud-storage] | |||
- [API documentation][cloud-storage-docs] | |||
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) | |||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) | |||
### Example Usage | |||
First create a `storage.Client` to use throughout your application: | |||
[snip]:# (storage-1) | |||
```go | |||
client, err := storage.NewClient(ctx) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
[snip]:# (storage-2) | |||
```go | |||
// Read object1 from the bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
defer rc.Close() | |||
body, err := ioutil.ReadAll(rc) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) | |||
- [About Cloud Pub/Sub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs] | |||
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) | |||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) | |||
### Example Usage | |||
First create a `pubsub.Client` to use throughout your application: | |||
[snip]:# (pubsub-1) | |||
```go | |||
client, err := pubsub.NewClient(ctx, "project-id") | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
Then use the client to publish and subscribe: | |||
[snip]:# (pubsub-2) | |||
```go | |||
// Publish "hello world" on topic1. | |||
topic := client.Topic("topic1") | |||
res := topic.Publish(ctx, &pubsub.Message{ | |||
Data: []byte("hello world"), | |||
}) | |||
// The publish happens asynchronously. | |||
// Later, you can get the result from res: | |||
... | |||
msgID, err := res.Get(ctx) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
// Use a callback to receive messages via subscription1. | |||
sub := client.Subscription("subscription1") | |||
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { | |||
fmt.Println(m.Data) | |||
m.Ack() // Acknowledge that we've consumed the message. | |||
}) | |||
if err != nil { | |||
log.Println(err) | |||
} | |||
``` | |||
## BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) | |||
- [About BigQuery][cloud-bigquery] | |||
- [API documentation][cloud-bigquery-docs] | |||
- [Go client documentation][cloud-bigquery-ref] | |||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) | |||
### Example Usage | |||
First create a `bigquery.Client` to use throughout your application: | |||
[snip]:# (bq-1) | |||
```go | |||
c, err := bigquery.NewClient(ctx, "my-project-ID") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
``` | |||
Then use that client to interact with the API: | |||
[snip]:# (bq-2) | |||
```go | |||
// Construct a query. | |||
q := c.Query(` | |||
SELECT year, SUM(number) | |||
FROM [bigquery-public-data:usa_names.usa_1910_2013] | |||
WHERE name = "William" | |||
GROUP BY year | |||
ORDER BY year | |||
`) | |||
// Execute the query. | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
// Iterate through the results. | |||
for { | |||
var values []bigquery.Value | |||
err := it.Next(&values) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(values) | |||
} | |||
``` | |||
## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) | |||
- [About Stackdriver Logging][cloud-logging] | |||
- [API documentation][cloud-logging-docs] | |||
- [Go client documentation][cloud-logging-ref] | |||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) | |||
### Example Usage | |||
First create a `logging.Client` to use throughout your application: | |||
[snip]:# (logging-1) | |||
```go | |||
ctx := context.Background() | |||
client, err := logging.NewClient(ctx, "my-project") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
``` | |||
Usually, you'll want to add log entries to a buffer to be periodically flushed | |||
(automatically and asynchronously) to the Stackdriver Logging service. | |||
[snip]:# (logging-2) | |||
```go | |||
logger := client.Logger("my-log") | |||
logger.Log(logging.Entry{Payload: "something happened!"}) | |||
``` | |||
Close your client before your program exits, to flush any buffered log entries. | |||
[snip]:# (logging-3) | |||
```go | |||
err = client.Close() | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
``` | |||
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) | |||
- [About Cloud Spanner][cloud-spanner] | |||
- [API documentation][cloud-spanner-docs] | |||
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) | |||
### Example Usage | |||
First create a `spanner.Client` to use throughout your application: | |||
[snip]:# (spanner-1) | |||
```go | |||
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
[snip]:# (spanner-2) | |||
```go | |||
// Simple Reads And Writes | |||
_, err = client.Apply(ctx, []*spanner.Mutation{ | |||
spanner.Insert("Users", | |||
[]string{"name", "email"}, | |||
[]interface{}{"alice", "a@example.com"})}) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
row, err := client.Single().ReadRow(ctx, "Users", | |||
spanner.Key{"alice"}, []string{"email"}) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
``` | |||
## Contributing | |||
Contributions are welcome. Please, see the | |||
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) | |||
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.
Please note that this project is released with a Contributor Code of Conduct. | |||
By participating in this project you agree to abide by its terms. | |||
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) | |||
for more information. | |||
[cloud-datastore]: https://cloud.google.com/datastore/ | |||
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore | |||
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs | |||
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate | |||
[cloud-firestore]: https://cloud.google.com/firestore/ | |||
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore | |||
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs | |||
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate | |||
[cloud-pubsub]: https://cloud.google.com/pubsub/ | |||
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub | |||
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs | |||
[cloud-storage]: https://cloud.google.com/storage/ | |||
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage | |||
[cloud-storage-docs]: https://cloud.google.com/storage/docs | |||
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets | |||
[cloud-bigtable]: https://cloud.google.com/bigtable/ | |||
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable | |||
[cloud-bigquery]: https://cloud.google.com/bigquery/ | |||
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs | |||
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery | |||
[cloud-logging]: https://cloud.google.com/logging/ | |||
[cloud-logging-docs]: https://cloud.google.com/logging/docs | |||
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging | |||
[cloud-monitoring]: https://cloud.google.com/monitoring/ | |||
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3 | |||
[cloud-vision]: https://cloud.google.com/vision | |||
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1 | |||
[cloud-language]: https://cloud.google.com/natural-language | |||
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 | |||
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest | |||
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest | |||
[cloud-speech]: https://cloud.google.com/speech | |||
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 | |||
[cloud-spanner]: https://cloud.google.com/spanner/ | |||
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner | |||
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs | |||
[cloud-translation]: https://cloud.google.com/translation | |||
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation | |||
[cloud-video]: https://cloud.google.com/video-intelligence/ | |||
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1 | |||
[cloud-errors]: https://cloud.google.com/error-reporting/ | |||
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting | |||
[cloud-container]: https://cloud.google.com/containers/ | |||
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1 | |||
[cloud-debugger]: https://cloud.google.com/debugger/ | |||
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2 | |||
[cloud-dlp]: https://cloud.google.com/dlp/ | |||
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1 | |||
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials | |||
[cloud-dataproc]: https://cloud.google.com/dataproc/ | |||
[cloud-dataproc-docs]: https://cloud.google.com/dataproc/docs | |||
[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1 | |||
[cloud-iam]: https://cloud.google.com/iam/ | |||
[cloud-iam-docs]: https://cloud.google.com/iam/docs | |||
[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam | |||
[cloud-kms]: https://cloud.google.com/kms/ | |||
[cloud-kms-docs]: https://cloud.google.com/kms/docs | |||
[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1 | |||
[cloud-natural-language]: https://cloud.google.com/natural-language/ | |||
[cloud-natural-language-docs]: https://cloud.google.com/natural-language/docs | |||
[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 | |||
[cloud-memorystore]: https://cloud.google.com/memorystore/ | |||
[cloud-memorystore-docs]: https://cloud.google.com/memorystore/docs | |||
[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1beta1 | |||
[cloud-texttospeech]: https://cloud.google.com/texttospeech/ | |||
[cloud-texttospeech-docs]: https://cloud.google.com/texttospeech/docs | |||
[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1 | |||
[cloud-trace]: https://cloud.google.com/trace/ | |||
[cloud-trace-docs]: https://cloud.google.com/trace/docs | |||
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2 | |||
[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ | |||
[cloud-dialogflow-docs]: https://cloud.google.com/dialogflow-enterprise/docs/ | |||
[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2 | |||
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis | |||
[cloud-containeranalysis-docs]: https://cloud.google.com/container-analysis/api/reference/rest/ | |||
[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1 | |||
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory | |||
[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory | |||
[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1 | |||
[cloud-tasks]: https://cloud.google.com/tasks/ | |||
[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3 |
@@ -1,47 +0,0 @@ | |||
# How to Create a New Release | |||
## Prerequisites | |||
Install [releasetool](https://github.com/googleapis/releasetool). | |||
## Create a release | |||
1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go` | |||
1. Checkout the master branch and ensure a clean and up-to-date state. | |||
``` | |||
git checkout master | |||
git pull --tags origin master | |||
``` | |||
1. Run releasetool to generate a changelog from the last version. Note, | |||
releasetool will prompt if the new version is a major, minor, or patch | |||
version. | |||
``` | |||
releasetool start --language go | |||
``` | |||
1. Format the output to match CHANGES.md. | |||
1. Submit a CL with the changes in CHANGES.md. The commit message should look | |||
like this (where `v0.31.0` is instead the correct version number): | |||
``` | |||
all: Release v0.31.0 | |||
``` | |||
1. Wait for approval from all reviewers and then submit the CL. | |||
1. Return to the master branch and pull the release commit. | |||
``` | |||
git checkout master | |||
git pull origin master | |||
``` | |||
1. Tag the current commit with the new version (e.g., `v0.31.0`) | |||
``` | |||
releasetool tag --language go | |||
``` | |||
1. Publish the tag to GoogleSource (i.e., origin): | |||
``` | |||
git push origin $NEW_VERSION | |||
``` | |||
1. Visit the [releases page][releases] on GitHub and click the "Draft a new | |||
release" button. For tag version, enter the tag published in the previous | |||
step. For the release title, use the version (e.g., `v0.31.0`). For the | |||
description, copy the changes added to CHANGES.md. | |||
[releases]: https://github.com/googleapis/google-cloud-go/releases |
@@ -1,249 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package asset | |||
import ( | |||
"context" | |||
"time" | |||
"cloud.google.com/go/longrunning" | |||
lroauto "cloud.google.com/go/longrunning/autogen" | |||
gax "github.com/googleapis/gax-go/v2" | |||
"google.golang.org/api/option" | |||
"google.golang.org/api/transport" | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
longrunningpb "google.golang.org/genproto/googleapis/longrunning" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
// CallOptions contains the retry settings for each method of Client.
// Each field is the default gax.CallOption slice applied to the method of
// the same name; callers may replace them before issuing requests.
type CallOptions struct {
	// ExportAssets has no retry options by default (see defaultCallOptions).
	ExportAssets []gax.CallOption
	// BatchGetAssetsHistory retries on transient errors by default.
	BatchGetAssetsHistory []gax.CallOption
}
func defaultClientOptions() []option.ClientOption { | |||
return []option.ClientOption{ | |||
option.WithEndpoint("cloudasset.googleapis.com:443"), | |||
option.WithScopes(DefaultAuthScopes()...), | |||
} | |||
} | |||
// defaultCallOptions builds the per-method retry configuration.
//
// Only the {"default", "idempotent"} profile exists in the table: it retries
// on DeadlineExceeded/Unavailable with exponential backoff. The lookup for
// {"default", "non_idempotent"} below intentionally misses the map and
// yields nil, i.e. ExportAssets gets no automatic retries.
func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		ExportAssets:          retry[[2]string{"default", "non_idempotent"}],
		BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client assetpb.AssetServiceClient

	// LROClient is used internally to handle longrunning operations.
	// It is exposed so that its CallOptions can be modified if required.
	// Users should not Close this client.
	LROClient *lroauto.OperationsClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated once by setGoogleClientInfo during NewClient.
	xGoogMetadata metadata.MD
}
// NewClient creates a new asset service client.
//
// Asset service definition.
//
// The returned Client owns the underlying connection; call Close when done.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Caller-supplied options are appended after the defaults so they win.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: assetpb.NewAssetServiceClient(conn),
	}
	c.setGoogleClientInfo()

	// The long-running-operations client reuses the same connection, so this
	// does not perform a second dial.
	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		// This error "should not happen", since we are just reusing old connection
		// and never actually need to dial.
		// If this does happen, we could leak conn. However, we cannot close conn:
		// If the user invoked the function with option.WithGRPCConn,
		// we would close a connection that's still in use.
		// TODO(pongad): investigate error conditions.
		return nil, err
	}
	return c, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared with LROClient; do not close it directly.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in | |||
// the `x-goog-api-client` header passed on each request. Intended for | |||
// use by Google-written clients. | |||
func (c *Client) setGoogleClientInfo(keyval ...string) { | |||
kv := append([]string{"gl-go", versionGo()}, keyval...) | |||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) | |||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) | |||
} | |||
// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the
// [google.longrunning.Operation][google.longrunning.Operation] API allowing
// you to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The three-index slice caps capacity so the append below cannot write
	// into the backing array shared with c.CallOptions.
	opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw LRO so callers can Wait/Poll on it.
	return &ExportAssetsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with asset in both
// non-delete or deleted status.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Capacity-capped slice: the append cannot clobber c.CallOptions' array.
	opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
	var resp *assetpb.BatchGetAssetsHistoryResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ExportAssetsOperation manages a long-running operation from ExportAssets.
// The zero value is not usable; obtain one from Client.ExportAssets or
// Client.ExportAssetsOperation.
type ExportAssetsOperation struct {
	lro *longrunning.Operation
}
// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
	return &ExportAssetsOperation{
		// Only the name is populated; state is fetched lazily via Poll/Wait.
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
// The server is polled at a fixed 5-second interval.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
	var resp assetpb.ExportAssetsResponse
	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
	var resp assetpb.ExportAssetsResponse
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	// Still running: signal "no result yet" with a nil, nil pair.
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
	var meta assetpb.ExportAssetsRequest
	// ErrNoMetadata is the "not available" case, mapped to nil, nil.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}
// Done reports whether the long-running operation has completed.
// It reflects the state from the last Poll/Wait; it does not contact the server.
func (op *ExportAssetsOperation) Done() bool {
	return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
	return op.lro.Name()
}
@@ -1,75 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package asset_test | |||
import ( | |||
"context" | |||
asset "cloud.google.com/go/asset/apiv1beta1" | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
) | |||
// ExampleNewClient shows minimal construction of an asset Client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_ExportAssets shows starting an export and blocking on the
// resulting long-running operation with Wait.
func ExampleClient_ExportAssets() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &assetpb.ExportAssetsRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.ExportAssets(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_BatchGetAssetsHistory shows a simple unary call.
func ExampleClient_BatchGetAssetsHistory() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &assetpb.BatchGetAssetsHistoryRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.BatchGetAssetsHistory(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
@@ -1,89 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
// Package asset is an auto-generated package for the | |||
// Cloud Asset API. | |||
// | |||
// NOTE: This package is in beta. It is not stable, and may be subject to changes. | |||
// | |||
// The cloud asset API manages the history and inventory of cloud resources. | |||
package asset // import "cloud.google.com/go/asset/apiv1beta1" | |||
import ( | |||
"context" | |||
"runtime" | |||
"strings" | |||
"unicode" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { | |||
out, _ := metadata.FromOutgoingContext(ctx) | |||
out = out.Copy() | |||
for _, md := range mds { | |||
for k, v := range md { | |||
out[k] = append(out[k], v...) | |||
} | |||
} | |||
return metadata.NewOutgoingContext(ctx, out) | |||
} | |||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 1)
	scopes[0] = "https://www.googleapis.com/auth/cloud-platform"
	return scopes
}
// versionGo returns the Go runtime version. The returned string | |||
// has no whitespace, suitable for reporting in header. | |||
func versionGo() string { | |||
const develPrefix = "devel +" | |||
s := runtime.Version() | |||
if strings.HasPrefix(s, develPrefix) { | |||
s = s[len(develPrefix):] | |||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { | |||
s = s[:p] | |||
} | |||
return s | |||
} | |||
notSemverRune := func(r rune) bool { | |||
return strings.IndexRune("0123456789.", r) < 0 | |||
} | |||
if strings.HasPrefix(s, "go1") { | |||
s = s[2:] | |||
var prerelease string | |||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 { | |||
s, prerelease = s[:p], s[p:] | |||
} | |||
if strings.HasSuffix(s, ".") { | |||
s += "0" | |||
} else if strings.Count(s, ".") < 2 { | |||
s += ".0" | |||
} | |||
if prerelease != "" { | |||
s += "-" + prerelease | |||
} | |||
return s | |||
} | |||
return "UNKNOWN" | |||
} | |||
const versionClient = "20190306" |
@@ -1,266 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package asset | |||
import ( | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
longrunningpb "google.golang.org/genproto/googleapis/longrunning" | |||
) | |||
import ( | |||
"context" | |||
"flag" | |||
"fmt" | |||
"io" | |||
"log" | |||
"net" | |||
"os" | |||
"strings" | |||
"testing" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/golang/protobuf/ptypes" | |||
"google.golang.org/api/option" | |||
status "google.golang.org/genproto/googleapis/rpc/status" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
gstatus "google.golang.org/grpc/status" | |||
) | |||
// Blank references keep the imports above in use even when the generated
// test code below does not touch them directly.
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
// mockAssetServer is a canned-response AssetService implementation used by
// the tests in this file; it records every request it receives in reqs.
type mockAssetServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	assetpb.AssetServiceServer

	// reqs accumulates every request received, in call order.
	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}
func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*longrunningpb.Operation), nil | |||
} | |||
func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil | |||
} | |||
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
	// mockAsset is the shared fake server; each test resets its fields.
	mockAsset mockAssetServer
)

// TestMain starts an in-process gRPC server backed by mockAsset on a random
// localhost port, dials it without TLS, and exposes the resulting connection
// to tests via clientOpt before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	assetpb.RegisterAssetServiceServer(serv, &mockAsset)

	// port 0 asks the OS for a free port, so parallel test binaries don't collide.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}
// TestAssetServiceExportAssets drives a successful ExportAssets call end to
// end: it queues a completed long-running operation on the mock, invokes the
// client, waits on the LRO, and verifies both the recorded request and the
// unpacked response.
func TestAssetServiceExportAssets(t *testing.T) {
	var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

	mockAsset.err = nil
	mockAsset.reqs = nil

	// Pack the expected response into the operation's Any result, as the
	// real service would for a finished operation.
	any, err := ptypes.MarshalAny(expectedResponse)
	if err != nil {
		t.Fatal(err)
	}
	mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
	var request = &assetpb.ExportAssetsRequest{
		Parent:       formattedParent,
		OutputConfig: outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.ExportAssets(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAssetServiceExportAssetsError checks that a failed operation surfaces
// the server-side status: the initial RPC succeeds, but Wait must yield a
// gRPC error carrying the queued error code.
func TestAssetServiceExportAssetsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAsset.err = nil
	mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
		Name: "longrunning-test",
		Done: true,
		Result: &longrunningpb.Operation_Error{
			Error: &status.Status{
				Code:    int32(errCode),
				Message: "test error",
			},
		},
	})

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
	var request = &assetpb.ExportAssetsRequest{
		Parent:       formattedParent,
		OutputConfig: outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.ExportAssets(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	// The error is reported by Wait, not by the ExportAssets call itself.
	resp, err := respLRO.Wait(context.Background())

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestAssetServiceBatchGetAssetsHistory exercises the unary happy path:
// queue a response, call the client, and verify the request/response pair.
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
	var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

	mockAsset.err = nil
	mockAsset.reqs = nil

	mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
	var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
	var request = &assetpb.BatchGetAssetsHistoryRequest{
		Parent:         formattedParent,
		ContentType:    contentType,
		ReadTimeWindow: readTimeWindow,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchGetAssetsHistory(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAssetServiceBatchGetAssetsHistoryError checks that a server-side error
// is propagated to the caller as a gRPC status with the configured code.
func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAsset.err = gstatus.Error(errCode, "test error")

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
	var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
	var request = &assetpb.BatchGetAssetsHistoryRequest{
		Parent:         formattedParent,
		ContentType:    contentType,
		ReadTimeWindow: readTimeWindow,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchGetAssetsHistory(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
@@ -1,248 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||
package asset | |||
import ( | |||
"context" | |||
"time" | |||
"cloud.google.com/go/longrunning" | |||
lroauto "cloud.google.com/go/longrunning/autogen" | |||
gax "github.com/googleapis/gax-go/v2" | |||
"google.golang.org/api/option" | |||
"google.golang.org/api/transport" | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
longrunningpb "google.golang.org/genproto/googleapis/longrunning" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	ExportAssets          []gax.CallOption
	BatchGetAssetsHistory []gax.CallOption
}

// defaultClientOptions returns the connection options used when the caller
// supplies none: the production endpoint plus the default auth scopes.
func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("cloudasset.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}

// defaultCallOptions returns the per-method retry policies: retriable codes
// and exponential backoff for idempotent calls, nothing otherwise.
func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		// The {"default", "non_idempotent"} key is deliberately absent from
		// the map: the lookup yields nil, i.e. ExportAssets is never retried
		// automatically.
		ExportAssets:          retry[[2]string{"default", "non_idempotent"}],
		BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client assetpb.AssetServiceClient

	// LROClient is used internally to handle longrunning operations.
	// It is exposed so that its CallOptions can be modified if required.
	// Users should not Close this client.
	LROClient *lroauto.OperationsClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Caller-supplied options are appended last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: assetpb.NewAssetServiceClient(conn),
	}
	c.setGoogleClientInfo()

	// The LRO client shares conn; no second dial occurs.
	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		// This error "should not happen", since we are just reusing old connection
		// and never actually need to dial.
		// If this does happen, we could leak conn. However, we cannot close conn:
		// If the user invoked the function with option.WithGRPCConn,
		// we would close a connection that's still in use.
		// TODO(pongad): investigate error conditions.
		return nil, err
	}
	return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in | |||
// the `x-goog-api-client` header passed on each request. Intended for | |||
// use by Google-written clients. | |||
func (c *Client) setGoogleClientInfo(keyval ...string) { | |||
kv := append([]string{"gl-go", versionGo()}, keyval...) | |||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) | |||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) | |||
} | |||
// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
// to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice caps the default options so the append below cannot
	// write into CallOptions' backing array.
	opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw operation so callers can Wait/Poll on it.
	return &ExportAssetsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}

// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with asset in both
// non-delete or deleted status.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
	var resp *assetpb.BatchGetAssetsHistoryResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
	lro *longrunning.Operation
}

// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
	return &ExportAssetsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
	var resp assetpb.ExportAssetsResponse
	// Re-poll the server every 5 seconds until done or ctx is cancelled.
	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
	var resp assetpb.ExportAssetsResponse
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
	var meta assetpb.ExportAssetsRequest
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		// Absent metadata is not an error for callers.
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
	return op.lro.Name()
}
@@ -1,75 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||
package asset_test | |||
import ( | |||
"context" | |||
asset "cloud.google.com/go/asset/v1beta1" | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
) | |||
// ExampleNewClient shows the minimal construction of an asset client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

// ExampleClient_ExportAssets shows starting an export and blocking on the
// resulting long-running operation.
func ExampleClient_ExportAssets() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &assetpb.ExportAssetsRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.ExportAssets(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_BatchGetAssetsHistory shows a simple unary call.
func ExampleClient_BatchGetAssetsHistory() {
	ctx := context.Background()
	c, err := asset.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &assetpb.BatchGetAssetsHistoryRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.BatchGetAssetsHistory(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
@@ -1,89 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||
// Package asset is an auto-generated package for the | |||
// Cloud Asset API. | |||
// | |||
// NOTE: This package is in alpha. It is not stable, and is likely to change. | |||
// | |||
// The cloud asset API manages the history and inventory of cloud resources. | |||
package asset // import "cloud.google.com/go/asset/v1beta1" | |||
import ( | |||
"context" | |||
"runtime" | |||
"strings" | |||
"unicode" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { | |||
out, _ := metadata.FromOutgoingContext(ctx) | |||
out = out.Copy() | |||
for _, md := range mds { | |||
for k, v := range md { | |||
out[k] = append(out[k], v...) | |||
} | |||
} | |||
return metadata.NewOutgoingContext(ctx, out) | |||
} | |||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
	return scopes
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
	const develPrefix = "devel +"

	v := runtime.Version()

	// Development builds: report the commit token that follows "devel +".
	if strings.HasPrefix(v, develPrefix) {
		v = v[len(develPrefix):]
		if sp := strings.IndexFunc(v, unicode.IsSpace); sp >= 0 {
			v = v[:sp]
		}
		return v
	}

	// Release builds look like "go1.x[.y][pre]"; normalize to "1.x.y[-pre]".
	if !strings.HasPrefix(v, "go1") {
		return "UNKNOWN"
	}
	v = v[2:] // drop the leading "go"

	notSemver := func(r rune) bool {
		return strings.IndexRune("0123456789.", r) < 0
	}
	prerelease := ""
	if p := strings.IndexFunc(v, notSemver); p >= 0 {
		v, prerelease = v[:p], v[p:]
	}

	// Pad to a full major.minor.patch triple.
	switch {
	case strings.HasSuffix(v, "."):
		v += "0"
	case strings.Count(v, ".") < 2:
		v += ".0"
	}

	if prerelease != "" {
		v += "-" + prerelease
	}
	return v
}
// versionClient is the generation date of this client, reported as the
// "gapic" component of the x-goog-api-client header.
const versionClient = "20181219"
@@ -1,266 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||
package asset | |||
import ( | |||
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1" | |||
longrunningpb "google.golang.org/genproto/googleapis/longrunning" | |||
) | |||
import ( | |||
"context" | |||
"flag" | |||
"fmt" | |||
"io" | |||
"log" | |||
"net" | |||
"os" | |||
"strings" | |||
"testing" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/golang/protobuf/ptypes" | |||
"google.golang.org/api/option" | |||
status "google.golang.org/genproto/googleapis/rpc/status" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
gstatus "google.golang.org/grpc/status" | |||
) | |||
var _ = io.EOF | |||
var _ = ptypes.MarshalAny | |||
var _ status.Status | |||
// mockAssetServer is an in-process fake of the AssetService gRPC server:
// queue responses in resps (or set err), then inspect recorded reqs.
type mockAssetServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	assetpb.AssetServiceServer

	// reqs records every request received, in call order.
	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}

// ExportAssets records the request and returns the first queued response
// (or err), after checking the x-goog-api-client header for "gl-go/".
func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}

// BatchGetAssetsHistory mirrors ExportAssets for the unary RPC.
func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
	// mockAsset is the shared fake server; each test resets its fields.
	mockAsset mockAssetServer
)

// TestMain starts an in-process gRPC server backed by mockAsset on a random
// localhost port, dials it insecurely, and publishes the connection through
// clientOpt before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	assetpb.RegisterAssetServiceServer(serv, &mockAsset)

	// port 0 requests a free port from the OS.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}
// TestAssetServiceExportAssets drives a successful ExportAssets call end to
// end via the mock: queue a completed LRO, invoke, wait, and verify the
// recorded request and unpacked response.
func TestAssetServiceExportAssets(t *testing.T) {
	var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

	mockAsset.err = nil
	mockAsset.reqs = nil

	// Pack the expected response into the operation's Any result.
	any, err := ptypes.MarshalAny(expectedResponse)
	if err != nil {
		t.Fatal(err)
	}
	mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
	var request = &assetpb.ExportAssetsRequest{
		Parent:       formattedParent,
		OutputConfig: outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.ExportAssets(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAssetServiceExportAssetsError checks that a failed operation surfaces
// through Wait as a gRPC error with the queued status code.
func TestAssetServiceExportAssetsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAsset.err = nil
	mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
		Name: "longrunning-test",
		Done: true,
		Result: &longrunningpb.Operation_Error{
			Error: &status.Status{
				Code:    int32(errCode),
				Message: "test error",
			},
		},
	})

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
	var request = &assetpb.ExportAssetsRequest{
		Parent:       formattedParent,
		OutputConfig: outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.ExportAssets(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	// The error is reported by Wait, not by the ExportAssets call itself.
	resp, err := respLRO.Wait(context.Background())

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestAssetServiceBatchGetAssetsHistory exercises the unary happy path:
// queue a response, call the client, verify the request/response pair.
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
	var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

	mockAsset.err = nil
	mockAsset.reqs = nil

	mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
	var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
	var request = &assetpb.BatchGetAssetsHistoryRequest{
		Parent:         formattedParent,
		ContentType:    contentType,
		ReadTimeWindow: readTimeWindow,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchGetAssetsHistory(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAssetServiceBatchGetAssetsHistoryError checks that a server error is
// propagated to the caller as a gRPC status with the configured code.
func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAsset.err = gstatus.Error(errCode, "test error")

	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
	var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
	var request = &assetpb.BatchGetAssetsHistoryRequest{
		Parent:         formattedParent,
		ContentType:    contentType,
		ReadTimeWindow: readTimeWindow,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchGetAssetsHistory(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
@@ -1,73 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package cloud_test | |||
import ( | |||
"context" | |||
"cloud.google.com/go/datastore" | |||
"cloud.google.com/go/pubsub" | |||
"golang.org/x/oauth2/google" | |||
"google.golang.org/api/option" | |||
) | |||
// Google Application Default Credentials is the recommended way to authorize
// and authenticate clients.
//
// For information on how to create and obtain Application Default Credentials, see
// https://developers.google.com/identity/protocols/application-default-credentials.
func Example_applicationDefaultCredentials() {
	// With no explicit option, the client falls back to ADC.
	client, err := datastore.NewClient(context.Background(), "project-id")
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}

// You can use a file with credentials to authenticate and authorize, such as a JSON
// key file associated with a Google service account. Service Account keys can be
// created and downloaded from
// https://console.developers.google.com/permissions/serviceaccounts.
//
// This example uses the Datastore client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFile() {
	client, err := datastore.NewClient(context.Background(),
		"project-id", option.WithCredentialsFile("/path/to/service-account-key.json"))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}

// In some cases (for instance, you don't want to store secrets on disk), you can
// create credentials from in-memory JSON and use the WithCredentials option.
//
// The google package in this example is at golang.org/x/oauth2/google.
//
// This example uses the PubSub client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFromJSON() {
	ctx := context.Background()
	// Parse the in-memory JSON key, requesting only the Pub/Sub scope.
	creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), pubsub.ScopePubSub)
	if err != nil {
		// TODO: handle error.
	}
	client, err := pubsub.NewClient(ctx, "project-id", option.WithCredentials(creds))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
@@ -1,8 +0,0 @@ | |||
# BigQuery Benchmark | |||
This directory contains benchmarks for the BigQuery client. | |||
## Usage | |||
`go run bench.go -- <your project id> queries.json` | |||
The BigQuery service caches requests, so the benchmark should be run | |||
at least twice, disregarding the first result. | |||
@@ -1,85 +0,0 @@ | |||
// Copyright 2017 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
//+build ignore | |||
package main | |||
import ( | |||
"context" | |||
"encoding/json" | |||
"flag" | |||
"io/ioutil" | |||
"log" | |||
"time" | |||
"cloud.google.com/go/bigquery" | |||
"google.golang.org/api/iterator" | |||
) | |||
func main() { | |||
flag.Parse() | |||
ctx := context.Background() | |||
c, err := bigquery.NewClient(ctx, flag.Arg(0)) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
queriesJSON, err := ioutil.ReadFile(flag.Arg(1)) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
var queries []string | |||
if err := json.Unmarshal(queriesJSON, &queries); err != nil { | |||
log.Fatal(err) | |||
} | |||
for _, q := range queries { | |||
doQuery(ctx, c, q) | |||
} | |||
} | |||
func doQuery(ctx context.Context, c *bigquery.Client, qt string) { | |||
startTime := time.Now() | |||
q := c.Query(qt) | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
numRows, numCols := 0, 0 | |||
var firstByte time.Duration | |||
for { | |||
var values []bigquery.Value | |||
err := it.Next(&values) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
if numRows == 0 { | |||
numCols = len(values) | |||
firstByte = time.Since(startTime) | |||
} else if numCols != len(values) { | |||
log.Fatalf("got %d columns, want %d", len(values), numCols) | |||
} | |||
numRows++ | |||
} | |||
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec", | |||
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds()) | |||
} |
@@ -1,10 +0,0 @@ | |||
[ | |||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000", | |||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000", | |||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000", | |||
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000", | |||
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id", | |||
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId", | |||
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000", | |||
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000" | |||
] |
@@ -1,162 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"fmt" | |||
"io" | |||
"net/http" | |||
"time" | |||
"cloud.google.com/go/internal" | |||
"cloud.google.com/go/internal/version" | |||
gax "github.com/googleapis/gax-go/v2" | |||
bq "google.golang.org/api/bigquery/v2" | |||
"google.golang.org/api/googleapi" | |||
"google.golang.org/api/option" | |||
htransport "google.golang.org/api/transport/http" | |||
) | |||
const (
	// prodAddr is the base URL of the production BigQuery REST API.
	prodAddr = "https://www.googleapis.com/bigquery/v2/"
	// Scope is the Oauth2 scope for the service.
	Scope = "https://www.googleapis.com/auth/bigquery"
	// userAgent identifies this client library in outgoing requests.
	userAgent = "gcloud-golang-bigquery/20160429"
)
// xGoogHeader is the value sent in the x-goog-api-client header; it reports
// the Go runtime version and the client library version.
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
// setClientHeader sets the standard x-goog-api-client header on an outgoing
// request's headers.
func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}
// Client may be used to perform BigQuery operations.
type Client struct {
	// Location, if set, will be used as the default location for all subsequent
	// dataset creation and job operations. A location specified directly in one of
	// those operations will override this value.
	Location string
	projectID string // GCP project billed for operations performed via this client
	bqs *bq.Service // generated BigQuery API stub used for all RPCs
}
// NewClient constructs a new Client which can perform BigQuery operations. | |||
// Operations performed via the client are billed to the specified GCP project. | |||
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { | |||
o := []option.ClientOption{ | |||
option.WithEndpoint(prodAddr), | |||
option.WithScopes(Scope), | |||
option.WithUserAgent(userAgent), | |||
} | |||
o = append(o, opts...) | |||
httpClient, endpoint, err := htransport.NewClient(ctx, o...) | |||
if err != nil { | |||
return nil, fmt.Errorf("bigquery: dialing: %v", err) | |||
} | |||
bqs, err := bq.New(httpClient) | |||
if err != nil { | |||
return nil, fmt.Errorf("bigquery: constructing client: %v", err) | |||
} | |||
bqs.BasePath = endpoint | |||
c := &Client{ | |||
projectID: projectID, | |||
bqs: bqs, | |||
} | |||
return c, nil | |||
} | |||
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	// The HTTP-based transport holds nothing that needs releasing here.
	return nil
}
// insertJob calls the Jobs.Insert RPC and returns a Job.
// media, if non-nil, is uploaded as the job's data (e.g. for load jobs).
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
	setClientHeader(call.Header())
	if media != nil {
		call.Media(media)
	}
	// res and err are captured by the invoke closure below.
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if job.JobReference != nil && media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}
	return bqToJob(res, c)
}
// Convert a number of milliseconds since the Unix epoch to a time.Time. | |||
// Treat an input of zero specially: convert it to the zero time, | |||
// rather than the start of the epoch. | |||
func unixMillisToTime(m int64) time.Time { | |||
if m == 0 { | |||
return time.Time{} | |||
} | |||
return time.Unix(0, m*1e6) | |||
} | |||
// runWithRetry calls the function until it returns nil or a non-retryable error, or | |||
// the context is done. | |||
// See the similar function in ../storage/invoke.go. The main difference is the | |||
// reason for retrying. | |||
func runWithRetry(ctx context.Context, call func() error) error { | |||
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla. | |||
backoff := gax.Backoff{ | |||
Initial: 1 * time.Second, | |||
Max: 32 * time.Second, | |||
Multiplier: 2, | |||
} | |||
return internal.Retry(ctx, backoff, func() (stop bool, err error) { | |||
err = call() | |||
if err == nil { | |||
return true, nil | |||
} | |||
return !retryableError(err), err | |||
}) | |||
} | |||
// This is the correct definition of retryable according to the BigQuery team. It | |||
// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors | |||
// retryable; these are returned by systems between the client and the BigQuery | |||
// service. | |||
func retryableError(err error) bool { | |||
e, ok := err.(*googleapi.Error) | |||
if !ok { | |||
return false | |||
} | |||
var reason string | |||
if len(e.Errors) > 0 { | |||
reason = e.Errors[0].Reason | |||
} | |||
return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded" | |||
} |
@@ -1,107 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
	// Srcs are the tables from which data will be copied.
	Srcs []*Table
	// Dst is the table into which the data will be copied.
	Dst *Table
	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition
	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteEmpty.
	WriteDisposition TableWriteDisposition
	// The labels associated with this job.
	Labels map[string]string
	// Custom encryption configuration (e.g., Cloud KMS keys).
	// nil means the service default is used.
	DestinationEncryptionConfig *EncryptionConfig
}
func (c *CopyConfig) toBQ() *bq.JobConfiguration { | |||
var ts []*bq.TableReference | |||
for _, t := range c.Srcs { | |||
ts = append(ts, t.toBQ()) | |||
} | |||
return &bq.JobConfiguration{ | |||
Labels: c.Labels, | |||
Copy: &bq.JobConfigurationTableCopy{ | |||
CreateDisposition: string(c.CreateDisposition), | |||
WriteDisposition: string(c.WriteDisposition), | |||
DestinationTable: c.Dst.toBQ(), | |||
DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(), | |||
SourceTables: ts, | |||
}, | |||
} | |||
} | |||
func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig { | |||
cc := &CopyConfig{ | |||
Labels: q.Labels, | |||
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition), | |||
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition), | |||
Dst: bqToTable(q.Copy.DestinationTable, c), | |||
DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration), | |||
} | |||
for _, t := range q.Copy.SourceTables { | |||
cc.Srcs = append(cc.Srcs, bqToTable(t, c)) | |||
} | |||
return cc | |||
} | |||
// A Copier copies data into a BigQuery table from one or more BigQuery tables. | |||
type Copier struct { | |||
JobIDConfig | |||
CopyConfig | |||
c *Client | |||
} | |||
// CopierFrom returns a Copier which can be used to copy data into a | |||
// BigQuery table from one or more BigQuery tables. | |||
// The returned Copier may optionally be further configured before its Run method is called. | |||
func (t *Table) CopierFrom(srcs ...*Table) *Copier { | |||
return &Copier{ | |||
c: t.c, | |||
CopyConfig: CopyConfig{ | |||
Srcs: srcs, | |||
Dst: t, | |||
}, | |||
} | |||
} | |||
// Run initiates a copy job. | |||
func (c *Copier) Run(ctx context.Context) (*Job, error) { | |||
return c.c.insertJob(ctx, c.newJob(), nil) | |||
} | |||
func (c *Copier) newJob() *bq.Job { | |||
return &bq.Job{ | |||
JobReference: c.JobIDConfig.createJobRef(c.c), | |||
Configuration: c.CopyConfig.toBQ(), | |||
} | |||
} |
@@ -1,163 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp/cmpopts" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// defaultCopyJob returns the baseline bq.Job that the simplest CopierFrom
// invocation is expected to produce; test cases mutate fresh copies of it.
func defaultCopyJob() *bq.Job {
	return &bq.Job{
		// "RANDOM" matches the ID pinned by fixRandomID in the tests.
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Copy: &bq.JobConfigurationTableCopy{
				DestinationTable: &bq.TableReference{
					ProjectId: "d-project-id",
					DatasetId: "d-dataset-id",
					TableId: "d-table-id",
				},
				SourceTables: []*bq.TableReference{
					{
						ProjectId: "s-project-id",
						DatasetId: "s-dataset-id",
						TableId: "s-table-id",
					},
				},
			},
		},
	}
}
func TestCopy(t *testing.T) { | |||
defer fixRandomID("RANDOM")() | |||
testCases := []struct { | |||
dst *Table | |||
srcs []*Table | |||
jobID string | |||
location string | |||
config CopyConfig | |||
want *bq.Job | |||
}{ | |||
{ | |||
dst: &Table{ | |||
ProjectID: "d-project-id", | |||
DatasetID: "d-dataset-id", | |||
TableID: "d-table-id", | |||
}, | |||
srcs: []*Table{ | |||
{ | |||
ProjectID: "s-project-id", | |||
DatasetID: "s-dataset-id", | |||
TableID: "s-table-id", | |||
}, | |||
}, | |||
want: defaultCopyJob(), | |||
}, | |||
{ | |||
dst: &Table{ | |||
ProjectID: "d-project-id", | |||
DatasetID: "d-dataset-id", | |||
TableID: "d-table-id", | |||
}, | |||
srcs: []*Table{ | |||
{ | |||
ProjectID: "s-project-id", | |||
DatasetID: "s-dataset-id", | |||
TableID: "s-table-id", | |||
}, | |||
}, | |||
config: CopyConfig{ | |||
CreateDisposition: CreateNever, | |||
WriteDisposition: WriteTruncate, | |||
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, | |||
Labels: map[string]string{"a": "b"}, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultCopyJob() | |||
j.Configuration.Labels = map[string]string{"a": "b"} | |||
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" | |||
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" | |||
j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: &Table{ | |||
ProjectID: "d-project-id", | |||
DatasetID: "d-dataset-id", | |||
TableID: "d-table-id", | |||
}, | |||
srcs: []*Table{ | |||
{ | |||
ProjectID: "s-project-id", | |||
DatasetID: "s-dataset-id", | |||
TableID: "s-table-id", | |||
}, | |||
}, | |||
jobID: "job-id", | |||
want: func() *bq.Job { | |||
j := defaultCopyJob() | |||
j.JobReference.JobId = "job-id" | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: &Table{ | |||
ProjectID: "d-project-id", | |||
DatasetID: "d-dataset-id", | |||
TableID: "d-table-id", | |||
}, | |||
srcs: []*Table{ | |||
{ | |||
ProjectID: "s-project-id", | |||
DatasetID: "s-dataset-id", | |||
TableID: "s-table-id", | |||
}, | |||
}, | |||
location: "asia-northeast1", | |||
want: func() *bq.Job { | |||
j := defaultCopyJob() | |||
j.JobReference.Location = "asia-northeast1" | |||
return j | |||
}(), | |||
}, | |||
} | |||
c := &Client{projectID: "client-project-id"} | |||
for i, tc := range testCases { | |||
tc.dst.c = c | |||
copier := tc.dst.CopierFrom(tc.srcs...) | |||
copier.JobID = tc.jobID | |||
copier.Location = tc.location | |||
tc.config.Srcs = tc.srcs | |||
tc.config.Dst = tc.dst | |||
copier.CopyConfig = tc.config | |||
got := copier.newJob() | |||
checkJob(t, i, got, tc.want) | |||
jc, err := bqToJobConfig(got.Configuration, c) | |||
if err != nil { | |||
t.Fatalf("#%d: %v", i, err) | |||
} | |||
diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig, | |||
cmpopts.IgnoreUnexported(Table{})) | |||
if diff != "" { | |||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||
} | |||
} | |||
} |
@@ -1,536 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"time" | |||
"cloud.google.com/go/internal/optional" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
"google.golang.org/api/iterator" | |||
) | |||
// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
	ProjectID string // project that owns the dataset
	DatasetID string // dataset identifier within the project
	c *Client // client used to issue all RPCs for this dataset
}
// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
	// These fields can be set when creating a dataset.
	Name string // The user-friendly name for this dataset.
	Description string // The user-friendly description of this dataset.
	Location string // The geo location of the dataset.
	DefaultTableExpiration time.Duration // The default expiration time for new tables.
	Labels map[string]string // User-provided labels.
	Access []*AccessEntry // Access permissions.
	// These fields are read-only.
	CreationTime time.Time
	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
	FullID string // The full dataset ID in the form projectID:datasetID.
	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
	Description optional.String // The user-friendly description of this table.
	Name optional.String // The user-friendly name for this dataset.
	// DefaultTableExpiration is the default expiration time for new tables.
	// If set to time.Duration(0), new tables never expire.
	DefaultTableExpiration optional.Duration
	// The entire access list. It is not possible to replace individual entries.
	Access []*AccessEntry
	// labelUpdater accumulates label additions and deletions (see its methods).
	labelUpdater
}
// Dataset creates a handle to a BigQuery dataset in the client's project. | |||
func (c *Client) Dataset(id string) *Dataset { | |||
return c.DatasetInProject(c.projectID, id) | |||
} | |||
// DatasetInProject creates a handle to a BigQuery dataset in the specified project. | |||
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset { | |||
return &Dataset{ | |||
ProjectID: projectID, | |||
DatasetID: datasetID, | |||
c: c, | |||
} | |||
} | |||
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
	// Record the final error on the trace span when the call completes.
	defer func() { trace.EndSpan(ctx, err) }()
	ds, err := md.toBQ()
	if err != nil {
		return err
	}
	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
	// Use Client.Location as a default.
	if ds.Location == "" {
		ds.Location = d.c.Location
	}
	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
	setClientHeader(call.Header())
	_, err = call.Do()
	return err
}
// toBQ converts the metadata to its BigQuery API representation. It rejects
// values in the read-only fields (CreationTime, LastModifiedTime, FullID,
// ETag), since those cannot be set at creation time. A nil receiver yields an
// empty bq.Dataset.
func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	if dm == nil {
		return ds, nil
	}
	ds.FriendlyName = dm.Name
	ds.Description = dm.Description
	ds.Location = dm.Location
	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
	ds.Labels = dm.Labels
	var err error
	ds.Access, err = accessListToBQ(dm.Access)
	if err != nil {
		return nil, err
	}
	// Reject attempts to write read-only fields.
	if !dm.CreationTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
	}
	if !dm.LastModifiedTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
	}
	if dm.FullID != "" {
		return nil, errors.New("bigquery: Dataset.FullID is not writable")
	}
	if dm.ETag != "" {
		return nil, errors.New("bigquery: Dataset.ETag is not writable")
	}
	return ds, nil
}
func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) { | |||
var q []*bq.DatasetAccess | |||
for _, e := range a { | |||
a, err := e.toBQ() | |||
if err != nil { | |||
return nil, err | |||
} | |||
q = append(q, a) | |||
} | |||
return q, nil | |||
} | |||
// Delete deletes the dataset. Delete will fail if the dataset is not empty. | |||
func (d *Dataset) Delete(ctx context.Context) (err error) { | |||
return d.deleteInternal(ctx, false) | |||
} | |||
// DeleteWithContents deletes the dataset, as well as contained resources. | |||
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) { | |||
return d.deleteInternal(ctx, true) | |||
} | |||
func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) { | |||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete") | |||
defer func() { trace.EndSpan(ctx, err) }() | |||
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents) | |||
setClientHeader(call.Header()) | |||
return call.Do() | |||
} | |||
// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
	defer func() { trace.EndSpan(ctx, err) }()
	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
	setClientHeader(call.Header())
	var ds *bq.Dataset
	// Retry transient failures of the Get call (see runWithRetry).
	if err := runWithRetry(ctx, func() (err error) {
		ds, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToDatasetMetadata(ds)
}
func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) { | |||
dm := &DatasetMetadata{ | |||
CreationTime: unixMillisToTime(d.CreationTime), | |||
LastModifiedTime: unixMillisToTime(d.LastModifiedTime), | |||
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond, | |||
Description: d.Description, | |||
Name: d.FriendlyName, | |||
FullID: d.Id, | |||
Location: d.Location, | |||
Labels: d.Labels, | |||
ETag: d.Etag, | |||
} | |||
for _, a := range d.Access { | |||
e, err := bqToAccessEntry(a, nil) | |||
if err != nil { | |||
return nil, err | |||
} | |||
dm.Access = append(dm.Access, e) | |||
} | |||
return dm, nil | |||
} | |||
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
	defer func() { trace.EndSpan(ctx, err) }()
	ds, err := dm.toBQ()
	if err != nil {
		return nil, err
	}
	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
	setClientHeader(call.Header())
	// A non-empty etag makes the patch conditional on the metadata being
	// unchanged since it was read.
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var ds2 *bq.Dataset
	// Retry transient failures of the Patch call.
	if err := runWithRetry(ctx, func() (err error) {
		ds2, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToDatasetMetadata(ds2)
}
// toBQ converts the update to its BigQuery API representation, using
// ForceSendFields so zero values are transmitted and NullFields so fields can
// be deleted on the server.
func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	// forceSend marks a field so the API client sends it even when it holds
	// the zero value.
	forceSend := func(field string) {
		ds.ForceSendFields = append(ds.ForceSendFields, field)
	}
	if dm.Description != nil {
		ds.Description = optional.ToString(dm.Description)
		forceSend("Description")
	}
	if dm.Name != nil {
		ds.FriendlyName = optional.ToString(dm.Name)
		forceSend("FriendlyName")
	}
	if dm.DefaultTableExpiration != nil {
		dur := optional.ToDuration(dm.DefaultTableExpiration)
		if dur == 0 {
			// Send a null to delete the field.
			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
		} else {
			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
		}
	}
	if dm.Access != nil {
		var err error
		ds.Access, err = accessListToBQ(dm.Access)
		if err != nil {
			return nil, err
		}
		// An explicitly empty access list clears the entries via a JSON null.
		if len(ds.Access) == 0 {
			ds.NullFields = append(ds.NullFields, "Access")
		}
	}
	// Fold in label changes accumulated on the embedded labelUpdater.
	labels, forces, nulls := dm.update()
	ds.Labels = labels
	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
	ds.NullFields = append(ds.NullFields, nulls...)
	return ds, nil
}
// Table creates a handle to a BigQuery table in the dataset. | |||
// To determine if a table exists, call Table.Metadata. | |||
// If the table does not already exist, use Table.Create to create it. | |||
func (d *Dataset) Table(tableID string) *Table { | |||
return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c} | |||
} | |||
// Tables returns an iterator over the tables in the Dataset. | |||
func (d *Dataset) Tables(ctx context.Context) *TableIterator { | |||
it := &TableIterator{ | |||
ctx: ctx, | |||
dataset: d, | |||
} | |||
it.pageInfo, it.nextFunc = iterator.NewPageInfo( | |||
it.fetch, | |||
func() int { return len(it.tables) }, | |||
func() interface{} { b := it.tables; it.tables = nil; return b }) | |||
return it | |||
} | |||
// A TableIterator is an iterator over Tables.
type TableIterator struct {
	ctx context.Context // context used for all List RPCs issued by this iterator
	dataset *Dataset // dataset whose tables are listed
	tables []*Table // buffered current page of results
	pageInfo *iterator.PageInfo // pagination state managed by the iterator package
	nextFunc func() error // advances to the next buffered element, fetching as needed
}
// Next returns the next result. Its second return value is Done if there are | |||
// no more results. Once Next returns Done, all subsequent calls will return | |||
// Done. | |||
func (it *TableIterator) Next() (*Table, error) { | |||
if err := it.nextFunc(); err != nil { | |||
return nil, err | |||
} | |||
t := it.tables[0] | |||
it.tables = it.tables[1:] | |||
return t, nil | |||
} | |||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } | |||
// listTables fetches one page of the dataset's tables via the Tables.List RPC.
// It is declared as a variable so tests can replace it with a stub.
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
		PageToken(pageToken).
		Context(it.ctx)
	setClientHeader(call.Header())
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	// Retry transient failures of the List call.
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}
func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { | |||
res, err := listTables(it, pageSize, pageToken) | |||
if err != nil { | |||
return "", err | |||
} | |||
for _, t := range res.Tables { | |||
it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c)) | |||
} | |||
return res.NextPageToken, nil | |||
} | |||
func bqToTable(tr *bq.TableReference, c *Client) *Table { | |||
if tr == nil { | |||
return nil | |||
} | |||
return &Table{ | |||
ProjectID: tr.ProjectId, | |||
DatasetID: tr.DatasetId, | |||
TableID: tr.TableId, | |||
c: c, | |||
} | |||
} | |||
// Datasets returns an iterator over the datasets in a project. | |||
// The Client's project is used by default, but that can be | |||
// changed by setting ProjectID on the returned iterator before calling Next. | |||
func (c *Client) Datasets(ctx context.Context) *DatasetIterator { | |||
return c.DatasetsInProject(ctx, c.projectID) | |||
} | |||
// DatasetsInProject returns an iterator over the datasets in the provided project. | |||
// | |||
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator. | |||
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { | |||
it := &DatasetIterator{ | |||
ctx: ctx, | |||
c: c, | |||
ProjectID: projectID, | |||
} | |||
it.pageInfo, it.nextFunc = iterator.NewPageInfo( | |||
it.fetch, | |||
func() int { return len(it.items) }, | |||
func() interface{} { b := it.items; it.items = nil; return b }) | |||
return it | |||
} | |||
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
	// ListHidden causes hidden datasets to be listed when set to true.
	// Set before the first call to Next.
	ListHidden bool
	// Filter restricts the datasets returned by label. The filter syntax is described in
	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
	// Set before the first call to Next.
	Filter string
	// The project ID of the listed datasets.
	// Set before the first call to Next.
	ProjectID string
	ctx context.Context // context used for all List RPCs issued by this iterator
	c *Client // client used to issue the RPCs
	pageInfo *iterator.PageInfo // pagination state managed by the iterator package
	nextFunc func() error // advances to the next buffered element, fetching as needed
	items []*Dataset // buffered current page of results
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Dataset. Its second return value is iterator.Done if | |||
// there are no more results. Once Next returns Done, all subsequent calls will | |||
// return Done. | |||
func (it *DatasetIterator) Next() (*Dataset, error) { | |||
if err := it.nextFunc(); err != nil { | |||
return nil, err | |||
} | |||
item := it.items[0] | |||
it.items = it.items[1:] | |||
return item, nil | |||
} | |||
// listDatasets performs one datasets.list RPC. It is a package-level variable
// so tests can substitute a stub implementation.
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	call := it.c.bqs.Datasets.List(it.ProjectID).
		Context(it.ctx).
		PageToken(pageToken).
		All(it.ListHidden)
	setClientHeader(call.Header())
	// Only constrain the page size when the caller asked for one.
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	if it.Filter != "" {
		call.Filter(it.Filter)
	}
	var res *bq.DatasetList
	// Retry transient failures; res is captured from the closure.
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { | |||
res, err := listDatasets(it, pageSize, pageToken) | |||
if err != nil { | |||
return "", err | |||
} | |||
for _, d := range res.Datasets { | |||
it.items = append(it.items, &Dataset{ | |||
ProjectID: d.DatasetReference.ProjectId, | |||
DatasetID: d.DatasetReference.DatasetId, | |||
c: it.c, | |||
}) | |||
} | |||
return res.NextPageToken, nil | |||
} | |||
// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
	Role       AccessRole // The role of the entity
	EntityType EntityType // The type of entity
	Entity     string     // The entity (individual or group) granted access
	View       *Table     // The view granted access (EntityType must be ViewEntity)
}
// AccessRole is the level of access to grant to a dataset.
// The string values mirror the BigQuery REST API role names.
type AccessRole string

const (
	// OwnerRole is the OWNER AccessRole.
	OwnerRole AccessRole = "OWNER"
	// ReaderRole is the READER AccessRole.
	ReaderRole AccessRole = "READER"
	// WriterRole is the WRITER AccessRole.
	WriterRole AccessRole = "WRITER"
)
// EntityType is the type of entity in an AccessEntry.
type EntityType int

const (
	// DomainEntity is a domain (e.g. "example.com").
	// iota + 1 keeps the zero value invalid so an unset EntityType is detectable.
	DomainEntity EntityType = iota + 1

	// GroupEmailEntity is an email address of a Google Group.
	GroupEmailEntity

	// UserEmailEntity is an email address of an individual user.
	UserEmailEntity

	// SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
	// allAuthenticatedUsers.
	SpecialGroupEntity

	// ViewEntity is a BigQuery view.
	ViewEntity
)
func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) { | |||
q := &bq.DatasetAccess{Role: string(e.Role)} | |||
switch e.EntityType { | |||
case DomainEntity: | |||
q.Domain = e.Entity | |||
case GroupEmailEntity: | |||
q.GroupByEmail = e.Entity | |||
case UserEmailEntity: | |||
q.UserByEmail = e.Entity | |||
case SpecialGroupEntity: | |||
q.SpecialGroup = e.Entity | |||
case ViewEntity: | |||
q.View = e.View.toBQ() | |||
default: | |||
return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType) | |||
} | |||
return q, nil | |||
} | |||
func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) { | |||
e := &AccessEntry{Role: AccessRole(q.Role)} | |||
switch { | |||
case q.Domain != "": | |||
e.Entity = q.Domain | |||
e.EntityType = DomainEntity | |||
case q.GroupByEmail != "": | |||
e.Entity = q.GroupByEmail | |||
e.EntityType = GroupEmailEntity | |||
case q.UserByEmail != "": | |||
e.Entity = q.UserByEmail | |||
e.EntityType = UserEmailEntity | |||
case q.SpecialGroup != "": | |||
e.Entity = q.SpecialGroup | |||
e.EntityType = SpecialGroupEntity | |||
case q.View != nil: | |||
e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId) | |||
e.EntityType = ViewEntity | |||
default: | |||
return nil, errors.New("bigquery: invalid access value") | |||
} | |||
return e, nil | |||
} |
@@ -1,326 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"strconv" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
itest "google.golang.org/api/iterator/testing" | |||
) | |||
// listTablesStub services table-list requests by returning data from an
// in-memory list of tables, standing in for the listTables package variable.
type listTablesStub struct {
	expectedProject, expectedDataset string
	tables                           []*bq.TableListTables
}
// listTables simulates one page of a tables.list call. The page token is the
// stringified start index into s.tables; an empty token starts at the beginning.
func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	if it.dataset.ProjectID != s.expectedProject {
		return nil, errors.New("wrong project id")
	}
	if it.dataset.DatasetID != s.expectedDataset {
		return nil, errors.New("wrong dataset id")
	}
	// Cap the page size so the test exercises multi-page iteration.
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	end := start + pageSize
	if end > len(s.tables) {
		end = len(s.tables)
	}
	// A non-empty token is returned only while more tables remain.
	nextPageToken := ""
	if end < len(s.tables) {
		nextPageToken = strconv.Itoa(end)
	}
	return &bq.TableList{
		Tables:        s.tables[start:end],
		NextPageToken: nextPageToken,
	}, nil
}
// TestTables verifies TableIterator against the shared iterator conformance
// suite, using listTablesStub in place of the real API call.
func TestTables(t *testing.T) {
	c := &Client{projectID: "p1"}
	inTables := []*bq.TableListTables{
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
	}
	outTables := []*Table{
		{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
	}

	lts := &listTablesStub{
		expectedProject: "p1",
		expectedDataset: "d1",
		tables:          inTables,
	}
	// Swap in the stub for the package-level hook and restore it afterwards.
	old := listTables
	listTables = lts.listTables // cannot use t.Parallel with this test
	defer func() { listTables = old }()

	msg, ok := itest.TestIterator(outTables,
		func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
	if !ok {
		t.Error(msg)
	}
}
// listDatasetsStub services dataset-list requests from an in-memory list,
// standing in for the listDatasets package variable. Datasets present in the
// hidden set are skipped unless the iterator sets ListHidden.
type listDatasetsStub struct {
	expectedProject string
	datasets        []*bq.DatasetListDatasets
	hidden          map[*bq.DatasetListDatasets]bool
}
// listDatasets simulates one page of a datasets.list call, honoring the
// iterator's ListHidden flag. The page token is the stringified index of the
// next dataset to consider.
func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	// Cap the page size so the test exercises multi-page iteration.
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	if it.Filter != "" {
		return nil, errors.New("filter not supported")
	}
	if it.ProjectID != s.expectedProject {
		return nil, errors.New("bad project ID")
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	var (
		i             int
		result        []*bq.DatasetListDatasets
		nextPageToken string
	)
	// Collect up to pageSize visible datasets; hidden ones are skipped but
	// still advance i, so the next page token accounts for them.
	for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
		if s.hidden[s.datasets[i]] && !it.ListHidden {
			continue
		}
		result = append(result, s.datasets[i])
	}
	if i < len(s.datasets) {
		nextPageToken = strconv.Itoa(i)
	}
	return &bq.DatasetList{
		Datasets:      result,
		NextPageToken: nextPageToken,
	}, nil
}
// TestDatasets runs DatasetIterator through the shared iterator conformance
// suite twice: once with hidden datasets included and once with them excluded.
func TestDatasets(t *testing.T) {
	client := &Client{projectID: "p"}
	inDatasets := []*bq.DatasetListDatasets{
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
	}
	outDatasets := []*Dataset{
		{"p", "a", client},
		{"p", "b", client},
		{"p", "hidden", client},
		{"p", "c", client},
	}
	lds := &listDatasetsStub{
		expectedProject: "p",
		datasets:        inDatasets,
		hidden:          map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
	}
	// Swap in the stub for the package-level hook and restore it afterwards.
	old := listDatasets
	listDatasets = lds.listDatasets // cannot use t.Parallel with this test
	defer func() { listDatasets = old }()

	// ListHidden=true: expect all four datasets, including "hidden".
	msg, ok := itest.TestIterator(outDatasets,
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=true: %s", msg)
	}

	// ListHidden=false: the hidden dataset (index 2) must be omitted.
	msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=false: %s", msg)
	}
}
// TestDatasetToBQ checks DatasetMetadata.toBQ conversions, including the nil
// case, and that metadata with non-writeable fields set is rejected.
func TestDatasetToBQ(t *testing.T) {
	for _, test := range []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		{nil, &bq.Dataset{}},
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{&DatasetMetadata{
			Name:                   "name",
			Description:            "desc",
			DefaultTableExpiration: time.Hour,
			Location:               "EU",
			Labels:                 map[string]string{"x": "y"},
			Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
		}, &bq.Dataset{
			FriendlyName:             "name",
			Description:              "desc",
			DefaultTableExpirationMs: 60 * 60 * 1000, // one hour in milliseconds
			Location:                 "EU",
			Labels:                   map[string]string{"x": "y"},
			Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
		}},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot  %+v\nwant %+v", test.in, got, test.want)
		}
	}

	// Check that non-writeable fields are unset.
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, dm := range []*DatasetMetadata{
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{FullID: "x"},
		{ETag: "e"},
	} {
		if _, err := dm.toBQ(); err == nil {
			t.Errorf("%+v: got nil, want error", dm)
		}
	}
}
// TestBQToDatasetMetadata checks the reverse conversion from a bq.Dataset,
// including millisecond-timestamp and access-entry translation.
func TestBQToDatasetMetadata(t *testing.T) {
	cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	cMillis := cTime.UnixNano() / 1e6 // API times are epoch milliseconds
	mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
	mMillis := mTime.UnixNano() / 1e6
	q := &bq.Dataset{
		CreationTime:             cMillis,
		LastModifiedTime:         mMillis,
		FriendlyName:             "name",
		Description:              "desc",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Location:                 "EU",
		Labels:                   map[string]string{"x": "y"},
		Access: []*bq.DatasetAccess{
			{Role: "READER", UserByEmail: "joe@example.com"},
			{Role: "WRITER", GroupByEmail: "users@example.com"},
		},
		Etag: "etag",
	}
	want := &DatasetMetadata{
		CreationTime:           cTime,
		LastModifiedTime:       mTime,
		Name:                   "name",
		Description:            "desc",
		DefaultTableExpiration: time.Hour,
		Location:               "EU",
		Labels:                 map[string]string{"x": "y"},
		Access: []*AccessEntry{
			{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
			{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
		},
		ETag: "etag",
	}
	got, err := bqToDatasetMetadata(q)
	if err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestDatasetMetadataToUpdateToBQ checks that a DatasetMetadataToUpdate
// produces the expected patch, including ForceSendFields for explicitly set
// string fields and NullFields for deleted labels.
func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
	dm := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	dm.SetLabel("label", "value")
	dm.DeleteLabel("del")

	got, err := dm.toBQ()
	if err != nil {
		t.Fatal(err)
	}
	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		// Ensure the API sends these even if they were set to empty strings.
		ForceSendFields: []string{"Description", "FriendlyName"},
		// Deleted labels are expressed as null fields in the patch.
		NullFields: []string{"Labels.del"},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestConvertAccessEntry round-trips AccessEntry values through toBQ and
// bqToAccessEntry for every entity type, then checks the error cases:
// a missing entity type and an access record with no entity field set.
func TestConvertAccessEntry(t *testing.T) {
	c := &Client{projectID: "pid"}
	for _, e := range []*AccessEntry{
		{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
		{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
		{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
		{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
		{Role: ReaderRole, EntityType: ViewEntity,
			View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
	} {
		q, err := e.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		got, err := bqToAccessEntry(q, c)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
			t.Errorf("got=-, want=+:\n%s", diff)
		}
	}

	// Zero EntityType is invalid and must be rejected by toBQ.
	e := &AccessEntry{Role: ReaderRole, Entity: "e"}
	if _, err := e.toBQ(); err == nil {
		t.Error("got nil, want error")
	}
	// An access record with no entity field set must be rejected.
	if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
		t.Error("got nil, want error")
	}
}
@@ -1,67 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package datatransfer | |||
import ( | |||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||
) | |||
import ( | |||
"context" | |||
"fmt" | |||
"strconv" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
"google.golang.org/api/iterator" | |||
"google.golang.org/api/option" | |||
) | |||
// Keep the generated imports referenced even when the smoke test below does
// not use all of them (this file is produced by the gapic generator).
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestDataTransferServiceSmoke exercises one real ListDataSources call against
// the live API. It is skipped in short mode and when no integration-test
// credentials are configured.
func TestDataTransferServiceSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projectId := testutil.ProjID()
	_ = projectId

	c, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}

	var formattedParent string = fmt.Sprintf("projects/%s", projectId)
	var request = &datatransferpb.ListDataSourcesRequest{
		Parent: formattedParent,
	}

	// One Next call is enough to prove the RPC plumbing works;
	// iterator.Done simply means the project has no data sources.
	iter := c.ListDataSources(ctx, request)
	if _, err := iter.Next(); err != nil && err != iterator.Done {
		t.Error(err)
	}
}
@@ -1,625 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package datatransfer | |||
import ( | |||
"context" | |||
"fmt" | |||
"math" | |||
"time" | |||
"github.com/golang/protobuf/proto" | |||
gax "github.com/googleapis/gax-go/v2" | |||
"google.golang.org/api/iterator" | |||
"google.golang.org/api/option" | |||
"google.golang.org/api/transport" | |||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
// CallOptions contains the retry settings for each method of Client.
// One field per RPC; populated by defaultCallOptions and overridable per call.
type CallOptions struct {
	GetDataSource        []gax.CallOption
	ListDataSources      []gax.CallOption
	CreateTransferConfig []gax.CallOption
	UpdateTransferConfig []gax.CallOption
	DeleteTransferConfig []gax.CallOption
	GetTransferConfig    []gax.CallOption
	ListTransferConfigs  []gax.CallOption
	ScheduleTransferRuns []gax.CallOption
	GetTransferRun       []gax.CallOption
	DeleteTransferRun    []gax.CallOption
	ListTransferRuns     []gax.CallOption
	ListTransferLogs     []gax.CallOption
	CheckValidCreds      []gax.CallOption
}
// defaultClientOptions returns the base connection options: the service
// endpoint and the OAuth scopes this API requires.
func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultCallOptions builds the per-method retry policies. Idempotent methods
// retry on DeadlineExceeded/Unavailable with exponential backoff; methods
// keyed as "non_idempotent" look up a key absent from the map and so get a
// nil option slice, i.e. no retry.
func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDataSource:        retry[[2]string{"default", "idempotent"}],
		ListDataSources:      retry[[2]string{"default", "idempotent"}],
		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with BigQuery Data Transfer API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client datatransferpb.DataTransferServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into
// BigQuery. This service contains methods that are end user exposed. It backs
// up the frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Caller-supplied opts come last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: datatransferpb.NewDataTransferServiceClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	// Standard key/value pairs: Go version, then caller-supplied pairs,
	// then the gapic/gax/grpc library versions.
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
	// Routing header so the backend can dispatch on the resource name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
	var resp *datatransferpb.DataSource
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
	it := &DataSourceIterator{}
	// Clone so page-token/page-size mutations below don't affect the caller's request.
	req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
		var resp *datatransferpb.ListDataSourcesResponse
		req.PageToken = pageToken
		// Clamp to int32 since the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DataSources, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.PageSize)
	return it
}
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
	it := &TransferConfigIterator{}
	// Clone so page-token/page-size mutations below don't affect the caller's request.
	req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
		var resp *datatransferpb.ListTransferConfigsResponse
		req.PageToken = pageToken
		// Clamp to int32 since the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferConfigs, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.PageSize)
	return it
}
// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression forces append to copy rather than mutate the shared defaults.
	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
	var resp *datatransferpb.ScheduleTransferRunsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetTransferRun returns information about the particular transfer run. | |||
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) { | |||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...) | |||
var resp *datatransferpb.TransferRun | |||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
var err error | |||
resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...) | |||
return err | |||
}, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return resp, nil | |||
} | |||
// DeleteTransferRun deletes the specified transfer run. | |||
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error { | |||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...) | |||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
var err error | |||
_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...) | |||
return err | |||
}, opts...) | |||
return err | |||
} | |||
// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
	// Routing header so the backend can dispatch by parent resource.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Three-index slice protects the shared c.CallOptions slice from being appended into.
	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
	it := &TransferRunIterator{}
	// Clone the request: the fetch closure below mutates PageToken/PageSize,
	// and those writes must not be visible to the caller's request value.
	req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
		var resp *datatransferpb.ListTransferRunsResponse
		req.PageToken = pageToken
		// PageSize on the wire is int32; clamp the iterator's int.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferRuns, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's buffered-page protocol:
	// it appends the page into it.items and reports only the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	// Seed MaxSize from any page size the caller pre-set on the request.
	it.pageInfo.MaxSize = int(req.PageSize)
	return it
}
// ListTransferLogs returns user facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
	// Routing header so the backend can dispatch by parent resource.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Three-index slice protects the shared c.CallOptions slice from being appended into.
	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
	it := &TransferMessageIterator{}
	// Clone the request: the fetch closure below mutates PageToken/PageSize,
	// and those writes must not be visible to the caller's request value.
	req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
		var resp *datatransferpb.ListTransferLogsResponse
		req.PageToken = pageToken
		// PageSize on the wire is int32; clamp the iterator's int.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferMessages, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's buffered-page protocol.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	// Seed MaxSize from any page size the caller pre-set on the request.
	it.pageInfo.MaxSize = int(req.PageSize)
	return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and | |||
// requesting user. | |||
// Some data sources doesn't support service account, so we need to talk to | |||
// them on behalf of the end user. This API just checks whether we have OAuth | |||
// token for the particular user, which is a pre-requisite before user can | |||
// create a transfer config. | |||
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) { | |||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...) | |||
var resp *datatransferpb.CheckValidCredsResponse | |||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
var err error | |||
resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...) | |||
return err | |||
}, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return resp, nil | |||
} | |||
// DataSourceIterator manages a stream of *datatransferpb.DataSource. | |||
type DataSourceIterator struct { | |||
items []*datatransferpb.DataSource | |||
pageInfo *iterator.PageInfo | |||
nextFunc func() error | |||
// InternalFetch is for use by the Google Cloud Libraries only. | |||
// It is not part of the stable interface of this package. | |||
// | |||
// InternalFetch returns results from a single call to the underlying RPC. | |||
// The number of results is no greater than pageSize. | |||
// If there are no more results, nextPageToken is empty and err is nil. | |||
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error) | |||
} | |||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo { | |||
return it.pageInfo | |||
} | |||
// Next returns the next result. Its second return value is iterator.Done if there are no more | |||
// results. Once Next returns Done, all subsequent calls will return Done. | |||
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) { | |||
var item *datatransferpb.DataSource | |||
if err := it.nextFunc(); err != nil { | |||
return item, err | |||
} | |||
item = it.items[0] | |||
it.items = it.items[1:] | |||
return item, nil | |||
} | |||
func (it *DataSourceIterator) bufLen() int { | |||
return len(it.items) | |||
} | |||
func (it *DataSourceIterator) takeBuf() interface{} { | |||
b := it.items | |||
it.items = nil | |||
return b | |||
} | |||
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig. | |||
type TransferConfigIterator struct { | |||
items []*datatransferpb.TransferConfig | |||
pageInfo *iterator.PageInfo | |||
nextFunc func() error | |||
// InternalFetch is for use by the Google Cloud Libraries only. | |||
// It is not part of the stable interface of this package. | |||
// | |||
// InternalFetch returns results from a single call to the underlying RPC. | |||
// The number of results is no greater than pageSize. | |||
// If there are no more results, nextPageToken is empty and err is nil. | |||
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error) | |||
} | |||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo { | |||
return it.pageInfo | |||
} | |||
// Next returns the next result. Its second return value is iterator.Done if there are no more | |||
// results. Once Next returns Done, all subsequent calls will return Done. | |||
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) { | |||
var item *datatransferpb.TransferConfig | |||
if err := it.nextFunc(); err != nil { | |||
return item, err | |||
} | |||
item = it.items[0] | |||
it.items = it.items[1:] | |||
return item, nil | |||
} | |||
func (it *TransferConfigIterator) bufLen() int { | |||
return len(it.items) | |||
} | |||
func (it *TransferConfigIterator) takeBuf() interface{} { | |||
b := it.items | |||
it.items = nil | |||
return b | |||
} | |||
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage. | |||
type TransferMessageIterator struct { | |||
items []*datatransferpb.TransferMessage | |||
pageInfo *iterator.PageInfo | |||
nextFunc func() error | |||
// InternalFetch is for use by the Google Cloud Libraries only. | |||
// It is not part of the stable interface of this package. | |||
// | |||
// InternalFetch returns results from a single call to the underlying RPC. | |||
// The number of results is no greater than pageSize. | |||
// If there are no more results, nextPageToken is empty and err is nil. | |||
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error) | |||
} | |||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo { | |||
return it.pageInfo | |||
} | |||
// Next returns the next result. Its second return value is iterator.Done if there are no more | |||
// results. Once Next returns Done, all subsequent calls will return Done. | |||
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) { | |||
var item *datatransferpb.TransferMessage | |||
if err := it.nextFunc(); err != nil { | |||
return item, err | |||
} | |||
item = it.items[0] | |||
it.items = it.items[1:] | |||
return item, nil | |||
} | |||
func (it *TransferMessageIterator) bufLen() int { | |||
return len(it.items) | |||
} | |||
func (it *TransferMessageIterator) takeBuf() interface{} { | |||
b := it.items | |||
it.items = nil | |||
return b | |||
} | |||
// TransferRunIterator manages a stream of *datatransferpb.TransferRun. | |||
type TransferRunIterator struct { | |||
items []*datatransferpb.TransferRun | |||
pageInfo *iterator.PageInfo | |||
nextFunc func() error | |||
// InternalFetch is for use by the Google Cloud Libraries only. | |||
// It is not part of the stable interface of this package. | |||
// | |||
// InternalFetch returns results from a single call to the underlying RPC. | |||
// The number of results is no greater than pageSize. | |||
// If there are no more results, nextPageToken is empty and err is nil. | |||
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error) | |||
} | |||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo { | |||
return it.pageInfo | |||
} | |||
// Next returns the next result. Its second return value is iterator.Done if there are no more | |||
// results. Once Next returns Done, all subsequent calls will return Done. | |||
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) { | |||
var item *datatransferpb.TransferRun | |||
if err := it.nextFunc(); err != nil { | |||
return item, err | |||
} | |||
item = it.items[0] | |||
it.items = it.items[1:] | |||
return item, nil | |||
} | |||
func (it *TransferRunIterator) bufLen() int { | |||
return len(it.items) | |||
} | |||
func (it *TransferRunIterator) takeBuf() interface{} { | |||
b := it.items | |||
it.items = nil | |||
return b | |||
} |
@@ -1,289 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package datatransfer_test | |||
import ( | |||
"context" | |||
datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1" | |||
"google.golang.org/api/iterator" | |||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||
) | |||
// ExampleNewClient demonstrates the minimal setup for a Data Transfer client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_GetDataSource demonstrates fetching a single data source.
func ExampleClient_GetDataSource() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetDataSourceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDataSource(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListDataSources demonstrates paging through data sources
// with the standard iterator.Done loop.
func ExampleClient_ListDataSources() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_CreateTransferConfig demonstrates creating a transfer config.
func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_UpdateTransferConfig demonstrates updating a transfer config.
func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTransferConfig demonstrates deleting a transfer config;
// the call returns only an error.
func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_GetTransferConfig demonstrates fetching a single transfer config.
func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListTransferConfigs demonstrates paging through transfer
// configs with the standard iterator.Done loop.
func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_ScheduleTransferRuns demonstrates scheduling transfer runs
// for a time range.
func ExampleClient_ScheduleTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ScheduleTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ScheduleTransferRuns(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_GetTransferRun demonstrates fetching a single transfer run.
func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTransferRun demonstrates deleting a transfer run;
// the call returns only an error.
func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_ListTransferRuns demonstrates paging through transfer runs
// with the standard iterator.Done loop.
func ExampleClient_ListTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferRuns(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_ListTransferLogs demonstrates paging through a run's log
// messages with the standard iterator.Done loop.
func ExampleClient_ListTransferLogs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferLogsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferLogs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_CheckValidCreds demonstrates checking whether OAuth
// credentials exist for a data source.
func ExampleClient_CheckValidCreds() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CheckValidCredsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CheckValidCreds(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
@@ -1,90 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
// Package datatransfer is an auto-generated package for the | |||
// BigQuery Data Transfer API. | |||
// | |||
// NOTE: This package is in alpha. It is not stable, and is likely to change. | |||
// | |||
// Transfers data from partner SaaS applications to Google BigQuery on a | |||
// scheduled, managed basis. | |||
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1" | |||
import ( | |||
"context" | |||
"runtime" | |||
"strings" | |||
"unicode" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { | |||
out, _ := metadata.FromOutgoingContext(ctx) | |||
out = out.Copy() | |||
for _, md := range mds { | |||
for k, v := range md { | |||
out[k] = append(out[k], v...) | |||
} | |||
} | |||
return metadata.NewOutgoingContext(ctx, out) | |||
} | |||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{"https://www.googleapis.com/auth/cloud-platform"}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
	const develPrefix = "devel +"

	v := runtime.Version()
	// Development builds look like "devel +<hash> <date>"; report just the hash.
	if strings.HasPrefix(v, develPrefix) {
		v = v[len(develPrefix):]
		if sp := strings.IndexFunc(v, unicode.IsSpace); sp >= 0 {
			v = v[:sp]
		}
		return v
	}

	// Release builds look like "go1.x[.y][prerelease]"; normalize to a
	// three-component semver with the prerelease appended after a dash.
	if strings.HasPrefix(v, "go1") {
		v = v[2:] // drop "go", keep "1..."
		isNotSemver := func(r rune) bool {
			return !strings.ContainsRune("0123456789.", r)
		}
		prerelease := ""
		if cut := strings.IndexFunc(v, isNotSemver); cut >= 0 {
			v, prerelease = v[:cut], v[cut:]
		}
		switch {
		case strings.HasSuffix(v, "."):
			v += "0"
		case strings.Count(v, ".") < 2:
			v += ".0"
		}
		if prerelease != "" {
			v += "-" + prerelease
		}
		return v
	}
	return "UNKNOWN"
}

// versionClient is the generated-client version reported in request headers.
const versionClient = "20190306"
@@ -1,135 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package datatransfer | |||
// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
	return "projects/" + project
}
// LocationPath returns the path for the location resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
	return "projects/" + project + "/locations/" + location
}
// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
	return "projects/" + project + "/locations/" + location + "/dataSources/" + dataSource
}
// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "projects/" + project + "/locations/" + location + "/transferConfigs/" + transferConfig
}
// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
	return "projects/" + project + "/locations/" + location + "/transferConfigs/" + transferConfig + "/runs/" + run
}
// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
	return "projects/" + project + "/dataSources/" + dataSource
}
// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
	return "projects/" + project + "/transferConfigs/" + transferConfig
}
// RunPath returns the path for the run resource.
//
// Deprecated: Use
//  fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
	return "projects/" + project + "/transferConfigs/" + transferConfig + "/runs/" + run
}
@@ -1,310 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
/* | |||
Package bigquery provides a client for the BigQuery service. | |||
Note: This package is in beta. Some backwards-incompatible changes may occur. | |||
The following assumes a basic familiarity with BigQuery concepts. | |||
See https://cloud.google.com/bigquery/docs. | |||
See https://godoc.org/cloud.google.com/go for authentication, timeouts, | |||
connection pooling and similar aspects of this package. | |||
Creating a Client | |||
To start working with this package, create a client: | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, projectID) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
Querying | |||
To query existing tables, create a Query and call its Read method: | |||
q := client.Query(` | |||
SELECT year, SUM(number) as num | |||
FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + ` | |||
WHERE name = "William" | |||
GROUP BY year | |||
ORDER BY year | |||
`) | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
Then iterate through the resulting rows. You can store a row using | |||
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value. | |||
A slice is simplest: | |||
for { | |||
var values []bigquery.Value | |||
err := it.Next(&values) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(values) | |||
} | |||
You can also use a struct whose exported fields match the query: | |||
type Count struct { | |||
Year int | |||
Num int | |||
} | |||
for { | |||
var c Count | |||
err := it.Next(&c) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(c) | |||
} | |||
You can also start the query running and get the results later. | |||
Create the query as above, but call Run instead of Read. This returns a Job, | |||
which represents an asynchronous operation. | |||
job, err := q.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
Get the job's ID, a printable string. You can save this string to retrieve | |||
the results at a later time, even in another process. | |||
jobID := job.ID() | |||
fmt.Printf("The job ID is %s\n", jobID) | |||
To retrieve the job's results from the ID, first look up the Job: | |||
job, err = client.JobFromID(ctx, jobID) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
Use the Job.Read method to obtain an iterator, and loop over the rows. | |||
Query.Read is just a convenience method that combines Query.Run and Job.Read. | |||
it, err = job.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
// Proceed with iteration as above. | |||
Datasets and Tables | |||
You can refer to datasets in the client's project with the Dataset method, and | |||
in other projects with the DatasetInProject method: | |||
myDataset := client.Dataset("my_dataset") | |||
yourDataset := client.DatasetInProject("your-project-id", "your_dataset") | |||
These methods create references to datasets, not the datasets themselves. You can have | |||
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to | |||
create a dataset from a reference: | |||
if err := myDataset.Create(ctx, nil); err != nil { | |||
// TODO: Handle error. | |||
} | |||
You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference | |||
to an object in BigQuery that may or may not exist. | |||
table := myDataset.Table("my_table") | |||
You can create, delete and update the metadata of tables with methods on Table. | |||
For instance, you could create a temporary table with: | |||
err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{ | |||
ExpirationTime: time.Now().Add(1*time.Hour)}) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
We'll see how to create a table with a schema in the next section. | |||
Schemas | |||
There are two ways to construct schemas with this package. | |||
You can build a schema by hand, like so: | |||
schema1 := bigquery.Schema{ | |||
{Name: "Name", Required: true, Type: bigquery.StringFieldType}, | |||
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, | |||
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType}, | |||
} | |||
Or you can infer the schema from a struct: | |||
type student struct { | |||
Name string | |||
Grades []int | |||
Optional bigquery.NullInt64 | |||
} | |||
schema2, err := bigquery.InferSchema(student{}) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
// schema1 and schema2 are identical. | |||
Struct inference supports tags like those of the encoding/json package, so you can | |||
change names, ignore fields, or mark a field as nullable (non-required). Fields | |||
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool, | |||
NullTimestamp, NullDate, NullTime, NullDateTime, and NullGeography) are | |||
automatically inferred as nullable, so the "nullable" tag is only needed for []byte, | |||
*big.Rat and pointer-to-struct fields. | |||
type student2 struct { | |||
Name string `bigquery:"full_name"` | |||
Grades []int | |||
Secret string `bigquery:"-"` | |||
	    Optional []byte `bigquery:",nullable"`
} | |||
schema3, err := bigquery.InferSchema(student2{}) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
	// schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".
Having constructed a schema, you can create a table with it like so: | |||
if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil { | |||
// TODO: Handle error. | |||
} | |||
Copying | |||
You can copy one or more tables to another table. Begin by constructing a Copier | |||
describing the copy. Then set any desired copy options, and finally call Run to get a Job: | |||
copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src")) | |||
copier.WriteDisposition = bigquery.WriteTruncate | |||
job, err = copier.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
You can chain the call to Run if you don't want to set options: | |||
job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
You can wait for your job to complete: | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
Job.Wait polls with exponential backoff. You can also poll yourself, if you | |||
wish: | |||
for { | |||
status, err := job.Status(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Done() { | |||
if status.Err() != nil { | |||
log.Fatalf("Job failed with error %v", status.Err()) | |||
} | |||
break | |||
} | |||
time.Sleep(pollInterval) | |||
} | |||
Loading and Uploading | |||
There are two ways to populate a table with this package: load the data from a Google Cloud Storage | |||
object, or upload rows directly from your program. | |||
For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure | |||
it as well, and call its Run method. | |||
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") | |||
gcsRef.AllowJaggedRows = true | |||
loader := myDataset.Table("dest").LoaderFrom(gcsRef) | |||
loader.CreateDisposition = bigquery.CreateNever | |||
job, err = loader.Run(ctx) | |||
// Poll the job for completion if desired, as above. | |||
To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. | |||
Then create an Uploader, and call its Put method with a slice of values. | |||
u := table.Uploader() | |||
// Item implements the ValueSaver interface. | |||
items := []*Item{ | |||
{Name: "n1", Size: 32.6, Count: 7}, | |||
{Name: "n2", Size: 4, Count: 2}, | |||
{Name: "n3", Size: 101.5, Count: 1}, | |||
} | |||
if err := u.Put(ctx, items); err != nil { | |||
// TODO: Handle error. | |||
} | |||
You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type | |||
to specify the schema and insert ID by hand, or just supply the struct or struct pointer | |||
directly and the schema will be inferred: | |||
type Item2 struct { | |||
Name string | |||
Size float64 | |||
Count int | |||
} | |||
	// Item2 does not implement the ValueSaver interface; its schema is inferred.
items2 := []*Item2{ | |||
{Name: "n1", Size: 32.6, Count: 7}, | |||
{Name: "n2", Size: 4, Count: 2}, | |||
{Name: "n3", Size: 101.5, Count: 1}, | |||
} | |||
if err := u.Put(ctx, items2); err != nil { | |||
// TODO: Handle error. | |||
} | |||
Extracting | |||
If you've been following so far, extracting data from a BigQuery table | |||
into a Google Cloud Storage object will feel familiar. First create an | |||
Extractor, then optionally configure it, and lastly call its Run method. | |||
extractor := table.ExtractorTo(gcsRef) | |||
extractor.DisableHeader = true | |||
job, err = extractor.Run(ctx) | |||
// Poll the job for completion if desired, as above. | |||
Errors | |||
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). | |||
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: | |||
if e, ok := err.(*googleapi.Error); ok { | |||
	   if e.Code == 409 { ... }
} | |||
*/ | |||
package bigquery // import "cloud.google.com/go/bigquery" |
@@ -1,83 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"fmt" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// An Error describes one failure reported by the BigQuery service.
// The possible Reason values are documented at
// https://cloud.google.com/bigquery/troubleshooting-errors.
type Error struct {
	// Mirrors bq.ErrorProto, but drops DebugInfo
	Location string
	Message  string
	Reason   string
}

// Error renders the failure as a single human-readable string.
func (e Error) Error() string {
	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}
func bqToError(ep *bq.ErrorProto) *Error { | |||
if ep == nil { | |||
return nil | |||
} | |||
return &Error{ | |||
Location: ep.Location, | |||
Message: ep.Message, | |||
Reason: ep.Reason, | |||
} | |||
} | |||
// A MultiError contains multiple related errors.
type MultiError []error

// Error summarizes the collection: the first error's text plus a count of
// how many further errors accompany it.
func (m MultiError) Error() string {
	n := len(m)
	if n == 0 {
		return "(0 errors)"
	}
	if n == 1 {
		return m[0].Error()
	}
	if n == 2 {
		return m[0].Error() + " (and 1 other error)"
	}
	return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), n-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
	InsertID string // The InsertID associated with the affected row.
	RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
	Errors   MultiError
}

// Error identifies the affected row and appends its combined errors.
func (e *RowInsertionError) Error() string {
	return fmt.Sprintf("insertion of row [insertID: %q; insertIndex: %v] failed with error: %s",
		e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

// Error reports how many row insertions failed, pluralizing as needed.
func (pme PutMultiError) Error() string {
	suffix := "s"
	if len(pme) == 1 {
		suffix = ""
	}
	return fmt.Sprintf("%v row insertion%s failed", len(pme), suffix)
}
@@ -1,109 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"errors" | |||
"strings" | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
func rowInsertionError(msg string) RowInsertionError { | |||
return RowInsertionError{Errors: []error{errors.New(msg)}} | |||
} | |||
func TestPutMultiErrorString(t *testing.T) { | |||
testCases := []struct { | |||
errs PutMultiError | |||
want string | |||
}{ | |||
{ | |||
errs: PutMultiError{}, | |||
want: "0 row insertions failed", | |||
}, | |||
{ | |||
errs: PutMultiError{rowInsertionError("a")}, | |||
want: "1 row insertion failed", | |||
}, | |||
{ | |||
errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, | |||
want: "2 row insertions failed", | |||
}, | |||
} | |||
for _, tc := range testCases { | |||
if tc.errs.Error() != tc.want { | |||
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) | |||
} | |||
} | |||
} | |||
func TestMultiErrorString(t *testing.T) { | |||
testCases := []struct { | |||
errs MultiError | |||
want string | |||
}{ | |||
{ | |||
errs: MultiError{}, | |||
want: "(0 errors)", | |||
}, | |||
{ | |||
errs: MultiError{errors.New("a")}, | |||
want: "a", | |||
}, | |||
{ | |||
errs: MultiError{errors.New("a"), errors.New("b")}, | |||
want: "a (and 1 other error)", | |||
}, | |||
{ | |||
errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, | |||
want: "a (and 2 other errors)", | |||
}, | |||
} | |||
for _, tc := range testCases { | |||
if tc.errs.Error() != tc.want { | |||
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) | |||
} | |||
} | |||
} | |||
func TestErrorFromErrorProto(t *testing.T) { | |||
for _, test := range []struct { | |||
in *bq.ErrorProto | |||
want *Error | |||
}{ | |||
{nil, nil}, | |||
{ | |||
in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, | |||
want: &Error{Location: "L", Message: "M", Reason: "R"}, | |||
}, | |||
} { | |||
if got := bqToError(test.in); !testutil.Equal(got, test.want) { | |||
t.Errorf("%v: got %v, want %v", test.in, got, test.want) | |||
} | |||
} | |||
} | |||
func TestErrorString(t *testing.T) { | |||
e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"} | |||
got := e.Error() | |||
if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") { | |||
t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got) | |||
} | |||
} |
@@ -1,829 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery_test | |||
import ( | |||
"context" | |||
"fmt" | |||
"os" | |||
"time" | |||
"cloud.google.com/go/bigquery" | |||
"google.golang.org/api/iterator" | |||
) | |||
func ExampleNewClient() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
_ = client // TODO: Use client. | |||
} | |||
func ExampleClient_Dataset() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
fmt.Println(ds) | |||
} | |||
func ExampleClient_DatasetInProject() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.DatasetInProject("their-project-id", "their-dataset") | |||
fmt.Println(ds) | |||
} | |||
func ExampleClient_Datasets() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.Datasets(ctx) | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func ExampleClient_DatasetsInProject() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.DatasetsInProject(ctx, "their-project-id") | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func getJobID() string { return "" } | |||
func ExampleClient_JobFromID() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. | |||
job, err := client.JobFromID(ctx, jobID) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(job.LastStatus()) // Display the job's status. | |||
} | |||
func ExampleClient_Jobs() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.Jobs(ctx) | |||
it.State = bigquery.Running // list only running jobs. | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func ExampleNewGCSReference() { | |||
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") | |||
fmt.Println(gcsRef) | |||
} | |||
func ExampleClient_Query() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select name, num from t1") | |||
q.DefaultProjectID = "project-id" | |||
// TODO: set other options on the Query. | |||
// TODO: Call Query.Run or Query.Read. | |||
} | |||
func ExampleClient_Query_parameters() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select num from t1 where name = @user") | |||
q.Parameters = []bigquery.QueryParameter{ | |||
{Name: "user", Value: "Elizabeth"}, | |||
} | |||
// TODO: set other options on the Query. | |||
// TODO: Call Query.Run or Query.Read. | |||
} | |||
// This example demonstrates how to run a query job on a table | |||
// with a customer-managed encryption key. The same | |||
// applies to load and copy jobs as well. | |||
func ExampleClient_Query_encryptionKey() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select name, num from t1") | |||
// TODO: Replace this key with a key you have created in Cloud KMS. | |||
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" | |||
q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName} | |||
// TODO: set other options on the Query. | |||
// TODO: Call Query.Run or Query.Read. | |||
} | |||
func ExampleQuery_Read() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select name, num from t1") | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func ExampleRowIterator_Next() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select name, num from t1") | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
for { | |||
var row []bigquery.Value | |||
err := it.Next(&row) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(row) | |||
} | |||
} | |||
func ExampleRowIterator_Next_struct() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
type score struct { | |||
Name string | |||
Num int | |||
} | |||
q := client.Query("select name, num from t1") | |||
it, err := q.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
for { | |||
var s score | |||
err := it.Next(&s) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(s) | |||
} | |||
} | |||
func ExampleJob_Read() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
q := client.Query("select name, num from t1") | |||
// Call Query.Run to get a Job, then call Read on the job. | |||
// Note: Query.Read is a shorthand for this. | |||
job, err := q.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it, err := job.Read(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func ExampleJob_Wait() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Err() != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleJob_Config() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
jc, err := job.Config() | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
copyConfig := jc.(*bigquery.CopyConfig) | |||
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition) | |||
} | |||
func ExampleDataset_Create() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleDataset_Delete() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if err := client.Dataset("my_dataset").Delete(ctx); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleDataset_Metadata() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
md, err := client.Dataset("my_dataset").Metadata(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(md) | |||
} | |||
// This example illustrates how to perform a read-modify-write sequence on dataset | |||
// metadata. Passing the metadata's ETag to the Update call ensures that the call | |||
// will fail if the metadata was changed since the read. | |||
func ExampleDataset_Update_readModifyWrite() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
md, err := ds.Metadata(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
md2, err := ds.Update(ctx, | |||
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name}, | |||
md.ETag) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(md2) | |||
} | |||
// To perform a blind write, ignoring the existing state (and possibly overwriting | |||
// other updates), pass the empty string as the etag. | |||
func ExampleDataset_Update_blindWrite() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(md) | |||
} | |||
func ExampleDataset_Table() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
// Table creates a reference to the table. It does not create the actual | |||
// table in BigQuery; to do so, use Table.Create. | |||
t := client.Dataset("my_dataset").Table("my_table") | |||
fmt.Println(t) | |||
} | |||
func ExampleDataset_Tables() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.Dataset("my_dataset").Tables(ctx) | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
func ExampleDatasetIterator_Next() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.Datasets(ctx) | |||
for { | |||
ds, err := it.Next() | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(ds) | |||
} | |||
} | |||
func ExampleInferSchema() { | |||
type Item struct { | |||
Name string | |||
Size float64 | |||
Count int | |||
} | |||
schema, err := bigquery.InferSchema(Item{}) | |||
if err != nil { | |||
fmt.Println(err) | |||
// TODO: Handle error. | |||
} | |||
for _, fs := range schema { | |||
fmt.Println(fs.Name, fs.Type) | |||
} | |||
// Output: | |||
// Name STRING | |||
// Size FLOAT | |||
// Count INTEGER | |||
} | |||
func ExampleInferSchema_tags() { | |||
type Item struct { | |||
Name string | |||
Size float64 | |||
Count int `bigquery:"number"` | |||
Secret []byte `bigquery:"-"` | |||
Optional bigquery.NullBool | |||
OptBytes []byte `bigquery:",nullable"` | |||
} | |||
schema, err := bigquery.InferSchema(Item{}) | |||
if err != nil { | |||
fmt.Println(err) | |||
// TODO: Handle error. | |||
} | |||
for _, fs := range schema { | |||
fmt.Println(fs.Name, fs.Type, fs.Required) | |||
} | |||
// Output: | |||
// Name STRING true | |||
// Size FLOAT true | |||
// number INTEGER true | |||
// Optional BOOLEAN false | |||
// OptBytes BYTES false | |||
} | |||
func ExampleTable_Create() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
t := client.Dataset("my_dataset").Table("new-table") | |||
if err := t.Create(ctx, nil); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
// Initialize a new table by passing TableMetadata to Table.Create. | |||
func ExampleTable_Create_initialize() { | |||
ctx := context.Background() | |||
// Infer table schema from a Go type. | |||
schema, err := bigquery.InferSchema(Item{}) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
t := client.Dataset("my_dataset").Table("new-table") | |||
if err := t.Create(ctx, | |||
&bigquery.TableMetadata{ | |||
Name: "My New Table", | |||
Schema: schema, | |||
ExpirationTime: time.Now().Add(24 * time.Hour), | |||
}); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
// This example demonstrates how to create a table with | |||
// a customer-managed encryption key. | |||
func ExampleTable_Create_encryptionKey() { | |||
ctx := context.Background() | |||
// Infer table schema from a Go type. | |||
schema, err := bigquery.InferSchema(Item{}) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
t := client.Dataset("my_dataset").Table("new-table") | |||
// TODO: Replace this key with a key you have created in Cloud KMS. | |||
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" | |||
if err := t.Create(ctx, | |||
&bigquery.TableMetadata{ | |||
Name: "My New Table", | |||
Schema: schema, | |||
EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName}, | |||
}); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_Delete() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_Metadata() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(md) | |||
} | |||
func ExampleTable_Inserter() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ins := client.Dataset("my_dataset").Table("my_table").Inserter() | |||
_ = ins // TODO: Use ins. | |||
} | |||
func ExampleTable_Inserter_options() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ins := client.Dataset("my_dataset").Table("my_table").Inserter() | |||
ins.SkipInvalidRows = true | |||
ins.IgnoreUnknownValues = true | |||
_ = ins // TODO: Use ins. | |||
} | |||
func ExampleTable_CopierFrom() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
ds := client.Dataset("my_dataset") | |||
c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2")) | |||
c.WriteDisposition = bigquery.WriteTruncate | |||
// TODO: set other options on the Copier. | |||
job, err := c.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Err() != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_ExtractorTo() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") | |||
gcsRef.FieldDelimiter = ":" | |||
// TODO: set other options on the GCSReference. | |||
ds := client.Dataset("my_dataset") | |||
extractor := ds.Table("my_table").ExtractorTo(gcsRef) | |||
extractor.DisableHeader = true | |||
// TODO: set other options on the Extractor. | |||
job, err := extractor.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Err() != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_LoaderFrom() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") | |||
gcsRef.AllowJaggedRows = true | |||
gcsRef.MaxBadRecords = 5 | |||
gcsRef.Schema = schema | |||
// TODO: set other options on the GCSReference. | |||
ds := client.Dataset("my_dataset") | |||
loader := ds.Table("my_table").LoaderFrom(gcsRef) | |||
loader.CreateDisposition = bigquery.CreateNever | |||
// TODO: set other options on the Loader. | |||
job, err := loader.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Err() != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_LoaderFrom_reader() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
f, err := os.Open("data.csv") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
rs := bigquery.NewReaderSource(f) | |||
rs.AllowJaggedRows = true | |||
rs.MaxBadRecords = 5 | |||
rs.Schema = schema | |||
// TODO: set other options on the GCSReference. | |||
ds := client.Dataset("my_dataset") | |||
loader := ds.Table("my_table").LoaderFrom(rs) | |||
loader.CreateDisposition = bigquery.CreateNever | |||
// TODO: set other options on the Loader. | |||
job, err := loader.Run(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
status, err := job.Wait(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
if status.Err() != nil { | |||
// TODO: Handle error. | |||
} | |||
} | |||
func ExampleTable_Read() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
it := client.Dataset("my_dataset").Table("my_table").Read(ctx) | |||
_ = it // TODO: iterate using Next or iterator.Pager. | |||
} | |||
// This example illustrates how to perform a read-modify-write sequence on table | |||
// metadata. Passing the metadata's ETag to the Update call ensures that the call | |||
// will fail if the metadata was changed since the read. | |||
func ExampleTable_Update_readModifyWrite() { | |||
ctx := context.Background() | |||
client, err := bigquery.NewClient(ctx, "project-id") | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
t := client.Dataset("my_dataset").Table("my_table") | |||
md, err := t.Metadata(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
md2, err := t.Update(ctx, | |||
bigquery.TableMetadataToUpdate{Name: "new " + md.Name}, | |||
md.ETag) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
fmt.Println(md2) | |||
} | |||
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	// An empty etag makes the update unconditional.
	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
		Description: "my favorite table",
	}, "")
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(tm)
}
// ExampleTableIterator_Next demonstrates the standard iterator loop over the
// tables of a dataset, terminating on iterator.Done.
func ExampleTableIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	for {
		t, err := it.Next()
		// iterator.Done signals normal exhaustion, not a failure.
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(t)
	}
}
// Item is a sample row type used by the Inserter examples.
type Item struct {
	Name  string
	Size  float64
	Count int
}

// Save implements the ValueSaver interface.
// It returns the row values keyed by column name; the empty insert ID
// lets the service generate one.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
	return map[string]bigquery.Value{
		"Name":  i.Name,
		"Size":  i.Size,
		"Count": i.Count,
	}, "", nil
}
// ExampleInserter_Put demonstrates streaming rows that implement ValueSaver.
func ExampleInserter_Put() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
	// Item implements the ValueSaver interface.
	items := []*Item{
		{Name: "n1", Size: 32.6, Count: 7},
		{Name: "n2", Size: 4, Count: 2},
		{Name: "n3", Size: 101.5, Count: 1},
	}
	if err := ins.Put(ctx, items); err != nil {
		// TODO: Handle error.
	}
}
// schema is a package-level placeholder; the examples assume it holds the
// relevant table's schema.
var schema bigquery.Schema

// ExampleInserter_Put_structSaver demonstrates streaming rows wrapped in
// StructSaver, which supplies an explicit schema and per-row insert IDs.
func ExampleInserter_Put_structSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
	type score struct {
		Name string
		Num  int
	}
	// Assume schema holds the table's schema.
	savers := []*bigquery.StructSaver{
		{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
	}
	if err := ins.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}
}
// ExampleInserter_Put_struct demonstrates streaming plain structs;
// the schema is inferred from the struct type.
func ExampleInserter_Put_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
	type score struct {
		Name string
		Num  int
	}
	scores := []score{
		{Name: "n1", Num: 12},
		{Name: "n2", Num: 31},
		{Name: "n3", Num: 7},
	}
	// Schema is inferred from the score type.
	if err := ins.Put(ctx, scores); err != nil {
		// TODO: Handle error.
	}
}
// ExampleInserter_Put_valuesSaver demonstrates streaming rows as raw value
// slices via ValuesSaver, pairing each row with a schema and an insert ID.
func ExampleInserter_Put_valuesSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
	var vss []*bigquery.ValuesSaver
	for i, name := range []string{"n1", "n2", "n3"} {
		// Assume schema holds the table's schema.
		vss = append(vss, &bigquery.ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      []bigquery.Value{name, int64(i)},
		})
	}
	if err := ins.Put(ctx, vss); err != nil {
		// TODO: Handle error.
	}
}
@@ -1,400 +0,0 @@ | |||
// Copyright 2017 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"encoding/base64" | |||
"unicode/utf8" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
// The string values are the wire values the BigQuery API expects; note
// that JSON maps to "NEWLINE_DELIMITED_JSON", not plain "JSON".
const (
	CSV             DataFormat = "CSV"
	Avro            DataFormat = "AVRO"
	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
	Bigtable        DataFormat = "BIGTABLE"
	Parquet         DataFormat = "PARQUET"
	ORC             DataFormat = "ORC"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
	// toBQ converts the value to the underlying API representation.
	toBQ() bq.ExternalDataConfiguration
}
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
	// The format of the data. Required.
	SourceFormat DataFormat

	// The fully-qualified URIs that point to your
	// data in Google Cloud. Required.
	//
	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
	// and it must come after the 'bucket' name. Size limits related to load jobs
	// apply to external data sources.
	//
	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has be
	// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
	//
	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
	// the '*' wildcard character is not allowed.
	SourceURIs []string

	// The schema of the data. Required for CSV and JSON; disallowed for the
	// other formats.
	Schema Schema

	// Try to detect schema and format options automatically.
	// Any option specified explicitly will be honored.
	AutoDetect bool

	// The compression type of the data.
	Compression Compression

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// Additional options for CSV, GoogleSheets and Bigtable formats.
	Options ExternalDataConfigOptions
}
func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration { | |||
q := bq.ExternalDataConfiguration{ | |||
SourceFormat: string(e.SourceFormat), | |||
SourceUris: e.SourceURIs, | |||
Autodetect: e.AutoDetect, | |||
Compression: string(e.Compression), | |||
IgnoreUnknownValues: e.IgnoreUnknownValues, | |||
MaxBadRecords: e.MaxBadRecords, | |||
} | |||
if e.Schema != nil { | |||
q.Schema = e.Schema.toBQ() | |||
} | |||
if e.Options != nil { | |||
e.Options.populateExternalDataConfig(&q) | |||
} | |||
return q | |||
} | |||
// bqToExternalDataConfig converts the API representation back into an
// ExternalDataConfig. It returns an error only if the Bigtable options
// cannot be converted.
func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
	e := &ExternalDataConfig{
		SourceFormat:        DataFormat(q.SourceFormat),
		SourceURIs:          q.SourceUris,
		AutoDetect:          q.Autodetect,
		Compression:         Compression(q.Compression),
		IgnoreUnknownValues: q.IgnoreUnknownValues,
		MaxBadRecords:       q.MaxBadRecords,
		Schema:              bqToSchema(q.Schema),
	}
	// At most one of the format-specific option structs is expected to be
	// set; map it back to the matching ExternalDataConfigOptions value.
	switch {
	case q.CsvOptions != nil:
		e.Options = bqToCSVOptions(q.CsvOptions)
	case q.GoogleSheetsOptions != nil:
		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
	case q.BigtableOptions != nil:
		var err error
		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
		if err != nil {
			return nil, err
		}
	}
	return e, nil
}
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
	// populateExternalDataConfig writes the options into the API struct.
	populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
	// AllowJaggedRows causes missing trailing optional columns to be tolerated
	// when reading CSV data. Missing values are treated as nulls.
	AllowJaggedRows bool

	// AllowQuotedNewlines sets whether quoted data sections containing
	// newlines are allowed when reading CSV data.
	AllowQuotedNewlines bool

	// Encoding is the character encoding of data to be read.
	Encoding Encoding

	// FieldDelimiter is the separator for fields in a CSV file, used when
	// reading or exporting data. The default is ",".
	FieldDelimiter string

	// Quote is the value used to quote data sections in a CSV file. The
	// default quotation character is the double quote ("), which is used if
	// both Quote and ForceZeroQuote are unset.
	// To specify that no character should be interpreted as a quotation
	// character, set ForceZeroQuote to true.
	// Only used when reading data.
	Quote          string
	ForceZeroQuote bool

	// The number of rows at the top of a CSV file that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64
}
// populateExternalDataConfig writes the CSV options into the API struct.
// The quote character is resolved via quote(), which encodes the
// Quote/ForceZeroQuote pair as the pointer the API expects.
func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
	c.CsvOptions = &bq.CsvOptions{
		AllowJaggedRows:     o.AllowJaggedRows,
		AllowQuotedNewlines: o.AllowQuotedNewlines,
		Encoding:            string(o.Encoding),
		FieldDelimiter:      o.FieldDelimiter,
		Quote:               o.quote(),
		SkipLeadingRows:     o.SkipLeadingRows,
	}
}
// quote returns the CSV quote character, or nil if unset. | |||
func (o *CSVOptions) quote() *string { | |||
if o.ForceZeroQuote { | |||
quote := "" | |||
return "e | |||
} | |||
if o.Quote == "" { | |||
return nil | |||
} | |||
return &o.Quote | |||
} | |||
func (o *CSVOptions) setQuote(ps *string) { | |||
if ps != nil { | |||
o.Quote = *ps | |||
if o.Quote == "" { | |||
o.ForceZeroQuote = true | |||
} | |||
} | |||
} | |||
// bqToCSVOptions converts the API representation back into CSVOptions.
func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
	o := &CSVOptions{
		AllowJaggedRows:     q.AllowJaggedRows,
		AllowQuotedNewlines: q.AllowQuotedNewlines,
		Encoding:            Encoding(q.Encoding),
		FieldDelimiter:      q.FieldDelimiter,
		SkipLeadingRows:     q.SkipLeadingRows,
	}
	// The quote pointer is decoded into the Quote/ForceZeroQuote pair.
	o.setQuote(q.Quote)
	return o
}
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
	// The number of rows at the top of a sheet that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64
}
// populateExternalDataConfig writes the Google Sheets options into the API struct.
func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
		SkipLeadingRows: o.SkipLeadingRows,
	}
}
func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions { | |||
return &GoogleSheetsOptions{ | |||
SkipLeadingRows: q.SkipLeadingRows, | |||
} | |||
} | |||
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
	// A list of column families to expose in the table schema along with their
	// types. If omitted, all column families are present in the table schema and
	// their values are read as BYTES.
	ColumnFamilies []*BigtableColumnFamily

	// If true, then the column families that are not specified in columnFamilies
	// list are not exposed in the table schema. Otherwise, they are read with BYTES
	// type values. The default is false.
	IgnoreUnspecifiedColumnFamilies bool

	// If true, then the rowkey column families will be read and converted to string.
	// Otherwise they are read with BYTES type values and users need to manually cast
	// them with CAST if necessary. The default is false.
	ReadRowkeyAsString bool
}
func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { | |||
q := &bq.BigtableOptions{ | |||
IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies, | |||
ReadRowkeyAsString: o.ReadRowkeyAsString, | |||
} | |||
for _, f := range o.ColumnFamilies { | |||
q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ()) | |||
} | |||
c.BigtableOptions = q | |||
} | |||
// bqToBigtableOptions converts the API representation back into
// BigtableOptions. It returns an error if any column family's qualifier
// cannot be decoded.
func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
	b := &BigtableOptions{
		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
		ReadRowkeyAsString:              q.ReadRowkeyAsString,
	}
	for _, f := range q.ColumnFamilies {
		f2, err := bqToBigtableColumnFamily(f)
		if err != nil {
			return nil, err
		}
		b.ColumnFamilies = append(b.ColumnFamilies, f2)
	}
	return b, nil
}
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
	// Identifier of the column family.
	FamilyID string

	// Lists of columns that should be exposed as individual fields as opposed to a
	// list of (column name, value) pairs. All columns whose qualifier matches a
	// qualifier in this list can be accessed as .. Other columns can be accessed as
	// a list through .Column field.
	Columns []*BigtableColumn

	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
	// - TEXT - indicates values are alphanumeric text strings.
	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
	// This can be overridden for a specific column by listing that column in 'columns' and
	// specifying an encoding for it.
	Encoding string

	// If true, only the latest version of values are exposed for all columns in this
	// column family. This can be overridden for a specific column by listing that
	// column in 'columns' and specifying a different setting for that column.
	OnlyReadLatest bool

	// The type to convert the value in cells of this
	// column family. The values are expected to be encoded using HBase
	// Bytes.toBytes function when using the BINARY encoding value.
	// Following BigQuery types are allowed (case-sensitive):
	// BYTES STRING INTEGER FLOAT BOOLEAN.
	// The default type is BYTES. This can be overridden for a specific column by
	// listing that column in 'columns' and specifying a type for it.
	Type string
}
func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily { | |||
q := &bq.BigtableColumnFamily{ | |||
FamilyId: b.FamilyID, | |||
Encoding: b.Encoding, | |||
OnlyReadLatest: b.OnlyReadLatest, | |||
Type: b.Type, | |||
} | |||
for _, col := range b.Columns { | |||
q.Columns = append(q.Columns, col.toBQ()) | |||
} | |||
return q | |||
} | |||
// bqToBigtableColumnFamily converts the API representation back into a
// BigtableColumnFamily. It returns an error if a column's encoded
// qualifier cannot be base64-decoded.
func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
	b := &BigtableColumnFamily{
		FamilyID:       q.FamilyId,
		Encoding:       q.Encoding,
		OnlyReadLatest: q.OnlyReadLatest,
		Type:           q.Type,
	}
	for _, col := range q.Columns {
		c, err := bqToBigtableColumn(col)
		if err != nil {
			return nil, err
		}
		b.Columns = append(b.Columns, c)
	}
	return b, nil
}
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
	// Qualifier of the column. Columns in the parent column family that have this
	// exact qualifier are exposed as . field. The column field name is the
	// same as the column qualifier.
	Qualifier string

	// If the qualifier is not a valid BigQuery field identifier i.e. does not match
	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
	// name and is used as field name in queries.
	FieldName string

	// If true, only the latest version of values are exposed for this column.
	// See BigtableColumnFamily.OnlyReadLatest.
	OnlyReadLatest bool

	// The encoding of the values when the type is not STRING.
	// See BigtableColumnFamily.Encoding
	Encoding string

	// The type to convert the value in cells of this column.
	// See BigtableColumnFamily.Type
	Type string
}
// toBQ converts the column to its API representation. A qualifier that is
// valid UTF-8 is sent as a plain string; otherwise it is sent base64-encoded
// (unpadded) in QualifierEncoded.
func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
	q := &bq.BigtableColumn{
		FieldName:      b.FieldName,
		OnlyReadLatest: b.OnlyReadLatest,
		Encoding:       b.Encoding,
		Type:           b.Type,
	}
	if utf8.ValidString(b.Qualifier) {
		q.QualifierString = b.Qualifier
	} else {
		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
	}
	return q
}
// bqToBigtableColumn converts the API representation back into a
// BigtableColumn, reversing the qualifier encoding performed by toBQ:
// QualifierString is preferred; otherwise QualifierEncoded is
// base64-decoded. Returns an error if decoding fails.
func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
	b := &BigtableColumn{
		FieldName:      q.FieldName,
		OnlyReadLatest: q.OnlyReadLatest,
		Encoding:       q.Encoding,
		Type:           q.Type,
	}
	if q.QualifierString != "" {
		b.Qualifier = q.QualifierString
	} else {
		bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
		if err != nil {
			return nil, err
		}
		b.Qualifier = string(bytes)
	}
	return b, nil
}
@@ -1,143 +0,0 @@ | |||
// Copyright 2017 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"cloud.google.com/go/internal/pretty" | |||
"cloud.google.com/go/internal/testutil" | |||
) | |||
// TestExternalDataConfig verifies that ExternalDataConfig survives a
// round trip through toBQ and bqToExternalDataConfig for each of the
// option-bearing formats (CSV, GoogleSheets, Bigtable).
func TestExternalDataConfig(t *testing.T) {
	// Round-trip of ExternalDataConfig to underlying representation.
	for i, want := range []*ExternalDataConfig{
		{
			SourceFormat:        CSV,
			SourceURIs:          []string{"uri"},
			Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
			AutoDetect:          true,
			Compression:         Gzip,
			IgnoreUnknownValues: true,
			MaxBadRecords:       17,
			Options: &CSVOptions{
				AllowJaggedRows:     true,
				AllowQuotedNewlines: true,
				Encoding:            UTF_8,
				FieldDelimiter:      "f",
				Quote:               "q",
				SkipLeadingRows:     3,
			},
		},
		{
			SourceFormat: GoogleSheets,
			Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
		},
		{
			SourceFormat: Bigtable,
			Options: &BigtableOptions{
				IgnoreUnspecifiedColumnFamilies: true,
				ReadRowkeyAsString:              true,
				ColumnFamilies: []*BigtableColumnFamily{
					{
						FamilyID:       "f1",
						Encoding:       "TEXT",
						OnlyReadLatest: true,
						Type:           "FLOAT",
						Columns: []*BigtableColumn{
							{
								Qualifier:      "valid-utf-8",
								FieldName:      "fn",
								OnlyReadLatest: true,
								Encoding:       "BINARY",
								Type:           "STRING",
							},
						},
					},
				},
			},
		},
	} {
		// Convert to the API form and back; the result must equal the input.
		q := want.toBQ()
		got, err := bqToExternalDataConfig(&q)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, want); diff != "" {
			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
		}
	}
}
// TestQuote checks CSVOptions.quote for every combination of Quote and
// ForceZeroQuote: unset -> nil, forced -> pointer to "", set -> pointer
// to the configured character (force overriding the configured value).
func TestQuote(t *testing.T) {
	ptr := func(s string) *string { return &s }

	for _, test := range []struct {
		quote string
		force bool
		want  *string
	}{
		{"", false, nil},
		{"", true, ptr("")},
		{"-", false, ptr("-")},
		{"-", true, ptr("")},
	} {
		o := CSVOptions{
			Quote:          test.quote,
			ForceZeroQuote: test.force,
		}
		got := o.quote()
		// Compare nil-ness first, then the pointed-to values.
		if (got == nil) != (test.want == nil) {
			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
		}
		if got != nil && test.want != nil && *got != *test.want {
			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
		}
	}
}
// TestQualifier verifies the qualifier round trip on BigtableColumn:
// a valid-UTF-8 qualifier travels as QualifierString, while an invalid
// byte sequence travels base64-encoded in QualifierEncoded; both decode
// back to the original value.
func TestQualifier(t *testing.T) {
	b := BigtableColumn{Qualifier: "a"}
	q := b.toBQ()
	if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
		t.Errorf("got (%q, %q), want (%q, %q)",
			q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
	}
	b2, err := bqToBigtableColumn(q)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := b2.Qualifier, b.Qualifier; got != want {
		t.Errorf("got %q, want %q", got, want)
	}

	const (
		// invalidUTF8 is not a valid UTF-8 sequence; invalidEncoded is its
		// unpadded base64 (RawStdEncoding) form.
		invalidUTF8    = "\xDF\xFF"
		invalidEncoded = "3/8"
	)
	b = BigtableColumn{Qualifier: invalidUTF8}
	q = b.toBQ()
	if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
		t.Errorf("got (%q, %q), want (%q, %q)",
			q.QualifierString, "", b.Qualifier, invalidEncoded)
	}
	b2, err = bqToBigtableColumn(q)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := b2.Qualifier, b.Qualifier; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
@@ -1,110 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
	// Src is the table from which data will be extracted.
	Src *Table

	// Dst is the destination into which the data will be extracted.
	Dst *GCSReference

	// DisableHeader disables the printing of a header row in exported data.
	DisableHeader bool

	// The labels associated with this job.
	Labels map[string]string
}
// toBQ converts the extract configuration to its API representation.
// PrintHeader is only populated (with false) when headers are disabled,
// leaving the service default (headers on) in place otherwise.
func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
	var printHeader *bool
	if e.DisableHeader {
		f := false
		printHeader = &f
	}
	return &bq.JobConfiguration{
		Labels: e.Labels,
		Extract: &bq.JobConfigurationExtract{
			// Copy the URIs so later mutation of e.Dst.URIs cannot alias.
			DestinationUris:   append([]string{}, e.Dst.URIs...),
			Compression:       string(e.Dst.Compression),
			DestinationFormat: string(e.Dst.DestinationFormat),
			FieldDelimiter:    e.Dst.FieldDelimiter,

			SourceTable: e.Src.toBQ(),
			PrintHeader: printHeader,
		},
	}
}
// bqToExtractConfig converts the API job configuration back into an
// ExtractConfig. DisableHeader is true only when the API explicitly set
// PrintHeader to false.
func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
	qe := q.Extract
	return &ExtractConfig{
		Labels: q.Labels,
		Dst: &GCSReference{
			URIs:              qe.DestinationUris,
			Compression:       Compression(qe.Compression),
			DestinationFormat: DataFormat(qe.DestinationFormat),
			FileConfig: FileConfig{
				CSVOptions: CSVOptions{
					FieldDelimiter: qe.FieldDelimiter,
				},
			},
		},
		DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
		Src:           bqToTable(qe.SourceTable, c),
	}
}
// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
	JobIDConfig
	ExtractConfig
	// c is the client used to run the extract job.
	c *Client
}
// ExtractorTo returns an Extractor which can be used to extract data from a | |||
// BigQuery table into Google Cloud Storage. | |||
// The returned Extractor may optionally be further configured before its Run method is called. | |||
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { | |||
return &Extractor{ | |||
c: t.c, | |||
ExtractConfig: ExtractConfig{ | |||
Src: t, | |||
Dst: dst, | |||
}, | |||
} | |||
} | |||
// Run initiates an extract job.
// The returned Job can be used to wait for and inspect the result; the
// deferred EndSpan records any error the job submission produced.
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	return e.c.insertJob(ctx, e.newJob(), nil)
}
func (e *Extractor) newJob() *bq.Job { | |||
return &bq.Job{ | |||
JobReference: e.JobIDConfig.createJobRef(e.c), | |||
Configuration: e.ExtractConfig.toBQ(), | |||
} | |||
} |
@@ -1,116 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// defaultExtractJob returns the baseline API job that the simplest
// extract configuration should produce; test cases mutate copies of it.
func defaultExtractJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Extract: &bq.JobConfigurationExtract{
				SourceTable: &bq.TableReference{
					ProjectId: "client-project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				DestinationUris: []string{"uri"},
			},
		},
	}
}
func defaultGCS() *GCSReference { | |||
return &GCSReference{ | |||
URIs: []string{"uri"}, | |||
} | |||
} | |||
func TestExtract(t *testing.T) { | |||
defer fixRandomID("RANDOM")() | |||
c := &Client{ | |||
projectID: "client-project-id", | |||
} | |||
testCases := []struct { | |||
dst *GCSReference | |||
src *Table | |||
config ExtractConfig | |||
want *bq.Job | |||
}{ | |||
{ | |||
dst: defaultGCS(), | |||
src: c.Dataset("dataset-id").Table("table-id"), | |||
want: defaultExtractJob(), | |||
}, | |||
{ | |||
dst: defaultGCS(), | |||
src: c.Dataset("dataset-id").Table("table-id"), | |||
config: ExtractConfig{ | |||
DisableHeader: true, | |||
Labels: map[string]string{"a": "b"}, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultExtractJob() | |||
j.Configuration.Labels = map[string]string{"a": "b"} | |||
f := false | |||
j.Configuration.Extract.PrintHeader = &f | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.Compression = Gzip | |||
g.DestinationFormat = JSON | |||
g.FieldDelimiter = "\t" | |||
return g | |||
}(), | |||
src: c.Dataset("dataset-id").Table("table-id"), | |||
want: func() *bq.Job { | |||
j := defaultExtractJob() | |||
j.Configuration.Extract.Compression = "GZIP" | |||
j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" | |||
j.Configuration.Extract.FieldDelimiter = "\t" | |||
return j | |||
}(), | |||
}, | |||
} | |||
for i, tc := range testCases { | |||
ext := tc.src.ExtractorTo(tc.dst) | |||
tc.config.Src = ext.Src | |||
tc.config.Dst = ext.Dst | |||
ext.ExtractConfig = tc.config | |||
got := ext.newJob() | |||
checkJob(t, i, got, tc.want) | |||
jc, err := bqToJobConfig(got.Configuration, c) | |||
if err != nil { | |||
t.Fatalf("#%d: %v", i, err) | |||
} | |||
diff := testutil.Diff(jc, &ext.ExtractConfig, | |||
cmp.AllowUnexported(Table{}, Client{})) | |||
if diff != "" { | |||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||
} | |||
} | |||
} |
@@ -1,137 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"io" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
	// r supplies the data to load; unexported so callers cannot swap it
	// after construction.
	r io.Reader
	FileConfig
}
// NewReaderSource creates a ReaderSource from an io.Reader. You may | |||
// optionally configure properties on the ReaderSource that describe the | |||
// data being read, before passing it to Table.LoaderFrom. | |||
func NewReaderSource(r io.Reader) *ReaderSource { | |||
return &ReaderSource{r: r} | |||
} | |||
// populateLoadConfig writes the file options into the API load config and
// returns the reader that supplies the data to upload.
func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
	r.FileConfig.populateLoadConfig(lc)
	return r.r
}
// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via the Table.LoaderFromReader.
type FileConfig struct {
	// SourceFormat is the format of the data to be read.
	// Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet.  The default is CSV.
	SourceFormat DataFormat

	// Indicates if we should automatically infer the options and
	// schema for CSV and JSON sources.
	AutoDetect bool

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// Schema describes the data. It is required when reading CSV or JSON data,
	// unless the data is being loaded into a table that already exists.
	Schema Schema

	// Additional options for CSV files.
	CSVOptions
}
// populateLoadConfig copies this FileConfig's options into the BigQuery
// load-job configuration conf. The embedded CSVOptions fields are copied
// regardless of SourceFormat.
func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
	conf.SkipLeadingRows = fc.SkipLeadingRows
	conf.SourceFormat = string(fc.SourceFormat)
	conf.Autodetect = fc.AutoDetect
	conf.AllowJaggedRows = fc.AllowJaggedRows
	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
	conf.Encoding = string(fc.Encoding)
	conf.FieldDelimiter = fc.FieldDelimiter
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	// Only send a schema when one was provided; otherwise conf.Schema stays nil.
	if fc.Schema != nil {
		conf.Schema = fc.Schema.toBQ()
	}
	conf.Quote = fc.quote()
}
// bqPopulateFileConfig is the inverse of FileConfig.populateLoadConfig:
// it fills fc from an existing BigQuery load-job configuration.
func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
	fc.SourceFormat = DataFormat(conf.SourceFormat)
	fc.AutoDetect = conf.Autodetect
	fc.MaxBadRecords = conf.MaxBadRecords
	fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
	fc.Schema = bqToSchema(conf.Schema)
	fc.SkipLeadingRows = conf.SkipLeadingRows
	fc.AllowJaggedRows = conf.AllowJaggedRows
	fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
	fc.Encoding = Encoding(conf.Encoding)
	fc.FieldDelimiter = conf.FieldDelimiter
	// setQuote handles the service's pointer-to-string representation of Quote.
	fc.CSVOptions.setQuote(conf.Quote)
}
// populateExternalDataConfig copies this FileConfig's options into the
// external-data configuration conf. Unlike populateLoadConfig, the format
// defaults to CSV when unset, and the CSV options are only applied when
// the (possibly defaulted) format is CSV.
func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
	format := fc.SourceFormat
	if format == "" {
		// Format must be explicitly set for external data sources.
		format = CSV
	}
	conf.Autodetect = fc.AutoDetect
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	conf.SourceFormat = string(format)
	// Only send a schema when one was provided; otherwise conf.Schema stays nil.
	if fc.Schema != nil {
		conf.Schema = fc.Schema.toBQ()
	}
	if format == CSV {
		fc.CSVOptions.populateExternalDataConfig(conf)
	}
}
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
	// UTF_8 specifies the UTF-8 encoding type.
	// (The underscored names deviate from Go naming convention but are
	// part of the package's public API and kept for compatibility.)
	UTF_8 Encoding = "UTF-8"
	// ISO_8859_1 specifies the ISO-8859-1 encoding type.
	ISO_8859_1 Encoding = "ISO-8859-1"
)
@@ -1,98 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"cloud.google.com/go/internal/pretty" | |||
"cloud.google.com/go/internal/testutil" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
var (
	// hyphen exists so tests can take its address for the *string Quote fields.
	hyphen = "-"
	// fc is a fully populated FileConfig shared by the tests below; each
	// test checks that every field round-trips into the corresponding
	// BigQuery proto field.
	fc = FileConfig{
		SourceFormat:        CSV,
		AutoDetect:          true,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: Schema{
			stringFieldSchema(),
			nestedFieldSchema(),
		},
		CSVOptions: CSVOptions{
			Quote:               hyphen,
			FieldDelimiter:      "\t",
			SkipLeadingRows:     8,
			AllowJaggedRows:     true,
			AllowQuotedNewlines: true,
			Encoding:            UTF_8,
		},
	}
)
// TestFileConfigPopulateLoadConfig checks that every FileConfig field,
// including the embedded CSVOptions, is copied into the load-job proto.
func TestFileConfigPopulateLoadConfig(t *testing.T) {
	want := &bq.JobConfigurationLoad{
		SourceFormat:        "CSV",
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Autodetect:          true,
		Encoding:            "UTF-8",
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		Quote: &hyphen,
	}
	got := &bq.JobConfigurationLoad{}
	fc.populateLoadConfig(got)
	if !testutil.Equal(got, want) {
		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
	}
}
// TestFileConfigPopulateExternalDataConfig checks that FileConfig maps onto
// the external-data proto, with the CSV options landing in the nested
// CsvOptions message rather than at the top level.
func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
	got := &bq.ExternalDataConfiguration{}
	fc.populateExternalDataConfig(got)

	want := &bq.ExternalDataConfiguration{
		SourceFormat:        "CSV",
		Autodetect:          true,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		CsvOptions: &bq.CsvOptions{
			AllowJaggedRows:     true,
			AllowQuotedNewlines: true,
			Encoding:            "UTF-8",
			FieldDelimiter:      "\t",
			Quote:               &hyphen,
			SkipLeadingRows:     8,
		},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
}
@@ -1,75 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"io" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
	// URIs refer to Google Cloud Storage objects.
	URIs []string

	// FileConfig describes how the data in the referenced objects is
	// interpreted (format, schema, CSV options).
	FileConfig

	// DestinationFormat is the format to use when writing exported files.
	// Allowed values are: CSV, Avro, JSON.  The default is CSV.
	// CSV is not supported for tables with nested or repeated fields.
	DestinationFormat DataFormat

	// Compression specifies the type of compression to apply when writing data
	// to Google Cloud Storage, or using this GCSReference as an ExternalData
	// source with CSV or JSON SourceFormat. Default is None.
	Compression Compression
}
// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. | |||
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. | |||
// Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided. | |||
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. | |||
// For more information about the treatment of wildcards and multiple URIs, | |||
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple | |||
func NewGCSReference(uri ...string) *GCSReference { | |||
return &GCSReference{URIs: uri} | |||
} | |||
// Compression is the type of compression to apply when writing data
// to Google Cloud Storage.
// It is used by GCSReference for both export destinations and external
// data sources (see GCSReference.Compression).
type Compression string

const (
	// None specifies no compression.
	None Compression = "NONE"
	// Gzip specifies gzip compression.
	Gzip Compression = "GZIP"
)
// populateLoadConfig sets the source URIs and FileConfig options on lc.
// It returns nil because a GCS source has no request-body media to upload
// (contrast with ReaderSource.populateLoadConfig).
func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
	lc.SourceUris = gcs.URIs
	gcs.FileConfig.populateLoadConfig(lc)
	return nil
}
// toBQ converts the GCSReference into an external-data configuration proto.
func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
	conf := bq.ExternalDataConfiguration{
		Compression: string(gcs.Compression),
		// Copy the URIs so later mutation of gcs.URIs cannot alias into conf.
		SourceUris: append([]string{}, gcs.URIs...),
	}
	gcs.FileConfig.populateExternalDataConfig(&conf)
	return conf
}
@@ -1,238 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"reflect" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// An Inserter does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Inserter struct {
	t *Table // destination table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// A TableTemplateSuffix allows Inserters to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}
// Inserter returns an Inserter that can be used to append rows to t. | |||
// The returned Inserter may optionally be further configured before its Put method is called. | |||
// | |||
// To stream rows into a date-partitioned table at a particular date, add the | |||
// $yyyymmdd suffix to the table name when constructing the Table. | |||
func (t *Table) Inserter() *Inserter { | |||
return &Inserter{t: t} | |||
} | |||
// Uploader calls Inserter.
//
// Deprecated: use Table.Inserter instead.
func (t *Table) Uploader() *Inserter { return t.Inserter() }
// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
	// The named return value lets the deferred EndSpan record the final error.
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
	defer func() { trace.EndSpan(ctx, err) }()
	// Normalize src into a slice of ValueSavers, then send them in one request.
	savers, err := valueSavers(src)
	if err != nil {
		return err
	}
	return u.putMulti(ctx, savers)
}
func valueSavers(src interface{}) ([]ValueSaver, error) { | |||
saver, ok, err := toValueSaver(src) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if ok { | |||
return []ValueSaver{saver}, nil | |||
} | |||
srcVal := reflect.ValueOf(src) | |||
if srcVal.Kind() != reflect.Slice { | |||
return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src) | |||
} | |||
var savers []ValueSaver | |||
for i := 0; i < srcVal.Len(); i++ { | |||
s := srcVal.Index(i).Interface() | |||
saver, ok, err := toValueSaver(s) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if !ok { | |||
return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s) | |||
} | |||
savers = append(savers, saver) | |||
} | |||
return savers, nil | |||
} | |||
// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
// The second result reports whether x was a supported kind; it is false
// (with a nil error) for unsupported kinds, so the caller can produce a
// context-specific error message.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	// A StructSaver passed by value is almost certainly a mistake: reject it
	// with a pointed message rather than treating it as a plain struct row.
	if _, ok := x.(StructSaver); ok {
		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
	}
	var insertID string
	// Handle StructSavers specially so we can infer the schema if necessary.
	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
		x = ss.Struct
		insertID = ss.InsertID
		// Fall through so we can infer the schema.
	}
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, false, nil // unsupported kind; caller reports the error
	}
	schema, err := inferSchemaReflectCached(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{
		Struct:   x,
		InsertID: insertID,
		Schema:   schema,
	}, true, nil
}
// putMulti sends the rows produced by src in a single streaming-insert
// request, retrying via runWithRetry, and converts per-row insert errors
// from the response into a PutMultiError.
func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
	req, err := u.newInsertRequest(src)
	if err != nil {
		return err
	}
	if req == nil { // no rows to insert
		return nil
	}
	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
	call = call.Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	err = runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	return handleInsertErrors(res.InsertErrors, req.Rows)
}
func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) { | |||
if savers == nil { // If there are no rows, do nothing. | |||
return nil, nil | |||
} | |||
req := &bq.TableDataInsertAllRequest{ | |||
TemplateSuffix: u.TableTemplateSuffix, | |||
IgnoreUnknownValues: u.IgnoreUnknownValues, | |||
SkipInvalidRows: u.SkipInvalidRows, | |||
} | |||
for _, saver := range savers { | |||
row, insertID, err := saver.Save() | |||
if err != nil { | |||
return nil, err | |||
} | |||
if insertID == "" { | |||
insertID = randomIDFn() | |||
} | |||
m := make(map[string]bq.JsonValue) | |||
for k, v := range row { | |||
m[k] = bq.JsonValue(v) | |||
} | |||
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{ | |||
InsertId: insertID, | |||
Json: m, | |||
}) | |||
} | |||
return req, nil | |||
} | |||
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error { | |||
if len(ierrs) == 0 { | |||
return nil | |||
} | |||
var errs PutMultiError | |||
for _, e := range ierrs { | |||
if int(e.Index) > len(rows) { | |||
return fmt.Errorf("internal error: unexpected row index: %v", e.Index) | |||
} | |||
rie := RowInsertionError{ | |||
InsertID: rows[e.Index].InsertId, | |||
RowIndex: int(e.Index), | |||
} | |||
for _, errp := range e.Errors { | |||
rie.Errors = append(rie.Errors, bqToError(errp)) | |||
} | |||
errs = append(errs, rie) | |||
} | |||
return errs | |||
} | |||
// Uploader is an obsolete name for Inserter.
//
// Deprecated: use Inserter instead.
type Uploader = Inserter
@@ -1,210 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"errors" | |||
"strconv" | |||
"testing" | |||
"cloud.google.com/go/internal/pretty" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// testSaver is a ValueSaver stub whose Save method returns fixed values.
type testSaver struct {
	// row returned by Save.
	row map[string]Value
	// insertID returned by Save.
	insertID string
	// err returned by Save.
	err error
}

// Save implements ValueSaver by returning the stub's canned values.
func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.row, ts.insertID, ts.err
}
// TestNewInsertRequest checks that Inserter options and per-row insert IDs
// are translated into the streaming-insert request body. randomIDFn is
// stubbed with a deterministic counter so generated IDs are predictable
// ("1", "2", ... across the whole test).
func TestNewInsertRequest(t *testing.T) {
	prev := randomIDFn
	n := 0
	randomIDFn = func() string { n++; return strconv.Itoa(n) }
	defer func() { randomIDFn = prev }()

	tests := []struct {
		ul     *Uploader
		savers []ValueSaver
		req    *bq.TableDataInsertAllRequest
	}{
		{
			ul:     &Uploader{},
			savers: nil,
			req:    nil,
		},
		{
			ul: &Uploader{},
			savers: []ValueSaver{
				testSaver{row: map[string]Value{"one": 1}},
				testSaver{row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
					{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
				},
			},
		},
		{
			ul: &Uploader{
				TableTemplateSuffix: "suffix",
				IgnoreUnknownValues: true,
				SkipInvalidRows:     true,
			},
			savers: []ValueSaver{
				testSaver{insertID: "a", row: map[string]Value{"one": 1}},
				testSaver{insertID: "", row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
					// "3" because the stubbed counter keeps running across cases.
					{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
				},
				TemplateSuffix:      "suffix",
				SkipInvalidRows:     true,
				IgnoreUnknownValues: true,
			},
		},
	}
	for i, tc := range tests {
		got, err := tc.ul.newInsertRequest(tc.savers)
		if err != nil {
			t.Fatal(err)
		}
		want := tc.req
		if !testutil.Equal(got, want) {
			t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
		}
	}
}
// TestNewInsertRequestErrors checks that a Save error from any row is
// propagated out of newInsertRequest.
func TestNewInsertRequestErrors(t *testing.T) {
	var u Uploader
	_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("bang")}})
	if err == nil {
		t.Error("got nil, want error")
	}
}
// TestHandleInsertErrors checks that service-side insert errors are mapped
// to a PutMultiError carrying the insert ID and row index of each failure.
func TestHandleInsertErrors(t *testing.T) {
	rows := []*bq.TableDataInsertAllRequestRows{
		{InsertId: "a"},
		{InsertId: "b"},
	}
	for _, test := range []struct {
		in   []*bq.TableDataInsertAllResponseInsertErrors
		want error
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
		},
		{
			// NOTE(review): this case duplicates the previous one exactly;
			// one of them was possibly meant to exercise a different index.
			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
		},
		{
			in: []*bq.TableDataInsertAllResponseInsertErrors{
				{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
				{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
			},
			want: PutMultiError{
				RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
				RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
			},
		},
	} {
		got := handleInsertErrors(test.in, rows)
		if !testutil.Equal(got, test.want) {
			t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
		}
	}
}
// TestValueSavers checks valueSavers' normalization of every supported
// input shape (single saver, struct, struct pointer, slices thereof,
// and *StructSaver with inferred schema), and that each resulting saver's
// Save succeeds.
func TestValueSavers(t *testing.T) {
	ts := &testSaver{}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{[]interface{}(nil), nil},
		{[]interface{}{}, nil},
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
			[]ValueSaver{
				&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
			}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
func TestValueSaversErrors(t *testing.T) { | |||
inputs := []interface{}{ | |||
nil, | |||
1, | |||
[]int{1, 2}, | |||
[]interface{}{ | |||
testSaver{row: map[string]Value{"one": 1}, insertID: "a"}, | |||
1, | |||
}, | |||
StructSaver{}, | |||
} | |||
for _, in := range inputs { | |||
if _, err := valueSavers(in); err == nil { | |||
t.Errorf("%#v: got nil, want error", in) | |||
} | |||
} | |||
} |
@@ -1,222 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"fmt" | |||
"reflect" | |||
bq "google.golang.org/api/bigquery/v2" | |||
"google.golang.org/api/iterator" | |||
) | |||
// Construct a RowIterator.
// If pf is nil, there are no rows in the result set.
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
	it := &RowIterator{
		ctx:   ctx,
		table: t,
		pf:    pf,
	}
	if pf != nil {
		// The three closures tell the pagination machinery how to fetch a
		// page, how many buffered rows remain, and how to take the buffer
		// (clearing it in the process).
		it.pageInfo, it.nextFunc = iterator.NewPageInfo(
			it.fetch,
			func() int { return len(it.rows) },
			func() interface{} { r := it.rows; it.rows = nil; return r })
	}
	return it
}
// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
	ctx   context.Context
	table *Table
	// pf fetches pages of rows; nil means the result set is empty.
	pf       pageFetcher
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// StartIndex can be set before the first call to Next. If PageInfo().Token
	// is also set, StartIndex is ignored.
	StartIndex uint64

	// The schema of the table. Available after the first call to Next.
	Schema Schema

	// The total number of rows in the result. Available after the first call to Next.
	// May be zero just after rows were inserted.
	TotalRows uint64

	// rows buffers fetched rows that Next has not yet returned.
	rows [][]Value

	structLoader structLoader // used to populate a pointer to a struct
}
// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
//   STRING      string
//   BOOL        bool
//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
//   FLOAT       float32, float64
//   BYTES       []byte
//   TIMESTAMP   time.Time
//   DATE        civil.Date
//   TIME        civil.Time
//   DATETIME    civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
	if it.pf == nil { // There are no rows in the result set.
		return iterator.Done
	}
	// Validate dst and, where possible, pick its ValueLoader up front.
	var vl ValueLoader
	switch dst := dst.(type) {
	case ValueLoader:
		vl = dst
	case *[]Value:
		vl = (*valueList)(dst)
	case *map[string]Value:
		vl = (*valueMap)(dst)
	default:
		if !isStructPtr(dst) {
			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
		}
	}
	// Advance the iterator; nextFunc refills it.rows via fetch as needed.
	if err := it.nextFunc(); err != nil {
		return err
	}
	// Pop the first buffered row.
	row := it.rows[0]
	it.rows = it.rows[1:]

	if vl == nil {
		// This can only happen if dst is a pointer to a struct. We couldn't
		// set vl above because we need the schema.
		if err := it.structLoader.set(dst, it.Schema); err != nil {
			return err
		}
		vl = &it.structLoader
	}
	return vl.Load(row, it.Schema)
}
// isStructPtr reports whether x is a non-nil pointer to a struct.
// A nil interface value returns false: reflect.TypeOf(nil) yields a nil
// Type, on which the original code's t.Kind() call panicked (reachable
// via RowIterator.Next(nil)).
func isStructPtr(x interface{}) bool {
	t := reflect.TypeOf(x)
	if t == nil { // x was an untyped nil
		return false
	}
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
// Setting PageInfo().Token causes RowIterator.StartIndex to be ignored.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// fetch retrieves one page of rows via it.pf and appends them to it.rows,
// refreshing the iterator's Schema and TotalRows from the response.
// It is called by the iterator.PageInfo machinery and returns the token
// for the next page (empty when there are no more pages).
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
	res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
	if err != nil {
		return "", err
	}
	it.rows = append(it.rows, res.rows...)
	it.Schema = res.schema
	it.TotalRows = res.totalRows
	return res.pageToken, nil
}
// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

// fetchPageResult holds one page of table data.
type fetchPageResult struct {
	pageToken string // token for the next page; empty when done
	rows      [][]Value
	totalRows uint64
	schema    Schema
}
// fetchPage gets a page of rows from t.
// If schema is nil it is fetched from the service concurrently with the row
// listing; pageToken, when non-empty, takes precedence over startIndex.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
	// Fetch the table schema in the background, if necessary.
	// errc always receives exactly one value, so the receive below never blocks
	// indefinitely; the buffer of 1 lets the goroutine finish without a reader.
	errc := make(chan error, 1)
	if schema != nil {
		errc <- nil // schema already known; nothing to wait for
	} else {
		go func() {
			var bqt *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && bqt.Schema != nil {
				schema = bqToSchema(bqt.Schema)
			}
			errc <- err
		}()
	}
	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
	setClientHeader(call.Header())
	// A page token identifies an exact position, so StartIndex is only used
	// for the first page.
	if pageToken != "" {
		call.PageToken(pageToken)
	} else {
		call.StartIndex(startIndex)
	}
	if pageSize > 0 {
		call.MaxResults(pageSize)
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	// Wait for the schema fetch (if any) to finish before converting rows.
	err = <-errc
	if err != nil {
		return nil, err
	}
	rows, err := convertRows(res.Rows, schema)
	if err != nil {
		return nil, err
	}
	return &fetchPageResult{
		pageToken: res.PageToken,
		rows:      rows,
		totalRows: uint64(res.TotalRows),
		schema:    schema,
	}, nil
}
@@ -1,362 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
"google.golang.org/api/iterator" | |||
) | |||
// fetchResponse is one canned reply for a fetchPage call.
type fetchResponse struct {
	result *fetchPageResult // The result to return.
	err error // The error to return.
}
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
	fetchResponses map[string]fetchResponse // canned replies, keyed by requested page token
	err error // records a test-setup error (an unregistered page token was requested)
}
func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) { | |||
call, ok := pf.fetchResponses[pageToken] | |||
if !ok { | |||
pf.err = fmt.Errorf("Unexpected page token: %q", pageToken) | |||
} | |||
return call.result, call.err | |||
} | |||
// TestIterator drives a RowIterator against a stubbed page fetcher and checks
// the values, schema, and total row count accumulated across single pages,
// multiple pages, empty pages, fetch errors, and page-token skips.
func TestIterator(t *testing.T) {
	var (
		iiSchema = Schema{
			{Type: IntegerFieldType},
			{Type: IntegerFieldType},
		}
		siSchema = Schema{
			{Type: StringFieldType},
			{Type: IntegerFieldType},
		}
	)
	fetchFailure := errors.New("fetch failure")
	testCases := []struct {
		desc string
		pageToken string // initial PageInfo token; non-empty skips earlier pages
		fetchResponses map[string]fetchResponse // canned pages, keyed by request token
		want [][]Value
		wantErr error
		wantSchema Schema
		wantTotalRows uint64
	}{
		{
			desc: "Iteration over single empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{},
						schema: Schema{},
					},
				},
			},
			want: [][]Value{},
			wantSchema: Schema{},
		},
		{
			desc: "Iteration over single page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
						totalRows: 4,
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}},
			wantSchema: iiSchema,
			wantTotalRows: 4,
		},
		{
			desc: "Iteration over single page with different schema",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{"1", 2}, {"11", 12}},
						schema: siSchema,
					},
				},
			},
			want: [][]Value{{"1", 2}, {"11", 12}},
			wantSchema: siSchema,
		},
		{
			desc: "Iteration over two pages",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
						totalRows: 4,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{101, 102}, {111, 112}},
						schema: iiSchema,
						totalRows: 4,
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema: iiSchema,
			wantTotalRows: 4,
		},
		{
			desc: "Server response includes empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "b",
						rows: [][]Value{},
						schema: iiSchema,
					},
				},
				"b": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{101, 102}, {111, 112}},
						schema: iiSchema,
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc: "Fetch error",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
					},
				},
				"a": {
					// We return some data from this fetch, but also an error.
					// So the end result should include only data from the previous fetch.
					err: fetchFailure,
					result: &fetchPageResult{
						pageToken: "b",
						rows: [][]Value{{101, 102}, {111, 112}},
						schema: iiSchema,
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}},
			wantErr: fetchFailure,
			wantSchema: iiSchema,
		},
		{
			desc: "Skip over an entire page",
			pageToken: "a",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{101, 102}, {111, 112}},
						schema: iiSchema,
					},
				},
			},
			want: [][]Value{{101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc: "Skip beyond all data",
			pageToken: "b",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows: [][]Value{{1, 2}, {11, 12}},
						schema: iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "b",
						rows: [][]Value{{101, 102}, {111, 112}},
						schema: iiSchema,
					},
				},
				"b": {
					result: &fetchPageResult{},
				},
			},
			// In this test case, Next returns iterator.Done on its first call,
			// so no rows are ever read.
			want: [][]Value{},
			wantSchema: Schema{},
		},
	}
	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf.fetchPage)
		it.PageInfo().Token = tc.pageToken
		values, schema, totalRows, err := consumeRowIterator(it)
		if err != tc.wantErr {
			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
		}
		// len-checks first: treat nil and empty slices as equal for this comparison.
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
		}
		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
		}
		if totalRows != tc.wantTotalRows {
			t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
		}
	}
}
// consumeRowIterator reads the schema and all values from a RowIterator and returns them. | |||
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) { | |||
var ( | |||
got [][]Value | |||
schema Schema | |||
totalRows uint64 | |||
) | |||
for { | |||
var vls []Value | |||
err := it.Next(&vls) | |||
if err == iterator.Done { | |||
return got, schema, totalRows, nil | |||
} | |||
if err != nil { | |||
return got, schema, totalRows, err | |||
} | |||
got = append(got, vls) | |||
schema = it.Schema | |||
totalRows = it.TotalRows | |||
} | |||
} | |||
// TestNextDuringErrorState verifies that once Next has returned an error,
// subsequent Next calls keep failing rather than resetting the iterator.
func TestNextDuringErrorState(t *testing.T) {
	pf := &pageFetcherStub{
		fetchResponses: map[string]fetchResponse{
			"": {err: errors.New("bang")},
		},
	}
	it := newRowIterator(context.Background(), nil, pf.fetchPage)
	var vals []Value
	if err := it.Next(&vals); err == nil {
		t.Errorf("Expected error after calling Next")
	}
	if err := it.Next(&vals); err == nil {
		t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
	}
}
// TestNextAfterFinished verifies that an exhausted iterator keeps returning
// iterator.Done on further Next calls, for both non-empty and empty result sets.
func TestNextAfterFinished(t *testing.T) {
	testCases := []struct {
		fetchResponses map[string]fetchResponse
		want [][]Value
	}{
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}},
		},
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows: [][]Value{},
					},
				},
			},
			want: [][]Value{},
		},
	}
	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf.fetchPage)
		values, _, _, err := consumeRowIterator(it)
		if err != nil {
			t.Fatal(err)
		}
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
		}
		// Try calling Next again after the iterator is exhausted.
		var vals []Value
		if err := it.Next(&vals); err != iterator.Done {
			t.Errorf("Expected Done calling Next when there are no more values")
		}
	}
}
func TestIteratorNextTypes(t *testing.T) { | |||
it := newRowIterator(context.Background(), nil, nil) | |||
for _, v := range []interface{}{3, "s", []int{}, &[]int{}, | |||
map[string]Value{}, &map[string]interface{}{}, | |||
struct{}{}, | |||
} { | |||
if err := it.Next(v); err == nil { | |||
t.Errorf("%v: want error, got nil", v) | |||
} | |||
} | |||
} |
@@ -1,830 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"time" | |||
"cloud.google.com/go/internal" | |||
"cloud.google.com/go/internal/trace" | |||
gax "github.com/googleapis/gax-go/v2" | |||
bq "google.golang.org/api/bigquery/v2" | |||
"google.golang.org/api/googleapi" | |||
"google.golang.org/api/iterator" | |||
) | |||
// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
	c *Client
	projectID string
	jobID string
	location string
	email string // email of the job's creator, as reported by the jobs API
	config *bq.JobConfiguration
	lastStatus *JobStatus // most recently retrieved status; see LastStatus
}
// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
	return c.JobFromIDLocation(ctx, id, c.Location)
}
// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
	defer func() { trace.EndSpan(ctx, err) }()
	// Request only the fields needed to populate the Job struct.
	bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
	if err != nil {
		return nil, err
	}
	return bqToJob(bqjob, c)
}
// ID returns the job's ID.
func (j *Job) ID() string {
	return j.jobID
}
// Location returns the job's location.
func (j *Job) Location() string {
	return j.location
}
// Email returns the email of the job's creator.
// It is populated from the jobs API response and may be empty.
func (j *Job) Email() string {
	return j.email
}
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int
const (
	// StateUnspecified is the default JobIterator state.
	StateUnspecified State = iota
	// Pending is a state that describes that the job is pending.
	Pending
	// Running is a state that describes that the job is running.
	Running
	// Done is a state that describes that the job is done.
	Done
)
// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
	State State
	// err is the fatal error of a Done job, if any; exposed via Err().
	err error
	// All errors encountered during the running of the job.
	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
	Errors []*Error
	// Statistics about the job.
	Statistics *JobStatistics
}
// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
	isJobConfig()
}
// Marker methods restricting JobConfig to the four config types above.
func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}
// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
	return bqToJobConfig(j.config, j.c)
}
func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) { | |||
switch { | |||
case q == nil: | |||
return nil, nil | |||
case q.Copy != nil: | |||
return bqToCopyConfig(q, c), nil | |||
case q.Extract != nil: | |||
return bqToExtractConfig(q, c), nil | |||
case q.Load != nil: | |||
return bqToLoadConfig(q, c), nil | |||
case q.Query != nil: | |||
return bqToQueryConfig(q, c) | |||
default: | |||
return nil, nil | |||
} | |||
} | |||
// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
	JobID string
	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
	AddJobIDSuffix bool
	// Location is the location for the job.
	// If empty, the client's Location is used (see createJobRef).
	Location string
}
// createJobRef creates a JobReference. | |||
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference { | |||
// We don't check whether projectID is empty; the server will return an | |||
// error when it encounters the resulting JobReference. | |||
loc := j.Location | |||
if loc == "" { // Use Client.Location as a default. | |||
loc = c.Location | |||
} | |||
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc} | |||
if j.JobID == "" { | |||
jr.JobId = randomIDFn() | |||
} else if j.AddJobIDSuffix { | |||
jr.JobId = j.JobID + "-" + randomIDFn() | |||
} else { | |||
jr.JobId = j.JobID | |||
} | |||
return jr | |||
} | |||
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
	return s.State == Done
}
// Err returns the error that caused the job to complete unsuccessfully (if any).
// It is only set once the job is Done; see setStatus.
func (s *JobStatus) Err() error {
	return s.err
}
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
	defer func() { trace.EndSpan(ctx, err) }()
	// Only the status and statistics fields are needed to refresh lastStatus.
	bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
	if err != nil {
		return nil, err
	}
	if err := j.setStatus(bqjob.Status); err != nil {
		return nil, err
	}
	j.setStatistics(bqjob.Statistics, j.c)
	return j.lastStatus, nil
}
// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
	return j.lastStatus
}
// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
		Location(j.location).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	// Retry transient failures; the call is idempotent.
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}
// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
	defer func() { trace.EndSpan(ctx, err) }()
	if j.isQuery() {
		// We can avoid polling for query jobs.
		if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
			return nil, err
		}
		// Note: extra RPC even if you just want to wait for the query to finish.
		js, err := j.Status(ctx)
		if err != nil {
			return nil, err
		}
		return js, nil
	}
	// Non-query jobs must poll.
	// The named returns js/err are assigned inside the closure so the final
	// status survives the retry loop.
	err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		js, err = j.Status(ctx)
		if err != nil {
			return true, err
		}
		if js.Done() {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	return js, nil
}
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
	defer func() { trace.EndSpan(ctx, err) }()
	return j.read(ctx, j.waitForQuery, fetchPage)
}
// read waits for the query to complete (via waitForQuery), then builds a
// RowIterator over the destination table using pf. waitForQuery and pf are
// parameters so tests can substitute stubs.
func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
	if !j.isQuery() {
		return nil, errors.New("bigquery: cannot read from a non-query job")
	}
	destTable := j.config.Query.DestinationTable
	// The destination table should only be nil if there was a query error.
	projectID := j.projectID
	if destTable != nil && projectID != destTable.ProjectId {
		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
	}
	// Wait first: a query error surfaces here before the nil-destTable check below.
	schema, totalRows, err := waitForQuery(ctx, projectID)
	if err != nil {
		return nil, err
	}
	if destTable == nil {
		return nil, errors.New("bigquery: query job missing destination table")
	}
	dt := bqToTable(destTable, j.c)
	// A nil pageFetcher makes the iterator report Done immediately for empty results.
	if totalRows == 0 {
		pf = nil
	}
	it := newRowIterator(ctx, dt, pf)
	it.Schema = schema
	it.TotalRows = totalRows
	return it, nil
}
// waitForQuery waits for the query job to complete and returns its schema. It also
// returns the total number of rows in the result set.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	// MaxResults(0) requests no row data.
	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
	setClientHeader(call.Header())
	backoff := gax.Backoff{
		Initial: 1 * time.Second,
		Multiplier: 2,
		Max: 60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = call.Do()
		if err != nil {
			// Stop only on non-retryable errors.
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, 0, err
	}
	return bqToSchema(res.Schema), res.TotalRows, nil
}
// JobStatistics contains statistics about a job.
type JobStatistics struct {
	CreationTime time.Time
	StartTime time.Time
	EndTime time.Time
	TotalBytesProcessed int64
	// Details holds the job-type-specific statistics (see Statistics).
	Details Statistics
}
// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
	implementsStatistics()
}
// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
	// The number of files per destination URI or URI pattern specified in the
	// extract configuration. These values will be in the same order as the
	// URIs specified in the 'destinationUris' field.
	DestinationURIFileCounts []int64
}
// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
	// The number of bytes of source data in a load job.
	InputFileBytes int64
	// The number of source files in a load job.
	InputFiles int64
	// Size of the loaded data in bytes. Note that while a load job is in the
	// running state, this value may change.
	OutputBytes int64
	// The number of rows imported in a load job. Note that while an import job is
	// in the running state, this value may change.
	OutputRows int64
}
// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
	// Billing tier for the job.
	BillingTier int64
	// Whether the query result was fetched from the query cache.
	CacheHit bool
	// The type of query statement, if valid.
	StatementType string
	// Total bytes billed for the job.
	TotalBytesBilled int64
	// Total bytes processed for the job.
	TotalBytesProcessed int64
	// For dry run queries, indicates how accurate the TotalBytesProcessed value is.
	// When indicated, values include:
	// UNKNOWN: accuracy of the estimate is unknown.
	// PRECISE: estimate is precise.
	// LOWER_BOUND: estimate is lower bound of what the query would cost.
	// UPPER_BOUND: estimate is upper bound of what the query would cost.
	TotalBytesProcessedAccuracy string
	// Describes execution plan for the query.
	QueryPlan []*ExplainQueryStage
	// The number of rows affected by a DML statement. Present only for DML
	// statements INSERT, UPDATE or DELETE.
	NumDMLAffectedRows int64
	// Describes a timeline of job execution.
	Timeline []*QueryTimelineSample
	// ReferencedTables: [Output-only, Experimental] Referenced tables for
	// the job. Queries that reference more than 50 tables will not have a
	// complete list.
	ReferencedTables []*Table
	// The schema of the results. Present only for successful dry run of
	// non-legacy SQL queries.
	Schema Schema
	// Slot-milliseconds consumed by this query job.
	SlotMillis int64
	// Standard SQL: list of undeclared query parameter names detected during a
	// dry run validation.
	UndeclaredQueryParameterNames []string
	// DDL target table.
	DDLTargetTable *Table
	// DDL Operation performed on the target table. Used to report how the
	// query impacted the DDL target table.
	DDLOperationPerformed string
}
// ExplainQueryStage describes one stage of a query's execution plan,
// including per-shard timing averages/maxima and record counts.
type ExplainQueryStage struct {
	// CompletedParallelInputs: Number of parallel input segments completed.
	CompletedParallelInputs int64
	// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
	ComputeAvg time.Duration
	// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
	ComputeMax time.Duration
	// Relative amount of the total time the average shard spent on CPU-bound tasks.
	ComputeRatioAvg float64
	// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
	ComputeRatioMax float64
	// EndTime: Stage end time.
	EndTime time.Time
	// Unique ID for stage within plan.
	ID int64
	// InputStages: IDs for stages that are inputs to this stage.
	InputStages []int64
	// Human-readable name for stage.
	Name string
	// ParallelInputs: Number of parallel input segments to be processed.
	ParallelInputs int64
	// ReadAvg: Duration the average shard spent reading input.
	ReadAvg time.Duration
	// ReadMax: Duration the slowest shard spent reading input.
	ReadMax time.Duration
	// Relative amount of the total time the average shard spent reading input.
	ReadRatioAvg float64
	// Relative amount of the total time the slowest shard spent reading input.
	ReadRatioMax float64
	// Number of records read into the stage.
	RecordsRead int64
	// Number of records written by the stage.
	RecordsWritten int64
	// ShuffleOutputBytes: Total number of bytes written to shuffle.
	ShuffleOutputBytes int64
	// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
	// and spilled to disk.
	ShuffleOutputBytesSpilled int64
	// StartTime: Stage start time.
	StartTime time.Time
	// Current status for the stage.
	Status string
	// List of operations within the stage in dependency order (approximately
	// chronological).
	Steps []*ExplainQueryStep
	// WaitAvg: Duration the average shard spent waiting to be scheduled.
	WaitAvg time.Duration
	// WaitMax: Duration the slowest shard spent waiting to be scheduled.
	WaitMax time.Duration
	// Relative amount of the total time the average shard spent waiting to be scheduled.
	WaitRatioAvg float64
	// Relative amount of the total time the slowest shard spent waiting to be scheduled.
	WaitRatioMax float64
	// WriteAvg: Duration the average shard spent on writing output.
	WriteAvg time.Duration
	// WriteMax: Duration the slowest shard spent on writing output.
	WriteMax time.Duration
	// Relative amount of the total time the average shard spent on writing output.
	WriteRatioAvg float64
	// Relative amount of the total time the slowest shard spent on writing output.
	WriteRatioMax float64
}
// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
	// Machine-readable operation type.
	Kind string
	// Human-readable stage descriptions.
	Substeps []string
}
// QueryTimelineSample represents a sample of execution statistics at a point in time.
type QueryTimelineSample struct {
	// Total number of units currently being processed by workers, represented as largest value since last sample.
	ActiveUnits int64
	// Total parallel units of work completed by this query.
	CompletedUnits int64
	// Time elapsed since start of query execution.
	Elapsed time.Duration
	// Total parallel units of work remaining for the active stages.
	PendingUnits int64
	// Cumulative slot-milliseconds consumed by the query.
	SlotMillis int64
}
// Marker methods restricting Statistics to the three statistics types.
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}
// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
	it := &JobIterator{
		ctx: ctx,
		c: c,
		ProjectID: c.projectID,
	}
	// Wire the iterator into the google-api pagination machinery:
	// fetch loads a page, the closures expose and drain the buffered items.
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	return it
}
// JobIterator iterates over jobs in a project.
type JobIterator struct {
	ProjectID string // Project ID of the jobs to list. Default is the client's project.
	AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller.
	State State // List only jobs in the given state. Defaults to all states.
	MinCreationTime time.Time // List only jobs created after this time.
	MaxCreationTime time.Time // List only jobs created before this time.
	ctx context.Context
	c *Client
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items []*Job // buffered jobs from the most recent fetch
}
// PageInfo is a getter for the JobIterator's PageInfo.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Job. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *JobIterator) Next() (*Job, error) {
	// nextFunc refills it.items via fetch as needed.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// fetch retrieves one page of jobs from the Jobs.List API, appends them to
// it.items, and returns the next page token. It maps it.State to the API's
// state filter string and applies the iterator's time-range filters.
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
	var st string
	switch it.State {
	case StateUnspecified:
		st = ""
	case Pending:
		st = "pending"
	case Running:
		st = "running"
	case Done:
		st = "done"
	default:
		return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
	}
	req := it.c.bqs.Jobs.List(it.ProjectID).
		Context(it.ctx).
		PageToken(pageToken).
		Projection("full").
		AllUsers(it.AllUsers)
	if st != "" {
		req.StateFilter(st)
	}
	// UnixNano()/1e6 converts the times to milliseconds since the Unix epoch.
	if !it.MinCreationTime.IsZero() {
		req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
	}
	if !it.MaxCreationTime.IsZero() {
		req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
	}
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	// NOTE(review): unlike other calls in this file, this request is not
	// wrapped in runWithRetry — confirm whether that is intentional.
	res, err := req.Do()
	if err != nil {
		return "", err
	}
	for _, j := range res.Jobs {
		job, err := convertListedJob(j, it.c)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, job)
	}
	return res.NextPageToken, nil
}
// convertListedJob adapts one Jobs.List entry into a *Job via bqToJob2.
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) { | |||
var job *bq.Job | |||
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx) | |||
if location != "" { | |||
call = call.Location(location) | |||
} | |||
if len(fields) > 0 { | |||
call = call.Fields(fields...) | |||
} | |||
setClientHeader(call.Header()) | |||
err := runWithRetry(ctx, func() (err error) { | |||
job, err = call.Do() | |||
return err | |||
}) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return job, nil | |||
} | |||
// bqToJob converts a full API job entity into a *Job.
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
	return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
}
// bqToJob2 assembles a *Job from the individual API job components.
// It fails only if the job status contains an unrecognized state.
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
	j := &Job{
		projectID: qr.ProjectId,
		jobID: qr.JobId,
		location: qr.Location,
		c: c,
		email: email,
	}
	j.setConfig(qc)
	if err := j.setStatus(qs); err != nil {
		return nil, err
	}
	// setStatistics must run after setStatus: it requires lastStatus to be set.
	j.setStatistics(qt, c)
	return j, nil
}
func (j *Job) setConfig(config *bq.JobConfiguration) { | |||
if config == nil { | |||
return | |||
} | |||
j.config = config | |||
} | |||
func (j *Job) isQuery() bool { | |||
return j.config != nil && j.config.Query != nil | |||
} | |||
// stateMap translates the REST API's job state strings into this package's
// State values.
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

// setStatus records qs as the job's last observed status. A nil qs is
// ignored; an unrecognized state string is an error.
func (j *Job) setStatus(qs *bq.JobStatus) error {
	if qs == nil {
		return nil
	}
	state, ok := stateMap[qs.State]
	if !ok {
		return fmt.Errorf("unexpected job state: %v", qs.State)
	}
	j.lastStatus = &JobStatus{
		State: state,
		err:   nil,
	}
	// Only a Done job surfaces its terminal error; an ErrorResult on a
	// still-running job is deliberately not exposed.
	if err := bqToError(qs.ErrorResult); state == Done && err != nil {
		j.lastStatus.err = err
	}
	for _, ep := range qs.Errors {
		j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
	}
	return nil
}
// setStatistics converts the raw API statistics s into a JobStatistics and
// attaches it to the job's last observed status. It is a no-op when s is nil
// or when setStatus has not recorded a status yet.
func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
	if s == nil || j.lastStatus == nil {
		return
	}
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	// At most one of the per-job-type statistics blocks is set; Details is
	// populated from whichever is present (extract, load, or query).
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, bqToTable(tr, c))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			DDLTargetTable:                bqToTable(s.Query.DdlTargetTable, c),
			DDLOperationPerformed:         s.Query.DdlOperationPerformed,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			TotalBytesProcessedAccuracy:   s.Query.TotalBytesProcessedAccuracy,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        bqToSchema(s.Query.Schema),
			SlotMillis:                    s.Query.TotalSlotMs,
			Timeline:                      timelineFromProto(s.Query.Timeline),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	j.lastStatus.Statistics = js
}
// queryPlanFromProto converts the raw API explain stages into the package's
// ExplainQueryStage representation. Millisecond counters from the API are
// converted to time.Duration, and millisecond epoch stamps to time.Time.
// Returns nil for an empty input.
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			CompletedParallelInputs: s.CompletedParallelInputs,
			ComputeAvg:              time.Duration(s.ComputeMsAvg) * time.Millisecond,
			ComputeMax:              time.Duration(s.ComputeMsMax) * time.Millisecond,
			ComputeRatioAvg:         s.ComputeRatioAvg,
			ComputeRatioMax:         s.ComputeRatioMax,
			EndTime:                 time.Unix(0, s.EndMs*1e6),
			ID:                      s.Id,
			InputStages:             s.InputStages,
			Name:                    s.Name,
			ParallelInputs:          s.ParallelInputs,
			ReadAvg:                 time.Duration(s.ReadMsAvg) * time.Millisecond,
			ReadMax:                 time.Duration(s.ReadMsMax) * time.Millisecond,
			ReadRatioAvg:            s.ReadRatioAvg,
			ReadRatioMax:            s.ReadRatioMax,
			RecordsRead:             s.RecordsRead,
			RecordsWritten:          s.RecordsWritten,
			ShuffleOutputBytes:      s.ShuffleOutputBytes,
			ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
			StartTime:               time.Unix(0, s.StartMs*1e6),
			Status:                  s.Status,
			Steps:                   steps,
			WaitAvg:                 time.Duration(s.WaitMsAvg) * time.Millisecond,
			WaitMax:                 time.Duration(s.WaitMsMax) * time.Millisecond,
			WaitRatioAvg:            s.WaitRatioAvg,
			WaitRatioMax:            s.WaitRatioMax,
			WriteAvg:                time.Duration(s.WriteMsAvg) * time.Millisecond,
			WriteMax:                time.Duration(s.WriteMsMax) * time.Millisecond,
			WriteRatioAvg:           s.WriteRatioAvg,
			WriteRatioMax:           s.WriteRatioMax,
		})
	}
	return res
}
func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample { | |||
var res []*QueryTimelineSample | |||
for _, s := range timeline { | |||
res = append(res, &QueryTimelineSample{ | |||
ActiveUnits: s.ActiveUnits, | |||
CompletedUnits: s.CompletedUnits, | |||
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond, | |||
PendingUnits: s.PendingUnits, | |||
SlotMillis: s.TotalSlotMs, | |||
}) | |||
} | |||
return res | |||
} |
@@ -1,95 +0,0 @@ | |||
// Copyright 2017 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// TestCreateJobRef verifies JobIDConfig.createJobRef: explicit vs. random job
// IDs, the AddJobIDSuffix behavior, and how an explicit Location overrides the
// client's default location.
func TestCreateJobRef(t *testing.T) {
	// Pin the random-ID generator so generated IDs are deterministic.
	defer fixRandomID("RANDOM")()
	cNoLoc := &Client{projectID: "projectID"}
	cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
	for _, test := range []struct {
		in     JobIDConfig
		client *Client
		want   *bq.JobReference
	}{
		{
			in:   JobIDConfig{JobID: "foo"},
			want: &bq.JobReference{JobId: "foo"},
		},
		{
			in:   JobIDConfig{},
			want: &bq.JobReference{JobId: "RANDOM"},
		},
		{
			// AddJobIDSuffix has no effect when there is no base JobID.
			in:   JobIDConfig{AddJobIDSuffix: true},
			want: &bq.JobReference{JobId: "RANDOM"},
		},
		{
			in:   JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
			want: &bq.JobReference{JobId: "foo-RANDOM"},
		},
		{
			in:   JobIDConfig{JobID: "foo", Location: "loc"},
			want: &bq.JobReference{JobId: "foo", Location: "loc"},
		},
		{
			in:     JobIDConfig{JobID: "foo"},
			client: cLoc,
			want:   &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
		},
		{
			// An explicit Location wins over the client default.
			in:     JobIDConfig{JobID: "foo", Location: "loc"},
			client: cLoc,
			want:   &bq.JobReference{JobId: "foo", Location: "loc"},
		},
	} {
		client := test.client
		if client == nil {
			client = cNoLoc
		}
		got := test.in.createJobRef(client)
		test.want.ProjectId = "projectID"
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
		}
	}
}
func fixRandomID(s string) func() { | |||
prev := randomIDFn | |||
randomIDFn = func() string { return s } | |||
return func() { randomIDFn = prev } | |||
} | |||
func checkJob(t *testing.T, i int, got, want *bq.Job) { | |||
if got.JobReference == nil { | |||
t.Errorf("#%d: empty job reference", i) | |||
return | |||
} | |||
if got.JobReference.JobId == "" { | |||
t.Errorf("#%d: empty job ID", i) | |||
return | |||
} | |||
d := testutil.Diff(got, want) | |||
if d != "" { | |||
t.Errorf("#%d: (got=-, want=+) %s", i, d) | |||
} | |||
} |
@@ -1,153 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"io" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
	// Src is the source from which data will be loaded.
	Src LoadSource

	// Dst is the table into which the data will be loaded.
	Dst *Table

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition

	// The labels associated with this job.
	Labels map[string]string

	// If non-nil, the destination table is partitioned by time.
	TimePartitioning *TimePartitioning

	// Clustering specifies the data clustering configuration for the destination table.
	Clustering *Clustering

	// Custom encryption configuration (e.g., Cloud KMS keys).
	DestinationEncryptionConfig *EncryptionConfig

	// Allows the schema of the destination table to be updated as a side effect of
	// the load job.
	SchemaUpdateOptions []string

	// For Avro-based loads, controls whether logical type annotations are used.
	// See https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
	// for additional information.
	UseAvroLogicalTypes bool
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { | |||
config := &bq.JobConfiguration{ | |||
Labels: l.Labels, | |||
Load: &bq.JobConfigurationLoad{ | |||
CreateDisposition: string(l.CreateDisposition), | |||
WriteDisposition: string(l.WriteDisposition), | |||
DestinationTable: l.Dst.toBQ(), | |||
TimePartitioning: l.TimePartitioning.toBQ(), | |||
Clustering: l.Clustering.toBQ(), | |||
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(), | |||
SchemaUpdateOptions: l.SchemaUpdateOptions, | |||
UseAvroLogicalTypes: l.UseAvroLogicalTypes, | |||
}, | |||
} | |||
media := l.Src.populateLoadConfig(config.Load) | |||
return config, media | |||
} | |||
func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig { | |||
lc := &LoadConfig{ | |||
Labels: q.Labels, | |||
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), | |||
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), | |||
Dst: bqToTable(q.Load.DestinationTable, c), | |||
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), | |||
Clustering: bqToClustering(q.Load.Clustering), | |||
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration), | |||
SchemaUpdateOptions: q.Load.SchemaUpdateOptions, | |||
UseAvroLogicalTypes: q.Load.UseAvroLogicalTypes, | |||
} | |||
var fc *FileConfig | |||
if len(q.Load.SourceUris) == 0 { | |||
s := NewReaderSource(nil) | |||
fc = &s.FileConfig | |||
lc.Src = s | |||
} else { | |||
s := NewGCSReference(q.Load.SourceUris...) | |||
fc = &s.FileConfig | |||
lc.Src = s | |||
} | |||
bqPopulateFileConfig(q.Load, fc) | |||
return lc | |||
} | |||
// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
	JobIDConfig // controls the ID and location of the created job
	LoadConfig  // what to load, where, and how
	c           *Client
}
// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
	// populateLoadConfig fills in the raw load configuration and returns the
	// upload media, or nil when the data is already in GCS.
	populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. | |||
// The returned Loader may optionally be further configured before its Run method is called. | |||
// See GCSReference and ReaderSource for additional configuration options that | |||
// affect loading. | |||
func (t *Table) LoaderFrom(src LoadSource) *Loader { | |||
return &Loader{ | |||
c: t.c, | |||
LoadConfig: LoadConfig{ | |||
Src: src, | |||
Dst: t, | |||
}, | |||
} | |||
} | |||
// Run initiates a load job.
// The result err is a named return so the deferred EndSpan call records the
// outcome of the job insertion in the trace.
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
	defer func() { trace.EndSpan(ctx, err) }()
	job, media := l.newJob()
	return l.c.insertJob(ctx, job, media)
}
func (l *Loader) newJob() (*bq.Job, io.Reader) { | |||
config, media := l.LoadConfig.toBQ() | |||
return &bq.Job{ | |||
JobReference: l.JobIDConfig.createJobRef(l.c), | |||
Configuration: config, | |||
}, media | |||
} |
@@ -1,298 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"strings" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
"github.com/google/go-cmp/cmp/cmpopts" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
func defaultLoadJob() *bq.Job { | |||
return &bq.Job{ | |||
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, | |||
Configuration: &bq.JobConfiguration{ | |||
Load: &bq.JobConfigurationLoad{ | |||
DestinationTable: &bq.TableReference{ | |||
ProjectId: "client-project-id", | |||
DatasetId: "dataset-id", | |||
TableId: "table-id", | |||
}, | |||
SourceUris: []string{"uri"}, | |||
}, | |||
}, | |||
} | |||
} | |||
// stringFieldSchema returns a minimal STRING field schema test fixture.
func stringFieldSchema() *FieldSchema {
	return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}
// nestedFieldSchema returns a RECORD field schema test fixture containing a
// single string sub-field.
func nestedFieldSchema() *FieldSchema {
	return &FieldSchema{
		Name:   "nested",
		Type:   RecordFieldType,
		Schema: Schema{stringFieldSchema()},
	}
}
// bqStringFieldSchema is the raw API counterpart of stringFieldSchema.
func bqStringFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name: "fieldname",
		Type: "STRING",
	}
}
// bqNestedFieldSchema is the raw API counterpart of nestedFieldSchema.
func bqNestedFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name:   "nested",
		Type:   "RECORD",
		Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
	}
}
func TestLoad(t *testing.T) { | |||
defer fixRandomID("RANDOM")() | |||
c := &Client{projectID: "client-project-id"} | |||
testCases := []struct { | |||
dst *Table | |||
src LoadSource | |||
jobID string | |||
location string | |||
config LoadConfig | |||
want *bq.Job | |||
}{ | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: NewGCSReference("uri"), | |||
want: defaultLoadJob(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: NewGCSReference("uri"), | |||
location: "loc", | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.JobReference.Location = "loc" | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
jobID: "ajob", | |||
config: LoadConfig{ | |||
CreateDisposition: CreateNever, | |||
WriteDisposition: WriteTruncate, | |||
Labels: map[string]string{"a": "b"}, | |||
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, | |||
Clustering: &Clustering{Fields: []string{"cfield1"}}, | |||
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, | |||
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"}, | |||
}, | |||
src: NewGCSReference("uri"), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Labels = map[string]string{"a": "b"} | |||
j.Configuration.Load.CreateDisposition = "CREATE_NEVER" | |||
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" | |||
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{ | |||
Type: "DAY", | |||
ExpirationMs: 1234, | |||
} | |||
j.Configuration.Load.Clustering = &bq.Clustering{ | |||
Fields: []string{"cfield1"}, | |||
} | |||
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} | |||
j.JobReference = &bq.JobReference{ | |||
JobId: "ajob", | |||
ProjectId: "client-project-id", | |||
} | |||
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.MaxBadRecords = 1 | |||
g.AllowJaggedRows = true | |||
g.AllowQuotedNewlines = true | |||
g.IgnoreUnknownValues = true | |||
return g | |||
}(), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.MaxBadRecords = 1 | |||
j.Configuration.Load.AllowJaggedRows = true | |||
j.Configuration.Load.AllowQuotedNewlines = true | |||
j.Configuration.Load.IgnoreUnknownValues = true | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.Schema = Schema{ | |||
stringFieldSchema(), | |||
nestedFieldSchema(), | |||
} | |||
return g | |||
}(), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.Schema = &bq.TableSchema{ | |||
Fields: []*bq.TableFieldSchema{ | |||
bqStringFieldSchema(), | |||
bqNestedFieldSchema(), | |||
}} | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.SkipLeadingRows = 1 | |||
g.SourceFormat = JSON | |||
g.Encoding = UTF_8 | |||
g.FieldDelimiter = "\t" | |||
g.Quote = "-" | |||
return g | |||
}(), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.SkipLeadingRows = 1 | |||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" | |||
j.Configuration.Load.Encoding = "UTF-8" | |||
j.Configuration.Load.FieldDelimiter = "\t" | |||
hyphen := "-" | |||
j.Configuration.Load.Quote = &hyphen | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: NewGCSReference("uri"), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
// Quote is left unset in GCSReference, so should be nil here. | |||
j.Configuration.Load.Quote = nil | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.ForceZeroQuote = true | |||
return g | |||
}(), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
empty := "" | |||
j.Configuration.Load.Quote = &empty | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *ReaderSource { | |||
r := NewReaderSource(strings.NewReader("foo")) | |||
r.SkipLeadingRows = 1 | |||
r.SourceFormat = JSON | |||
r.Encoding = UTF_8 | |||
r.FieldDelimiter = "\t" | |||
r.Quote = "-" | |||
return r | |||
}(), | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.SourceUris = nil | |||
j.Configuration.Load.SkipLeadingRows = 1 | |||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" | |||
j.Configuration.Load.Encoding = "UTF-8" | |||
j.Configuration.Load.FieldDelimiter = "\t" | |||
hyphen := "-" | |||
j.Configuration.Load.Quote = &hyphen | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.SourceFormat = Avro | |||
return g | |||
}(), | |||
config: LoadConfig{ | |||
UseAvroLogicalTypes: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.SourceFormat = "AVRO" | |||
j.Configuration.Load.UseAvroLogicalTypes = true | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: func() *ReaderSource { | |||
r := NewReaderSource(strings.NewReader("foo")) | |||
r.SourceFormat = Avro | |||
return r | |||
}(), | |||
config: LoadConfig{ | |||
UseAvroLogicalTypes: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultLoadJob() | |||
j.Configuration.Load.SourceUris = nil | |||
j.Configuration.Load.SourceFormat = "AVRO" | |||
j.Configuration.Load.UseAvroLogicalTypes = true | |||
return j | |||
}(), | |||
}, | |||
} | |||
for i, tc := range testCases { | |||
loader := tc.dst.LoaderFrom(tc.src) | |||
loader.JobID = tc.jobID | |||
loader.Location = tc.location | |||
tc.config.Src = tc.src | |||
tc.config.Dst = tc.dst | |||
loader.LoadConfig = tc.config | |||
got, _ := loader.newJob() | |||
checkJob(t, i, got, tc.want) | |||
jc, err := bqToJobConfig(got.Configuration, c) | |||
if err != nil { | |||
t.Fatalf("#%d: %v", i, err) | |||
} | |||
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig, | |||
cmp.AllowUnexported(Table{}, Client{}), | |||
cmpopts.IgnoreUnexported(ReaderSource{})) | |||
if diff != "" { | |||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||
} | |||
} | |||
} |
@@ -1,348 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"bytes" | |||
"encoding/json" | |||
"fmt" | |||
"reflect" | |||
"strconv" | |||
"time" | |||
"cloud.google.com/go/civil" | |||
) | |||
// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL.
}

// String renders the integer value, or "NULL" when invalid.
func (n NullInt64) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.Int64)
}
// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
	StringVal string
	Valid     bool // Valid is true if StringVal is not NULL.
}

// String renders the string value, or "NULL" when invalid.
func (n NullString) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.StringVal)
}
// NullGeography represents a BigQuery GEOGRAPHY string that may be NULL.
type NullGeography struct {
	GeographyVal string
	Valid        bool // Valid is true if GeographyVal is not NULL.
}

// String renders the geography value, or "NULL" when invalid.
func (n NullGeography) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.GeographyVal)
}
// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL.
}

// String renders the float value, or "NULL" when invalid.
func (n NullFloat64) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.Float64)
}
// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL.
}

// String renders the bool value, or "NULL" when invalid.
func (n NullBool) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.Bool)
}
// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
	Timestamp time.Time
	Valid     bool // Valid is true if Time is not NULL.
}

// String renders the timestamp via fmt, or "NULL" when invalid.
func (n NullTimestamp) String() string {
	if !n.Valid {
		return "NULL"
	}
	return fmt.Sprint(n.Timestamp)
}
// NullDate represents a BigQuery DATE that may be null. | |||
type NullDate struct { | |||
Date civil.Date | |||
Valid bool // Valid is true if Date is not NULL. | |||
} | |||
func (n NullDate) String() string { return nullstr(n.Valid, n.Date) } | |||
// NullTime represents a BigQuery TIME that may be null. | |||
type NullTime struct { | |||
Time civil.Time | |||
Valid bool // Valid is true if Time is not NULL. | |||
} | |||
func (n NullTime) String() string { | |||
if !n.Valid { | |||
return "<null>" | |||
} | |||
return CivilTimeString(n.Time) | |||
} | |||
// NullDateTime represents a BigQuery DATETIME that may be null. | |||
type NullDateTime struct { | |||
DateTime civil.DateTime | |||
Valid bool // Valid is true if DateTime is not NULL. | |||
} | |||
func (n NullDateTime) String() string { | |||
if !n.Valid { | |||
return "<null>" | |||
} | |||
return CivilDateTimeString(n.DateTime) | |||
} | |||
// MarshalJSON converts the NullInt64 to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }
// MarshalJSON converts the NullFloat64 to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }
// MarshalJSON converts the NullBool to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }
// MarshalJSON converts the NullString to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }
// MarshalJSON converts the NullGeography to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullGeography) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.GeographyVal) }
// MarshalJSON converts the NullTimestamp to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
// MarshalJSON converts the NullDate to JSON.
// An invalid value encodes as the JSON null literal (via nulljson).
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
// MarshalJSON converts the NullTime to JSON. | |||
func (n NullTime) MarshalJSON() ([]byte, error) { | |||
if !n.Valid { | |||
return jsonNull, nil | |||
} | |||
return []byte(`"` + CivilTimeString(n.Time) + `"`), nil | |||
} | |||
// MarshalJSON converts the NullDateTime to JSON. | |||
func (n NullDateTime) MarshalJSON() ([]byte, error) { | |||
if !n.Valid { | |||
return jsonNull, nil | |||
} | |||
return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil | |||
} | |||
// nullstr formats v with fmt.Sprint, or returns "NULL" when valid is false.
func nullstr(valid bool, v interface{}) string {
	if valid {
		return fmt.Sprint(v)
	}
	return "NULL"
}
// jsonNull is the canonical JSON null literal shared by the null-wrapper
// marshalers and unmarshalers in this file.
var jsonNull = []byte("null")

// nulljson marshals v to JSON, or returns the JSON null literal when valid
// is false.
func nulljson(valid bool, v interface{}) ([]byte, error) {
	if valid {
		return json.Marshal(v)
	}
	return jsonNull, nil
}
// UnmarshalJSON converts JSON into a NullInt64. | |||
func (n *NullInt64) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Int64 = 0 | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.Int64); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullFloat64. | |||
func (n *NullFloat64) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Float64 = 0 | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.Float64); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullBool. | |||
func (n *NullBool) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Bool = false | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.Bool); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullString. | |||
func (n *NullString) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.StringVal = "" | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.StringVal); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullGeography. | |||
func (n *NullGeography) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.GeographyVal = "" | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.GeographyVal); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullTimestamp. | |||
func (n *NullTimestamp) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Timestamp = time.Time{} | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.Timestamp); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullDate. | |||
func (n *NullDate) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Date = civil.Date{} | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
if err := json.Unmarshal(b, &n.Date); err != nil { | |||
return err | |||
} | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullTime. | |||
func (n *NullTime) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.Time = civil.Time{} | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
s, err := strconv.Unquote(string(b)) | |||
if err != nil { | |||
return err | |||
} | |||
t, err := civil.ParseTime(s) | |||
if err != nil { | |||
return err | |||
} | |||
n.Time = t | |||
n.Valid = true | |||
return nil | |||
} | |||
// UnmarshalJSON converts JSON into a NullDateTime. | |||
func (n *NullDateTime) UnmarshalJSON(b []byte) error { | |||
n.Valid = false | |||
n.DateTime = civil.DateTime{} | |||
if bytes.Equal(b, jsonNull) { | |||
return nil | |||
} | |||
s, err := strconv.Unquote(string(b)) | |||
if err != nil { | |||
return err | |||
} | |||
dt, err := parseCivilDateTime(s) | |||
if err != nil { | |||
return err | |||
} | |||
n.DateTime = dt | |||
n.Valid = true | |||
return nil | |||
} | |||
// Cached reflect.Types of the nullable wrapper structs, used by
// nullableFieldType to map Go types to BigQuery field types.
var (
	typeOfNullInt64     = reflect.TypeOf(NullInt64{})
	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
	typeOfNullBool      = reflect.TypeOf(NullBool{})
	typeOfNullString    = reflect.TypeOf(NullString{})
	typeOfNullGeography = reflect.TypeOf(NullGeography{})
	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
	typeOfNullDate      = reflect.TypeOf(NullDate{})
	typeOfNullTime      = reflect.TypeOf(NullTime{})
	typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
)
func nullableFieldType(t reflect.Type) FieldType { | |||
switch t { | |||
case typeOfNullInt64: | |||
return IntegerFieldType | |||
case typeOfNullFloat64: | |||
return FloatFieldType | |||
case typeOfNullBool: | |||
return BooleanFieldType | |||
case typeOfNullString: | |||
return StringFieldType | |||
case typeOfNullGeography: | |||
return GeographyFieldType | |||
case typeOfNullTimestamp: | |||
return TimestampFieldType | |||
case typeOfNullDate: | |||
return DateFieldType | |||
case typeOfNullTime: | |||
return TimeFieldType | |||
case typeOfNullDateTime: | |||
return DateTimeFieldType | |||
default: | |||
return "" | |||
} | |||
} |
@@ -1,75 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"encoding/json" | |||
"reflect" | |||
"testing" | |||
"cloud.google.com/go/civil" | |||
"cloud.google.com/go/internal/testutil" | |||
) | |||
// Shared civil time fixtures for the JSON round-trip tests below.
// Nanosecond 1000 is one microsecond, which matches the ".000001" fractional
// part expected in the serialized forms.
var (
	nullsTestTime     = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
	nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)
// TestNullsJSON round-trips each Null* wrapper through json.Marshal and
// json.Unmarshal: valid wrappers must serialize as their payload, invalid
// ones as JSON null, and decoding the output must reproduce the input.
func TestNullsJSON(t *testing.T) {
	for _, test := range []struct {
		in   interface{}
		want string
	}{
		{&NullInt64{Valid: true, Int64: 3}, `3`},
		{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
		{&NullBool{Valid: true, Bool: true}, `true`},
		{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
		{&NullGeography{Valid: true, GeographyVal: "ST_GEOPOINT(47.649154, -122.350220)"}, `"ST_GEOPOINT(47.649154, -122.350220)"`},
		{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
		{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
		{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
		{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},
		{&NullInt64{}, `null`},
		{&NullFloat64{}, `null`},
		{&NullBool{}, `null`},
		{&NullString{}, `null`},
		{&NullGeography{}, `null`},
		{&NullTimestamp{}, `null`},
		{&NullDate{}, `null`},
		{&NullTime{}, `null`},
		{&NullDateTime{}, `null`},
	} {
		bytes, err := json.Marshal(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if got, want := string(bytes), test.want; got != want {
			t.Errorf("%#v: got %s, want %s", test.in, got, want)
		}
		// Unmarshal into a freshly allocated value of the same concrete type
		// and compare with the input to verify the round trip.
		typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
		value := reflect.New(typ).Interface()
		err = json.Unmarshal(bytes, value)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(value, test.in) {
			t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
		}
	}
}
@@ -1,38 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
) | |||
// TestOCTracing verifies that running a query records at least one
// OpenCensus span in the test exporter. It requires a real client from
// getClient, so it is effectively an integration test.
func TestOCTracing(t *testing.T) {
	ctx := context.Background()
	client := getClient(t)
	defer client.Close()
	te := testutil.NewTestExporter()
	defer te.Unregister()
	q := client.Query("select *")
	q.Run(ctx) // Doesn't matter if we get an error; span should be created either way
	if len(te.Spans) == 0 {
		t.Fatalf("Expected some spans to be created, but got %d", 0)
	}
}
@@ -1,370 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"encoding/base64" | |||
"errors" | |||
"fmt" | |||
"math/big" | |||
"reflect" | |||
"regexp" | |||
"time" | |||
"cloud.google.com/go/civil" | |||
"cloud.google.com/go/internal/fields" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
var (
	// timestampFormat is the layout used to send time.Time values as
	// TIMESTAMP parameters (microsecond precision, explicit zone offset).
	// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
	timestampFormat = "2006-01-02 15:04:05.999999-07:00"
	// validFieldName matches legal BigQuery column names: a letter or
	// underscore followed by up to 127 word characters.
	// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

// nullableTagOption is the only option accepted in a `bigquery:"..."` struct tag.
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { | |||
name, keep, opts, err := fields.ParseStandardTag("bigquery", t) | |||
if err != nil { | |||
return "", false, nil, err | |||
} | |||
if name != "" && !validFieldName.MatchString(name) { | |||
return "", false, nil, invalidFieldNameError(name) | |||
} | |||
for _, opt := range opts { | |||
if opt != nullableTagOption { | |||
return "", false, nil, fmt.Errorf( | |||
"bigquery: invalid tag option %q. The only valid option is %q", | |||
opt, nullableTagOption) | |||
} | |||
} | |||
return name, keep, opts, nil | |||
} | |||
// invalidFieldNameError reports a struct field name that is not a legal
// BigQuery column name (see validFieldName).
type invalidFieldNameError string

// Error implements the error interface.
func (e invalidFieldNameError) Error() string {
	return fmt.Sprintf("bigquery: invalid name %q of field in struct", string(e))
}
// fieldCache caches parsed struct field information per type, using
// bqTagParser to interpret bigquery struct tags.
var fieldCache = fields.NewCache(bqTagParser, nil, nil)
// Shared singletons for the scalar BigQuery parameter types, so paramType
// can return them without allocating.
var (
	int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
	float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
	boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
	stringParamType    = &bq.QueryParameterType{Type: "STRING"}
	bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
	dateParamType      = &bq.QueryParameterType{Type: "DATE"}
	timeParamType      = &bq.QueryParameterType{Type: "TIME"}
	dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
	timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
	numericParamType   = &bq.QueryParameterType{Type: "NUMERIC"}
)
// Reflection types of the Go values that map to non-trivial scalar parameter
// types, computed once for the type switches in paramType and paramValue.
var (
	typeOfDate     = reflect.TypeOf(civil.Date{})
	typeOfTime     = reflect.TypeOf(civil.Time{})
	typeOfDateTime = reflect.TypeOf(civil.DateTime{})
	typeOfGoTime   = reflect.TypeOf(time.Time{})
	typeOfRat      = reflect.TypeOf(&big.Rat{})
)
// A QueryParameter is a parameter to a query.
type QueryParameter struct {
	// Name is used for named parameter mode.
	// It must match the name in the query case-insensitively.
	// For positional ("?") parameters, leave Name empty.
	Name string

	// Value is the value of the parameter.
	//
	// When you create a QueryParameter to send to BigQuery, the following Go types
	// are supported, with their corresponding Bigquery types:
	// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
	//   Note that uint, uint64 and uintptr are not supported, because
	//   they may contain values that cannot fit into a 64-bit signed integer.
	// float32, float64: FLOAT64
	// bool: BOOL
	// string: STRING
	// []byte: BYTES
	// time.Time: TIMESTAMP
	// *big.Rat: NUMERIC
	// Arrays and slices of the above.
	// Structs of the above. Only the exported fields are used.
	//
	// BigQuery does not support params of type GEOGRAPHY.  For users wishing
	// to parameterize Geography values, use string parameters and cast in the
	// SQL query, e.g. `SELECT ST_GeogFromText(@string_param) as geo`
	//
	// When a QueryParameter is returned inside a QueryConfig from a call to
	// Job.Config:
	// Integers are of type int64.
	// Floating-point values are of type float64.
	// Arrays are of type []interface{}, regardless of the array element type.
	// Structs are of type map[string]interface{}.
	Value interface{}
}
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) { | |||
pv, err := paramValue(reflect.ValueOf(p.Value)) | |||
if err != nil { | |||
return nil, err | |||
} | |||
pt, err := paramType(reflect.TypeOf(p.Value)) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &bq.QueryParameter{ | |||
Name: p.Name, | |||
ParameterValue: &pv, | |||
ParameterType: pt, | |||
}, nil | |||
} | |||
// paramType maps a Go type to its bq.QueryParameterType. Special-cased civil
// and time types are checked first; then kinds are dispatched, with []byte
// treated as BYTES, other slices/arrays as ARRAY, and structs (optionally
// behind a pointer) as STRUCT. Unsupported types (uint, uint64, chan, etc.)
// return an error.
func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
	if t == nil {
		return nil, errors.New("bigquery: nil parameter")
	}
	switch t {
	case typeOfDate:
		return dateParamType, nil
	case typeOfTime:
		return timeParamType, nil
	case typeOfDateTime:
		return dateTimeParamType, nil
	case typeOfGoTime:
		return timestampParamType, nil
	case typeOfRat:
		return numericParamType, nil
	}
	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64ParamType, nil
	case reflect.Float32, reflect.Float64:
		return float64ParamType, nil
	case reflect.Bool:
		return boolParamType, nil
	case reflect.String:
		return stringParamType, nil
	case reflect.Slice:
		// []byte is BYTES; any other slice falls through to the ARRAY case.
		if t.Elem().Kind() == reflect.Uint8 {
			return bytesParamType, nil
		}
		fallthrough
	case reflect.Array:
		et, err := paramType(t.Elem())
		if err != nil {
			return nil, err
		}
		return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
	case reflect.Ptr:
		// Only pointers to structs are supported; anything else breaks out
		// of the switch and hits the error return below.
		if t.Elem().Kind() != reflect.Struct {
			break
		}
		t = t.Elem()
		fallthrough
	case reflect.Struct:
		var fts []*bq.QueryParameterTypeStructTypes
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return nil, err
		}
		for _, f := range fields {
			pt, err := paramType(f.Type)
			if err != nil {
				return nil, err
			}
			fts = append(fts, &bq.QueryParameterTypeStructTypes{
				Name: f.Name,
				Type: pt,
			})
		}
		return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
	}
	return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}
// paramValue converts a Go value into a bq.QueryParameterValue. Civil/time
// types serialize to their canonical strings, []byte to base64, slices and
// arrays to ArrayValues, and structs (optionally behind a non-nil pointer)
// to StructValues. Anything else is formatted with fmt.Sprint and validated
// later by paramType.
func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
	var res bq.QueryParameterValue
	if !v.IsValid() {
		return res, errors.New("bigquery: nil parameter")
	}
	t := v.Type()
	switch t {
	case typeOfDate:
		res.Value = v.Interface().(civil.Date).String()
		return res, nil
	case typeOfTime:
		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
		// (If we send nanoseconds, then when we try to read the result we get "query job
		// missing destination table").
		res.Value = CivilTimeString(v.Interface().(civil.Time))
		return res, nil
	case typeOfDateTime:
		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
		return res, nil
	case typeOfGoTime:
		res.Value = v.Interface().(time.Time).Format(timestampFormat)
		return res, nil
	case typeOfRat:
		res.Value = NumericString(v.Interface().(*big.Rat))
		return res, nil
	}
	switch t.Kind() {
	case reflect.Slice:
		// []byte is sent as base64; other slices fall through to ARRAY.
		if t.Elem().Kind() == reflect.Uint8 {
			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
			return res, nil
		}
		fallthrough
	case reflect.Array:
		var vals []*bq.QueryParameterValue
		for i := 0; i < v.Len(); i++ {
			val, err := paramValue(v.Index(i))
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			vals = append(vals, &val)
		}
		return bq.QueryParameterValue{ArrayValues: vals}, nil
	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
		}
		t = t.Elem()
		v = v.Elem()
		if !v.IsValid() {
			// nil pointer becomes empty value
			return res, nil
		}
		fallthrough
	case reflect.Struct:
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return bq.QueryParameterValue{}, err
		}
		res.StructValues = map[string]bq.QueryParameterValue{}
		for _, f := range fields {
			fv := v.FieldByIndex(f.Index)
			fp, err := paramValue(fv)
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			res.StructValues[f.Name] = fp
		}
		return res, nil
	}
	// None of the above: assume a scalar type. (If it's not a valid type,
	// paramType will catch the error.)
	res.Value = fmt.Sprint(v.Interface())
	// Ensure empty string values are sent.
	if res.Value == "" {
		res.ForceSendFields = append(res.ForceSendFields, "Value")
	}
	return res, nil
}
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) { | |||
p := QueryParameter{Name: q.Name} | |||
val, err := convertParamValue(q.ParameterValue, q.ParameterType) | |||
if err != nil { | |||
return QueryParameter{}, err | |||
} | |||
p.Value = val | |||
return p, nil | |||
} | |||
// paramTypeToFieldType maps scalar parameter type names to the FieldType
// used by convertBasicType. TIMESTAMP and DATETIME are deliberately absent:
// convertParamValue parses those two specially.
var paramTypeToFieldType = map[string]FieldType{
	int64ParamType.Type:   IntegerFieldType,
	float64ParamType.Type: FloatFieldType,
	boolParamType.Type:    BooleanFieldType,
	stringParamType.Type:  StringFieldType,
	bytesParamType.Type:   BytesFieldType,
	dateParamType.Type:    DateFieldType,
	timeParamType.Type:    TimeFieldType,
	numericParamType.Type: NumericFieldType,
}
// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
// ARRAY and STRUCT recurse; a nil value yields a typed nil. TIMESTAMP and
// DATETIME are parsed directly; every other type goes through
// convertBasicType using paramTypeToFieldType.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
	switch qtype.Type {
	case "ARRAY":
		if qval == nil {
			return []interface{}(nil), nil
		}
		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
	case "STRUCT":
		if qval == nil {
			return map[string]interface{}(nil), nil
		}
		return convertParamStruct(qval.StructValues, qtype.StructTypes)
	case "TIMESTAMP":
		return time.Parse(timestampFormat, qval.Value)
	case "DATETIME":
		return parseCivilDateTime(qval.Value)
	default:
		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
	}
}
// convertParamArray converts a query parameter array value to a Go value. It | |||
// always returns a []interface{}. | |||
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) { | |||
var vals []interface{} | |||
for _, el := range elVals { | |||
val, err := convertParamValue(el, elType) | |||
if err != nil { | |||
return nil, err | |||
} | |||
vals = append(vals, val) | |||
} | |||
return vals, nil | |||
} | |||
// convertParamStruct converts a query parameter struct value into a Go value. It | |||
// always returns a map[string]interface{}. | |||
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) { | |||
vals := map[string]interface{}{} | |||
for _, st := range sTypes { | |||
if sv, ok := sVals[st.Name]; ok { | |||
val, err := convertParamValue(&sv, st.Type) | |||
if err != nil { | |||
return nil, err | |||
} | |||
vals[st.Name] = val | |||
} else { | |||
vals[st.Name] = nil | |||
} | |||
} | |||
return vals, nil | |||
} |
@@ -1,385 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"math" | |||
"math/big" | |||
"reflect" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/civil" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// scalarTests pairs Go scalar values with the string form paramValue should
// produce and the type paramType should report, covering every supported
// scalar kind.
var scalarTests = []struct {
	val      interface{}            // The Go value
	wantVal  string                 // paramValue's desired output
	wantType *bq.QueryParameterType // paramType's desired output
}{
	{int64(0), "0", int64ParamType},
	{3.14, "3.14", float64ParamType},
	{3.14159e-87, "3.14159e-87", float64ParamType},
	{true, "true", boolParamType},
	{"string", "string", stringParamType},
	{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
	{math.NaN(), "NaN", float64ParamType},
	{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
		"2016-03-20 04:22:09.000005-01:02",
		timestampParamType},
	{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
	{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
	{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
		"2016-03-20 04:05:06.789000",
		dateTimeParamType},
	{big.NewRat(12345, 1000), "12.345000000", numericParamType},
}
// S1 and S2 are fixtures for STRUCT parameter conversion, including a
// nested struct reached through a pointer field.
type (
	S1 struct {
		A int
		B *S2
		C bool
	}

	S2 struct {
		D string
	}
)
// Expected conversions of the s1 fixture: its parameter type, its wire
// value, and the map form returned when the parameter is read back from a
// job configuration.
var (
	s1 = S1{
		A: 1,
		B: &S2{D: "s"},
		C: true,
	}

	s1ParamType = &bq.QueryParameterType{
		Type: "STRUCT",
		StructTypes: []*bq.QueryParameterTypeStructTypes{
			{Name: "A", Type: int64ParamType},
			{Name: "B", Type: &bq.QueryParameterType{
				Type: "STRUCT",
				StructTypes: []*bq.QueryParameterTypeStructTypes{
					{Name: "D", Type: stringParamType},
				},
			}},
			{Name: "C", Type: boolParamType},
		},
	}

	s1ParamValue = bq.QueryParameterValue{
		StructValues: map[string]bq.QueryParameterValue{
			"A": sval("1"),
			"B": {
				StructValues: map[string]bq.QueryParameterValue{
					"D": sval("s"),
				},
			},
			"C": sval("true"),
		},
	}

	s1ParamReturnValue = map[string]interface{}{
		"A": int64(1),
		"B": map[string]interface{}{"D": "s"},
		"C": true,
	}
)
func sval(s string) bq.QueryParameterValue { | |||
return bq.QueryParameterValue{Value: s} | |||
} | |||
func TestParamValueScalar(t *testing.T) { | |||
for _, test := range scalarTests { | |||
got, err := paramValue(reflect.ValueOf(test.val)) | |||
if err != nil { | |||
t.Errorf("%v: got %v, want nil", test.val, err) | |||
continue | |||
} | |||
want := sval(test.wantVal) | |||
if !testutil.Equal(got, want) { | |||
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) | |||
} | |||
} | |||
} | |||
// TestParamValueArray checks paramValue on slices and arrays: nil and empty
// slices both become an empty value, while non-empty slices and arrays
// become per-element ArrayValues.
func TestParamValueArray(t *testing.T) {
	qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
		{Value: "1"},
		{Value: "2"},
	},
	}
	for _, test := range []struct {
		val  interface{}
		want bq.QueryParameterValue
	}{
		{[]int(nil), bq.QueryParameterValue{}},
		{[]int{}, bq.QueryParameterValue{}},
		{[]int{1, 2}, qpv},
		{[2]int{1, 2}, qpv},
	} {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
		}
	}
}
func TestParamValueStruct(t *testing.T) { | |||
got, err := paramValue(reflect.ValueOf(s1)) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
if !testutil.Equal(got, s1ParamValue) { | |||
t.Errorf("got %+v\nwant %+v", got, s1ParamValue) | |||
} | |||
} | |||
func TestParamValueErrors(t *testing.T) { | |||
// paramValue lets a few invalid types through, but paramType catches them. | |||
// Since we never call one without the other that's fine. | |||
for _, val := range []interface{}{nil, new([]int)} { | |||
_, err := paramValue(reflect.ValueOf(val)) | |||
if err == nil { | |||
t.Errorf("%v (%T): got nil, want error", val, val) | |||
} | |||
} | |||
} | |||
// TestParamType checks paramType on the scalar fixtures, then on composite
// cases: small unsigned ints map to INT64, []byte to BYTES, slices/arrays to
// ARRAY, and structs to the expected STRUCT type.
func TestParamType(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.wantType) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
		}
	}
	for _, test := range []struct {
		val  interface{}
		want *bq.QueryParameterType
	}{
		{uint32(32767), int64ParamType},
		{[]byte("foo"), bytesParamType},
		{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
		{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
		{S1{}, s1ParamType},
	} {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
		}
	}
}
func TestParamTypeErrors(t *testing.T) { | |||
for _, val := range []interface{}{ | |||
nil, uint(0), new([]int), make(chan int), | |||
} { | |||
_, err := paramType(reflect.TypeOf(val)) | |||
if err == nil { | |||
t.Errorf("%v (%T): got nil, want error", val, val) | |||
} | |||
} | |||
} | |||
// TestConvertParamValue round-trips values through paramValue/paramType and
// back via convertParamValue, for scalars, arrays (including the empty
// value), and the struct fixture.
func TestConvertParamValue(t *testing.T) {
	// Scalars.
	for _, test := range scalarTests {
		pval, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		ptype, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		got, err := convertParamValue(&pval, ptype)
		if err != nil {
			t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
		}
		if !testutil.Equal(got, test.val) {
			t.Errorf("%#v: got %#v", test.val, got)
		}
	}
	// Arrays.
	for _, test := range []struct {
		pval *bq.QueryParameterValue
		want []interface{}
	}{
		{
			&bq.QueryParameterValue{},
			nil,
		},
		{
			&bq.QueryParameterValue{
				ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
			},
			[]interface{}{int64(1), int64(2)},
		},
	} {
		ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
		got, err := convertParamValue(test.pval, ptype)
		if err != nil {
			t.Fatalf("%+v: %v", test.pval, err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
		}
	}
	// Structs.
	got, err := convertParamValue(&s1ParamValue, s1ParamType)
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, s1ParamReturnValue) {
		t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
	}
}
// TestIntegration_ScalarParam sends each scalar fixture through a live query
// (via paramRoundTrip) and checks both the returned data value and the
// parameter echoed back in the job configuration. Timestamps are compared at
// microsecond precision, since that is what the service stores.
func TestIntegration_ScalarParam(t *testing.T) {
	roundToMicros := cmp.Transformer("RoundToMicros",
		func(t time.Time) time.Time { return t.Round(time.Microsecond) })
	c := getClient(t)
	for _, test := range scalarTests {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.val, roundToMicros) {
			t.Errorf("\ngot  %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
		}
		if !testutil.Equal(gotParam, test.val, roundToMicros) {
			t.Errorf("\ngot  %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
		}
	}
}
// TestIntegration_OtherParam round-trips composite parameters (slices,
// arrays, structs) through a live query, checking both the data row returned
// and the parameter value reported in the job configuration.
func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val       interface{}
		wantData  interface{}
		wantParam interface{}
	}{
		{[]int(nil), []Value(nil), []interface{}(nil)},
		{[]int{}, []Value(nil), []interface{}(nil)},
		{
			[]int{1, 2},
			[]Value{int64(1), int64(2)},
			[]interface{}{int64(1), int64(2)},
		},
		{
			[3]int{1, 2, 3},
			[]Value{int64(1), int64(2), int64(3)},
			[]interface{}{int64(1), int64(2), int64(3)},
		},
		{
			S1{},
			[]Value{int64(0), nil, false},
			map[string]interface{}{
				"A": int64(0),
				"B": nil,
				"C": false,
			},
		},
		{
			s1,
			[]Value{int64(1), []Value{"s"}, true},
			s1ParamReturnValue,
		},
	} {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.wantData) {
			t.Errorf("%#v:\ngot  %#v (%T)\nwant %#v (%T)",
				test.val, gotData, gotData, test.wantData, test.wantData)
		}
		if !testutil.Equal(gotParam, test.wantParam) {
			t.Errorf("%#v:\ngot  %#v (%T)\nwant %#v (%T)",
				test.val, gotParam, gotParam, test.wantParam, test.wantParam)
		}
	}
}
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
	ctx := context.Background()
	// Positional parameter syntax, so the parameter needs no name.
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	job, err := q.Run(ctx)
	if err != nil {
		return nil, nil, err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, nil, err
	}
	// The query selects exactly one column, so the row must have one value.
	if len(val) != 1 {
		return nil, nil, errors.New("wrong number of values")
	}
	conf, err := job.Config()
	if err != nil {
		return nil, nil, err
	}
	return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}
// TestQueryParameter_toBQ checks that an empty-string parameter value sets
// ForceSendFields on the wire value, so the API receives "" instead of the
// field being omitted.
func TestQueryParameter_toBQ(t *testing.T) {
	tests := []struct {
		in   QueryParameter
		want []string
	}{
		{
			in:   QueryParameter{Name: "name", Value: ""},
			want: []string{"Value"},
		},
	}
	for _, test := range tests {
		q, err := test.in.toBQ()
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
		got := q.ParameterValue.ForceSendFields
		if !cmp.Equal(test.want, got) {
			t.Fatalf("want %v, got %v", test.want, got)
		}
	}
}
@@ -1,328 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// QueryConfig holds the configuration for a query job. | |||
type QueryConfig struct { | |||
// Dst is the table into which the results of the query will be written. | |||
// If this field is nil, a temporary table will be created. | |||
Dst *Table | |||
// The query to execute. See https://cloud.google.com/bigquery/query-reference for details. | |||
Q string | |||
// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. | |||
// If DefaultProjectID is set, DefaultDatasetID must also be set. | |||
DefaultProjectID string | |||
DefaultDatasetID string | |||
// TableDefinitions describes data sources outside of BigQuery. | |||
// The map keys may be used as table names in the query string. | |||
// | |||
// When a QueryConfig is returned from Job.Config, the map values | |||
// are always of type *ExternalDataConfig. | |||
TableDefinitions map[string]ExternalData | |||
// CreateDisposition specifies the circumstances under which the destination table will be created. | |||
// The default is CreateIfNeeded. | |||
CreateDisposition TableCreateDisposition | |||
// WriteDisposition specifies how existing data in the destination table is treated. | |||
// The default is WriteEmpty. | |||
WriteDisposition TableWriteDisposition | |||
// DisableQueryCache prevents results being fetched from the query cache. | |||
// If this field is false, results are fetched from the cache if they are available. | |||
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified. | |||
// Cached results are only available when TableID is unspecified in the query's destination Table. | |||
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching | |||
DisableQueryCache bool | |||
// DisableFlattenedResults prevents results being flattened. | |||
// If this field is false, results from nested and repeated fields are flattened. | |||
// DisableFlattenedResults implies AllowLargeResults | |||
// For more information, see https://cloud.google.com/bigquery/docs/data#nested | |||
DisableFlattenedResults bool | |||
// AllowLargeResults allows the query to produce arbitrarily large result tables. | |||
// The destination must be a table. | |||
// When using this option, queries will take longer to execute, even if the result set is small. | |||
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults | |||
AllowLargeResults bool | |||
// Priority specifies the priority with which to schedule the query. | |||
// The default priority is InteractivePriority. | |||
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries | |||
Priority QueryPriority | |||
// MaxBillingTier sets the maximum billing tier for a Query. | |||
// Queries that have resource usage beyond this tier will fail (without | |||
// incurring a charge). If this field is zero, the project default will be used. | |||
MaxBillingTier int | |||
// MaxBytesBilled limits the number of bytes billed for | |||
// this job. Queries that would exceed this limit will fail (without incurring | |||
// a charge). | |||
// If this field is less than 1, the project default will be | |||
// used. | |||
MaxBytesBilled int64 | |||
// UseStandardSQL causes the query to use standard SQL. The default. | |||
// Deprecated: use UseLegacySQL. | |||
UseStandardSQL bool | |||
// UseLegacySQL causes the query to use legacy SQL. | |||
UseLegacySQL bool | |||
// Parameters is a list of query parameters. The presence of parameters | |||
// implies the use of standard SQL. | |||
// If the query uses positional syntax ("?"), then no parameter may have a name. | |||
// If the query uses named syntax ("@p"), then all parameters must have names. | |||
// It is illegal to mix positional and named syntax. | |||
Parameters []QueryParameter | |||
// TimePartitioning specifies time-based partitioning | |||
// for the destination table. | |||
TimePartitioning *TimePartitioning | |||
// Clustering specifies the data clustering configuration for the destination table. | |||
Clustering *Clustering | |||
// The labels associated with this job. | |||
Labels map[string]string | |||
// If true, don't actually run this job. A valid query will return a mostly | |||
// empty response with some processing statistics, while an invalid query will | |||
// return the same error it would if it wasn't a dry run. | |||
// | |||
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then | |||
// call LastStatus on the returned job to get statistics. Calling Status on a | |||
// dry-run job will fail. | |||
DryRun bool | |||
// Custom encryption configuration (e.g., Cloud KMS keys). | |||
DestinationEncryptionConfig *EncryptionConfig | |||
// Allows the schema of the destination table to be updated as a side effect of | |||
// the query job. | |||
SchemaUpdateOptions []string | |||
} | |||
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) { | |||
qconf := &bq.JobConfigurationQuery{ | |||
Query: qc.Q, | |||
CreateDisposition: string(qc.CreateDisposition), | |||
WriteDisposition: string(qc.WriteDisposition), | |||
AllowLargeResults: qc.AllowLargeResults, | |||
Priority: string(qc.Priority), | |||
MaximumBytesBilled: qc.MaxBytesBilled, | |||
TimePartitioning: qc.TimePartitioning.toBQ(), | |||
Clustering: qc.Clustering.toBQ(), | |||
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(), | |||
SchemaUpdateOptions: qc.SchemaUpdateOptions, | |||
} | |||
if len(qc.TableDefinitions) > 0 { | |||
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) | |||
} | |||
for name, data := range qc.TableDefinitions { | |||
qconf.TableDefinitions[name] = data.toBQ() | |||
} | |||
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" { | |||
qconf.DefaultDataset = &bq.DatasetReference{ | |||
DatasetId: qc.DefaultDatasetID, | |||
ProjectId: qc.DefaultProjectID, | |||
} | |||
} | |||
if tier := int64(qc.MaxBillingTier); tier > 0 { | |||
qconf.MaximumBillingTier = &tier | |||
} | |||
f := false | |||
if qc.DisableQueryCache { | |||
qconf.UseQueryCache = &f | |||
} | |||
if qc.DisableFlattenedResults { | |||
qconf.FlattenResults = &f | |||
// DisableFlattenResults implies AllowLargeResults. | |||
qconf.AllowLargeResults = true | |||
} | |||
if qc.UseStandardSQL && qc.UseLegacySQL { | |||
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") | |||
} | |||
if len(qc.Parameters) > 0 && qc.UseLegacySQL { | |||
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL") | |||
} | |||
ptrue := true | |||
pfalse := false | |||
if qc.UseLegacySQL { | |||
qconf.UseLegacySql = &ptrue | |||
} else { | |||
qconf.UseLegacySql = &pfalse | |||
} | |||
if qc.Dst != nil && !qc.Dst.implicitTable() { | |||
qconf.DestinationTable = qc.Dst.toBQ() | |||
} | |||
for _, p := range qc.Parameters { | |||
qp, err := p.toBQ() | |||
if err != nil { | |||
return nil, err | |||
} | |||
qconf.QueryParameters = append(qconf.QueryParameters, qp) | |||
} | |||
return &bq.JobConfiguration{ | |||
Labels: qc.Labels, | |||
DryRun: qc.DryRun, | |||
Query: qconf, | |||
}, nil | |||
} | |||
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) { | |||
qq := q.Query | |||
qc := &QueryConfig{ | |||
Labels: q.Labels, | |||
DryRun: q.DryRun, | |||
Q: qq.Query, | |||
CreateDisposition: TableCreateDisposition(qq.CreateDisposition), | |||
WriteDisposition: TableWriteDisposition(qq.WriteDisposition), | |||
AllowLargeResults: qq.AllowLargeResults, | |||
Priority: QueryPriority(qq.Priority), | |||
MaxBytesBilled: qq.MaximumBytesBilled, | |||
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql, | |||
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning), | |||
Clustering: bqToClustering(qq.Clustering), | |||
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration), | |||
SchemaUpdateOptions: qq.SchemaUpdateOptions, | |||
} | |||
qc.UseStandardSQL = !qc.UseLegacySQL | |||
if len(qq.TableDefinitions) > 0 { | |||
qc.TableDefinitions = make(map[string]ExternalData) | |||
} | |||
for name, qedc := range qq.TableDefinitions { | |||
edc, err := bqToExternalDataConfig(&qedc) | |||
if err != nil { | |||
return nil, err | |||
} | |||
qc.TableDefinitions[name] = edc | |||
} | |||
if qq.DefaultDataset != nil { | |||
qc.DefaultProjectID = qq.DefaultDataset.ProjectId | |||
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId | |||
} | |||
if qq.MaximumBillingTier != nil { | |||
qc.MaxBillingTier = int(*qq.MaximumBillingTier) | |||
} | |||
if qq.UseQueryCache != nil && !*qq.UseQueryCache { | |||
qc.DisableQueryCache = true | |||
} | |||
if qq.FlattenResults != nil && !*qq.FlattenResults { | |||
qc.DisableFlattenedResults = true | |||
} | |||
if qq.DestinationTable != nil { | |||
qc.Dst = bqToTable(qq.DestinationTable, c) | |||
} | |||
for _, qp := range qq.QueryParameters { | |||
p, err := bqToQueryParameter(qp) | |||
if err != nil { | |||
return nil, err | |||
} | |||
qc.Parameters = append(qc.Parameters, p) | |||
} | |||
return qc, nil | |||
} | |||
// QueryPriority specifies a priority with which a query is to be executed.
//
// The valid values are BatchPriority and InteractivePriority; the zero value
// leaves the choice to the service (which defaults to interactive).
type QueryPriority string

// Valid QueryPriority values.
const (
	// BatchPriority specifies that the query should be scheduled with the
	// batch priority. BigQuery queues each batch query on your behalf, and
	// starts the query as soon as idle resources are available, usually within
	// a few minutes. If BigQuery hasn't started the query within 24 hours,
	// BigQuery changes the job priority to interactive. Batch queries don't
	// count towards your concurrent rate limit, which can make it easier to
	// start many queries at once.
	//
	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
	BatchPriority QueryPriority = "BATCH"
	// InteractivePriority specifies that the query should be scheduled with
	// interactive priority, which means that the query is executed as soon as
	// possible. Interactive queries count towards your concurrent rate limit
	// and your daily limit. It is the default priority with which queries get
	// executed.
	//
	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
	InteractivePriority QueryPriority = "INTERACTIVE"
)
// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	// JobIDConfig controls how the ID of the backing job is chosen.
	JobIDConfig
	// QueryConfig holds the settings sent to the service when the query runs.
	QueryConfig
	// client is the Client that created this Query; Run inserts the job through it.
	client *Client
}
// Query creates a query with string q. | |||
// The returned Query may optionally be further configured before its Run method is called. | |||
func (c *Client) Query(q string) *Query { | |||
return &Query{ | |||
client: c, | |||
QueryConfig: QueryConfig{Q: q}, | |||
} | |||
} | |||
// Run initiates a query job. | |||
func (q *Query) Run(ctx context.Context) (j *Job, err error) { | |||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run") | |||
defer func() { trace.EndSpan(ctx, err) }() | |||
job, err := q.newJob() | |||
if err != nil { | |||
return nil, err | |||
} | |||
j, err = q.client.insertJob(ctx, job, nil) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return j, nil | |||
} | |||
func (q *Query) newJob() (*bq.Job, error) { | |||
config, err := q.QueryConfig.toBQ() | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &bq.Job{ | |||
JobReference: q.JobIDConfig.createJobRef(q.client), | |||
Configuration: config, | |||
}, nil | |||
} | |||
// Read submits a query for execution and returns the results via a RowIterator. | |||
// It is a shorthand for Query.Run followed by Job.Read. | |||
func (q *Query) Read(ctx context.Context) (*RowIterator, error) { | |||
job, err := q.Run(ctx) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return job.Read(ctx) | |||
} |
@@ -1,408 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
func defaultQueryJob() *bq.Job { | |||
pfalse := false | |||
return &bq.Job{ | |||
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, | |||
Configuration: &bq.JobConfiguration{ | |||
Query: &bq.JobConfigurationQuery{ | |||
DestinationTable: &bq.TableReference{ | |||
ProjectId: "client-project-id", | |||
DatasetId: "dataset-id", | |||
TableId: "table-id", | |||
}, | |||
Query: "query string", | |||
DefaultDataset: &bq.DatasetReference{ | |||
ProjectId: "def-project-id", | |||
DatasetId: "def-dataset-id", | |||
}, | |||
UseLegacySql: &pfalse, | |||
}, | |||
}, | |||
} | |||
} | |||
// defaultQuery is the baseline QueryConfig shared by most TestQuery cases;
// it corresponds to the job returned by defaultQueryJob.
var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}
func TestQuery(t *testing.T) { | |||
defer fixRandomID("RANDOM")() | |||
c := &Client{ | |||
projectID: "client-project-id", | |||
} | |||
testCases := []struct { | |||
dst *Table | |||
src *QueryConfig | |||
jobIDConfig JobIDConfig | |||
want *bq.Job | |||
}{ | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: defaultQuery, | |||
want: defaultQueryJob(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
Labels: map[string]string{"a": "b"}, | |||
DryRun: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Labels = map[string]string{"a": "b"} | |||
j.Configuration.DryRun = true | |||
j.Configuration.Query.DefaultDataset = nil | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true}, | |||
src: &QueryConfig{Q: "query string"}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.DefaultDataset = nil | |||
j.JobReference.JobId = "jobID-RANDOM" | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: &Table{}, | |||
src: defaultQuery, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.DestinationTable = nil | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
TableDefinitions: map[string]ExternalData{ | |||
"atable": func() *GCSReference { | |||
g := NewGCSReference("uri") | |||
g.AllowJaggedRows = true | |||
g.AllowQuotedNewlines = true | |||
g.Compression = Gzip | |||
g.Encoding = UTF_8 | |||
g.FieldDelimiter = ";" | |||
g.IgnoreUnknownValues = true | |||
g.MaxBadRecords = 1 | |||
g.Quote = "'" | |||
g.SkipLeadingRows = 2 | |||
g.Schema = Schema{{Name: "name", Type: StringFieldType}} | |||
return g | |||
}(), | |||
}, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.DefaultDataset = nil | |||
td := make(map[string]bq.ExternalDataConfiguration) | |||
quote := "'" | |||
td["atable"] = bq.ExternalDataConfiguration{ | |||
Compression: "GZIP", | |||
IgnoreUnknownValues: true, | |||
MaxBadRecords: 1, | |||
SourceFormat: "CSV", // must be explicitly set. | |||
SourceUris: []string{"uri"}, | |||
CsvOptions: &bq.CsvOptions{ | |||
AllowJaggedRows: true, | |||
AllowQuotedNewlines: true, | |||
Encoding: "UTF-8", | |||
FieldDelimiter: ";", | |||
SkipLeadingRows: 2, | |||
Quote: "e, | |||
}, | |||
Schema: &bq.TableSchema{ | |||
Fields: []*bq.TableFieldSchema{ | |||
{Name: "name", Type: "STRING"}, | |||
}, | |||
}, | |||
} | |||
j.Configuration.Query.TableDefinitions = td | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: &Table{ | |||
ProjectID: "project-id", | |||
DatasetID: "dataset-id", | |||
TableID: "table-id", | |||
}, | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
CreateDisposition: CreateNever, | |||
WriteDisposition: WriteTruncate, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.DestinationTable.ProjectId = "project-id" | |||
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" | |||
j.Configuration.Query.CreateDisposition = "CREATE_NEVER" | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
DisableQueryCache: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
f := false | |||
j.Configuration.Query.UseQueryCache = &f | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
AllowLargeResults: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.AllowLargeResults = true | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
DisableFlattenedResults: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
f := false | |||
j.Configuration.Query.FlattenResults = &f | |||
j.Configuration.Query.AllowLargeResults = true | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
Priority: QueryPriority("low"), | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
j.Configuration.Query.Priority = "low" | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
MaxBillingTier: 3, | |||
MaxBytesBilled: 5, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
tier := int64(3) | |||
j.Configuration.Query.MaximumBillingTier = &tier | |||
j.Configuration.Query.MaximumBytesBilled = 5 | |||
return j | |||
}(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
UseStandardSQL: true, | |||
}, | |||
want: defaultQueryJob(), | |||
}, | |||
{ | |||
dst: c.Dataset("dataset-id").Table("table-id"), | |||
src: &QueryConfig{ | |||
Q: "query string", | |||
DefaultProjectID: "def-project-id", | |||
DefaultDatasetID: "def-dataset-id", | |||
UseLegacySQL: true, | |||
}, | |||
want: func() *bq.Job { | |||
j := defaultQueryJob() | |||
ptrue := true | |||
j.Configuration.Query.UseLegacySql = &ptrue | |||
j.Configuration.Query.ForceSendFields = nil | |||
return j | |||
}(), | |||
}, | |||
} | |||
for i, tc := range testCases { | |||
query := c.Query("") | |||
query.JobIDConfig = tc.jobIDConfig | |||
query.QueryConfig = *tc.src | |||
query.Dst = tc.dst | |||
got, err := query.newJob() | |||
if err != nil { | |||
t.Errorf("#%d: err calling query: %v", i, err) | |||
continue | |||
} | |||
checkJob(t, i, got, tc.want) | |||
// Round-trip. | |||
jc, err := bqToJobConfig(got.Configuration, c) | |||
if err != nil { | |||
t.Fatalf("#%d: %v", i, err) | |||
} | |||
wantConfig := query.QueryConfig | |||
// We set AllowLargeResults to true when DisableFlattenedResults is true. | |||
if wantConfig.DisableFlattenedResults { | |||
wantConfig.AllowLargeResults = true | |||
} | |||
// A QueryConfig with neither UseXXXSQL field set is equivalent | |||
// to one where UseStandardSQL = true. | |||
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL { | |||
wantConfig.UseStandardSQL = true | |||
} | |||
// Treat nil and empty tables the same, and ignore the client. | |||
tableEqual := func(t1, t2 *Table) bool { | |||
if t1 == nil { | |||
t1 = &Table{} | |||
} | |||
if t2 == nil { | |||
t2 = &Table{} | |||
} | |||
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID | |||
} | |||
// A table definition that is a GCSReference round-trips as an ExternalDataConfig. | |||
// TODO(jba): see if there is a way to express this with a transformer. | |||
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig { | |||
q := g.toBQ() | |||
e, _ := bqToExternalDataConfig(&q) | |||
return e | |||
} | |||
externalDataEqual := func(e1, e2 ExternalData) bool { | |||
if r, ok := e1.(*GCSReference); ok { | |||
e1 = gcsRefToEDC(r) | |||
} | |||
if r, ok := e2.(*GCSReference); ok { | |||
e2 = gcsRefToEDC(r) | |||
} | |||
return cmp.Equal(e1, e2) | |||
} | |||
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig, | |||
cmp.Comparer(tableEqual), | |||
cmp.Comparer(externalDataEqual), | |||
) | |||
if diff != "" { | |||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||
} | |||
} | |||
} | |||
// TestConfiguringQuery verifies that fields set directly on a Query
// (job ID, default dataset, partitioning, clustering, encryption, and
// schema-update options) are carried into the job proto built by newJob.
func TestConfiguringQuery(t *testing.T) {
	c := &Client{
		projectID: "project-id",
	}
	query := c.Query("q")
	query.JobID = "ajob"
	query.DefaultProjectID = "def-project-id"
	query.DefaultDatasetID = "def-dataset-id"
	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
	query.Clustering = &Clustering{
		Fields: []string{"cfield1"},
	}
	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
	query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}

	// Note: Other configuration fields are tested in other tests above.
	// A lot of that can be consolidated once Client.Copy is gone.

	pfalse := false
	want := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				Query: "q",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
				UseLegacySql:                       &pfalse,
				TimePartitioning:                   &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
				Clustering:                         &bq.Clustering{Fields: []string{"cfield1"}},
				DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
				SchemaUpdateOptions:                []string{"ALLOW_FIELD_ADDITION"},
			},
		},
		JobReference: &bq.JobReference{
			JobId:     "ajob",
			ProjectId: "project-id",
		},
	}
	got, err := query.newJob()
	if err != nil {
		t.Fatalf("err calling Query.newJob: %v", err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("querying: -got +want:\n%s", diff)
	}
}
func TestQueryLegacySQL(t *testing.T) { | |||
c := &Client{projectID: "project-id"} | |||
q := c.Query("q") | |||
q.UseStandardSQL = true | |||
q.UseLegacySQL = true | |||
_, err := q.newJob() | |||
if err == nil { | |||
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error") | |||
} | |||
q = c.Query("q") | |||
q.Parameters = []QueryParameter{{Name: "p", Value: 3}} | |||
q.UseLegacySQL = true | |||
_, err = q.newJob() | |||
if err == nil { | |||
t.Error("Parameters and UseLegacySQL: got nil, want error") | |||
} | |||
} |
@@ -1,56 +0,0 @@ | |||
// Copyright 2018 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"math/rand" | |||
"os" | |||
"sync" | |||
"time" | |||
) | |||
// Support for random values (typically job IDs and insert IDs).

// alphanum is the character set from which random IDs are drawn.
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var (
	// rngMu guards rng.
	rngMu sync.Mutex
	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)

// For testing.
var randomIDFn = randomID

// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27
func randomID() string { | |||
// This is used for both job IDs and insert IDs. | |||
var b [randomIDLen]byte | |||
rngMu.Lock() | |||
for i := 0; i < len(b); i++ { | |||
b[i] = alphanum[rng.Intn(len(alphanum))] | |||
} | |||
rngMu.Unlock() | |||
return string(b[:]) | |||
} | |||
// Seed seeds this package's random number generator, used for generating job and | |||
// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery | |||
// clients. Seed should be called before any clients are created. | |||
func Seed(s int64) { | |||
rng = rand.New(rand.NewSource(s)) | |||
} |
@@ -1,233 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"testing" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
bq "google.golang.org/api/bigquery/v2" | |||
"google.golang.org/api/iterator" | |||
) | |||
// pageFetcherArgs records the arguments of one call to a page fetcher,
// so tests can assert on how the iterator invoked it.
type pageFetcherArgs struct {
	table      *Table
	schema     Schema
	startIndex uint64
	pageSize   int64
	pageToken  string
}

// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	calls []pageFetcherArgs
}
func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) { | |||
s.calls = append(s.calls, | |||
pageFetcherArgs{t, schema, startIndex, pageSize, pageToken}) | |||
result := &fetchPageResult{ | |||
pageToken: s.pageTokens[pageToken], | |||
rows: s.values[0], | |||
} | |||
s.values = s.values[1:] | |||
return result, nil | |||
} | |||
func waitForQueryStub(context.Context, string) (Schema, uint64, error) { | |||
return nil, 1, nil | |||
} | |||
// TestRead drives row iteration through both entry points — reading a table
// directly and reading a query job's destination table — against the paging
// stub, and checks that multi-page and single-page token sequences both yield
// the expected rows.
func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
			},
		},
	}

	// Each readFunc produces a RowIterator over the same stub; the cases below
	// are re-run for both the table path and the query-job path.
	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
		},
		func() *RowIterator {
			it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
			if err != nil {
				t.Fatal(err)
			}
			return it
		},
	} {
		testCases := []struct {
			data       [][][]Value
			pageTokens map[string]string
			want       [][]Value
		}{
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": "a", "a": ""},
				want:       [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
			},
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": ""}, // no more pages after first one.
				want:       [][]Value{{1, 2}, {11, 12}},
			},
		}
		for _, tc := range testCases {
			pf.values = tc.data
			pf.pageTokens = tc.pageTokens
			if got, ok := collectValues(t, readFunc()); ok {
				if !testutil.Equal(got, tc.want) {
					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
				}
			}
		}
	}
}
func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { | |||
var got [][]Value | |||
for { | |||
var vals []Value | |||
err := it.Next(&vals) | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
t.Errorf("err calling Next: %v", err) | |||
return nil, false | |||
} | |||
got = append(got, vals) | |||
} | |||
return got, true | |||
} | |||
// TestNoMoreValues checks that an iterator over a single two-row page yields
// exactly two rows and then reports iterator.Done.
func TestNoMoreValues(t *testing.T) {
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}, {11, 12}}},
	}
	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != iterator.Done {
		t.Fatalf("Next: got: %v: want: iterator.Done", err)
	}
}
// errBang is the sentinel error returned by errorFetchPage.
var errBang = errors.New("bang")

// errorFetchPage is a pageFetcher that always fails with errBang.
func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
	return nil, errBang
}
func TestReadError(t *testing.T) { | |||
// test that service read errors are propagated back to the caller. | |||
c := &Client{projectID: "project-id"} | |||
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage) | |||
var vals []Value | |||
if err := it.Next(&vals); err != errBang { | |||
t.Fatalf("Get: got: %v: want: %v", err, errBang) | |||
} | |||
} | |||
// TestReadTabledataOptions checks that the iterator's page-size setting is
// forwarded to the page fetcher when reading a table directly.
func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
	s := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	c := &Client{projectID: "project-id"}
	tr := c.Dataset("dataset-id").Table("table-id")
	it := tr.read(context.Background(), s.fetchPage)
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
	// The stub must have been called exactly once, with the configured page size.
	want := []pageFetcherArgs{{
		table:     tr,
		pageSize:  5,
		pageToken: "",
	}}
	if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
		t.Errorf("reading (got=-, want=+):\n%s", diff)
	}
}
// TestReadQueryOptions checks that the iterator's page-size setting is
// forwarded to the page fetcher when reading a query job's destination table.
func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	tr := &bq.TableReference{
		ProjectId: "project-id",
		DatasetId: "dataset-id",
		TableId:   "table-id",
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{DestinationTable: tr},
		},
	}
	it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	// The stub must have been called exactly once, with the configured page size.
	want := []pageFetcherArgs{{
		table:     bqToTable(tr, c),
		pageSize:  5,
		pageToken: "",
	}}
	if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
	}
}
@@ -1,518 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"encoding/json" | |||
"errors" | |||
"fmt" | |||
"reflect" | |||
"sync" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

// FieldSchema describes a single field.
// A field whose Repeated and Required are both false is sent to the service
// with an empty mode, which the service treats as NULLABLE.
type FieldSchema struct {
	// The field name.
	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
	// and must start with a letter or underscore.
	// The maximum length is 128 characters.
	Name string

	// A description of the field. The maximum length is 16,384 characters.
	Description string

	// Whether the field may contain multiple values.
	Repeated bool
	// Whether the field is required. Ignored if Repeated is true.
	Required bool

	// The field data type. If Type is Record, then this field contains a nested schema,
	// which is described by Schema.
	Type FieldType
	// Describes the nested schema if Type is set to Record.
	Schema Schema
}
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema { | |||
tfs := &bq.TableFieldSchema{ | |||
Description: fs.Description, | |||
Name: fs.Name, | |||
Type: string(fs.Type), | |||
} | |||
if fs.Repeated { | |||
tfs.Mode = "REPEATED" | |||
} else if fs.Required { | |||
tfs.Mode = "REQUIRED" | |||
} // else leave as default, which is interpreted as NULLABLE. | |||
for _, f := range fs.Schema { | |||
tfs.Fields = append(tfs.Fields, f.toBQ()) | |||
} | |||
return tfs | |||
} | |||
func (s Schema) toBQ() *bq.TableSchema { | |||
var fields []*bq.TableFieldSchema | |||
for _, f := range s { | |||
fields = append(fields, f.toBQ()) | |||
} | |||
return &bq.TableSchema{Fields: fields} | |||
} | |||
func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { | |||
fs := &FieldSchema{ | |||
Description: tfs.Description, | |||
Name: tfs.Name, | |||
Repeated: tfs.Mode == "REPEATED", | |||
Required: tfs.Mode == "REQUIRED", | |||
Type: FieldType(tfs.Type), | |||
} | |||
for _, f := range tfs.Fields { | |||
fs.Schema = append(fs.Schema, bqToFieldSchema(f)) | |||
} | |||
return fs | |||
} | |||
func bqToSchema(ts *bq.TableSchema) Schema { | |||
if ts == nil { | |||
return nil | |||
} | |||
var s Schema | |||
for _, f := range ts.Fields { | |||
s = append(s, bqToFieldSchema(f)) | |||
} | |||
return s | |||
} | |||
// FieldType is the type of field.
type FieldType string

const (
	// StringFieldType is a string field type.
	StringFieldType FieldType = "STRING"
	// BytesFieldType is a bytes field type.
	BytesFieldType FieldType = "BYTES"
	// IntegerFieldType is an integer field type.
	IntegerFieldType FieldType = "INTEGER"
	// FloatFieldType is a float field type.
	FloatFieldType FieldType = "FLOAT"
	// BooleanFieldType is a boolean field type.
	BooleanFieldType FieldType = "BOOLEAN"
	// TimestampFieldType is a timestamp field type.
	TimestampFieldType FieldType = "TIMESTAMP"
	// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
	RecordFieldType FieldType = "RECORD"
	// DateFieldType is a date field type.
	DateFieldType FieldType = "DATE"
	// TimeFieldType is a time field type.
	TimeFieldType FieldType = "TIME"
	// DateTimeFieldType is a datetime field type.
	DateTimeFieldType FieldType = "DATETIME"
	// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
	// NUMERIC data type.
	NumericFieldType FieldType = "NUMERIC"
	// GeographyFieldType is a geography field type. Geography types represent a set of points
	// on the Earth's surface, represented in Well Known Text (WKT) format.
	GeographyFieldType FieldType = "GEOGRAPHY"
)
var (
	// errEmptyJSONSchema is returned by SchemaFromJSON when given empty input.
	errEmptyJSONSchema = errors.New("bigquery: empty JSON schema")

	// fieldTypes is the set of FieldType values accepted when parsing a JSON
	// schema definition; see convertSchemaFromJSON.
	fieldTypes = map[FieldType]bool{
		StringFieldType:    true,
		BytesFieldType:     true,
		IntegerFieldType:   true,
		FloatFieldType:     true,
		BooleanFieldType:   true,
		TimestampFieldType: true,
		RecordFieldType:    true,
		DateFieldType:      true,
		TimeFieldType:      true,
		DateTimeFieldType:  true,
		NumericFieldType:   true,
		GeographyFieldType: true,
	}
)
var typeOfByteSlice = reflect.TypeOf([]byte{}) | |||
// InferSchema tries to derive a BigQuery schema from the supplied struct value. | |||
// Each exported struct field is mapped to a field in the schema. | |||
// | |||
// The following BigQuery types are inferred from the corresponding Go types. | |||
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred | |||
// from these types are marked required (non-nullable). | |||
// | |||
// STRING string | |||
// BOOL bool | |||
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32 | |||
// FLOAT float32, float64 | |||
// BYTES []byte | |||
// TIMESTAMP time.Time | |||
// DATE civil.Date | |||
// TIME civil.Time | |||
// DATETIME civil.DateTime | |||
// NUMERIC *big.Rat | |||
// | |||
// The big.Rat type supports numbers of arbitrary size and precision. Values | |||
// will be rounded to 9 digits after the decimal point before being transmitted | |||
// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type | |||
// for more on NUMERIC. | |||
// | |||
// A Go slice or array type is inferred to be a BigQuery repeated field of the | |||
// element type. The element type must be one of the above listed types. | |||
// | |||
// Due to lack of unique native Go type for GEOGRAPHY, there is no schema | |||
// inference to GEOGRAPHY at this time. | |||
// | |||
// Nullable fields are inferred from the NullXXX types, declared in this package: | |||
// | |||
// STRING NullString | |||
// BOOL NullBool | |||
// INTEGER NullInt64 | |||
// FLOAT NullFloat64 | |||
// TIMESTAMP NullTimestamp | |||
// DATE NullDate | |||
// TIME NullTime | |||
// DATETIME NullDateTime | |||
// GEOGRAPHY NullGeography | |||
// | |||
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below). | |||
// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable". | |||
// | |||
// A struct field that is of struct type is inferred to be a required field of type | |||
// RECORD with a schema inferred recursively. For backwards compatibility, a field of | |||
// type pointer to struct is also inferred to be required. To get a nullable RECORD | |||
// field, use the "nullable" tag (see below). | |||
// | |||
// InferSchema returns an error if any of the examined fields is of type uint, | |||
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future | |||
// versions may handle these cases without error. | |||
// | |||
// Recursively defined structs are also disallowed. | |||
// | |||
// Struct fields may be tagged in a way similar to the encoding/json package. | |||
// A tag of the form | |||
// bigquery:"name" | |||
// uses "name" instead of the struct field name as the BigQuery field name. | |||
// A tag of the form | |||
// bigquery:"-" | |||
// omits the field from the inferred schema. | |||
// The "nullable" option marks the field as nullable (not required). It is only | |||
// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other | |||
// fields. In this example, the Go name of the field is retained: | |||
// bigquery:",nullable" | |||
func InferSchema(st interface{}) (Schema, error) {
	// Delegate through the per-type cache; actual inference happens in
	// inferSchemaReflect.
	return inferSchemaReflectCached(reflect.TypeOf(st))
}
// schemaCache memoizes schema inference results, keyed by reflect.Type.
// Values are of type cacheVal.
var schemaCache sync.Map

// cacheVal holds one memoized inference result: the schema or the error.
type cacheVal struct {
	schema Schema
	err    error
}
func inferSchemaReflectCached(t reflect.Type) (Schema, error) { | |||
var cv cacheVal | |||
v, ok := schemaCache.Load(t) | |||
if ok { | |||
cv = v.(cacheVal) | |||
} else { | |||
s, err := inferSchemaReflect(t) | |||
cv = cacheVal{s, err} | |||
schemaCache.Store(t, cv) | |||
} | |||
return cv.schema, cv.err | |||
} | |||
func inferSchemaReflect(t reflect.Type) (Schema, error) { | |||
rec, err := hasRecursiveType(t, nil) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if rec { | |||
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) | |||
} | |||
return inferStruct(t) | |||
} | |||
func inferStruct(t reflect.Type) (Schema, error) { | |||
switch t.Kind() { | |||
case reflect.Ptr: | |||
if t.Elem().Kind() != reflect.Struct { | |||
return nil, noStructError{t} | |||
} | |||
t = t.Elem() | |||
fallthrough | |||
case reflect.Struct: | |||
return inferFields(t) | |||
default: | |||
return nil, noStructError{t} | |||
} | |||
} | |||
// inferFieldSchema infers the FieldSchema for a Go type.
// fieldName is used only for error messages; nullable marks the field as
// tagged "nullable". The order of checks matters: exact type matches come
// before NullXXX detection, which comes before kind-based matches.
func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
	// Only []byte and struct pointers can be tagged nullable.
	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
		return nil, badNullableError{fieldName, rt}
	}
	// Exact-type matches (time, civil date/time, *big.Rat, []byte).
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
	case typeOfRat:
		return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
	}
	// NullXXX wrapper types map to a non-required field of their type.
	if ft := nullableFieldType(rt); ft != "" {
		return &FieldSchema{Required: false, Type: ft}, nil
	}
	if isSupportedIntType(rt) || isSupportedUintType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:
		et := rt.Elem()
		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
			// Multi dimensional slices/arrays are not supported by BigQuery
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		if nullableFieldType(et) != "" {
			// Repeated nullable types are not supported by BigQuery.
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		// Infer the element's schema, then mark the field REPEATED.
		f, err := inferFieldSchema(fieldName, et, false)
		if err != nil {
			return nil, err
		}
		f.Repeated = true
		f.Required = false
		return f, nil
	case reflect.Ptr:
		if rt.Elem().Kind() != reflect.Struct {
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		fallthrough
	case reflect.Struct:
		// Structs and pointers-to-struct become RECORD fields with a
		// recursively inferred nested schema.
		nested, err := inferStruct(rt)
		if err != nil {
			return nil, err
		}
		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
	default:
		return nil, unsupportedFieldTypeError{fieldName, rt}
	}
}
// inferFields extracts all exported field types from struct type. | |||
func inferFields(rt reflect.Type) (Schema, error) { | |||
var s Schema | |||
fields, err := fieldCache.Fields(rt) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, field := range fields { | |||
var nullable bool | |||
for _, opt := range field.ParsedTag.([]string) { | |||
if opt == nullableTagOption { | |||
nullable = true | |||
break | |||
} | |||
} | |||
f, err := inferFieldSchema(field.Name, field.Type, nullable) | |||
if err != nil { | |||
return nil, err | |||
} | |||
f.Name = field.Name | |||
s = append(s, f) | |||
} | |||
return s, nil | |||
} | |||
// isSupportedIntType reports whether t is an int type that can be properly | |||
// represented by the BigQuery INTEGER/INT64 type. | |||
func isSupportedIntType(t reflect.Type) bool { | |||
switch t.Kind() { | |||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: | |||
return true | |||
default: | |||
return false | |||
} | |||
} | |||
// isSupportedIntType reports whether t is a uint type that can be properly | |||
// represented by the BigQuery INTEGER/INT64 type. | |||
func isSupportedUintType(t reflect.Type) bool { | |||
switch t.Kind() { | |||
case reflect.Uint8, reflect.Uint16, reflect.Uint32: | |||
return true | |||
default: | |||
return false | |||
} | |||
} | |||
// typeList is a linked list of reflect.Types. | |||
type typeList struct { | |||
t reflect.Type | |||
next *typeList | |||
} | |||
func (l *typeList) has(t reflect.Type) bool { | |||
for l != nil { | |||
if l.t == t { | |||
return true | |||
} | |||
l = l.next | |||
} | |||
return false | |||
} | |||
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
// seen is the chain of struct types on the current descent path.
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
	// Unwrap pointers, slices and arrays: a cycle can only close through a
	// struct type.
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return false, nil
	}
	// Encountering a struct type already on the path means a cycle.
	if seen.has(t) {
		return true, nil
	}
	fields, err := fieldCache.Fields(t)
	if err != nil {
		return false, err
	}
	seen = &typeList{t, seen}
	// Because seen is a linked list, additions to it from one field's
	// recursive call will not affect the value for subsequent fields' calls.
	for _, field := range fields {
		ok, err := hasRecursiveType(field.Type, seen)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}
// bigQueryJSONField is an individual field in a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema).
type bigQueryJSONField struct {
	Description string              `json:"description"`
	Fields      []bigQueryJSONField `json:"fields"` // nested fields for RECORD types
	Mode        string              `json:"mode"`   // e.g. "REQUIRED", "REPEATED"
	Name        string              `json:"name"`
	Type        string              `json:"type"` // must match a FieldType value
}
// convertSchemaFromJSON generates a Schema: | |||
func convertSchemaFromJSON(fs []bigQueryJSONField) (Schema, error) { | |||
convertedSchema := Schema{} | |||
for _, f := range fs { | |||
convertedFieldSchema := &FieldSchema{ | |||
Description: f.Description, | |||
Name: f.Name, | |||
Required: f.Mode == "REQUIRED", | |||
Repeated: f.Mode == "REPEATED", | |||
} | |||
if len(f.Fields) > 0 { | |||
convertedNestedFieldSchema, err := convertSchemaFromJSON(f.Fields) | |||
if err != nil { | |||
return nil, err | |||
} | |||
convertedFieldSchema.Schema = convertedNestedFieldSchema | |||
} | |||
// Check that the field-type (string) maps to a known FieldType: | |||
if _, ok := fieldTypes[FieldType(f.Type)]; !ok { | |||
return nil, fmt.Errorf("unknown field type (%v)", f.Type) | |||
} | |||
convertedFieldSchema.Type = FieldType(f.Type) | |||
convertedSchema = append(convertedSchema, convertedFieldSchema) | |||
} | |||
return convertedSchema, nil | |||
} | |||
// SchemaFromJSON takes a JSON BigQuery table schema definition | |||
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema) | |||
// and returns a fully-populated Schema. | |||
func SchemaFromJSON(schemaJSON []byte) (Schema, error) { | |||
var bigQuerySchema []bigQueryJSONField | |||
// Make sure we actually have some content: | |||
if len(schemaJSON) == 0 { | |||
return nil, errEmptyJSONSchema | |||
} | |||
if err := json.Unmarshal(schemaJSON, &bigQuerySchema); err != nil { | |||
return nil, err | |||
} | |||
return convertSchemaFromJSON(bigQuerySchema) | |||
} | |||
type noStructError struct { | |||
typ reflect.Type | |||
} | |||
func (e noStructError) Error() string { | |||
return fmt.Sprintf("bigquery: can only infer schema from struct or pointer to struct, not %s", e.typ) | |||
} | |||
type badNullableError struct { | |||
name string | |||
typ reflect.Type | |||
} | |||
func (e badNullableError) Error() string { | |||
return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ) | |||
} | |||
type unsupportedFieldTypeError struct { | |||
name string | |||
typ reflect.Type | |||
} | |||
func (e unsupportedFieldTypeError) Error() string { | |||
return fmt.Sprintf("bigquery: field %q: type %s is not supported", e.name, e.typ) | |||
} |
@@ -1,255 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package storage | |||
import ( | |||
"context" | |||
"fmt" | |||
"time" | |||
gax "github.com/googleapis/gax-go/v2" | |||
"google.golang.org/api/option" | |||
"google.golang.org/api/transport" | |||
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
// Each field corresponds to the identically named RPC method on the client.
type BigQueryStorageCallOptions struct {
	CreateReadSession             []gax.CallOption
	ReadRows                      []gax.CallOption
	BatchCreateReadSessionStreams []gax.CallOption
	FinalizeStream                []gax.CallOption
	SplitReadStream               []gax.CallOption
}
// defaultBigQueryStorageClientOptions returns the base client options:
// the service endpoint and the default OAuth scopes.
func defaultBigQueryStorageClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerystorage.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultBigQueryStorageCallOptions returns the default retry policy for
// every client method: retry on DeadlineExceeded and Unavailable with
// exponential backoff (100ms initial, 60s cap, 1.3x multiplier).
func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	// All five methods share the same ("default", "idempotent") policy.
	return &BigQueryStorageCallOptions{
		CreateReadSession:             retry[[2]string{"default", "idempotent"}],
		ReadRows:                      retry[[2]string{"default", "idempotent"}],
		BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
		FinalizeStream:                retry[[2]string{"default", "idempotent"}],
		SplitReadStream:               retry[[2]string{"default", "idempotent"}],
	}
}
// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	bigQueryStorageClient storagepb.BigQueryStorageClient

	// The call options for this service.
	CallOptions *BigQueryStorageCallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated by setGoogleClientInfo.
	xGoogMetadata metadata.MD
}
// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
	// Caller-supplied options come after the defaults so they take precedence.
	conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &BigQueryStorageClient{
		conn:        conn,
		CallOptions: defaultBigQueryStorageCallOptions(),

		bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The same *grpc.ClientConn underlies all RPCs made through this client.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. It closes the underlying gRPC connection.
func (c *BigQueryStorageClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
	// Record the Go, gapic, gax and grpc versions along with any
	// caller-supplied key/value pairs.
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
	// Set the x-goog-request-params routing header from the request's
	// table reference fields.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// The full-slice expression forces append to copy, so per-call opts do
	// not mutate the client's default CallOptions.
	opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
	var resp *storagepb.ReadSession
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
	// Set the x-goog-request-params routing header from the stream name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression: copy-on-append keeps the defaults immutable.
	opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
	var resp storagepb.BigQueryStorage_ReadRowsClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
	// Set the x-goog-request-params routing header from the session name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression: copy-on-append keeps the defaults immutable.
	opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
	var resp *storagepb.BatchCreateReadSessionStreamsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
	// Set the x-goog-request-params routing header from the stream name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression: copy-on-append keeps the defaults immutable.
	opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows return by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
	// Set the x-goog-request-params routing header from the original stream name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression: copy-on-append keeps the defaults immutable.
	opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
	var resp *storagepb.SplitReadStreamResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
@@ -1,132 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package storage_test | |||
import ( | |||
"context" | |||
"io" | |||
storage "cloud.google.com/go/bigquery/storage/apiv1beta1" | |||
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1" | |||
) | |||
// ExampleNewBigQueryStorageClient demonstrates constructing a client
// with default options.
func ExampleNewBigQueryStorageClient() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleBigQueryStorageClient_CreateReadSession demonstrates a unary
// CreateReadSession call.
func ExampleBigQueryStorageClient_CreateReadSession() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.CreateReadSessionRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateReadSession(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleBigQueryStorageClient_ReadRows demonstrates consuming the
// server-streaming ReadRows call until io.EOF.
func ExampleBigQueryStorageClient_ReadRows() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.ReadRowsRequest{
		// TODO: Fill request struct fields.
	}
	stream, err := c.ReadRows(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// End of stream: all assigned rows have been received.
			break
		}
		if err != nil {
			// TODO: handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleBigQueryStorageClient_BatchCreateReadSessionStreams demonstrates
// adding streams to an existing read session.
func ExampleBigQueryStorageClient_BatchCreateReadSessionStreams() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.BatchCreateReadSessionStreamsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.BatchCreateReadSessionStreams(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleBigQueryStorageClient_FinalizeStream demonstrates gracefully
// finalizing a single stream.
func ExampleBigQueryStorageClient_FinalizeStream() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.FinalizeStreamRequest{
		// TODO: Fill request struct fields.
	}
	err = c.FinalizeStream(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
func ExampleBigQueryStorageClient_SplitReadStream() { | |||
ctx := context.Background() | |||
c, err := storage.NewBigQueryStorageClient(ctx) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
req := &storagepb.SplitReadStreamRequest{ | |||
// TODO: Fill request struct fields. | |||
} | |||
resp, err := c.SplitReadStream(ctx, req) | |||
if err != nil { | |||
// TODO: Handle error. | |||
} | |||
// TODO: Use resp. | |||
_ = resp | |||
} |
@@ -1,89 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
// Package storage is an auto-generated package for the | |||
// BigQuery Storage API. | |||
// | |||
// NOTE: This package is in beta. It is not stable, and may be subject to changes. | |||
// | |||
package storage // import "cloud.google.com/go/bigquery/storage/apiv1beta1" | |||
import ( | |||
"context" | |||
"runtime" | |||
"strings" | |||
"unicode" | |||
"google.golang.org/grpc/metadata" | |||
) | |||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { | |||
out, _ := metadata.FromOutgoingContext(ctx) | |||
out = out.Copy() | |||
for _, md := range mds { | |||
for k, v := range md { | |||
out[k] = append(out[k], v...) | |||
} | |||
} | |||
return metadata.NewOutgoingContext(ctx, out) | |||
} | |||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := []string{
		"https://www.googleapis.com/auth/bigquery",
		"https://www.googleapis.com/auth/cloud-platform",
	}
	return scopes
}
// versionGo returns the Go runtime version. The returned string | |||
// has no whitespace, suitable for reporting in header. | |||
func versionGo() string { | |||
const develPrefix = "devel +" | |||
s := runtime.Version() | |||
if strings.HasPrefix(s, develPrefix) { | |||
s = s[len(develPrefix):] | |||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { | |||
s = s[:p] | |||
} | |||
return s | |||
} | |||
notSemverRune := func(r rune) bool { | |||
return strings.IndexRune("0123456789.", r) < 0 | |||
} | |||
if strings.HasPrefix(s, "go1") { | |||
s = s[2:] | |||
var prerelease string | |||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 { | |||
s, prerelease = s[:p], s[p:] | |||
} | |||
if strings.HasSuffix(s, ".") { | |||
s += "0" | |||
} else if strings.Count(s, ".") < 2 { | |||
s += ".0" | |||
} | |||
if prerelease != "" { | |||
s += "-" + prerelease | |||
} | |||
return s | |||
} | |||
return "UNKNOWN" | |||
} | |||
const versionClient = "20190306" |
@@ -1,452 +0,0 @@ | |||
// Copyright 2019 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// https://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Code generated by gapic-generator. DO NOT EDIT. | |||
package storage | |||
import ( | |||
emptypb "github.com/golang/protobuf/ptypes/empty" | |||
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1" | |||
) | |||
import ( | |||
"context" | |||
"flag" | |||
"fmt" | |||
"io" | |||
"log" | |||
"net" | |||
"os" | |||
"strings" | |||
"testing" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/golang/protobuf/ptypes" | |||
"google.golang.org/api/option" | |||
status "google.golang.org/genproto/googleapis/rpc/status" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
gstatus "google.golang.org/grpc/status" | |||
) | |||
var _ = io.EOF | |||
var _ = ptypes.MarshalAny | |||
var _ status.Status | |||
// mockBigQueryStorageServer is an in-process fake of the BigQueryStorage gRPC
// service used by the tests below. It records every request it receives and
// replays canned responses (or a canned error).
type mockBigQueryStorageServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	storagepb.BigQueryStorageServer
	// reqs accumulates every request received, in call order.
	reqs []proto.Message
	// If set, all calls return this error.
	err error
	// responses to return if err == nil
	resps []proto.Message
}
func (s *mockBigQueryStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*storagepb.ReadSession), nil | |||
} | |||
func (s *mockBigQueryStorageServer) ReadRows(req *storagepb.ReadRowsRequest, stream storagepb.BigQueryStorage_ReadRowsServer) error { | |||
md, _ := metadata.FromIncomingContext(stream.Context()) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return s.err | |||
} | |||
for _, v := range s.resps { | |||
if err := stream.Send(v.(*storagepb.ReadRowsResponse)); err != nil { | |||
return err | |||
} | |||
} | |||
return nil | |||
} | |||
func (s *mockBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest) (*storagepb.BatchCreateReadSessionStreamsResponse, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*storagepb.BatchCreateReadSessionStreamsResponse), nil | |||
} | |||
func (s *mockBigQueryStorageServer) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest) (*emptypb.Empty, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*emptypb.Empty), nil | |||
} | |||
func (s *mockBigQueryStorageServer) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) { | |||
md, _ := metadata.FromIncomingContext(ctx) | |||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { | |||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) | |||
} | |||
s.reqs = append(s.reqs, req) | |||
if s.err != nil { | |||
return nil, s.err | |||
} | |||
return s.resps[0].(*storagepb.SplitReadStreamResponse), nil | |||
} | |||
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
	// mockBigQueryStorage is the single shared fake server instance; each
	// test resets its err/reqs/resps fields before use.
	mockBigQueryStorage mockBigQueryStorageServer
)
func TestMain(m *testing.M) { | |||
flag.Parse() | |||
serv := grpc.NewServer() | |||
storagepb.RegisterBigQueryStorageServer(serv, &mockBigQueryStorage) | |||
lis, err := net.Listen("tcp", "localhost:0") | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
go serv.Serve(lis) | |||
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
clientOpt = option.WithGRPCConn(conn) | |||
os.Exit(m.Run()) | |||
} | |||
// TestBigQueryStorageCreateReadSession verifies the success path: the request
// recorded by the mock must equal the one sent, and the canned response must
// round-trip back unchanged.
func TestBigQueryStorageCreateReadSession(t *testing.T) {
	var name string = "name3373707"
	var expectedResponse = &storagepb.ReadSession{
		Name: name,
	}
	// Reset shared mock state and install the canned response.
	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil
	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
	var tableReference *storagepb.TableReference = &storagepb.TableReference{}
	var parent string = "parent-995424086"
	var request = &storagepb.CreateReadSessionRequest{
		TableReference: tableReference,
		Parent:         parent,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateReadSession(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestBigQueryStorageCreateReadSessionError verifies that a gRPC error from
// the server surfaces to the caller with its status code intact.
func TestBigQueryStorageCreateReadSessionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
	var tableReference *storagepb.TableReference = &storagepb.TableReference{}
	var parent string = "parent-995424086"
	var request = &storagepb.CreateReadSessionRequest{
		TableReference: tableReference,
		Parent:         parent,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateReadSession(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestBigQueryStorageReadRows verifies the streaming success path: a single
// canned response is received from the stream and the recorded request
// matches the one sent.
func TestBigQueryStorageReadRows(t *testing.T) {
	var expectedResponse *storagepb.ReadRowsResponse = &storagepb.ReadRowsResponse{}
	// Reset shared mock state and install the canned response.
	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil
	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
	var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
	var request = &storagepb.ReadRowsRequest{
		ReadPosition: readPosition,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	stream, err := c.ReadRows(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestBigQueryStorageReadRowsError verifies that a server error on a stream
// is delivered by Recv with its gRPC status code intact.
func TestBigQueryStorageReadRowsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
	var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
	var request = &storagepb.ReadRowsRequest{
		ReadPosition: readPosition,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	stream, err := c.ReadRows(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := stream.Recv()
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestBigQueryStorageBatchCreateReadSessionStreams verifies the success path
// for BatchCreateReadSessionStreams against the shared mock.
func TestBigQueryStorageBatchCreateReadSessionStreams(t *testing.T) {
	var expectedResponse *storagepb.BatchCreateReadSessionStreamsResponse = &storagepb.BatchCreateReadSessionStreamsResponse{}
	// Reset shared mock state and install the canned response.
	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil
	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
	var session *storagepb.ReadSession = &storagepb.ReadSession{}
	var requestedStreams int32 = 1017221410
	var request = &storagepb.BatchCreateReadSessionStreamsRequest{
		Session:          session,
		RequestedStreams: requestedStreams,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestBigQueryStorageBatchCreateReadSessionStreamsError verifies gRPC error
// propagation for BatchCreateReadSessionStreams.
func TestBigQueryStorageBatchCreateReadSessionStreamsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
	var session *storagepb.ReadSession = &storagepb.ReadSession{}
	var requestedStreams int32 = 1017221410
	var request = &storagepb.BatchCreateReadSessionStreamsRequest{
		Session:          session,
		RequestedStreams: requestedStreams,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestBigQueryStorageFinalizeStream verifies the success path for
// FinalizeStream; the method returns only an error, so only the recorded
// request is compared.
func TestBigQueryStorageFinalizeStream(t *testing.T) {
	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
	// Reset shared mock state and install the canned response.
	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil
	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
	var stream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.FinalizeStreamRequest{
		Stream: stream,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.FinalizeStream(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
}
// TestBigQueryStorageFinalizeStreamError verifies gRPC error propagation for
// FinalizeStream.
func TestBigQueryStorageFinalizeStreamError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
	var stream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.FinalizeStreamRequest{
		Stream: stream,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.FinalizeStream(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
// TestBigQueryStorageSplitReadStream verifies the success path for
// SplitReadStream against the shared mock.
func TestBigQueryStorageSplitReadStream(t *testing.T) {
	var expectedResponse *storagepb.SplitReadStreamResponse = &storagepb.SplitReadStreamResponse{}
	// Reset shared mock state and install the canned response.
	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil
	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
	var originalStream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.SplitReadStreamRequest{
		OriginalStream: originalStream,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.SplitReadStream(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestBigQueryStorageSplitReadStreamError verifies gRPC error propagation for
// SplitReadStream.
func TestBigQueryStorageSplitReadStreamError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
	var originalStream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.SplitReadStreamRequest{
		OriginalStream: originalStream,
	}
	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.SplitReadStream(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
@@ -1,629 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"time" | |||
"cloud.google.com/go/internal/optional" | |||
"cloud.google.com/go/internal/trace" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// A Table is a reference to a BigQuery table.
type Table struct {
	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
	// In this case the result will be stored in an ephemeral table.
	ProjectID string
	DatasetID string
	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
	// The maximum length is 1,024 characters.
	TableID string
	// c is the client through which all API calls for this table are issued.
	c *Client
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
	// The following fields can be set when creating a table.
	// The user-friendly name for the table.
	Name string
	// The user-friendly description of the table.
	Description string
	// The table schema. If provided on create, ViewQuery must be empty.
	Schema Schema
	// The query to use for a view. If provided on create, Schema must be nil.
	ViewQuery string
	// Use Legacy SQL for the view query.
	// At most one of UseLegacySQL and UseStandardSQL can be true.
	UseLegacySQL bool
	// Use Standard SQL for the view query. The default.
	// At most one of UseLegacySQL and UseStandardSQL can be true.
	// Deprecated: use UseLegacySQL.
	UseStandardSQL bool
	// If non-nil, the table is partitioned by time.
	TimePartitioning *TimePartitioning
	// Clustering specifies the data clustering configuration for the table.
	Clustering *Clustering
	// The time when this table expires. If set, this table will expire at the
	// specified time. Expired tables will be deleted and their storage
	// reclaimed. The zero value is ignored.
	ExpirationTime time.Time
	// User-provided labels.
	Labels map[string]string
	// Information about a table stored outside of BigQuery.
	ExternalDataConfig *ExternalDataConfig
	// Custom encryption configuration (e.g., Cloud KMS keys).
	EncryptionConfig *EncryptionConfig
	// All the fields below are read-only.
	FullID string // An opaque ID uniquely identifying the table.
	Type   TableType
	CreationTime     time.Time
	LastModifiedTime time.Time
	// The size of the table in bytes.
	// This does not include data that is being buffered during a streaming insert.
	NumBytes int64
	// The number of bytes in the table considered "long-term storage" for reduced
	// billing purposes. See https://cloud.google.com/bigquery/pricing#long-term-storage
	// for more information.
	NumLongTermBytes int64
	// The number of rows of data in this table.
	// This does not include data that is being buffered during a streaming insert.
	NumRows uint64
	// Contains information regarding this table's streaming buffer, if one is
	// present. This field will be nil if the table is not being streamed to or if
	// there is no data in the streaming buffer.
	StreamingBuffer *StreamingBuffer
	// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
}
// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string
const (
	// CreateIfNeeded will create the table if it does not already exist.
	// Tables are created atomically on successful completion of a job.
	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
	// CreateNever ensures the table must already exist and will not be
	// automatically created.
	CreateNever TableCreateDisposition = "CREATE_NEVER"
)
// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string
const (
	// WriteAppend will append to any existing data in the destination table.
	// Data is appended atomically on successful completion of a job.
	WriteAppend TableWriteDisposition = "WRITE_APPEND"
	// WriteTruncate overrides the existing data in the destination table.
	// Data is overwritten atomically on successful completion of a job.
	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
	// WriteEmpty fails writes if the destination table already contains data.
	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)
// TableType is the type of table.
type TableType string
const (
	// RegularTable is a regular table.
	RegularTable TableType = "TABLE"
	// ViewTable is a table type describing that the table is view. See more
	// information at https://cloud.google.com/bigquery/docs/views.
	ViewTable TableType = "VIEW"
	// ExternalTable is a table type describing that the table is an external
	// table (also known as a federated data source). See more information at
	// https://cloud.google.com/bigquery/external-data-sources.
	ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
	// The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions do not expire.
	Expiration time.Duration
	// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
	// table is partitioned by this field. The field must be a top-level TIMESTAMP or
	// DATE field. Its mode must be NULLABLE or REQUIRED.
	Field string
	// If true, queries that reference this table must include a filter (e.g. a WHERE predicate)
	// that can be used for partition elimination.
	RequirePartitionFilter bool
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning { | |||
if p == nil { | |||
return nil | |||
} | |||
return &bq.TimePartitioning{ | |||
Type: "DAY", | |||
ExpirationMs: int64(p.Expiration / time.Millisecond), | |||
Field: p.Field, | |||
RequirePartitionFilter: p.RequirePartitionFilter, | |||
} | |||
} | |||
func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning { | |||
if q == nil { | |||
return nil | |||
} | |||
return &TimePartitioning{ | |||
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond, | |||
Field: q.Field, | |||
RequirePartitionFilter: q.RequirePartitionFilter, | |||
} | |||
} | |||
// Clustering governs the organization of data within a partitioned table.
// For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
type Clustering struct {
	// Fields lists the columns to cluster by, in precedence order.
	Fields []string
}
func (c *Clustering) toBQ() *bq.Clustering { | |||
if c == nil { | |||
return nil | |||
} | |||
return &bq.Clustering{ | |||
Fields: c.Fields, | |||
} | |||
} | |||
func bqToClustering(q *bq.Clustering) *Clustering { | |||
if q == nil { | |||
return nil | |||
} | |||
return &Clustering{ | |||
Fields: q.Fields, | |||
} | |||
} | |||
// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
	// Describes the Cloud KMS encryption key that will be used to protect
	// destination BigQuery table. The BigQuery Service Account associated with your
	// project requires access to this encryption key.
	KMSKeyName string
}
func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration { | |||
if e == nil { | |||
return nil | |||
} | |||
return &bq.EncryptionConfiguration{ | |||
KmsKeyName: e.KMSKeyName, | |||
} | |||
} | |||
func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig { | |||
if q == nil { | |||
return nil | |||
} | |||
return &EncryptionConfig{ | |||
KMSKeyName: q.KmsKeyName, | |||
} | |||
} | |||
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
	// A lower-bound estimate of the number of bytes currently in the streaming
	// buffer.
	EstimatedBytes uint64
	// A lower-bound estimate of the number of rows currently in the streaming
	// buffer.
	EstimatedRows uint64
	// The time of the oldest entry in the streaming buffer.
	OldestEntryTime time.Time
}
func (t *Table) toBQ() *bq.TableReference { | |||
return &bq.TableReference{ | |||
ProjectId: t.ProjectID, | |||
DatasetId: t.DatasetID, | |||
TableId: t.TableID, | |||
} | |||
} | |||
// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. | |||
func (t *Table) FullyQualifiedName() string { | |||
return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) | |||
} | |||
// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. | |||
func (t *Table) implicitTable() bool { | |||
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" | |||
} | |||
// Create creates a table in the BigQuery service. | |||
// Pass in a TableMetadata value to configure the table. | |||
// If tm.View.Query is non-empty, the created table will be of type VIEW. | |||
// If no ExpirationTime is specified, the table will never expire. | |||
// After table creation, a view can be modified only if its table was initially created | |||
// with a view. | |||
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) { | |||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create") | |||
defer func() { trace.EndSpan(ctx, err) }() | |||
table, err := tm.toBQ() | |||
if err != nil { | |||
return err | |||
} | |||
table.TableReference = &bq.TableReference{ | |||
ProjectId: t.ProjectID, | |||
DatasetId: t.DatasetID, | |||
TableId: t.TableID, | |||
} | |||
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx) | |||
setClientHeader(req.Header()) | |||
_, err = req.Do() | |||
return err | |||
} | |||
// toBQ converts tm into the raw API table representation for a create call.
// It validates mutually exclusive fields (Schema vs. ViewQuery, legacy vs.
// standard SQL) and rejects any read-only field that the caller set.
// A nil receiver yields an empty bq.Table.
func (tm *TableMetadata) toBQ() (*bq.Table, error) {
	t := &bq.Table{}
	if tm == nil {
		return t, nil
	}
	if tm.Schema != nil && tm.ViewQuery != "" {
		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
	}
	t.FriendlyName = tm.Name
	t.Description = tm.Description
	t.Labels = tm.Labels
	if tm.Schema != nil {
		t.Schema = tm.Schema.toBQ()
	}
	if tm.ViewQuery != "" {
		if tm.UseStandardSQL && tm.UseLegacySQL {
			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
		}
		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
		if tm.UseLegacySQL {
			t.View.UseLegacySql = true
		} else {
			// Force-send the false value so the server does not apply its
			// legacy-SQL default.
			t.View.UseLegacySql = false
			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
		}
	} else if tm.UseLegacySQL || tm.UseStandardSQL {
		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
	}
	t.TimePartitioning = tm.TimePartitioning.toBQ()
	t.Clustering = tm.Clustering.toBQ()
	if !validExpiration(tm.ExpirationTime) {
		return nil, fmt.Errorf("invalid expiration time: %v.\n"+
			"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
	}
	if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
		// The API expects expiration as milliseconds since the Unix epoch.
		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
	}
	if tm.ExternalDataConfig != nil {
		edc := tm.ExternalDataConfig.toBQ()
		t.ExternalDataConfiguration = &edc
	}
	t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
	// All fields below are read-only; reject them explicitly so a caller who
	// round-trips metadata notices rather than having values silently dropped.
	if tm.FullID != "" {
		return nil, errors.New("cannot set FullID on create")
	}
	if tm.Type != "" {
		return nil, errors.New("cannot set Type on create")
	}
	if !tm.CreationTime.IsZero() {
		return nil, errors.New("cannot set CreationTime on create")
	}
	if !tm.LastModifiedTime.IsZero() {
		return nil, errors.New("cannot set LastModifiedTime on create")
	}
	if tm.NumBytes != 0 {
		return nil, errors.New("cannot set NumBytes on create")
	}
	if tm.NumLongTermBytes != 0 {
		return nil, errors.New("cannot set NumLongTermBytes on create")
	}
	if tm.NumRows != 0 {
		return nil, errors.New("cannot set NumRows on create")
	}
	if tm.StreamingBuffer != nil {
		return nil, errors.New("cannot set StreamingBuffer on create")
	}
	if tm.ETag != "" {
		return nil, errors.New("cannot set ETag on create")
	}
	return t, nil
}
// Metadata fetches the metadata for the table. | |||
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) { | |||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata") | |||
defer func() { trace.EndSpan(ctx, err) }() | |||
req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) | |||
setClientHeader(req.Header()) | |||
var table *bq.Table | |||
err = runWithRetry(ctx, func() (err error) { | |||
table, err = req.Do() | |||
return err | |||
}) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return bqToTableMetadata(table) | |||
} | |||
// bqToTableMetadata converts a raw service representation (*bq.Table) into
// the client-facing TableMetadata. Millisecond-epoch integers become
// time.Time values, and optional sub-messages (schema, view, streaming
// buffer, external config) are translated only when present.
func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		FullID:           t.Id,
		Labels:           t.Labels,
		NumBytes:         t.NumBytes,
		NumLongTermBytes: t.NumLongTermBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
		ETag:             t.Etag,
		EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
	}
	if t.Schema != nil {
		md.Schema = bqToSchema(t.Schema)
	}
	if t.View != nil {
		md.ViewQuery = t.View.Query
		md.UseLegacySQL = t.View.UseLegacySql
	}
	md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
	md.Clustering = bqToClustering(t.Clustering)
	if t.StreamingBuffer != nil {
		md.StreamingBuffer = &StreamingBuffer{
			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
		}
	}
	if t.ExternalDataConfiguration != nil {
		// External data config conversion can fail (e.g. unknown source
		// format), so it is the only fallible step here.
		edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
		if err != nil {
			return nil, err
		}
		md.ExternalDataConfig = edc
	}
	return md, nil
}
// Delete deletes the table. | |||
func (t *Table) Delete(ctx context.Context) (err error) { | |||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete") | |||
defer func() { trace.EndSpan(ctx, err) }() | |||
req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) | |||
setClientHeader(req.Header()) | |||
return req.Do() | |||
} | |||
// Read fetches the contents of the table.
// The returned RowIterator yields the table's rows one at a time.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return t.read(ctx, fetchPage)
}
// read builds a RowIterator over the table with an explicit pageFetcher,
// allowing the page-fetching strategy to be substituted.
func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
	return newRowIterator(ctx, t, pf)
}
// NeverExpire is a sentinel value used to remove a table's expiration time.
// It is the zero time minus one nanosecond, so it cannot collide with any
// real expiration value.
var NeverExpire = time.Time{}.Add(-1)
// Update modifies specific Table metadata fields.
//
// If etag is non-empty it is sent as an If-Match precondition, so the
// update only succeeds if the table has not changed since that etag was
// read; an empty etag updates unconditionally.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
	defer func() { trace.EndSpan(ctx, err) }()
	bqt, err := tm.toBQ()
	if err != nil {
		return nil, err
	}
	call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
	setClientHeader(call.Header())
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	// Retry the patch RPC on transient failures.
	var res *bq.Table
	if err := runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToTableMetadata(res)
}
// toBQ converts the requested metadata changes into a *bq.Table patch
// request. Fields that are explicitly set — even to a zero value — are
// listed in ForceSendFields so the service receives them; fields being
// cleared are listed in NullFields.
func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
	t := &bq.Table{}
	forceSend := func(field string) {
		t.ForceSendFields = append(t.ForceSendFields, field)
	}
	if tm.Description != nil {
		t.Description = optional.ToString(tm.Description)
		forceSend("Description")
	}
	if tm.Name != nil {
		t.FriendlyName = optional.ToString(tm.Name)
		forceSend("FriendlyName")
	}
	if tm.Schema != nil {
		t.Schema = tm.Schema.toBQ()
		forceSend("Schema")
	}
	if tm.EncryptionConfig != nil {
		t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
	}
	if !validExpiration(tm.ExpirationTime) {
		return nil, fmt.Errorf("invalid expiration time: %v.\n"+
			"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
	}
	if tm.ExpirationTime == NeverExpire {
		// The sentinel clears the expiration: null the field on the service.
		t.NullFields = append(t.NullFields, "ExpirationTime")
	} else if !tm.ExpirationTime.IsZero() {
		// Service representation is milliseconds since the Unix epoch.
		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
		forceSend("ExpirationTime")
	}
	if tm.TimePartitioning != nil {
		t.TimePartitioning = tm.TimePartitioning.toBQ()
		t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
		if tm.TimePartitioning.Expiration == 0 {
			// A zero duration clears the partition expiration.
			t.TimePartitioning.NullFields = []string{"ExpirationMs"}
		}
	}
	if tm.ViewQuery != nil {
		t.View = &bq.ViewDefinition{
			Query:           optional.ToString(tm.ViewQuery),
			ForceSendFields: []string{"Query"},
		}
	}
	if tm.UseLegacySQL != nil {
		if t.View == nil {
			t.View = &bq.ViewDefinition{}
		}
		t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
		t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
	}
	labels, forces, nulls := tm.update()
	t.Labels = labels
	t.ForceSendFields = append(t.ForceSendFields, forces...)
	t.NullFields = append(t.NullFields, nulls...)
	return t, nil
}
// validExpiration ensures a specified time is either the sentinel NeverExpire,
// the zero value, or within the defined range of UnixNano. Internal
// representations of expiration times are based upon Time.UnixNano. Any time
// before 1678 or after 2262 cannot be represented by an int64 and is therefore
// undefined and invalid. See https://godoc.org/time#Time.UnixNano.
func validExpiration(t time.Time) bool {
	return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
}
// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// The user-friendly description of this table.
	Description optional.String

	// The user-friendly name for this table.
	Name optional.String

	// The table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema

	// The table's encryption configuration. When calling Update, ensure that
	// all mutable fields of EncryptionConfig are populated.
	EncryptionConfig *EncryptionConfig

	// The time when this table expires. To remove a table's expiration,
	// set ExpirationTime to NeverExpire. The zero value is ignored.
	ExpirationTime time.Time

	// The query to use for a view.
	ViewQuery optional.String

	// Use Legacy SQL for the view query.
	UseLegacySQL optional.Bool

	// TimePartitioning allows modification of certain aspects of partition
	// configuration such as partition expiration and whether partition
	// filtration is required at query time. When calling Update, ensure
	// that all mutable fields of TimePartitioning are populated.
	TimePartitioning *TimePartitioning

	// labelUpdater contributes SetLabel/DeleteLabel for label changes.
	labelUpdater
}
// labelUpdater contains common code for updating labels.
// It accumulates pending label sets and deletions until update is called.
type labelUpdater struct {
	setLabels    map[string]string // labels to add or change
	deleteLabels map[string]bool   // label names to remove
}
// SetLabel causes a label to be added or modified on a call to Update.
// The map is allocated lazily so the zero labelUpdater is usable.
func (u *labelUpdater) SetLabel(name, value string) {
	if u.setLabels == nil {
		u.setLabels = map[string]string{}
	}
	u.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted on a call to Update.
// The map is allocated lazily so the zero labelUpdater is usable.
func (u *labelUpdater) DeleteLabel(name string) {
	if u.deleteLabels == nil {
		u.deleteLabels = map[string]bool{}
	}
	u.deleteLabels[name] = true
}
func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) { | |||
if u.setLabels == nil && u.deleteLabels == nil { | |||
return nil, nil, nil | |||
} | |||
labels = map[string]string{} | |||
for k, v := range u.setLabels { | |||
labels[k] = v | |||
} | |||
if len(labels) == 0 && len(u.deleteLabels) > 0 { | |||
forces = []string{"Labels"} | |||
} | |||
for l := range u.deleteLabels { | |||
nulls = append(nulls, "Labels."+l) | |||
} | |||
return labels, forces, nulls | |||
} |
@@ -1,372 +0,0 @@ | |||
// Copyright 2017 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
func TestBQToTableMetadata(t *testing.T) { | |||
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) | |||
aTimeMillis := aTime.UnixNano() / 1e6 | |||
for _, test := range []struct { | |||
in *bq.Table | |||
want *TableMetadata | |||
}{ | |||
{&bq.Table{}, &TableMetadata{}}, // test minimal case | |||
{ | |||
&bq.Table{ | |||
CreationTime: aTimeMillis, | |||
Description: "desc", | |||
Etag: "etag", | |||
ExpirationTime: aTimeMillis, | |||
FriendlyName: "fname", | |||
Id: "id", | |||
LastModifiedTime: uint64(aTimeMillis), | |||
Location: "loc", | |||
NumBytes: 123, | |||
NumLongTermBytes: 23, | |||
NumRows: 7, | |||
StreamingBuffer: &bq.Streamingbuffer{ | |||
EstimatedBytes: 11, | |||
EstimatedRows: 3, | |||
OldestEntryTime: uint64(aTimeMillis), | |||
}, | |||
TimePartitioning: &bq.TimePartitioning{ | |||
ExpirationMs: 7890, | |||
Type: "DAY", | |||
Field: "pfield", | |||
}, | |||
Clustering: &bq.Clustering{ | |||
Fields: []string{"cfield1", "cfield2"}, | |||
}, | |||
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, | |||
Type: "EXTERNAL", | |||
View: &bq.ViewDefinition{Query: "view-query"}, | |||
Labels: map[string]string{"a": "b"}, | |||
ExternalDataConfiguration: &bq.ExternalDataConfiguration{ | |||
SourceFormat: "GOOGLE_SHEETS", | |||
}, | |||
}, | |||
&TableMetadata{ | |||
Description: "desc", | |||
Name: "fname", | |||
ViewQuery: "view-query", | |||
FullID: "id", | |||
Type: ExternalTable, | |||
Labels: map[string]string{"a": "b"}, | |||
ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets}, | |||
ExpirationTime: aTime.Truncate(time.Millisecond), | |||
CreationTime: aTime.Truncate(time.Millisecond), | |||
LastModifiedTime: aTime.Truncate(time.Millisecond), | |||
NumBytes: 123, | |||
NumLongTermBytes: 23, | |||
NumRows: 7, | |||
TimePartitioning: &TimePartitioning{ | |||
Expiration: 7890 * time.Millisecond, | |||
Field: "pfield", | |||
}, | |||
Clustering: &Clustering{ | |||
Fields: []string{"cfield1", "cfield2"}, | |||
}, | |||
StreamingBuffer: &StreamingBuffer{ | |||
EstimatedBytes: 11, | |||
EstimatedRows: 3, | |||
OldestEntryTime: aTime, | |||
}, | |||
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, | |||
ETag: "etag", | |||
}, | |||
}, | |||
} { | |||
got, err := bqToTableMetadata(test.in) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
if diff := testutil.Diff(got, test.want); diff != "" { | |||
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff) | |||
} | |||
} | |||
} | |||
// TestTableMetadataToBQ verifies conversion from TableMetadata to the raw
// service representation on table creation, including view/partitioning
// handling, the NeverExpire sentinel, and rejection of read-only or
// out-of-range fields.
func TestTableMetadataToBQ(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	aTimeMillis := aTime.UnixNano() / 1e6
	sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
	for _, test := range []struct {
		in   *TableMetadata
		want *bq.Table
	}{
		{nil, &bq.Table{}},
		{&TableMetadata{}, &bq.Table{}},
		{
			&TableMetadata{
				Name:               "n",
				Description:        "d",
				Schema:             sc,
				ExpirationTime:     aTime,
				Labels:             map[string]string{"a": "b"},
				ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
				EncryptionConfig:   &EncryptionConfig{KMSKeyName: "keyName"},
			},
			&bq.Table{
				FriendlyName: "n",
				Description:  "d",
				Schema: &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
					},
				},
				ExpirationTime:            aTimeMillis,
				Labels:                    map[string]string{"a": "b"},
				ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
				EncryptionConfiguration:   &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
			},
		},
		{
			// A view query alone implies standard SQL, which must be
			// force-sent because false is the zero value.
			&TableMetadata{ViewQuery: "q"},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
			},
		},
		{
			&TableMetadata{
				ViewQuery:        "q",
				UseLegacySQL:     true,
				TimePartitioning: &TimePartitioning{},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:        "q",
					UseLegacySql: true,
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 0,
				},
			},
		},
		{
			&TableMetadata{
				ViewQuery:      "q",
				UseStandardSQL: true,
				TimePartitioning: &TimePartitioning{
					Expiration: time.Second,
					Field:      "ofDreams",
				},
				Clustering: &Clustering{
					Fields: []string{"cfield1"},
				},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 1000,
					Field:        "ofDreams",
				},
				Clustering: &bq.Clustering{
					Fields: []string{"cfield1"},
				},
			},
		},
		{
			// The NeverExpire sentinel maps to an unset expiration.
			&TableMetadata{ExpirationTime: NeverExpire},
			&bq.Table{ExpirationTime: 0},
		},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatalf("%+v: %v", test.in, err)
		}
		if diff := testutil.Diff(got, test.want); diff != "" {
			t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
		}
	}

	// Errors
	for _, in := range []*TableMetadata{
		{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
		{UseLegacySQL: true},         // UseLegacySQL without query
		{UseStandardSQL: true},       // UseStandardSQL without query
		// read-only fields
		{FullID: "x"},
		{Type: "x"},
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{NumBytes: 1},
		{NumLongTermBytes: 1},
		{NumRows: 1},
		{StreamingBuffer: &StreamingBuffer{}},
		{ETag: "x"},
		// expiration time outside allowable range is invalid
		// See https://godoc.org/time#Time.UnixNano
		{ExpirationTime: time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC).Add(-1)},
		{ExpirationTime: time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC).Add(1)},
	} {
		_, err := in.toBQ()
		if err == nil {
			t.Errorf("%+v: got nil, want error", in)
		}
	}
}
// TestTableMetadataToUpdateToBQ verifies that metadata-update requests
// produce the correct patch payloads, in particular the ForceSendFields
// and NullFields bookkeeping that lets zero values and deletions reach
// the service.
func TestTableMetadataToUpdateToBQ(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, test := range []struct {
		tm   TableMetadataToUpdate
		want *bq.Table
	}{
		{
			tm:   TableMetadataToUpdate{},
			want: &bq.Table{},
		},
		{
			tm: TableMetadataToUpdate{
				Description: "d",
				Name:        "n",
			},
			want: &bq.Table{
				Description:     "d",
				FriendlyName:    "n",
				ForceSendFields: []string{"Description", "FriendlyName"},
			},
		},
		{
			tm: TableMetadataToUpdate{
				Schema:         Schema{fieldSchema("desc", "name", "STRING", false, true)},
				ExpirationTime: aTime,
			},
			want: &bq.Table{
				Schema: &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
					},
				},
				ExpirationTime:  aTime.UnixNano() / 1e6,
				ForceSendFields: []string{"Schema", "ExpirationTime"},
			},
		},
		{
			tm: TableMetadataToUpdate{ViewQuery: "q"},
			want: &bq.Table{
				View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
			},
		},
		{
			// Explicitly setting UseLegacySQL to false must still be sent.
			tm: TableMetadataToUpdate{UseLegacySQL: false},
			want: &bq.Table{
				View: &bq.ViewDefinition{
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
			},
		},
		{
			tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
			want: &bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    true,
					ForceSendFields: []string{"Query", "UseLegacySql"},
				},
			},
		},
		{
			// Label set + delete: deletions surface as NullFields entries.
			tm: func() (tm TableMetadataToUpdate) {
				tm.SetLabel("L", "V")
				tm.DeleteLabel("D")
				return tm
			}(),
			want: &bq.Table{
				Labels:     map[string]string{"L": "V"},
				NullFields: []string{"Labels.D"},
			},
		},
		{
			// NeverExpire clears the expiration via NullFields.
			tm: TableMetadataToUpdate{ExpirationTime: NeverExpire},
			want: &bq.Table{
				NullFields: []string{"ExpirationTime"},
			},
		},
		{
			tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: 0}},
			want: &bq.Table{
				TimePartitioning: &bq.TimePartitioning{
					Type:            "DAY",
					ForceSendFields: []string{"RequirePartitionFilter"},
					NullFields:      []string{"ExpirationMs"},
				},
			},
		},
		{
			tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: time.Duration(time.Hour)}},
			want: &bq.Table{
				TimePartitioning: &bq.TimePartitioning{
					ExpirationMs:    3600000,
					Type:            "DAY",
					ForceSendFields: []string{"RequirePartitionFilter"},
				},
			},
		},
	} {
		got, _ := test.tm.toBQ()
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
		}
	}
}
func TestTableMetadataToUpdateToBQErrors(t *testing.T) { | |||
// See https://godoc.org/time#Time.UnixNano | |||
start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC) | |||
end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC) | |||
for _, test := range []struct { | |||
desc string | |||
aTime time.Time | |||
wantErr bool | |||
}{ | |||
{desc: "ignored zero value", aTime: time.Time{}, wantErr: false}, | |||
{desc: "earliest valid time", aTime: start, wantErr: false}, | |||
{desc: "latested valid time", aTime: end, wantErr: false}, | |||
{desc: "invalid times before 1678", aTime: start.Add(-1), wantErr: true}, | |||
{desc: "invalid times after 2262", aTime: end.Add(1), wantErr: true}, | |||
{desc: "valid times after 1678", aTime: start.Add(1), wantErr: false}, | |||
{desc: "valid times before 2262", aTime: end.Add(-1), wantErr: false}, | |||
} { | |||
tm := &TableMetadataToUpdate{ExpirationTime: test.aTime} | |||
_, err := tm.toBQ() | |||
if test.wantErr && err == nil { | |||
t.Errorf("[%s] got no error, want error", test.desc) | |||
} | |||
if !test.wantErr && err != nil { | |||
t.Errorf("[%s] got error, want no error", test.desc) | |||
} | |||
} | |||
} |
@@ -1,892 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigquery | |||
import ( | |||
"encoding/base64" | |||
"errors" | |||
"fmt" | |||
"math" | |||
"math/big" | |||
"reflect" | |||
"strconv" | |||
"strings" | |||
"time" | |||
"cloud.google.com/go/civil" | |||
bq "google.golang.org/api/bigquery/v2" | |||
) | |||
// Value stores the contents of a single cell from a BigQuery result.
// Its dynamic type depends on the column's schema field type.
type Value interface{}
// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	// Load receives one row of values together with the row's schema.
	Load(v []Value, s Schema) error
}
// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it,
// reusing the existing backing array where possible.
func (vs *valueList) Load(v []Value, _ Schema) error {
	*vs = append((*vs)[:0], v...)
	return nil
}
// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap, keyed by schema field
// name. A nil map is allocated on first use.
func (vm *valueMap) Load(v []Value, s Schema) error {
	if *vm == nil {
		*vm = map[string]Value{}
	}
	loadMap(*vm, v, s)
	return nil
}
func loadMap(m map[string]Value, vals []Value, s Schema) { | |||
for i, f := range s { | |||
val := vals[i] | |||
var v interface{} | |||
switch { | |||
case val == nil: | |||
v = val | |||
case f.Schema == nil: | |||
v = val | |||
case !f.Repeated: | |||
m2 := map[string]Value{} | |||
loadMap(m2, val.([]Value), f.Schema) | |||
v = m2 | |||
default: // repeated and nested | |||
sval := val.([]Value) | |||
vs := make([]Value, len(sval)) | |||
for j, e := range sval { | |||
m2 := map[string]Value{} | |||
loadMap(m2, e.([]Value), f.Schema) | |||
vs[j] = m2 | |||
} | |||
v = vs | |||
} | |||
m[f.Name] = v | |||
} | |||
} | |||
// structLoader implements ValueLoader for a pointer to a struct. On the
// first row it compiles the schema into a list of ops (see set), then
// replays those ops for every subsequent row.
type structLoader struct {
	typ reflect.Type // type of struct
	err error        // sticky error from schema compilation
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int   // index path of the destination struct field
	valueIndex int     // index of the source value within the row
	setFunc    setFunc // assignment/conversion routine
	repeated   bool    // whether the schema field is repeated
}
// errNoNulls is returned when a NULL result value would have to be stored
// in a struct field that cannot represent NULL.
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")
// setAny assigns x to v directly via reflection.
// NULL (nil) values are rejected, since a plain struct field cannot
// represent them.
func setAny(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.Set(reflect.ValueOf(x))
	return nil
}
// setInt stores an INTEGER value (always delivered as int64) into a
// signed-integer struct field, rejecting NULLs and values that would
// overflow the field's type.
func setInt(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(int64)
	if v.OverflowInt(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetInt(xx)
	return nil
}
// setUint stores an INTEGER value (delivered as int64) into an
// unsigned-integer struct field. NULLs, negative values, and values that
// overflow the field's type are rejected.
func setUint(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(int64)
	if xx < 0 || v.OverflowUint(uint64(xx)) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetUint(uint64(xx))
	return nil
}
// setFloat stores a FLOAT value (delivered as float64) into a float
// struct field, rejecting NULLs and values that overflow the field's type.
func setFloat(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(float64)
	if v.OverflowFloat(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetFloat(xx)
	return nil
}
// setBool stores a BOOLEAN value into a bool struct field, rejecting NULLs.
func setBool(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetBool(x.(bool))
	return nil
}
// setString stores a STRING value into a string struct field, rejecting NULLs.
func setString(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetString(x.(string))
	return nil
}
// setGeography stores a GEOGRAPHY value into a string struct field,
// rejecting NULLs. Its body is currently identical to setString; it is a
// separate function keyed to the GEOGRAPHY field type.
func setGeography(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetString(x.(string))
	return nil
}
func setBytes(v reflect.Value, x interface{}) error { | |||
if x == nil { | |||
v.SetBytes(nil) | |||
} else { | |||
v.SetBytes(x.([]byte)) | |||
} | |||
return nil | |||
} | |||
func setNull(v reflect.Value, x interface{}, build func() interface{}) error { | |||
if x == nil { | |||
v.Set(reflect.Zero(v.Type())) | |||
} else { | |||
n := build() | |||
v.Set(reflect.ValueOf(n)) | |||
} | |||
return nil | |||
} | |||
// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
// On the first call it compiles the schema into ops for the struct type;
// later calls must present the same type.
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			// Make the error sticky so subsequent calls fail fast.
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}
// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema. Nested records
// are compiled recursively; incompatible field/schema pairs are reported
// as errors rather than silently skipped.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			// Repeated fields require a slice or array destination; the
			// element type is what the setFunc must handle.
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
					schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
					structField.Name, structField.Type)
			}
			// Compile the nested record's schema once and capture the
			// resulting ops in this field's setFunc.
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val)
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
					schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}
// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
//
// For each schema type there are generally two cases: a plain Go type
// (string, int, time.Time, ...) handled by one of the set* helpers, and
// the corresponding Null* wrapper type handled via setNull.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}
		if ftype == typeOfNullString {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullString{StringVal: x.(string), Valid: true}
				})
			}
		}
	case GeographyFieldType:
		if ftype.Kind() == reflect.String {
			return setGeography
		}
		if ftype == typeOfNullGeography {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullGeography{GeographyVal: x.(string), Valid: true}
				})
			}
		}
	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}
	case IntegerFieldType:
		// Check unsigned kinds first; setUint adds a negativity check.
		if isSupportedUintType(ftype) {
			return setUint
		} else if isSupportedIntType(ftype) {
			return setInt
		}
		if ftype == typeOfNullInt64 {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullInt64{Int64: x.(int64), Valid: true}
				})
			}
		}
	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}
		if ftype == typeOfNullFloat64 {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullFloat64{Float64: x.(float64), Valid: true}
				})
			}
		}
	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}
		if ftype == typeOfNullBool {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullBool{Bool: x.(bool), Valid: true}
				})
			}
		}
	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}
		if ftype == typeOfNullTimestamp {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
				})
			}
		}
	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}
		if ftype == typeOfNullDate {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullDate{Date: x.(civil.Date), Valid: true}
				})
			}
		}
	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}
		if ftype == typeOfNullTime {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullTime{Time: x.(civil.Time), Valid: true}
				})
			}
		}
	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
		if ftype == typeOfNullDateTime {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
				})
			}
		}
	case NumericFieldType:
		// NUMERIC maps only to *big.Rat; the pointer itself models NULL.
		if ftype == typeOfRat {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} { return x.(*big.Rat) })
			}
		}
	}
	return nil
}
// Load stores one row of values into the struct registered by set,
// replaying the ops compiled from the schema. Any compilation error
// recorded earlier is returned unchanged.
func (sl *structLoader) Load(values []Value, _ Schema) error {
	if sl.err != nil {
		return sl.err
	}
	return runOps(sl.ops, sl.vstructp.Elem(), values)
}
// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values. Repeated fields are fanned out element-by-element via
// setRepeated; the first failing op aborts the row.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
	for _, op := range ops {
		field := vstruct.FieldByIndex(op.fieldIndex)
		var err error
		if op.repeated {
			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
		} else {
			err = op.setFunc(field, values[op.valueIndex])
		}
		if err != nil {
			return err
		}
	}
	return nil
}
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error { | |||
// v is either a struct or a pointer to a struct. | |||
if v.Kind() == reflect.Ptr { | |||
// If the value is nil, set the pointer to nil. | |||
if val == nil { | |||
v.Set(reflect.Zero(v.Type())) | |||
return nil | |||
} | |||
// If the pointer is nil, set it to a zero struct value. | |||
if v.IsNil() { | |||
v.Set(reflect.New(v.Type().Elem())) | |||
} | |||
v = v.Elem() | |||
} | |||
return runOps(ops, v, val.([]Value)) | |||
} | |||
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error { | |||
vlen := len(vslice) | |||
var flen int | |||
switch field.Type().Kind() { | |||
case reflect.Slice: | |||
// Make a slice of the right size, avoiding allocation if possible. | |||
switch { | |||
case field.Len() < vlen: | |||
field.Set(reflect.MakeSlice(field.Type(), vlen, vlen)) | |||
case field.Len() > vlen: | |||
field.SetLen(vlen) | |||
} | |||
flen = vlen | |||
case reflect.Array: | |||
flen = field.Len() | |||
if flen > vlen { | |||
// Set extra elements to their zero value. | |||
z := reflect.Zero(field.Type().Elem()) | |||
for i := vlen; i < flen; i++ { | |||
field.Index(i).Set(z) | |||
} | |||
} | |||
default: | |||
return fmt.Errorf("bigquery: impossible field type %s", field.Type()) | |||
} | |||
for i, val := range vslice { | |||
if i < flen { // avoid writing past the end of a short array | |||
if err := setElem(field.Index(i), val); err != nil { | |||
return err | |||
} | |||
} | |||
} | |||
return nil | |||
} | |||
// A ValueSaver returns a row of data to be inserted into a table.
// Implementations in this file include ValuesSaver and StructSaver.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}
// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
	// Schema describes the fields of Row; it must have the same length
	// as Row (enforced by valuesToMap at Save time).
	Schema Schema
	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string
	// Row holds the values to insert, one per field of Schema.
	Row []Value
}
// Save implements ValueSaver. | |||
func (vls *ValuesSaver) Save() (map[string]Value, string, error) { | |||
m, err := valuesToMap(vls.Row, vls.Schema) | |||
return m, vls.InsertID, err | |||
} | |||
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { | |||
if len(vs) != len(schema) { | |||
return nil, errors.New("schema does not match length of row to be inserted") | |||
} | |||
m := make(map[string]Value) | |||
for i, fieldSchema := range schema { | |||
if vs[i] == nil { | |||
m[fieldSchema.Name] = nil | |||
continue | |||
} | |||
if fieldSchema.Type != RecordFieldType { | |||
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema) | |||
continue | |||
} | |||
// Nested record, possibly repeated. | |||
vals, ok := vs[i].([]Value) | |||
if !ok { | |||
return nil, errors.New("nested record is not a []Value") | |||
} | |||
if !fieldSchema.Repeated { | |||
value, err := valuesToMap(vals, fieldSchema.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
m[fieldSchema.Name] = value | |||
continue | |||
} | |||
// A repeated nested field is converted into a slice of maps. | |||
var maps []Value | |||
for _, v := range vals { | |||
sv, ok := v.([]Value) | |||
if !ok { | |||
return nil, errors.New("nested record in slice is not a []Value") | |||
} | |||
value, err := valuesToMap(sv, fieldSchema.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
maps = append(maps, value) | |||
} | |||
m[fieldSchema.Name] = maps | |||
} | |||
return m, nil | |||
} | |||
// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	// Schema is optional for StructSavers that are passed to Uploader.Put.
	Schema Schema
	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string
	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}
// Save implements ValueSaver. | |||
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { | |||
vstruct := reflect.ValueOf(ss.Struct) | |||
row, err = structToMap(vstruct, ss.Schema) | |||
if err != nil { | |||
return nil, "", err | |||
} | |||
return row, ss.InsertID, nil | |||
} | |||
func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { | |||
if vstruct.Kind() == reflect.Ptr { | |||
vstruct = vstruct.Elem() | |||
} | |||
if !vstruct.IsValid() { | |||
return nil, nil | |||
} | |||
m := map[string]Value{} | |||
if vstruct.Kind() != reflect.Struct { | |||
return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) | |||
} | |||
fields, err := fieldCache.Fields(vstruct.Type()) | |||
if err != nil { | |||
return nil, err | |||
} | |||
for _, schemaField := range schema { | |||
// Look for an exported struct field with the same name as the schema | |||
// field, ignoring case. | |||
structField := fields.Match(schemaField.Name) | |||
if structField == nil { | |||
continue | |||
} | |||
val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// Add the value to the map, unless it is nil. | |||
if val != nil { | |||
m[schemaField.Name] = val | |||
} | |||
} | |||
return m, nil | |||
} | |||
// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using | |||
// the schemaField as a guide. | |||
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its | |||
// caller can easily identify a nil value. | |||
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { | |||
if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { | |||
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", | |||
schemaField.Name, vfield.Type()) | |||
} | |||
// A non-nested field can be represented by its Go value, except for some types. | |||
if schemaField.Type != RecordFieldType { | |||
return toUploadValueReflect(vfield, schemaField), nil | |||
} | |||
// A non-repeated nested field is converted into a map[string]Value. | |||
if !schemaField.Repeated { | |||
m, err := structToMap(vfield, schemaField.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if m == nil { | |||
return nil, nil | |||
} | |||
return m, nil | |||
} | |||
// A repeated nested field is converted into a slice of maps. | |||
// If the field is zero-length (but not nil), we return a zero-length []Value. | |||
if vfield.IsNil() { | |||
return nil, nil | |||
} | |||
vals := []Value{} | |||
for i := 0; i < vfield.Len(); i++ { | |||
m, err := structToMap(vfield.Index(i), schemaField.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
vals = append(vals, m) | |||
} | |||
return vals, nil | |||
} | |||
func toUploadValue(val interface{}, fs *FieldSchema) interface{} { | |||
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType || fs.Type == NumericFieldType { | |||
return toUploadValueReflect(reflect.ValueOf(val), fs) | |||
} | |||
return val | |||
} | |||
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} { | |||
switch fs.Type { | |||
case TimeFieldType: | |||
if v.Type() == typeOfNullTime { | |||
return v.Interface() | |||
} | |||
return formatUploadValue(v, fs, func(v reflect.Value) string { | |||
return CivilTimeString(v.Interface().(civil.Time)) | |||
}) | |||
case DateTimeFieldType: | |||
if v.Type() == typeOfNullDateTime { | |||
return v.Interface() | |||
} | |||
return formatUploadValue(v, fs, func(v reflect.Value) string { | |||
return CivilDateTimeString(v.Interface().(civil.DateTime)) | |||
}) | |||
case NumericFieldType: | |||
if r, ok := v.Interface().(*big.Rat); ok && r == nil { | |||
return nil | |||
} | |||
return formatUploadValue(v, fs, func(v reflect.Value) string { | |||
return NumericString(v.Interface().(*big.Rat)) | |||
}) | |||
default: | |||
if !fs.Repeated || v.Len() > 0 { | |||
return v.Interface() | |||
} | |||
// The service treats a null repeated field as an error. Return | |||
// nil to omit the field entirely. | |||
return nil | |||
} | |||
} | |||
func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} { | |||
if !fs.Repeated { | |||
return cvt(v) | |||
} | |||
if v.Len() == 0 { | |||
return nil | |||
} | |||
s := make([]string, v.Len()) | |||
for i := 0; i < v.Len(); i++ { | |||
s[i] = cvt(v.Index(i)) | |||
} | |||
return s | |||
} | |||
// CivilTimeString returns a string representing a civil.Time in a format compatible | |||
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a | |||
// string with six digits of sub-second precision. | |||
// | |||
// Use CivilTimeString when using civil.Time in DML, for example in INSERT | |||
// statements. | |||
func CivilTimeString(t civil.Time) string { | |||
if t.Nanosecond == 0 { | |||
return t.String() | |||
} | |||
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond | |||
t.Nanosecond = 0 | |||
return t.String() + fmt.Sprintf(".%06d", micro) | |||
} | |||
// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible | |||
// with BigQuery SQL. It separate the date and time with a space, and formats the time | |||
// with CivilTimeString. | |||
// | |||
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT | |||
// statements. | |||
func CivilDateTimeString(dt civil.DateTime) string { | |||
return dt.Date.String() + " " + CivilTimeString(dt.Time) | |||
} | |||
// parseCivilDateTime parses a date-time represented in a BigQuery SQL | |||
// compatible format and returns a civil.DateTime. | |||
func parseCivilDateTime(s string) (civil.DateTime, error) { | |||
parts := strings.Fields(s) | |||
if len(parts) != 2 { | |||
return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s) | |||
} | |||
return civil.ParseDateTime(parts[0] + "T" + parts[1]) | |||
} | |||
const ( | |||
// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value. | |||
NumericPrecisionDigits = 38 | |||
// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value. | |||
NumericScaleDigits = 9 | |||
) | |||
// NumericString returns a string representing a *big.Rat in a format compatible | |||
// with BigQuery SQL. It returns a floating-point literal with 9 digits | |||
// after the decimal point. | |||
func NumericString(r *big.Rat) string { | |||
return r.FloatString(NumericScaleDigits) | |||
} | |||
// convertRows converts a series of TableRows into a series of Value slices. | |||
// schema is used to interpret the data from rows; its length must match the | |||
// length of each row. | |||
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { | |||
var rs [][]Value | |||
for _, r := range rows { | |||
row, err := convertRow(r, schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
rs = append(rs, row) | |||
} | |||
return rs, nil | |||
} | |||
func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { | |||
if len(schema) != len(r.F) { | |||
return nil, errors.New("schema length does not match row length") | |||
} | |||
var values []Value | |||
for i, cell := range r.F { | |||
fs := schema[i] | |||
v, err := convertValue(cell.V, fs.Type, fs.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
values = append(values, v) | |||
} | |||
return values, nil | |||
} | |||
func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { | |||
switch val := val.(type) { | |||
case nil: | |||
return nil, nil | |||
case []interface{}: | |||
return convertRepeatedRecord(val, typ, schema) | |||
case map[string]interface{}: | |||
return convertNestedRecord(val, schema) | |||
case string: | |||
return convertBasicType(val, typ) | |||
default: | |||
return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) | |||
} | |||
} | |||
func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { | |||
var values []Value | |||
for _, cell := range vals { | |||
// each cell contains a single entry, keyed by "v" | |||
val := cell.(map[string]interface{})["v"] | |||
v, err := convertValue(val, typ, schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
values = append(values, v) | |||
} | |||
return values, nil | |||
} | |||
func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { | |||
// convertNestedRecord is similar to convertRow, as a record has the same structure as a row. | |||
// Nested records are wrapped in a map with a single key, "f". | |||
record := val["f"].([]interface{}) | |||
if len(record) != len(schema) { | |||
return nil, errors.New("schema length does not match record length") | |||
} | |||
var values []Value | |||
for i, cell := range record { | |||
// each cell contains a single entry, keyed by "v" | |||
val := cell.(map[string]interface{})["v"] | |||
fs := schema[i] | |||
v, err := convertValue(val, fs.Type, fs.Schema) | |||
if err != nil { | |||
return nil, err | |||
} | |||
values = append(values, v) | |||
} | |||
return values, nil | |||
} | |||
// convertBasicType returns val as an interface with a concrete type specified by typ. | |||
func convertBasicType(val string, typ FieldType) (Value, error) { | |||
switch typ { | |||
case StringFieldType: | |||
return val, nil | |||
case BytesFieldType: | |||
return base64.StdEncoding.DecodeString(val) | |||
case IntegerFieldType: | |||
return strconv.ParseInt(val, 10, 64) | |||
case FloatFieldType: | |||
return strconv.ParseFloat(val, 64) | |||
case BooleanFieldType: | |||
return strconv.ParseBool(val) | |||
case TimestampFieldType: | |||
f, err := strconv.ParseFloat(val, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
secs := math.Trunc(f) | |||
nanos := (f - secs) * 1e9 | |||
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil | |||
case DateFieldType: | |||
return civil.ParseDate(val) | |||
case TimeFieldType: | |||
return civil.ParseTime(val) | |||
case DateTimeFieldType: | |||
return civil.ParseDateTime(val) | |||
case NumericFieldType: | |||
r, ok := (&big.Rat{}).SetString(val) | |||
if !ok { | |||
return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val) | |||
} | |||
return Value(r), nil | |||
case GeographyFieldType: | |||
return val, nil | |||
default: | |||
return nil, fmt.Errorf("unrecognized type: %s", typ) | |||
} | |||
} |
@@ -1,578 +0,0 @@ | |||
// Copyright 2015 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package bigtable | |||
import ( | |||
"context" | |||
"fmt" | |||
"math" | |||
"sort" | |||
"strings" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/golang/protobuf/proto" | |||
"google.golang.org/api/iterator" | |||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||
) | |||
func TestAdminIntegration(t *testing.T) { | |||
testEnv, err := NewIntegrationEnv() | |||
if err != nil { | |||
t.Fatalf("IntegrationEnv: %v", err) | |||
} | |||
defer testEnv.Close() | |||
timeout := 2 * time.Second | |||
if testEnv.Config().UseProd { | |||
timeout = 5 * time.Minute | |||
} | |||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||
adminClient, err := testEnv.NewAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewAdminClient: %v", err) | |||
} | |||
defer adminClient.Close() | |||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||
} | |||
if iAdminClient != nil { | |||
defer iAdminClient.Close() | |||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance) | |||
if err != nil { | |||
t.Errorf("InstanceInfo: %v", err) | |||
} | |||
if iInfo.Name != adminClient.instance { | |||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||
} | |||
} | |||
list := func() []string { | |||
tbls, err := adminClient.Tables(ctx) | |||
if err != nil { | |||
t.Fatalf("Fetching list of tables: %v", err) | |||
} | |||
sort.Strings(tbls) | |||
return tbls | |||
} | |||
containsAll := func(got, want []string) bool { | |||
gotSet := make(map[string]bool) | |||
for _, s := range got { | |||
gotSet[s] = true | |||
} | |||
for _, s := range want { | |||
if !gotSet[s] { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
defer adminClient.DeleteTable(ctx, "mytable") | |||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil { | |||
t.Fatalf("Creating table: %v", err) | |||
} | |||
defer adminClient.DeleteTable(ctx, "myothertable") | |||
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { | |||
t.Fatalf("Creating table: %v", err) | |||
} | |||
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) { | |||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||
} | |||
must(adminClient.WaitForReplication(ctx, "mytable")) | |||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { | |||
t.Fatalf("Deleting table: %v", err) | |||
} | |||
tables := list() | |||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) { | |||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||
} | |||
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) { | |||
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted) | |||
} | |||
tblConf := TableConf{ | |||
TableID: "conftable", | |||
Families: map[string]GCPolicy{ | |||
"fam1": MaxVersionsPolicy(1), | |||
"fam2": MaxVersionsPolicy(2), | |||
}, | |||
} | |||
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil { | |||
t.Fatalf("Creating table from TableConf: %v", err) | |||
} | |||
defer adminClient.DeleteTable(ctx, tblConf.TableID) | |||
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID) | |||
if err != nil { | |||
t.Fatalf("Getting table info: %v", err) | |||
} | |||
sort.Strings(tblInfo.Families) | |||
wantFams := []string{"fam1", "fam2"} | |||
if !testutil.Equal(tblInfo.Families, wantFams) { | |||
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams) | |||
} | |||
// Populate mytable and drop row ranges | |||
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil { | |||
t.Fatalf("Creating column family: %v", err) | |||
} | |||
client, err := testEnv.NewClient() | |||
if err != nil { | |||
t.Fatalf("NewClient: %v", err) | |||
} | |||
defer client.Close() | |||
tbl := client.Open("mytable") | |||
prefixes := []string{"a", "b", "c"} | |||
for _, prefix := range prefixes { | |||
for i := 0; i < 5; i++ { | |||
mut := NewMutation() | |||
mut.Set("cf", "col", 1000, []byte("1")) | |||
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil { | |||
t.Fatalf("Mutating row: %v", err) | |||
} | |||
} | |||
} | |||
if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil { | |||
t.Errorf("DropRowRange a: %v", err) | |||
} | |||
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil { | |||
t.Errorf("DropRowRange c: %v", err) | |||
} | |||
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil { | |||
t.Errorf("DropRowRange x: %v", err) | |||
} | |||
var gotRowCount int | |||
must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool { | |||
gotRowCount++ | |||
if !strings.HasPrefix(row.Key(), "b") { | |||
t.Errorf("Invalid row after dropping range: %v", row) | |||
} | |||
return true | |||
})) | |||
if gotRowCount != 5 { | |||
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5) | |||
} | |||
} | |||
func TestInstanceUpdate(t *testing.T) { | |||
testEnv, err := NewIntegrationEnv() | |||
if err != nil { | |||
t.Fatalf("IntegrationEnv: %v", err) | |||
} | |||
defer testEnv.Close() | |||
timeout := 2 * time.Second | |||
if testEnv.Config().UseProd { | |||
timeout = 5 * time.Minute | |||
} | |||
ctx, cancel := context.WithTimeout(context.Background(), timeout) | |||
defer cancel() | |||
adminClient, err := testEnv.NewAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewAdminClient: %v", err) | |||
} | |||
defer adminClient.Close() | |||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||
} | |||
if iAdminClient == nil { | |||
return | |||
} | |||
defer iAdminClient.Close() | |||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance) | |||
if err != nil { | |||
t.Errorf("InstanceInfo: %v", err) | |||
} | |||
if iInfo.Name != adminClient.instance { | |||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||
} | |||
if iInfo.DisplayName != adminClient.instance { | |||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||
} | |||
const numNodes = 4 | |||
// update cluster nodes | |||
if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil { | |||
t.Errorf("UpdateCluster: %v", err) | |||
} | |||
// get cluster after updating | |||
cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster) | |||
if err != nil { | |||
t.Errorf("GetCluster %v", err) | |||
} | |||
if cis.ServeNodes != int(numNodes) { | |||
t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes)) | |||
} | |||
} | |||
func TestAdminSnapshotIntegration(t *testing.T) { | |||
testEnv, err := NewIntegrationEnv() | |||
if err != nil { | |||
t.Fatalf("IntegrationEnv: %v", err) | |||
} | |||
defer testEnv.Close() | |||
if !testEnv.Config().UseProd { | |||
t.Skip("emulator doesn't support snapshots") | |||
} | |||
timeout := 2 * time.Second | |||
if testEnv.Config().UseProd { | |||
timeout = 5 * time.Minute | |||
} | |||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||
adminClient, err := testEnv.NewAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewAdminClient: %v", err) | |||
} | |||
defer adminClient.Close() | |||
table := testEnv.Config().Table | |||
cluster := testEnv.Config().Cluster | |||
list := func(cluster string) ([]*SnapshotInfo, error) { | |||
infos := []*SnapshotInfo(nil) | |||
it := adminClient.Snapshots(ctx, cluster) | |||
for { | |||
s, err := it.Next() | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
return nil, err | |||
} | |||
infos = append(infos, s) | |||
} | |||
return infos, err | |||
} | |||
// Delete the table at the end of the test. Schedule ahead of time | |||
// in case the client fails | |||
defer adminClient.DeleteTable(ctx, table) | |||
if err := adminClient.CreateTable(ctx, table); err != nil { | |||
t.Fatalf("Creating table: %v", err) | |||
} | |||
// Precondition: no snapshots | |||
snapshots, err := list(cluster) | |||
if err != nil { | |||
t.Fatalf("Initial snapshot list: %v", err) | |||
} | |||
if got, want := len(snapshots), 0; got != want { | |||
t.Fatalf("Initial snapshot list len: %d, want: %d", got, want) | |||
} | |||
// Create snapshot | |||
defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot") | |||
if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil { | |||
t.Fatalf("Creating snaphot: %v", err) | |||
} | |||
// List snapshot | |||
snapshots, err = list(cluster) | |||
if err != nil { | |||
t.Fatalf("Listing snapshots: %v", err) | |||
} | |||
if got, want := len(snapshots), 1; got != want { | |||
t.Fatalf("Listing snapshot count: %d, want: %d", got, want) | |||
} | |||
if got, want := snapshots[0].Name, "mysnapshot"; got != want { | |||
t.Fatalf("Snapshot name: %s, want: %s", got, want) | |||
} | |||
if got, want := snapshots[0].SourceTable, table; got != want { | |||
t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want) | |||
} | |||
if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 { | |||
t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want) | |||
} | |||
// Get snapshot | |||
snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot") | |||
if err != nil { | |||
t.Fatalf("SnapshotInfo: %v", snapshot) | |||
} | |||
if got, want := *snapshot, *snapshots[0]; got != want { | |||
t.Fatalf("SnapshotInfo: %v, want: %v", got, want) | |||
} | |||
// Restore | |||
restoredTable := table + "-restored" | |||
defer adminClient.DeleteTable(ctx, restoredTable) | |||
if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil { | |||
t.Fatalf("CreateTableFromSnapshot: %v", err) | |||
} | |||
if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil { | |||
t.Fatalf("Restored TableInfo: %v", err) | |||
} | |||
// Delete snapshot | |||
if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil { | |||
t.Fatalf("DeleteSnapshot: %v", err) | |||
} | |||
snapshots, err = list(cluster) | |||
if err != nil { | |||
t.Fatalf("List after Delete: %v", err) | |||
} | |||
if got, want := len(snapshots), 0; got != want { | |||
t.Fatalf("List after delete len: %d, want: %d", got, want) | |||
} | |||
} | |||
func TestGranularity(t *testing.T) { | |||
testEnv, err := NewIntegrationEnv() | |||
if err != nil { | |||
t.Fatalf("IntegrationEnv: %v", err) | |||
} | |||
defer testEnv.Close() | |||
timeout := 2 * time.Second | |||
if testEnv.Config().UseProd { | |||
timeout = 5 * time.Minute | |||
} | |||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||
adminClient, err := testEnv.NewAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewAdminClient: %v", err) | |||
} | |||
defer adminClient.Close() | |||
list := func() []string { | |||
tbls, err := adminClient.Tables(ctx) | |||
if err != nil { | |||
t.Fatalf("Fetching list of tables: %v", err) | |||
} | |||
sort.Strings(tbls) | |||
return tbls | |||
} | |||
containsAll := func(got, want []string) bool { | |||
gotSet := make(map[string]bool) | |||
for _, s := range got { | |||
gotSet[s] = true | |||
} | |||
for _, s := range want { | |||
if !gotSet[s] { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
defer adminClient.DeleteTable(ctx, "mytable") | |||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil { | |||
t.Fatalf("Creating table: %v", err) | |||
} | |||
tables := list() | |||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) { | |||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||
} | |||
// calling ModifyColumnFamilies to check the granularity of table | |||
prefix := adminClient.instancePrefix() | |||
req := &btapb.ModifyColumnFamiliesRequest{ | |||
Name: prefix + "/tables/" + "mytable", | |||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ | |||
Id: "cf", | |||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, | |||
}}, | |||
} | |||
table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req) | |||
if err != nil { | |||
t.Fatalf("Creating column family: %v", err) | |||
} | |||
if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) { | |||
t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS)) | |||
} | |||
} | |||
func TestInstanceAdminClient_AppProfile(t *testing.T) { | |||
testEnv, err := NewIntegrationEnv() | |||
if err != nil { | |||
t.Fatalf("IntegrationEnv: %v", err) | |||
} | |||
defer testEnv.Close() | |||
timeout := 2 * time.Second | |||
if testEnv.Config().UseProd { | |||
timeout = 5 * time.Minute | |||
} | |||
ctx, cancel := context.WithTimeout(context.Background(), timeout) | |||
defer cancel() | |||
adminClient, err := testEnv.NewAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewAdminClient: %v", err) | |||
} | |||
defer adminClient.Close() | |||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||
if err != nil { | |||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||
} | |||
if iAdminClient == nil { | |||
return | |||
} | |||
defer iAdminClient.Close() | |||
profile := ProfileConf{ | |||
ProfileID: "app_profile1", | |||
InstanceID: adminClient.instance, | |||
ClusterID: testEnv.Config().Cluster, | |||
Description: "creating new app profile 1", | |||
RoutingPolicy: SingleClusterRouting, | |||
} | |||
createdProfile, err := iAdminClient.CreateAppProfile(ctx, profile) | |||
if err != nil { | |||
t.Fatalf("Creating app profile: %v", err) | |||
} | |||
gotProfile, err := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1") | |||
if err != nil { | |||
t.Fatalf("Get app profile: %v", err) | |||
} | |||
if !proto.Equal(createdProfile, gotProfile) { | |||
t.Fatalf("created profile: %s, got profile: %s", createdProfile.Name, gotProfile.Name) | |||
} | |||
list := func(instanceID string) ([]*btapb.AppProfile, error) { | |||
profiles := []*btapb.AppProfile(nil) | |||
it := iAdminClient.ListAppProfiles(ctx, instanceID) | |||
for { | |||
s, err := it.Next() | |||
if err == iterator.Done { | |||
break | |||
} | |||
if err != nil { | |||
return nil, err | |||
} | |||
profiles = append(profiles, s) | |||
} | |||
return profiles, err | |||
} | |||
profiles, err := list(adminClient.instance) | |||
if err != nil { | |||
t.Fatalf("List app profile: %v", err) | |||
} | |||
if got, want := len(profiles), 1; got != want { | |||
t.Fatalf("Initial app profile list len: %d, want: %d", got, want) | |||
} | |||
for _, test := range []struct { | |||
desc string | |||
uattrs ProfileAttrsToUpdate | |||
want *btapb.AppProfile // nil means error | |||
}{ | |||
{ | |||
desc: "empty update", | |||
uattrs: ProfileAttrsToUpdate{}, | |||
want: nil, | |||
}, | |||
{ | |||
desc: "empty description update", | |||
uattrs: ProfileAttrsToUpdate{Description: ""}, | |||
want: &btapb.AppProfile{ | |||
Name: gotProfile.Name, | |||
Description: "", | |||
RoutingPolicy: gotProfile.RoutingPolicy, | |||
Etag: gotProfile.Etag}, | |||
}, | |||
{ | |||
desc: "routing update", | |||
uattrs: ProfileAttrsToUpdate{ | |||
RoutingPolicy: SingleClusterRouting, | |||
ClusterID: testEnv.Config().Cluster, | |||
}, | |||
want: &btapb.AppProfile{ | |||
Name: gotProfile.Name, | |||
Description: "", | |||
Etag: gotProfile.Etag, | |||
RoutingPolicy: &btapb.AppProfile_SingleClusterRouting_{ | |||
SingleClusterRouting: &btapb.AppProfile_SingleClusterRouting{ | |||
ClusterId: testEnv.Config().Cluster, | |||
}}, | |||
}, | |||
}, | |||
} { | |||
err = iAdminClient.UpdateAppProfile(ctx, adminClient.instance, "app_profile1", test.uattrs) | |||
if err != nil { | |||
if test.want != nil { | |||
t.Errorf("%s: %v", test.desc, err) | |||
} | |||
continue | |||
} | |||
if err == nil && test.want == nil { | |||
t.Errorf("%s: got nil, want error", test.desc) | |||
continue | |||
} | |||
got, _ := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1") | |||
if !proto.Equal(got, test.want) { | |||
t.Fatalf("%s : got profile : %v, want profile: %v", test.desc, gotProfile, test.want) | |||
} | |||
} | |||
err = iAdminClient.DeleteAppProfile(ctx, adminClient.instance, "app_profile1") | |||
if err != nil { | |||
t.Fatalf("Delete app profile: %v", err) | |||
} | |||
} |
@@ -1,914 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bigtable // import "cloud.google.com/go/bigtable" | |||
import ( | |||
"context" | |||
"errors" | |||
"fmt" | |||
"io" | |||
"strconv" | |||
"time" | |||
"cloud.google.com/go/bigtable/internal/gax" | |||
btopt "cloud.google.com/go/bigtable/internal/option" | |||
"github.com/golang/protobuf/proto" | |||
"google.golang.org/api/option" | |||
gtransport "google.golang.org/api/transport/grpc" | |||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/metadata" | |||
"google.golang.org/grpc/status" | |||
) | |||
// prodAddr is the production Bigtable data-plane gRPC endpoint.
const prodAddr = "bigtable.googleapis.com:443"
// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
	conn   *grpc.ClientConn    // shared connection (pool) to the data endpoint
	client btpb.BigtableClient // generated gRPC stub over conn
	// Identify the instance all tables opened from this client belong to.
	project, instance string
	// App profile id sent with every data RPC; empty means the instance default.
	appProfile string
}
// ClientConfig has configurations for the client.
type ClientConfig struct {
	// The id of the app profile to associate with all data operations sent from this client.
	// If unspecified, the default app profile for the instance will be used.
	AppProfile string
}
// NewClient creates a new Client for a given project and instance. | |||
// The default ClientConfig will be used. | |||
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { | |||
return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...) | |||
} | |||
// NewClientWithConfig creates a new client with the given config.
// It dials the production data endpoint and returns a Client whose RPCs
// carry config.AppProfile.
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
	o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
	if err != nil {
		return nil, err
	}
	// Default to a small connection pool that can be overridden.
	o = append(o,
		option.WithGRPCConnectionPool(4),
		// Set the max size to correspond to server-side limits.
		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1<<28), grpc.MaxCallRecvMsgSize(1<<28))),
		// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
		// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
		option.WithGRPCDialOption(grpc.WithBlock()))
	// Caller-supplied options go last so they can override the defaults above.
	o = append(o, opts...)
	conn, err := gtransport.Dial(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &Client{
		conn:       conn,
		client:     btpb.NewBigtableClient(conn),
		project:    project,
		instance:   instance,
		appProfile: config.AppProfile,
	}, nil
}
// Close closes the Client, releasing the underlying gRPC connection.
// Per the Client doc, Close is the one method that is not safe to call
// concurrently with other Client methods.
func (c *Client) Close() error {
	return c.conn.Close()
}
// Retry configuration shared by the data-plane RPC wrappers below.
var (
	// Codes considered safe to retry when the operation itself is idempotent.
	idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
	// Set form of idempotentRetryCodes for O(1) lookup; filled in init.
	isIdempotentRetryCode = make(map[codes.Code]bool)
	// Backoff parameters (100ms initial, 2s cap, 1.2 multiplier — see
	// gax.WithDelayTimeoutSettings) retrying only the codes above.
	retryOptions = []gax.CallOption{
		gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
		gax.WithRetryCodes(idempotentRetryCodes),
	}
)
func init() { | |||
for _, code := range idempotentRetryCodes { | |||
isIdempotentRetryCode[code] = true | |||
} | |||
} | |||
func (c *Client) fullTableName(table string) string { | |||
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) | |||
} | |||
// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
	c     *Client // owning client; supplies the stub and app profile
	table string  // unqualified table name
	// Metadata to be sent with each request.
	md metadata.MD
}
// Open opens a table. | |||
func (c *Client) Open(table string) *Table { | |||
return &Table{ | |||
c: c, | |||
table: table, | |||
md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), | |||
} | |||
} | |||
// TODO(dsymonds): Read method that returns a sequence of ReadItems. | |||
// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	var prevRowKey string
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
	defer func() { traceEndSpan(ctx, err) }()
	attrMap := make(map[string]interface{})
	// The whole stream runs inside gax.Invoke; on a retryable failure the
	// closure re-executes with arg shrunk to the not-yet-seen rows.
	err = gax.Invoke(ctx, func(ctx context.Context) error {
		if !arg.valid() {
			// Empty row set, no need to make an API call.
			// NOTE: we must return early if arg == RowList{} because reading
			// an empty RowList from bigtable returns all rows from that table.
			return nil
		}
		req := &btpb.ReadRowsRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			Rows:         arg.proto(),
		}
		for _, opt := range opts {
			opt.set(req)
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()
		startTime := time.Now()
		stream, err := t.c.client.ReadRows(ctx, req)
		if err != nil {
			return err
		}
		cr := newChunkReader()
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				// Reset arg for next Invoke call.
				arg = arg.retainRowsAfter(prevRowKey)
				attrMap["rowKey"] = prevRowKey
				attrMap["error"] = err.Error()
				attrMap["time_secs"] = time.Since(startTime).Seconds()
				tracePrintf(ctx, attrMap, "Retry details in ReadRows")
				return err
			}
			attrMap["time_secs"] = time.Since(startTime).Seconds()
			attrMap["rowCount"] = len(res.Chunks)
			tracePrintf(ctx, attrMap, "Details in ReadRows")
			// Assemble complete rows from the streamed cell chunks.
			for _, cc := range res.Chunks {
				row, err := cr.Process(cc)
				if err != nil {
					// No need to prepare for a retry, this is an unretryable error.
					return err
				}
				if row == nil {
					// Row is not complete yet; keep consuming chunks.
					continue
				}
				prevRowKey = row.Key()
				if !f(row) {
					// Cancel and drain stream.
					cancel()
					for {
						if _, err := stream.Recv(); err != nil {
							// The stream has ended. We don't return an error
							// because the caller has intentionally interrupted the scan.
							return nil
						}
					}
				}
			}
			if err := cr.Close(); err != nil {
				// No need to prepare for a retry, this is an unretryable error.
				return err
			}
		}
		return err
	}, retryOptions...)
	return err
}
// ReadRow is a convenience implementation of a single-row reader. | |||
// A missing row will return a zero-length map and a nil error. | |||
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { | |||
var r Row | |||
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { | |||
r = rr | |||
return true | |||
}, opts...) | |||
return r, err | |||
} | |||
// decodeFamilyProto adds the cell data from f to the given row. | |||
func decodeFamilyProto(r Row, row string, f *btpb.Family) { | |||
fam := f.Name // does not have colon | |||
for _, col := range f.Columns { | |||
for _, cell := range col.Cells { | |||
ri := ReadItem{ | |||
Row: row, | |||
Column: fam + ":" + string(col.Qualifier), | |||
Timestamp: Timestamp(cell.TimestampMicros), | |||
Value: cell.Value, | |||
} | |||
r[fam] = append(r[fam], ri) | |||
} | |||
} | |||
} | |||
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface {
	// proto converts the set to its wire representation.
	proto() *btpb.RowSet
	// retainRowsAfter returns a new RowSet that does not include the
	// given row key or any row key lexicographically less than it.
	retainRowsAfter(lastRowKey string) RowSet
	// Valid reports whether this set can cover at least one row.
	valid() bool
}
// RowList is a sequence of row keys.
type RowList []string
func (r RowList) proto() *btpb.RowSet { | |||
keys := make([][]byte, len(r)) | |||
for i, row := range r { | |||
keys[i] = []byte(row) | |||
} | |||
return &btpb.RowSet{RowKeys: keys} | |||
} | |||
func (r RowList) retainRowsAfter(lastRowKey string) RowSet { | |||
var retryKeys RowList | |||
for _, key := range r { | |||
if key > lastRowKey { | |||
retryKeys = append(retryKeys, key) | |||
} | |||
} | |||
return retryKeys | |||
} | |||
func (r RowList) valid() bool { | |||
return len(r) > 0 | |||
} | |||
// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
	start string // inclusive lower bound
	limit string // exclusive upper bound; "" means unbounded
}
// NewRange returns the new RowRange [begin, end). | |||
func NewRange(begin, end string) RowRange { | |||
return RowRange{ | |||
start: begin, | |||
limit: end, | |||
} | |||
} | |||
// Unbounded tests whether a RowRange is unbounded. | |||
func (r RowRange) Unbounded() bool { | |||
return r.limit == "" | |||
} | |||
// Contains says whether the RowRange contains the key. | |||
func (r RowRange) Contains(row string) bool { | |||
return r.start <= row && (r.limit == "" || r.limit > row) | |||
} | |||
// String provides a printable description of a RowRange. | |||
func (r RowRange) String() string { | |||
a := strconv.Quote(r.start) | |||
if r.Unbounded() { | |||
return fmt.Sprintf("[%s,∞)", a) | |||
} | |||
return fmt.Sprintf("[%s,%q)", a, r.limit) | |||
} | |||
func (r RowRange) proto() *btpb.RowSet { | |||
rr := &btpb.RowRange{ | |||
StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}, | |||
} | |||
if !r.Unbounded() { | |||
rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)} | |||
} | |||
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} | |||
} | |||
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { | |||
if lastRowKey == "" || lastRowKey < r.start { | |||
return r | |||
} | |||
// Set the beginning of the range to the row after the last scanned. | |||
start := lastRowKey + "\x00" | |||
if r.Unbounded() { | |||
return InfiniteRange(start) | |||
} | |||
return NewRange(start, r.limit) | |||
} | |||
func (r RowRange) valid() bool { | |||
return r.Unbounded() || r.start < r.limit | |||
} | |||
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
type RowRangeList []RowRange
func (r RowRangeList) proto() *btpb.RowSet { | |||
ranges := make([]*btpb.RowRange, len(r)) | |||
for i, rr := range r { | |||
// RowRange.proto() returns a RowSet with a single element RowRange array | |||
ranges[i] = rr.proto().RowRanges[0] | |||
} | |||
return &btpb.RowSet{RowRanges: ranges} | |||
} | |||
func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet { | |||
if lastRowKey == "" { | |||
return r | |||
} | |||
// Return a list of any range that has not yet been completely processed | |||
var ranges RowRangeList | |||
for _, rr := range r { | |||
retained := rr.retainRowsAfter(lastRowKey) | |||
if retained.valid() { | |||
ranges = append(ranges, retained.(RowRange)) | |||
} | |||
} | |||
return ranges | |||
} | |||
func (r RowRangeList) valid() bool { | |||
for _, rr := range r { | |||
if rr.valid() { | |||
return true | |||
} | |||
} | |||
return false | |||
} | |||
// SingleRow returns a RowSet for reading a single row.
func SingleRow(row string) RowSet {
	return RowList{row}
}
// PrefixRange returns a RowRange consisting of all keys starting with the prefix. | |||
func PrefixRange(prefix string) RowRange { | |||
return RowRange{ | |||
start: prefix, | |||
limit: prefixSuccessor(prefix), | |||
} | |||
} | |||
// InfiniteRange returns the RowRange consisting of all keys at least as | |||
// large as start. | |||
func InfiniteRange(start string) RowRange { | |||
return RowRange{ | |||
start: start, | |||
limit: "", | |||
} | |||
} | |||
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
	if prefix == "" {
		return "" // infinite range
	}
	// Walk back over trailing 0xff bytes; they cannot be incremented.
	i := len(prefix) - 1
	for i >= 0 && prefix[i] == '\xff' {
		i--
	}
	if i < 0 {
		// Entirely 0xff: no successor exists.
		return ""
	}
	// Copy up to and including position i, then bump that byte.
	succ := []byte(prefix[:i+1])
	succ[i]++
	return string(succ)
}
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
	// set applies the option to the outgoing request.
	set(req *btpb.ReadRowsRequest)
}
// RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
// rowFilter adapts a Filter to the ReadOption interface.
type rowFilter struct{ f Filter }
func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }
// limitRows adapts a row limit to the ReadOption interface.
type limitRows struct{ limit int64 }
func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
// mutationsAreRetryable returns true if all mutations are idempotent | |||
// and therefore retryable. A mutation is idempotent iff all cell timestamps | |||
// have an explicit timestamp set and do not rely on the timestamp being set on the server. | |||
func mutationsAreRetryable(muts []*btpb.Mutation) bool { | |||
serverTime := int64(ServerTime) | |||
for _, mut := range muts { | |||
setCell := mut.GetSetCell() | |||
if setCell != nil && setCell.TimestampMicros == serverTime { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// maxMutations is the server-imposed cap on mutations per MutateRows request.
const maxMutations = 100000
// Apply mutates a row atomically. A mutation must contain at least one
// operation and at most 100000 operations.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	// after runs every ApplyOption callback against the RPC response.
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
	defer func() { traceEndSpan(ctx, err) }()
	var callOptions []gax.CallOption
	if m.cond == nil {
		// Unconditional mutation: plain MutateRow RPC.
		req := &btpb.MutateRowRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			RowKey:       []byte(row),
			Mutations:    m.ops,
		}
		// Retry only when every mutation carries an explicit timestamp,
		// i.e. the mutation set is idempotent.
		if mutationsAreRetryable(m.ops) {
			callOptions = retryOptions
		}
		var res *btpb.MutateRowResponse
		err := gax.Invoke(ctx, func(ctx context.Context) error {
			var err error
			res, err = t.c.client.MutateRow(ctx, req)
			return err
		}, callOptions...)
		if err == nil {
			after(res)
		}
		return err
	}
	// Conditional mutation: CheckAndMutateRow RPC with true/false branches.
	req := &btpb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		AppProfileId:    t.c.appProfile,
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		if m.mtrue.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		if m.mfalse.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.FalseMutations = m.mfalse.ops
	}
	// Both branches must be idempotent for the conditional RPC to be retried.
	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
		callOptions = retryOptions
	}
	var cmRes *btpb.CheckAndMutateRowResponse
	err = gax.Invoke(ctx, func(ctx context.Context) error {
		var err error
		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
		return err
	}, callOptions...)
	if err == nil {
		after(cmRes)
	}
	return err
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
	// after is invoked with the raw RPC response once the call succeeds.
	after(res proto.Message)
}
// applyAfterFunc adapts a plain function to the ApplyOption interface.
type applyAfterFunc func(res proto.Message)
func (a applyAfterFunc) after(res proto.Message) { a(res) }
// GetCondMutationResult returns an ApplyOption that reports whether the conditional | |||
// mutation's condition matched. | |||
func GetCondMutationResult(matched *bool) ApplyOption { | |||
return applyAfterFunc(func(res proto.Message) { | |||
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { | |||
*matched = res.PredicateMatched | |||
} | |||
}) | |||
} | |||
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
	ops []*btpb.Mutation // accumulated operations, applied in order
	// for conditional mutations
	cond          Filter    // predicate filter; nil for unconditional mutations
	mtrue, mfalse *Mutation // branches applied when cond matches / does not match
}
// NewMutation returns a new mutation.
func NewMutation() *Mutation {
	return new(Mutation)
}
// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
	return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}
// Set sets a value in a specified column, with the given timestamp. | |||
// The timestamp will be truncated to millisecond granularity. | |||
// A timestamp of ServerTime means to use the server timestamp. | |||
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { | |||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||
FamilyName: family, | |||
ColumnQualifier: []byte(column), | |||
TimestampMicros: int64(ts.TruncateToMilliseconds()), | |||
Value: value, | |||
}}}) | |||
} | |||
// DeleteCellsInColumn will delete all the cells whose columns are family:column. | |||
func (m *Mutation) DeleteCellsInColumn(family, column string) { | |||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ | |||
FamilyName: family, | |||
ColumnQualifier: []byte(column), | |||
}}}) | |||
} | |||
// DeleteTimestampRange deletes all cells whose columns are family:column | |||
// and whose timestamps are in the half-open interval [start, end). | |||
// If end is zero, it will be interpreted as infinity. | |||
// The timestamps will be truncated to millisecond granularity. | |||
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { | |||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ | |||
FamilyName: family, | |||
ColumnQualifier: []byte(column), | |||
TimeRange: &btpb.TimestampRange{ | |||
StartTimestampMicros: int64(start.TruncateToMilliseconds()), | |||
EndTimestampMicros: int64(end.TruncateToMilliseconds()), | |||
}, | |||
}}}) | |||
} | |||
// DeleteCellsInFamily will delete all the cells whose columns are family:*. | |||
func (m *Mutation) DeleteCellsInFamily(family string) { | |||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{ | |||
FamilyName: family, | |||
}}}) | |||
} | |||
// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
}
// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
	Entry *btpb.MutateRowsRequest_Entry
	Err   error
}
// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	if len(rowKeys) != len(muts) {
		return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
	}
	origEntries := make([]*entryErr, len(rowKeys))
	for i, key := range rowKeys {
		mut := muts[i]
		if mut.cond != nil {
			return nil, errors.New("conditional mutations cannot be applied in bulk")
		}
		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
	}
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
	defer func() { traceEndSpan(ctx, err) }()
	// Split entries so no single MutateRows request exceeds maxMutations, and
	// drive each group to completion (including retries) before the next.
	for _, group := range groupEntries(origEntries, maxMutations) {
		attrMap := make(map[string]interface{})
		err = gax.Invoke(ctx, func(ctx context.Context) error {
			attrMap["rowCount"] = len(group)
			tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
			err := t.doApplyBulk(ctx, group, opts...)
			if err != nil {
				// We want to retry the entire request with the current group
				return err
			}
			// Shrink the group to just the entries that failed retryably.
			group = t.getApplyBulkRetries(group)
			if len(group) > 0 && len(idempotentRetryCodes) > 0 {
				// We have at least one mutation that needs to be retried.
				// Return an arbitrary error that is retryable according to callOptions.
				return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
			}
			return nil
		}, retryOptions...)
		if err != nil {
			return nil, err
		}
	}
	// Accumulate all of the errors into an array to return, interspersed with nils for successful
	// entries. The absence of any errors means we should return nil.
	var errs []error
	var foundErr bool
	for _, entry := range origEntries {
		if entry.Err != nil {
			foundErr = true
		}
		errs = append(errs, entry.Err)
	}
	if foundErr {
		return errs, nil
	}
	return nil, nil
}
// getApplyBulkRetries returns the entries that need to be retried | |||
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { | |||
var retryEntries []*entryErr | |||
for _, entry := range entries { | |||
err := entry.Err | |||
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { | |||
// There was an error and the entry is retryable. | |||
retryEntries = append(retryEntries, entry) | |||
} | |||
} | |||
return retryEntries | |||
} | |||
// doApplyBulk does the work of a single ApplyBulk invocation | |||
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { | |||
after := func(res proto.Message) { | |||
for _, o := range opts { | |||
o.after(res) | |||
} | |||
} | |||
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) | |||
for i, entryErr := range entryErrs { | |||
entries[i] = entryErr.Entry | |||
} | |||
req := &btpb.MutateRowsRequest{ | |||
TableName: t.c.fullTableName(t.table), | |||
AppProfileId: t.c.appProfile, | |||
Entries: entries, | |||
} | |||
stream, err := t.c.client.MutateRows(ctx, req) | |||
if err != nil { | |||
return err | |||
} | |||
for { | |||
res, err := stream.Recv() | |||
if err == io.EOF { | |||
break | |||
} | |||
if err != nil { | |||
return err | |||
} | |||
for i, entry := range res.Entries { | |||
s := entry.Status | |||
if s.Code == int32(codes.OK) { | |||
entryErrs[i].Err = nil | |||
} else { | |||
entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message) | |||
} | |||
} | |||
after(res) | |||
} | |||
return nil | |||
} | |||
// groupEntries groups entries into groups of a specified size without breaking up | |||
// individual entries. | |||
func groupEntries(entries []*entryErr, maxSize int) [][]*entryErr { | |||
var ( | |||
res [][]*entryErr | |||
start int | |||
gmuts int | |||
) | |||
addGroup := func(end int) { | |||
if end-start > 0 { | |||
res = append(res, entries[start:end]) | |||
start = end | |||
gmuts = 0 | |||
} | |||
} | |||
for i, e := range entries { | |||
emuts := len(e.Entry.Mutations) | |||
if gmuts+emuts > maxSize { | |||
addGroup(i) | |||
} | |||
gmuts += emuts | |||
} | |||
addGroup(len(entries)) | |||
return res | |||
} | |||
// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64
// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1
// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }
// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
// TruncateToMilliseconds truncates a Timestamp to millisecond granularity, | |||
// which is currently the only granularity supported. | |||
func (ts Timestamp) TruncateToMilliseconds() Timestamp { | |||
if ts == ServerTime { | |||
return ts | |||
} | |||
return ts - ts%1000 | |||
} | |||
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	req := &btpb.ReadModifyWriteRowRequest{
		TableName:    t.c.fullTableName(t.table),
		AppProfileId: t.c.appProfile,
		RowKey:       []byte(row),
		Rules:        m.ops,
	}
	// No retryOptions here: read-modify-write is non-idempotent (see the
	// ReadModifyWrite type doc), so a blind retry could double-apply.
	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
	if err != nil {
		return nil, err
	}
	if res.Row == nil {
		return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
	}
	// Convert the proto row into the public Row map, family by family.
	r := make(Row)
	for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family
		decodeFamilyProto(r, row, fam)
	}
	return r, nil
}
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
	ops []*btpb.ReadModifyWriteRule // rules applied in order
}
// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
// AppendValue appends a value to a specific cell's value. | |||
// If the cell is unset, it will be treated as an empty value. | |||
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { | |||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ | |||
FamilyName: family, | |||
ColumnQualifier: []byte(column), | |||
Rule: &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v}, | |||
}) | |||
} | |||
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, | |||
// and adds a value to it. If the cell is unset, it will be treated as zero. | |||
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite | |||
// operation will fail. | |||
func (m *ReadModifyWrite) Increment(family, column string, delta int64) { | |||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ | |||
FamilyName: family, | |||
ColumnQualifier: []byte(column), | |||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta}, | |||
}) | |||
} | |||
// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata, | |||
// if any, joined with internal metadata. | |||
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { | |||
mdCopy, _ := metadata.FromOutgoingContext(ctx) | |||
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) | |||
} | |||
// SampleRowKeys returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of
// the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces.
func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	var sampledRowKeys []string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		// Start from scratch on each attempt: a retried stream re-sends all keys.
		sampledRowKeys = nil
		req := &btpb.SampleRowKeysRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()
		stream, err := t.c.client.SampleRowKeys(ctx, req)
		if err != nil {
			return err
		}
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			key := string(res.RowKey)
			if key == "" {
				// NOTE(review): empty keys are dropped; presumably they mark
				// table boundaries rather than sample points — confirm with API docs.
				continue
			}
			sampledRowKeys = append(sampledRowKeys, key)
		}
		return nil
	}, retryOptions...)
	return sampledRowKeys, err
}
@@ -1,83 +0,0 @@ | |||
/* | |||
Copyright 2016 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bttest_test | |||
import ( | |||
"context" | |||
"fmt" | |||
"log" | |||
"cloud.google.com/go/bigtable" | |||
"cloud.google.com/go/bigtable/bttest" | |||
"google.golang.org/api/option" | |||
"google.golang.org/grpc" | |||
) | |||
func ExampleNewServer() { | |||
srv, err := bttest.NewServer("localhost:0") | |||
if err != nil { | |||
log.Fatalln(err) | |||
} | |||
ctx := context.Background() | |||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) | |||
if err != nil { | |||
log.Fatalln(err) | |||
} | |||
proj, instance := "proj", "instance" | |||
adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) | |||
if err != nil { | |||
log.Fatalln(err) | |||
} | |||
if err = adminClient.CreateTable(ctx, "example"); err != nil { | |||
log.Fatalln(err) | |||
} | |||
if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { | |||
log.Fatalln(err) | |||
} | |||
client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) | |||
if err != nil { | |||
log.Fatalln(err) | |||
} | |||
tbl := client.Open("example") | |||
mut := bigtable.NewMutation() | |||
mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) | |||
if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { | |||
log.Fatalln(err) | |||
} | |||
if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { | |||
log.Fatalln(err) | |||
} else { | |||
for _, column := range row["links"] { | |||
fmt.Println(column.Column) | |||
fmt.Println(string(column.Value)) | |||
} | |||
} | |||
// Output: | |||
// links:golang.org | |||
// Gophers! | |||
} |
@@ -1,173 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package main | |||
import ( | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/bigtable" | |||
"cloud.google.com/go/internal/testutil" | |||
"github.com/google/go-cmp/cmp" | |||
) | |||
func TestParseDuration(t *testing.T) { | |||
tests := []struct { | |||
in string | |||
// out or fail are mutually exclusive | |||
out time.Duration | |||
fail bool | |||
}{ | |||
{in: "10ms", out: 10 * time.Millisecond}, | |||
{in: "3s", out: 3 * time.Second}, | |||
{in: "60m", out: 60 * time.Minute}, | |||
{in: "12h", out: 12 * time.Hour}, | |||
{in: "7d", out: 168 * time.Hour}, | |||
{in: "", fail: true}, | |||
{in: "0", fail: true}, | |||
{in: "7ns", fail: true}, | |||
{in: "14mo", fail: true}, | |||
{in: "3.5h", fail: true}, | |||
{in: "106752d", fail: true}, // overflow | |||
} | |||
for _, tc := range tests { | |||
got, err := parseDuration(tc.in) | |||
if !tc.fail && err != nil { | |||
t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) | |||
continue | |||
} | |||
if tc.fail && err == nil { | |||
t.Errorf("parseDuration(%q) did not fail", tc.in) | |||
continue | |||
} | |||
if tc.fail { | |||
continue | |||
} | |||
if got != tc.out { | |||
t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) | |||
} | |||
} | |||
} | |||
func TestParseArgs(t *testing.T) { | |||
got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"}) | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
want := map[string]string{"a": "1", "b": "2"} | |||
if !testutil.Equal(got, want) { | |||
t.Fatalf("got %v, want %v", got, want) | |||
} | |||
if _, err := parseArgs([]string{"a1"}, []string{"a1"}); err == nil { | |||
t.Error("malformed: got nil, want error") | |||
} | |||
if _, err := parseArgs([]string{"a=1"}, []string{"b"}); err == nil { | |||
t.Error("invalid: got nil, want error") | |||
} | |||
} | |||
func TestParseColumnsFilter(t *testing.T) { | |||
tests := []struct { | |||
in string | |||
out bigtable.Filter | |||
fail bool | |||
}{ | |||
{ | |||
in: "columnA", | |||
out: bigtable.ColumnFilter("columnA"), | |||
}, | |||
{ | |||
in: "familyA:columnA", | |||
out: bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||
}, | |||
{ | |||
in: "columnA,columnB", | |||
out: bigtable.InterleaveFilters(bigtable.ColumnFilter("columnA"), bigtable.ColumnFilter("columnB")), | |||
}, | |||
{ | |||
in: "familyA:columnA,columnB", | |||
out: bigtable.InterleaveFilters( | |||
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||
bigtable.ColumnFilter("columnB"), | |||
), | |||
}, | |||
{ | |||
in: "columnA,familyB:columnB", | |||
out: bigtable.InterleaveFilters( | |||
bigtable.ColumnFilter("columnA"), | |||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||
), | |||
}, | |||
{ | |||
in: "familyA:columnA,familyB:columnB", | |||
out: bigtable.InterleaveFilters( | |||
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||
), | |||
}, | |||
{ | |||
in: "familyA:", | |||
out: bigtable.FamilyFilter("familyA"), | |||
}, | |||
{ | |||
in: ":columnA", | |||
out: bigtable.ColumnFilter("columnA"), | |||
}, | |||
{ | |||
in: ",:columnA,,familyB:columnB,", | |||
out: bigtable.InterleaveFilters( | |||
bigtable.ColumnFilter("columnA"), | |||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||
), | |||
}, | |||
{ | |||
in: "familyA:columnA:cellA", | |||
fail: true, | |||
}, | |||
{ | |||
in: "familyA::columnA", | |||
fail: true, | |||
}, | |||
} | |||
for _, tc := range tests { | |||
got, err := parseColumnsFilter(tc.in) | |||
if !tc.fail && err != nil { | |||
t.Errorf("parseColumnsFilter(%q) unexpectedly failed: %v", tc.in, err) | |||
continue | |||
} | |||
if tc.fail && err == nil { | |||
t.Errorf("parseColumnsFilter(%q) did not fail", tc.in) | |||
continue | |||
} | |||
if tc.fail { | |||
continue | |||
} | |||
var cmpOpts cmp.Options | |||
cmpOpts = | |||
append( | |||
cmpOpts, | |||
cmp.AllowUnexported(bigtable.ChainFilters([]bigtable.Filter{}...)), | |||
cmp.AllowUnexported(bigtable.InterleaveFilters([]bigtable.Filter{}...))) | |||
if !cmp.Equal(got, tc.out, cmpOpts) { | |||
t.Errorf("parseColumnsFilter(%q) = %v, want %v", tc.in, got, tc.out) | |||
} | |||
} | |||
} |
@@ -1,425 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. | |||
// Run "go generate" to regenerate. | |||
//go:generate go run cbt.go gcpolicy.go -o cbtdoc.go doc | |||
/* | |||
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to | |||
install the cbt tool, see the | |||
[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview). | |||
Usage: | |||
cbt [options] command [arguments] | |||
The commands are: | |||
count Count rows in a table | |||
createinstance Create an instance with an initial cluster | |||
createcluster Create a cluster in the configured instance | |||
createfamily Create a column family | |||
createtable Create a table | |||
updatecluster Update a cluster in the configured instance | |||
deleteinstance Delete an instance | |||
deletecluster Delete a cluster from the configured instance | |||
deletecolumn Delete all cells in a column | |||
deletefamily Delete a column family | |||
deleterow Delete a row | |||
deletetable Delete a table | |||
doc Print godoc-suitable documentation for cbt | |||
help Print help text | |||
listinstances List instances in a project | |||
listclusters List clusters in an instance | |||
lookup Read from a single row | |||
ls List tables and column families | |||
mddoc Print documentation for cbt in Markdown format | |||
read Read rows | |||
set Set value of a cell | |||
setgcpolicy Set the GC policy for a column family | |||
waitforreplication Block until all the completed writes have been replicated to all the clusters | |||
createtablefromsnapshot Create a table from a snapshot (snapshots alpha) | |||
createsnapshot Create a snapshot from a source table (snapshots alpha) | |||
listsnapshots List snapshots in a cluster (snapshots alpha) | |||
getsnapshot Get snapshot info (snapshots alpha) | |||
deletesnapshot Delete snapshot in a cluster (snapshots alpha) | |||
version Print the current cbt version | |||
createappprofile Creates app profile for an instance | |||
getappprofile Reads app profile for an instance | |||
listappprofile Lists app profile for an instance | |||
updateappprofile Updates app profile for an instance | |||
deleteappprofile Deletes app profile for an instance | |||
Use "cbt help <command>" for more information about a command. | |||
The options are: | |||
-project string | |||
project ID, if unset uses gcloud configured project | |||
-instance string | |||
Cloud Bigtable instance | |||
-creds string | |||
if set, use application credentials in this file | |||
Alpha features are not currently available to most Cloud Bigtable customers. The | |||
features might be changed in backward-incompatible ways and are not recommended | |||
for production use. They are not subject to any SLA or deprecation policy. | |||
Note: cbt does not support specifying arbitrary bytes on the command line for | |||
any value that Bigtable otherwise supports (e.g., row key, column qualifier, | |||
etc.). | |||
For convenience, values of the -project, -instance, -creds, | |||
-admin-endpoint and -data-endpoint flags may be specified in | |||
~/.cbtrc in this format: | |||
project = my-project-123 | |||
instance = my-instance | |||
creds = path-to-account-key.json | |||
admin-endpoint = hostname:port | |||
data-endpoint = hostname:port | |||
All values are optional, and all will be overridden by flags. | |||
Count rows in a table | |||
Usage: | |||
cbt count <table> | |||
Create an instance with an initial cluster | |||
Usage: | |||
cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type> | |||
instance-id Permanent, unique id for the instance | |||
display-name Description of the instance | |||
cluster-id Permanent, unique id for the cluster in the instance | |||
zone The zone in which to create the cluster | |||
num-nodes The number of nodes to create | |||
storage-type SSD or HDD | |||
Create a cluster in the configured instance | |||
Usage: | |||
cbt createcluster <cluster-id> <zone> <num-nodes> <storage type> | |||
cluster-id Permanent, unique id for the cluster in the instance | |||
zone The zone in which to create the cluster | |||
num-nodes The number of nodes to create | |||
storage-type SSD or HDD | |||
Create a column family | |||
Usage: | |||
cbt createfamily <table> <family> | |||
Create a table | |||
Usage: | |||
cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...] | |||
families: Column families and their associated GC policies. For gcpolicy, | |||
see "setgcpolicy". | |||
Example: families=family1:maxage=1w,family2:maxversions=1 | |||
splits: Row key to be used to initially split the table | |||
Update a cluster in the configured instance | |||
Usage: | |||
cbt updatecluster <cluster-id> [num-nodes=num-nodes] | |||
cluster-id Permanent, unique id for the cluster in the instance | |||
num-nodes The number of nodes to update to | |||
Delete an instance | |||
Usage: | |||
cbt deleteinstance <instance> | |||
Delete a cluster from the configured instance | |||
Usage: | |||
cbt deletecluster <cluster> | |||
Delete all cells in a column | |||
Usage: | |||
cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>] | |||
app-profile=<app profile id> The app profile id to use for the request | |||
Delete a column family | |||
Usage: | |||
cbt deletefamily <table> <family> | |||
Delete a row | |||
Usage: | |||
cbt deleterow <table> <row> [app-profile=<app profile id>] | |||
app-profile=<app profile id> The app profile id to use for the request | |||
Delete a table | |||
Usage: | |||
cbt deletetable <table> | |||
Print godoc-suitable documentation for cbt | |||
Usage: | |||
cbt doc | |||
Print help text | |||
Usage: | |||
cbt help [command] | |||
List instances in a project | |||
Usage: | |||
cbt listinstances | |||
List clusters in an instance | |||
Usage: | |||
cbt listclusters | |||
Read from a single row | |||
Usage: | |||
cbt lookup <table> <row> [columns=[family]:[qualifier],...] [cells-per-column=<n>] [app-profile=<app profile id>] | |||
columns=[family]:[qualifier],... Read only these columns, comma-separated | |||
cells-per-column=<n> Read only this many cells per column | |||
app-profile=<app profile id> The app profile id to use for the request | |||
List tables and column families | |||
Usage: | |||
cbt ls List tables | |||
cbt ls <table> List column families in <table> | |||
Print documentation for cbt in Markdown format | |||
Usage: | |||
cbt mddoc | |||
Read rows | |||
Usage: | |||
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [columns=[family]:[qualifier],...] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>] | |||
start=<row> Start reading at this row | |||
end=<row> Stop reading before this row | |||
prefix=<prefix> Read rows with this prefix | |||
regex=<regex> Read rows with keys matching this regex | |||
columns=[family]:[qualifier],... Read only these columns, comma-separated | |||
count=<n> Read only this many rows | |||
cells-per-column=<n> Read only this many cells per column | |||
app-profile=<app profile id> The app profile id to use for the request | |||
Set value of a cell | |||
Usage: | |||
cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ... | |||
app-profile=<app profile id> The app profile id to use for the request | |||
family:column=val[@ts] may be repeated to set multiple cells. | |||
ts is an optional integer timestamp. | |||
If it cannot be parsed, the `@ts` part will be | |||
interpreted as part of the value. | |||
Set the GC policy for a column family | |||
Usage: | |||
cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never) | |||
maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d") | |||
maxversions=<n> Maximum number of versions to preserve | |||
Block until all the completed writes have been replicated to all the clusters | |||
Usage: | |||
cbt waitforreplication <table> | |||
Create a table from a snapshot (snapshots alpha) | |||
Usage: | |||
cbt createtablefromsnapshot <table> <cluster> <snapshot> | |||
table The name of the table to create | |||
cluster The cluster where the snapshot is located | |||
snapshot The snapshot to restore | |||
Create a snapshot from a source table (snapshots alpha) | |||
Usage: | |||
cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>] | |||
[ttl=<d>] Lifespan of the snapshot (e.g. "1h", "4d") | |||
List snapshots in a cluster (snapshots alpha) | |||
Usage: | |||
cbt listsnapshots [<cluster>] | |||
Get snapshot info (snapshots alpha) | |||
Usage: | |||
cbt getsnapshot <cluster> <snapshot> | |||
Delete snapshot in a cluster (snapshots alpha) | |||
Usage: | |||
cbt deletesnapshot <cluster> <snapshot> | |||
Print the current cbt version | |||
Usage: | |||
cbt version | |||
Creates app profile for an instance | |||
Usage: | |||
usage: cbt createappprofile <instance-id> <profile-id> <description> (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] | |||
optional flags may be `force` | |||
Reads app profile for an instance | |||
Usage: | |||
cbt getappprofile <instance-id> <profile-id> | |||
Lists app profile for an instance | |||
Usage: | |||
cbt listappprofile <instance-id> | |||
Updates app profile for an instance | |||
Usage: | |||
usage: cbt updateappprofile <instance-id> <profile-id> <description>(route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] | |||
optional flags may be `force` | |||
Deletes app profile for an instance | |||
Usage: | |||
cbt deleteappprofile <instance-id> <profile-id> | |||
*/ | |||
package main |
@@ -1,215 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package main | |||
import ( | |||
"bytes" | |||
"errors" | |||
"fmt" | |||
"io" | |||
"strconv" | |||
"strings" | |||
"unicode" | |||
"cloud.google.com/go/bigtable" | |||
) | |||
// Parse a GC policy. Valid policies include | |||
// never | |||
// maxage = 5d | |||
// maxversions = 3 | |||
// maxage = 5d || maxversions = 3 | |||
// maxage=30d || (maxage=3d && maxversions=100) | |||
func parseGCPolicy(s string) (bigtable.GCPolicy, error) { | |||
if strings.TrimSpace(s) == "never" { | |||
return bigtable.NoGcPolicy(), nil | |||
} | |||
r := strings.NewReader(s) | |||
p, err := parsePolicyExpr(r) | |||
if err != nil { | |||
return nil, fmt.Errorf("invalid GC policy: %v", err) | |||
} | |||
tok, err := getToken(r) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if tok != "" { | |||
return nil, fmt.Errorf("invalid GC policy: want end of input, got %q", tok) | |||
} | |||
return p, nil | |||
} | |||
// expr ::= term (op term)*
// op   ::= "and" | "or" | "&&" | "||"
//
// All operators have equal precedence and associate to the left (the loop
// below folds terms into policy as it goes).
func parsePolicyExpr(r io.RuneScanner) (bigtable.GCPolicy, error) {
	policy, err := parsePolicyTerm(r)
	if err != nil {
		return nil, err
	}
	for {
		tok, err := getToken(r)
		if err != nil {
			return nil, err
		}
		// Map the operator token to the policy combinator it denotes.
		var f func(...bigtable.GCPolicy) bigtable.GCPolicy
		switch tok {
		case "and", "&&":
			f = bigtable.IntersectionPolicy
		case "or", "||":
			f = bigtable.UnionPolicy
		default:
			// Not an operator: push the token back for the caller and
			// return what we have.
			ungetToken(tok)
			return policy, nil
		}
		p2, err := parsePolicyTerm(r)
		if err != nil {
			return nil, err
		}
		policy = f(policy, p2)
	}
}
// term ::= "maxage" "=" duration | "maxversions" "=" int | "(" expr ")"
func parsePolicyTerm(r io.RuneScanner) (bigtable.GCPolicy, error) {
	tok, err := getToken(r)
	if err != nil {
		return nil, err
	}
	switch tok {
	case "":
		// End of input where a term was required.
		return nil, errors.New("empty GC policy term")
	case "maxage", "maxversions":
		if err := expectToken(r, "="); err != nil {
			return nil, err
		}
		tok2, err := getToken(r)
		if err != nil {
			return nil, err
		}
		if tok2 == "" {
			return nil, errors.New("expected a token after '='")
		}
		if tok == "maxage" {
			dur, err := parseDuration(tok2)
			if err != nil {
				return nil, err
			}
			return bigtable.MaxAgePolicy(dur), nil
		}
		// maxversions: the count must fit in 16 bits (bitSize 16 below).
		n, err := strconv.ParseUint(tok2, 10, 16)
		if err != nil {
			return nil, err
		}
		return bigtable.MaxVersionsPolicy(int(n)), nil
	case "(":
		// Parenthesized sub-expression.
		p, err := parsePolicyExpr(r)
		if err != nil {
			return nil, err
		}
		if err := expectToken(r, ")"); err != nil {
			return nil, err
		}
		return p, nil
	default:
		return nil, fmt.Errorf("unexpected token: %q", tok)
	}
}
func expectToken(r io.RuneScanner, want string) error { | |||
got, err := getToken(r) | |||
if err != nil { | |||
return err | |||
} | |||
if got != want { | |||
return fmt.Errorf("expected %q, saw %q", want, got) | |||
} | |||
return nil | |||
} | |||
// noToken is the sentinel for "no pushed-back token". The empty string is a
// valid token value (end of input), so "_" is used instead.
const noToken = "_" // empty token is valid, so use "_" instead
// If not noToken, getToken will return this instead of reading a new token
// from the input. This is package-level mutable state, so the tokenizer is
// not safe for concurrent use.
var ungotToken = noToken
// getToken extracts the first token from the input. Valid tokens include
// any sequence of letters and digits, and these symbols: &&, ||, =, ( and ).
// getToken returns ("", nil) at end of input.
func getToken(r io.RuneScanner) (string, error) {
	if ungotToken != noToken {
		// A token was pushed back via ungetToken; return it instead of
		// reading new input.
		t := ungotToken
		ungotToken = noToken
		return t, nil
	}
	var err error
	// Skip leading whitespace. Seeding c with a space makes the loop
	// condition trigger the first ReadRune.
	c := ' '
	for unicode.IsSpace(c) {
		c, _, err = r.ReadRune()
		if err == io.EOF {
			// End of input: the empty token.
			return "", nil
		}
		if err != nil {
			return "", err
		}
	}
	switch {
	case c == '=' || c == '(' || c == ')':
		// Single-rune symbol token.
		return string(c), nil
	case c == '&' || c == '|':
		// '&' and '|' must be doubled ("&&" / "||").
		c2, _, err := r.ReadRune()
		if err != nil && err != io.EOF {
			return "", err
		}
		if c != c2 {
			return "", fmt.Errorf("expected %c%c", c, c)
		}
		return string([]rune{c, c}), nil
	case unicode.IsLetter(c) || unicode.IsDigit(c):
		// Collect an alphanumeric token.
		var b bytes.Buffer
		for unicode.IsLetter(c) || unicode.IsDigit(c) {
			b.WriteRune(c)
			c, _, err = r.ReadRune()
			if err == io.EOF {
				break
			}
			if err != nil {
				return "", err
			}
		}
		// Push back the rune that terminated the token so the next call
		// sees it. (After EOF this call's error is deliberately ignored.)
		r.UnreadRune()
		return b.String(), nil
	default:
		return "", fmt.Errorf("bad rune %q", c)
	}
}
// "unget" a token so the next call to getToken will return it. | |||
func ungetToken(tok string) { | |||
if ungotToken != noToken { | |||
panic("ungetToken called twice") | |||
} | |||
ungotToken = tok | |||
} |
@@ -1,196 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package main | |||
import ( | |||
"strings" | |||
"testing" | |||
"time" | |||
"cloud.google.com/go/bigtable" | |||
"github.com/google/go-cmp/cmp" | |||
) | |||
// TestParseGCPolicy checks that well-formed policy strings parse to the
// expected GCPolicy trees, covering each atom, both operators (word and
// symbol forms), nesting, and associativity.
func TestParseGCPolicy(t *testing.T) {
	for _, test := range []struct {
		in   string
		want bigtable.GCPolicy
	}{
		{
			"never",
			bigtable.NoGcPolicy(),
		},
		{
			"maxage=3h",
			bigtable.MaxAgePolicy(3 * time.Hour),
		},
		{
			"maxversions=2",
			bigtable.MaxVersionsPolicy(2),
		},
		{
			"maxversions=2 and maxage=1h",
			bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
		},
		{
			// Redundant parentheses are allowed.
			"(((maxversions=2 and (maxage=1h))))",
			bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
		},
		{
			"maxversions=7 or maxage=8h",
			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
		},
		{
			// Whitespace around '=' and '||' is optional.
			"maxversions = 7||maxage = 8h",
			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
		},
		{
			"maxversions=7||maxage=8h",
			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
		},
		{
			"maxage=30d || (maxage=3d && maxversions=100)",
			bigtable.UnionPolicy(
				bigtable.MaxAgePolicy(30*24*time.Hour),
				bigtable.IntersectionPolicy(
					bigtable.MaxAgePolicy(3*24*time.Hour),
					bigtable.MaxVersionsPolicy(100))),
		},
		{
			"maxage=30d || (maxage=3d && maxversions=100) || maxversions=7",
			bigtable.UnionPolicy(
				bigtable.UnionPolicy(
					bigtable.MaxAgePolicy(30*24*time.Hour),
					bigtable.IntersectionPolicy(
						bigtable.MaxAgePolicy(3*24*time.Hour),
						bigtable.MaxVersionsPolicy(100))),
				bigtable.MaxVersionsPolicy(7)),
		},
		{
			// && and || have same precedence, left associativity
			"maxage=1h && maxage=2h || maxage=3h",
			bigtable.UnionPolicy(
				bigtable.IntersectionPolicy(
					bigtable.MaxAgePolicy(1*time.Hour),
					bigtable.MaxAgePolicy(2*time.Hour)),
				bigtable.MaxAgePolicy(3*time.Hour)),
		},
	} {
		got, err := parseGCPolicy(test.in)
		if err != nil {
			t.Errorf("%s: %v", test.in, err)
			continue
		}
		// The composite policy types have unexported fields, so cmp needs
		// explicit permission to compare them.
		if !cmp.Equal(got, test.want, cmp.AllowUnexported(bigtable.IntersectionPolicy(), bigtable.UnionPolicy())) {
			t.Errorf("%s: got %+v, want %+v", test.in, got, test.want)
		}
	}
}
func TestParseGCPolicyErrors(t *testing.T) { | |||
for _, in := range []string{ | |||
"", | |||
"a", | |||
"b = 1h", | |||
"c = 1", | |||
"maxage=1", // need duration | |||
"maxversions=1h", // need int | |||
"maxage", | |||
"maxversions", | |||
"never=never", | |||
"maxversions=1 && never", | |||
"(((maxage=1h))", | |||
"((maxage=1h)))", | |||
"maxage=30d || ((maxage=3d && maxversions=100)", | |||
"maxversions = 3 and", | |||
} { | |||
_, err := parseGCPolicy(in) | |||
if err == nil { | |||
t.Errorf("%s: got nil, want error", in) | |||
} | |||
} | |||
} | |||
// TestTokenizeGCPolicy checks that the tokenizer splits policy strings
// into the expected token sequences regardless of optional whitespace.
func TestTokenizeGCPolicy(t *testing.T) {
	for _, test := range []struct {
		in   string
		want []string
	}{
		{
			"maxage=5d",
			[]string{"maxage", "=", "5d"},
		},
		{
			"maxage = 5d",
			[]string{"maxage", "=", "5d"},
		},
		{
			"maxage=5d or maxversions=5",
			[]string{"maxage", "=", "5d", "or", "maxversions", "=", "5"},
		},
		{
			"maxage=5d || (maxversions=5)",
			[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
		},
		{
			"maxage=5d||( maxversions=5 )",
			[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
		},
	} {
		got, err := tokenizeGCPolicy(test.in)
		if err != nil {
			t.Errorf("%s: %v", test.in, err)
			continue
		}
		if diff := cmp.Diff(got, test.want); diff != "" {
			t.Errorf("%s: %s", test.in, diff)
		}
	}
}
func TestTokenizeGCPolicyErrors(t *testing.T) { | |||
for _, in := range []string{ | |||
"a &", | |||
"a & b", | |||
"a &x b", | |||
"a |", | |||
"a | b", | |||
"a |& b", | |||
"a % b", | |||
} { | |||
_, err := tokenizeGCPolicy(in) | |||
if err == nil { | |||
t.Errorf("%s: got nil, want error", in) | |||
} | |||
} | |||
} | |||
func tokenizeGCPolicy(s string) ([]string, error) { | |||
var tokens []string | |||
r := strings.NewReader(s) | |||
for { | |||
tok, err := getToken(r) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if tok == "" { | |||
break | |||
} | |||
tokens = append(tokens, tok) | |||
} | |||
return tokens, nil | |||
} |
@@ -1,52 +0,0 @@ | |||
// Copyright 2016 Google LLC | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
/* | |||
cbtemulator launches the in-memory Cloud Bigtable server on the given address. | |||
*/ | |||
package main | |||
import ( | |||
"flag" | |||
"fmt" | |||
"log" | |||
"cloud.google.com/go/bigtable/bttest" | |||
"google.golang.org/grpc" | |||
) | |||
// Command-line flags selecting the emulator's listen address.
var (
	host = flag.String("host", "localhost", "the address to bind to on the local machine")
	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)
const (
	// maxMsgSize bounds both received and sent gRPC message sizes.
	maxMsgSize = 256 * 1024 * 1024 // 256 MiB
)
func main() { | |||
grpc.EnableTracing = false | |||
flag.Parse() | |||
opts := []grpc.ServerOption{ | |||
grpc.MaxRecvMsgSize(maxMsgSize), | |||
grpc.MaxSendMsgSize(maxMsgSize), | |||
} | |||
srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port), opts...) | |||
if err != nil { | |||
log.Fatalf("failed to start emulator: %v", err) | |||
} | |||
fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) | |||
select {} | |||
} |
@@ -1,205 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
/* | |||
Loadtest does some load testing through the Go client library for Cloud Bigtable. | |||
*/ | |||
package main | |||
import ( | |||
"bytes" | |||
"context" | |||
"flag" | |||
"fmt" | |||
"log" | |||
"math/rand" | |||
"os" | |||
"os/signal" | |||
"sync" | |||
"sync/atomic" | |||
"time" | |||
"cloud.google.com/go/bigtable" | |||
"cloud.google.com/go/bigtable/internal/cbtconfig" | |||
"cloud.google.com/go/bigtable/internal/stat" | |||
"google.golang.org/api/option" | |||
"google.golang.org/grpc" | |||
) | |||
// Command-line flags controlling the load test, plus the shared clients
// populated in main.
var (
	runFor = flag.Duration("run_for", 5*time.Second,
		"how long to run the load test for; 0 to run forever until SIGTERM")
	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
	csvOutput = flag.String("csv_output", "",
		"output path for statistics in .csv format. If this file already exists it will be overwritten.")
	poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
	reqCount = flag.Int("req_count", 100, "number of concurrent requests")
	// Initialized in main after flag parsing.
	config      *cbtconfig.Config
	client      *bigtable.Client
	adminClient *bigtable.AdminClient
)
// main drives a mixed read/write load test against a freshly created
// scratch Bigtable table for -run_for, then prints latency statistics
// (and optionally writes them as CSV).
func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	// This tool takes no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}

	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options,
			option.WithGRPCConnectionPool(*poolSize),

			// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
			// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
			option.WithGRPCDialOption(grpc.WithBlock()))
	}

	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()

	// Create a scratch table with a single family "f" keeping one version per cell.
	log.Printf("Setting up scratch table...")
	tblConf := bigtable.TableConf{
		TableID:  *scratchTable,
		Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)},
	}
	if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)

	// Also delete the table on SIGTERM.
	// NOTE(review): signal.Notify below is registered for os.Interrupt
	// (SIGINT), not SIGTERM — confirm which signal is actually intended.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		s := <-c
		log.Printf("Caught %v, cleaning scratch table.", s)
		_ = adminClient.DeleteTable(context.Background(), *scratchTable)
		os.Exit(1)
	}()

	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	// Spawn one goroutine per operation; sem bounds concurrency at *reqCount.
	for time.Now().Before(stopTime) || *runFor == 0 {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			ok := true
			opStart := time.Now()
			// Shadows the package-level type name; points at either
			// &reads or &writes once the operation kind is chosen.
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()

			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

			// 50/50 split: cases 0-4 write, the remaining five values read.
			switch rand.Intn(10) {
			default:
				// read
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()

	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)
	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}
var allStats int64 // atomic | |||
type stats struct { | |||
mu sync.Mutex | |||
tries, ok int | |||
ds []time.Duration | |||
} | |||
func (s *stats) Record(ok bool, d time.Duration) { | |||
s.mu.Lock() | |||
s.tries++ | |||
if ok { | |||
s.ok++ | |||
} | |||
s.ds = append(s.ds, d) | |||
s.mu.Unlock() | |||
if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { | |||
log.Printf("Progress: done %d ops", n) | |||
} | |||
} |
@@ -1,155 +0,0 @@ | |||
/* | |||
Copyright 2016 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
/* | |||
Scantest does scan-related load testing against Cloud Bigtable. The logic here | |||
mimics a similar test written using the Java client. | |||
*/ | |||
package main | |||
import ( | |||
"bytes" | |||
"context" | |||
"flag" | |||
"fmt" | |||
"log" | |||
"math/rand" | |||
"os" | |||
"sync" | |||
"sync/atomic" | |||
"text/tabwriter" | |||
"time" | |||
"cloud.google.com/go/bigtable" | |||
"cloud.google.com/go/bigtable/internal/cbtconfig" | |||
"cloud.google.com/go/bigtable/internal/stat" | |||
) | |||
// Command-line flags and shared client for the scan test.
var (
	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")

	// config holds project/instance/credential settings loaded by
	// cbtconfig.Load plus any registered flags.
	config *cbtconfig.Config
	// client is initialized once in main and shared by the scan goroutines.
	client *bigtable.Client
)
// main runs concurrent ReadRows scans against an existing table for
// -run_for and reports latency and throughput statistics.
func main() {
	flag.Usage = func() {
		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
		flag.PrintDefaults()
	}

	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	// Exactly one positional argument: the table to scan.
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}

	table := flag.Arg(0)

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()

	log.Printf("Starting scan test... (run for %v)", *runFor)
	tbl := client.Open(table)
	sem := make(chan int, *numScans) // limit the number of requests happening at once
	var scans stats

	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			ok := true
			opStart := time.Now()
			defer func() {
				scans.Record(ok, time.Since(opStart))
			}()

			// Start at a random row key
			key := fmt.Sprintf("user%d", rand.Int63())
			limit := bigtable.LimitRows(int64(*rowLimit))
			// The callback discards rows; only scan timing is measured.
			noop := func(bigtable.Row) bool { return true }
			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
				log.Printf("Error during scan: %v", err)
				ok = false
			}
		}()
	}
	wg.Wait()

	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
		scans.ok, scans.tries, agg, throughputString(agg))
}
func throughputString(agg *stat.Aggregate) string { | |||
var buf bytes.Buffer | |||
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding | |||
rowLimitF := float64(*rowLimit) | |||
fmt.Fprintf( | |||
tw, | |||
"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", | |||
rowLimitF/agg.Max.Seconds(), | |||
rowLimitF/agg.Median.Seconds(), | |||
rowLimitF/agg.Min.Seconds()) | |||
tw.Flush() | |||
return buf.String() | |||
} | |||
var allStats int64 // atomic | |||
type stats struct { | |||
mu sync.Mutex | |||
tries, ok int | |||
ds []time.Duration | |||
} | |||
func (s *stats) Record(ok bool, d time.Duration) { | |||
s.mu.Lock() | |||
s.tries++ | |||
if ok { | |||
s.ok++ | |||
} | |||
s.ds = append(s.ds, d) | |||
s.mu.Unlock() | |||
if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { | |||
log.Printf("Progress: done %d ops", n) | |||
} | |||
} |
@@ -1,123 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
/* | |||
Package bigtable is an API to Google Cloud Bigtable. | |||
See https://cloud.google.com/bigtable/docs/ for general product documentation. | |||
See https://godoc.org/cloud.google.com/go for authentication, timeouts, | |||
connection pooling and similar aspects of this package. | |||
Setup and Credentials | |||
Use NewClient or NewAdminClient to create a client that can be used to access | |||
the data or admin APIs respectively. Both require credentials that have permission | |||
to access the Cloud Bigtable API. | |||
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials | |||
(https://developers.google.com/accounts/docs/application-default-credentials) | |||
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. | |||
To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. | |||
For instance, you can use service account credentials by visiting | |||
https://cloud.google.com/console/project/_/apiui/credential, | |||
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing | |||
jsonKey, err := ioutil.ReadFile(pathToKeyFile) | |||
... | |||
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. | |||
... | |||
client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) | |||
... | |||
Here, `google` means the golang.org/x/oauth2/google package | |||
and `option` means the google.golang.org/api/option package. | |||
Reading | |||
The principal way to read from a Bigtable is to use the ReadRows method on *Table. | |||
A RowRange specifies a contiguous portion of a table. A Filter may be provided through | |||
RowFilter to limit or transform the data that is returned. | |||
tbl := client.Open("mytable") | |||
... | |||
// Read all the rows starting with "com.google.", | |||
// but only fetch the columns in the "links" family. | |||
rr := bigtable.PrefixRange("com.google.") | |||
err := tbl.ReadRows(ctx, rr, func(r Row) bool { | |||
// do something with r | |||
return true // keep going | |||
}, bigtable.RowFilter(bigtable.FamilyFilter("links"))) | |||
... | |||
To read a single row, use the ReadRow helper method. | |||
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key | |||
... | |||
Writing | |||
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. | |||
The former expresses idempotent operations. | |||
The latter expresses non-idempotent operations and returns the new values of updated cells. | |||
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), | |||
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite | |||
methods on a Table. | |||
For instance, to set a couple of cells in a table, | |||
tbl := client.Open("mytable") | |||
mut := bigtable.NewMutation() | |||
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) | |||
mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) | |||
err := tbl.Apply(ctx, "com.google.cloud", mut) | |||
... | |||
To increment an encoded value in one cell, | |||
tbl := client.Open("mytable") | |||
rmw := bigtable.NewReadModifyWrite() | |||
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" | |||
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) | |||
... | |||
Retries | |||
If a read or write operation encounters a transient error it will be retried until a successful | |||
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where | |||
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls | |||
will not re-scan rows that have already been processed. | |||
*/ | |||
package bigtable // import "cloud.google.com/go/bigtable" | |||
// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
const (
	// Scope is the OAuth scope for Cloud Bigtable data operations.
	Scope = "https://www.googleapis.com/auth/bigtable.data"
	// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
	ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
	// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
	AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
	// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
	InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)

// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20180601"

// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
@@ -1,215 +0,0 @@ | |||
/* | |||
Copyright 2016 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bigtable | |||
import ( | |||
"context" | |||
"errors" | |||
"flag" | |||
"fmt" | |||
"strings" | |||
"time" | |||
"cloud.google.com/go/bigtable/bttest" | |||
"google.golang.org/api/option" | |||
"google.golang.org/grpc" | |||
) | |||
// legacyUseProd holds the deprecated -use_prod flag value in
// "proj,instance,table" form; empty means the flag was not supplied.
var legacyUseProd string

// integrationConfig collects the -it.* flag values used to construct an
// IntegrationEnv.
var integrationConfig IntegrationTestConfig

// init registers the integration-test command-line flags.
func init() {
	c := &integrationConfig

	flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator")
	flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port")
	flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port")
	flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test")
	flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use")
	flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use")
	flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create")

	// Backwards compat
	flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`)
}
// IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing
type IntegrationTestConfig struct {
	// UseProd selects the production service instead of the local emulator.
	UseProd bool
	// AdminEndpoint and DataEndpoint override the default API hosts when set.
	AdminEndpoint string
	DataEndpoint  string
	// Project, Instance, Cluster and Table identify the Bigtable resources
	// the test should use.
	Project  string
	Instance string
	Cluster  string
	Table    string
}
// IntegrationEnv represents a testing environment.
// The environment can be implemented using production or an emulator
type IntegrationEnv interface {
	// Config returns the configuration the environment was built from.
	Config() IntegrationTestConfig
	// NewAdminClient returns a connected table-admin client.
	NewAdminClient() (*AdminClient, error)
	// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
	NewInstanceAdminClient() (*InstanceAdminClient, error)
	// NewClient returns a connected data client.
	NewClient() (*Client, error)
	// Close releases any resources the environment holds.
	Close()
}
// NewIntegrationEnv creates a new environment based on the command line args | |||
func NewIntegrationEnv() (IntegrationEnv, error) { | |||
c := integrationConfig | |||
if legacyUseProd != "" { | |||
fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*") | |||
parts := strings.SplitN(legacyUseProd, ",", 3) | |||
c.UseProd = true | |||
c.Project = parts[0] | |||
c.Instance = parts[1] | |||
c.Table = parts[2] | |||
} | |||
if integrationConfig.UseProd { | |||
return NewProdEnv(c) | |||
} | |||
return NewEmulatedEnv(c) | |||
} | |||
// EmulatedEnv encapsulates the state of an emulator
type EmulatedEnv struct {
	// config is the resolved configuration, with emulator defaults filled in.
	config IntegrationTestConfig
	// server is the in-process bttest emulator backing this environment.
	server *bttest.Server
}
// NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
	// Large message-size limits so big-row tests do not trip gRPC defaults.
	srv, err := bttest.NewServer("localhost:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
	if err != nil {
		return nil, err
	}

	// Fill in fixed placeholder names when the caller supplied none.
	if config.Project == "" {
		config.Project = "project"
	}
	if config.Instance == "" {
		config.Instance = "instance"
	}
	if config.Table == "" {
		config.Table = "mytable"
	}
	// Both APIs are served by the single emulator listener.
	config.AdminEndpoint = srv.Addr
	config.DataEndpoint = srv.Addr

	env := &EmulatedEnv{
		config: config,
		server: srv,
	}
	return env, nil
}
// Close stops & cleans up the emulator
func (e *EmulatedEnv) Close() {
	e.server.Close()
}

// Config gets the config used to build this environment
func (e *EmulatedEnv) Config() IntegrationTestConfig {
	return e.config
}
// NewAdminClient builds a new connected admin client for this environment
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
	timeout := 20 * time.Second
	// NOTE(review): the cancel func from WithTimeout is discarded
	// (go vet lostcancel). The context is therefore never released before
	// the timeout elapses — confirm whether keeping it alive past the
	// blocking Dial below is intended.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		return nil, err
	}
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented.
// Callers are expected to treat a nil client (with nil error) as "unsupported".
func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	return nil, nil
}
// NewClient builds a new connected data client for this environment
func (e *EmulatedEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	// NOTE(review): cancel from WithTimeout is discarded here as well
	// (go vet lostcancel) — confirm this is intentional.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	// Raise per-call message-size caps to match the emulator's limits.
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
	if err != nil {
		return nil, err
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
// ProdEnv encapsulates the state necessary to connect to the external Bigtable service
type ProdEnv struct {
	// config holds the validated project/instance/table settings.
	config IntegrationTestConfig
}
// NewProdEnv builds the environment representation | |||
func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { | |||
if config.Project == "" { | |||
return nil, errors.New("Project not set") | |||
} | |||
if config.Instance == "" { | |||
return nil, errors.New("Instance not set") | |||
} | |||
if config.Table == "" { | |||
return nil, errors.New("Table not set") | |||
} | |||
return &ProdEnv{config}, nil | |||
} | |||
// Close is a no-op for production environments
func (e *ProdEnv) Close() {}

// Config gets the config used to build this environment
func (e *ProdEnv) Config() IntegrationTestConfig {
	return e.config
}
// NewAdminClient builds a new connected admin client for this environment | |||
func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { | |||
var clientOpts []option.ClientOption | |||
if endpoint := e.config.AdminEndpoint; endpoint != "" { | |||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||
} | |||
return NewAdminClient(context.Background(), e.config.Project, e.config.Instance, clientOpts...) | |||
} | |||
// NewInstanceAdminClient returns a new connected instance admin client for this environment | |||
func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { | |||
var clientOpts []option.ClientOption | |||
if endpoint := e.config.AdminEndpoint; endpoint != "" { | |||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||
} | |||
return NewInstanceAdminClient(context.Background(), e.config.Project, clientOpts...) | |||
} | |||
// NewClient builds a connected data client for this environment | |||
func (e *ProdEnv) NewClient() (*Client, error) { | |||
var clientOpts []option.ClientOption | |||
if endpoint := e.config.DataEndpoint; endpoint != "" { | |||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||
} | |||
return NewClient(context.Background(), e.config.Project, e.config.Instance, clientOpts...) | |||
} |
@@ -1,330 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bigtable | |||
import ( | |||
"fmt" | |||
"strings" | |||
"time" | |||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||
) | |||
// A Filter represents a row filter.
type Filter interface {
	// String returns a human-readable description of the filter.
	String() string
	// proto converts the filter to its wire representation.
	proto() *btpb.RowFilter
}
// ChainFilters returns a filter that applies a sequence of filters. | |||
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } | |||
type chainFilter struct { | |||
sub []Filter | |||
} | |||
func (cf chainFilter) String() string { | |||
var ss []string | |||
for _, sf := range cf.sub { | |||
ss = append(ss, sf.String()) | |||
} | |||
return "(" + strings.Join(ss, " | ") + ")" | |||
} | |||
func (cf chainFilter) proto() *btpb.RowFilter { | |||
chain := &btpb.RowFilter_Chain{} | |||
for _, sf := range cf.sub { | |||
chain.Filters = append(chain.Filters, sf.proto()) | |||
} | |||
return &btpb.RowFilter{ | |||
Filter: &btpb.RowFilter_Chain_{Chain: chain}, | |||
} | |||
} | |||
// InterleaveFilters returns a filter that applies a set of filters in parallel | |||
// and interleaves the results. | |||
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } | |||
type interleaveFilter struct { | |||
sub []Filter | |||
} | |||
func (ilf interleaveFilter) String() string { | |||
var ss []string | |||
for _, sf := range ilf.sub { | |||
ss = append(ss, sf.String()) | |||
} | |||
return "(" + strings.Join(ss, " + ") + ")" | |||
} | |||
func (ilf interleaveFilter) proto() *btpb.RowFilter { | |||
inter := &btpb.RowFilter_Interleave{} | |||
for _, sf := range ilf.sub { | |||
inter.Filters = append(inter.Filters, sf.proto()) | |||
} | |||
return &btpb.RowFilter{ | |||
Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, | |||
} | |||
} | |||
// RowKeyFilter returns a filter that matches cells from rows whose | |||
// key matches the provided RE2 pattern. | |||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } | |||
type rowKeyFilter string | |||
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } | |||
func (rkf rowKeyFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}} | |||
} | |||
// FamilyFilter returns a filter that matches cells whose family name | |||
// matches the provided RE2 pattern. | |||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } | |||
type familyFilter string | |||
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } | |||
func (ff familyFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}} | |||
} | |||
// ColumnFilter returns a filter that matches cells whose column name | |||
// matches the provided RE2 pattern. | |||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } | |||
type columnFilter string | |||
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } | |||
func (cf columnFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}} | |||
} | |||
// ValueFilter returns a filter that matches cells whose value | |||
// matches the provided RE2 pattern. | |||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||
func ValueFilter(pattern string) Filter { return valueFilter(pattern) } | |||
type valueFilter string | |||
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } | |||
func (vf valueFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}} | |||
} | |||
// LatestNFilter returns a filter that matches the most recent N cells in each column. | |||
func LatestNFilter(n int) Filter { return latestNFilter(n) } | |||
type latestNFilter int32 | |||
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } | |||
func (lnf latestNFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}} | |||
} | |||
// StripValueFilter returns a filter that replaces each value with the empty string. | |||
func StripValueFilter() Filter { return stripValueFilter{} } | |||
type stripValueFilter struct{} | |||
func (stripValueFilter) String() string { return "strip_value()" } | |||
func (stripValueFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}} | |||
} | |||
// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero | |||
// time means no bound. | |||
// The timestamp will be truncated to millisecond granularity. | |||
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { | |||
trf := timestampRangeFilter{} | |||
if !startTime.IsZero() { | |||
trf.startTime = Time(startTime) | |||
} | |||
if !endTime.IsZero() { | |||
trf.endTime = Time(endTime) | |||
} | |||
return trf | |||
} | |||
// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds, | |||
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound. | |||
// The timestamp will be truncated to millisecond granularity. | |||
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter { | |||
return timestampRangeFilter{startTime, endTime} | |||
} | |||
type timestampRangeFilter struct { | |||
startTime Timestamp | |||
endTime Timestamp | |||
} | |||
func (trf timestampRangeFilter) String() string { | |||
return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime) | |||
} | |||
func (trf timestampRangeFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{ | |||
Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{ | |||
StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()), | |||
EndTimestampMicros: int64(trf.endTime.TruncateToMilliseconds()), | |||
}, | |||
}} | |||
} | |||
// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single | |||
// family, as specified by an inclusive start qualifier and exclusive end qualifier. | |||
func ColumnRangeFilter(family, start, end string) Filter { | |||
return columnRangeFilter{family, start, end} | |||
} | |||
type columnRangeFilter struct { | |||
family string | |||
start string | |||
end string | |||
} | |||
func (crf columnRangeFilter) String() string { | |||
return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) | |||
} | |||
func (crf columnRangeFilter) proto() *btpb.RowFilter { | |||
r := &btpb.ColumnRange{FamilyName: crf.family} | |||
if crf.start != "" { | |||
r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)} | |||
} | |||
if crf.end != "" { | |||
r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)} | |||
} | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}} | |||
} | |||
// ValueRangeFilter returns a filter that matches cells with values that fall within | |||
// the given range, as specified by an inclusive start value and exclusive end value. | |||
func ValueRangeFilter(start, end []byte) Filter { | |||
return valueRangeFilter{start, end} | |||
} | |||
type valueRangeFilter struct { | |||
start []byte | |||
end []byte | |||
} | |||
func (vrf valueRangeFilter) String() string { | |||
return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) | |||
} | |||
func (vrf valueRangeFilter) proto() *btpb.RowFilter { | |||
r := &btpb.ValueRange{} | |||
if vrf.start != nil { | |||
r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start} | |||
} | |||
if vrf.end != nil { | |||
r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end} | |||
} | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}} | |||
} | |||
// ConditionFilter returns a filter that evaluates to one of two possible filters depending | |||
// on whether or not the given predicate filter matches at least one cell. | |||
// If the matched filter is nil then no results will be returned. | |||
// IMPORTANT NOTE: The predicate filter does not execute atomically with the | |||
// true and false filters, which may lead to inconsistent or unexpected | |||
// results. Additionally, condition filters have poor performance, especially | |||
// when filters are set for the false condition. | |||
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter { | |||
return conditionFilter{predicateFilter, trueFilter, falseFilter} | |||
} | |||
type conditionFilter struct { | |||
predicateFilter Filter | |||
trueFilter Filter | |||
falseFilter Filter | |||
} | |||
func (cf conditionFilter) String() string { | |||
return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter) | |||
} | |||
func (cf conditionFilter) proto() *btpb.RowFilter { | |||
var tf *btpb.RowFilter | |||
var ff *btpb.RowFilter | |||
if cf.trueFilter != nil { | |||
tf = cf.trueFilter.proto() | |||
} | |||
if cf.falseFilter != nil { | |||
ff = cf.falseFilter.proto() | |||
} | |||
return &btpb.RowFilter{ | |||
Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{ | |||
PredicateFilter: cf.predicateFilter.proto(), | |||
TrueFilter: tf, | |||
FalseFilter: ff, | |||
}}} | |||
} | |||
// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells. | |||
func CellsPerRowOffsetFilter(n int) Filter { | |||
return cellsPerRowOffsetFilter(n) | |||
} | |||
type cellsPerRowOffsetFilter int32 | |||
func (cof cellsPerRowOffsetFilter) String() string { | |||
return fmt.Sprintf("cells_per_row_offset(%d)", cof) | |||
} | |||
func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}} | |||
} | |||
// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row. | |||
func CellsPerRowLimitFilter(n int) Filter { | |||
return cellsPerRowLimitFilter(n) | |||
} | |||
type cellsPerRowLimitFilter int32 | |||
func (clf cellsPerRowLimitFilter) String() string { | |||
return fmt.Sprintf("cells_per_row_limit(%d)", clf) | |||
} | |||
func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}} | |||
} | |||
// RowSampleFilter returns a filter that matches a row with a probability of p (must be in the interval (0, 1)). | |||
func RowSampleFilter(p float64) Filter { | |||
return rowSampleFilter(p) | |||
} | |||
type rowSampleFilter float64 | |||
func (rsf rowSampleFilter) String() string { | |||
return fmt.Sprintf("filter(%f)", rsf) | |||
} | |||
func (rsf rowSampleFilter) proto() *btpb.RowFilter { | |||
return &btpb.RowFilter{Filter: &btpb.RowFilter_RowSampleFilter{RowSampleFilter: float64(rsf)}} | |||
} |
@@ -1,167 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bigtable | |||
import ( | |||
"fmt" | |||
"strings" | |||
"time" | |||
durpb "github.com/golang/protobuf/ptypes/duration" | |||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||
) | |||
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
type GCPolicy interface {
	// String returns a human-readable rendering of the policy.
	String() string
	// proto converts the policy into its admin-API protobuf form.
	proto() *bttdpb.GcRule
}
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. | |||
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } | |||
type intersectionPolicy struct { | |||
sub []GCPolicy | |||
} | |||
func (ip intersectionPolicy) String() string { | |||
var ss []string | |||
for _, sp := range ip.sub { | |||
ss = append(ss, sp.String()) | |||
} | |||
return "(" + strings.Join(ss, " && ") + ")" | |||
} | |||
func (ip intersectionPolicy) proto() *bttdpb.GcRule { | |||
inter := &bttdpb.GcRule_Intersection{} | |||
for _, sp := range ip.sub { | |||
inter.Rules = append(inter.Rules, sp.proto()) | |||
} | |||
return &bttdpb.GcRule{ | |||
Rule: &bttdpb.GcRule_Intersection_{Intersection: inter}, | |||
} | |||
} | |||
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. | |||
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } | |||
type unionPolicy struct { | |||
sub []GCPolicy | |||
} | |||
func (up unionPolicy) String() string { | |||
var ss []string | |||
for _, sp := range up.sub { | |||
ss = append(ss, sp.String()) | |||
} | |||
return "(" + strings.Join(ss, " || ") + ")" | |||
} | |||
func (up unionPolicy) proto() *bttdpb.GcRule { | |||
union := &bttdpb.GcRule_Union{} | |||
for _, sp := range up.sub { | |||
union.Rules = append(union.Rules, sp.proto()) | |||
} | |||
return &bttdpb.GcRule{ | |||
Rule: &bttdpb.GcRule_Union_{Union: union}, | |||
} | |||
} | |||
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell | |||
// except for the most recent n. | |||
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } | |||
type maxVersionsPolicy int | |||
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } | |||
func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { | |||
return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}} | |||
} | |||
// MaxAgePolicy returns a GC policy that applies to all cells | |||
// older than the given age. | |||
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } | |||
type maxAgePolicy time.Duration | |||
var units = []struct { | |||
d time.Duration | |||
suffix string | |||
}{ | |||
{24 * time.Hour, "d"}, | |||
{time.Hour, "h"}, | |||
{time.Minute, "m"}, | |||
} | |||
func (ma maxAgePolicy) String() string { | |||
d := time.Duration(ma) | |||
for _, u := range units { | |||
if d%u.d == 0 { | |||
return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) | |||
} | |||
} | |||
return fmt.Sprintf("age() > %d", d/time.Microsecond) | |||
} | |||
func (ma maxAgePolicy) proto() *bttdpb.GcRule { | |||
// This doesn't handle overflows, etc. | |||
// Fix this if people care about GC policies over 290 years. | |||
ns := time.Duration(ma).Nanoseconds() | |||
return &bttdpb.GcRule{ | |||
Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{ | |||
Seconds: ns / 1e9, | |||
Nanos: int32(ns % 1e9), | |||
}}, | |||
} | |||
} | |||
type noGCPolicy struct{} | |||
func (n noGCPolicy) String() string { return "" } | |||
func (n noGCPolicy) proto() *bttdpb.GcRule { return &bttdpb.GcRule{Rule: nil} } | |||
// NoGcPolicy applies to all cells setting maxage and maxversions to nil implies no gc policies | |||
func NoGcPolicy() GCPolicy { return noGCPolicy{} } | |||
// GCRuleToString converts the given GcRule proto to a user-visible string. | |||
func GCRuleToString(rule *bttdpb.GcRule) string { | |||
if rule == nil { | |||
return "<never>" | |||
} | |||
switch r := rule.Rule.(type) { | |||
case *bttdpb.GcRule_MaxNumVersions: | |||
return MaxVersionsPolicy(int(r.MaxNumVersions)).String() | |||
case *bttdpb.GcRule_MaxAge: | |||
return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() | |||
case *bttdpb.GcRule_Intersection_: | |||
return joinRules(r.Intersection.Rules, " && ") | |||
case *bttdpb.GcRule_Union_: | |||
return joinRules(r.Union.Rules, " || ") | |||
default: | |||
return "" | |||
} | |||
} | |||
func joinRules(rules []*bttdpb.GcRule, sep string) string { | |||
var chunks []string | |||
for _, r := range rules { | |||
chunks = append(chunks, GCRuleToString(r)) | |||
} | |||
return "(" + strings.Join(chunks, sep) + ")" | |||
} |
@@ -1,46 +0,0 @@ | |||
/* | |||
Copyright 2017 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package bigtable | |||
import ( | |||
"testing" | |||
"time" | |||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||
) | |||
func TestGcRuleToString(t *testing.T) { | |||
intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour)) | |||
var tests = []struct { | |||
proto *bttdpb.GcRule | |||
want string | |||
}{ | |||
{MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"}, | |||
{MaxVersionsPolicy(5).proto(), "versions() > 5"}, | |||
{intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"}, | |||
{UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(), | |||
"((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"}, | |||
} | |||
for _, test := range tests { | |||
got := GCRuleToString(test.proto) | |||
if got != test.want { | |||
t.Errorf("got gc rule string: %v, wanted: %v", got, test.want) | |||
} | |||
} | |||
} |
@@ -1,262 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. | |||
package cbtconfig | |||
import ( | |||
"bufio" | |||
"bytes" | |||
"crypto/tls" | |||
"crypto/x509" | |||
"encoding/json" | |||
"flag" | |||
"fmt" | |||
"io/ioutil" | |||
"log" | |||
"os" | |||
"os/exec" | |||
"path/filepath" | |||
"runtime" | |||
"strings" | |||
"time" | |||
"golang.org/x/oauth2" | |||
"google.golang.org/grpc/credentials" | |||
) | |||
// Config represents a cbt configuration: the values gathered from flags and
// the .cbtrc file, plus credentials derived from them by CheckFlags/SetFromGcloud.
type Config struct {
	Project, Instance string // required
	Creds             string // optional: path to application credentials
	AdminEndpoint     string // optional: overrides the admin API endpoint
	DataEndpoint      string // optional: overrides the data API endpoint
	CertFile          string // optional: CA bundle for TLS
	UserAgent         string // optional
	TokenSource       oauth2.TokenSource               // derived by SetFromGcloud
	TLSCreds          credentials.TransportCredentials // derived from CertFile by CheckFlags
}
// RequiredFlags describes the flag requirements for a cbt command.
// It is a bitmask: combine values with |.
type RequiredFlags uint

const (
	// NoneRequired specifies that no flags are required.
	NoneRequired RequiredFlags = 0
	// ProjectRequired specifies that the -project flag is required.
	// (iota is 1 on this line, so the value is 1<<1 == 2.)
	ProjectRequired RequiredFlags = 1 << iota
	// InstanceRequired specifies that the -instance flag is required (1<<2 == 4).
	InstanceRequired
	// ProjectAndInstanceRequired specifies that both -project and -instance are required.
	ProjectAndInstanceRequired = ProjectRequired | InstanceRequired
)
// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
// Each flag's default is the config's current value, so values already loaded
// (e.g. from .cbtrc) survive unless overridden on the command line.
func (c *Config) RegisterFlags() {
	flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project")
	flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
	flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
	flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint")
	flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint")
	flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file")
	flag.StringVar(&c.UserAgent, "user-agent", c.UserAgent, "Override the user agent string")
}
// CheckFlags checks that the required config values are set.
// As side effects it builds TLSCreds from CertFile (when set) and, when any
// flag is required, fills in missing values from gcloud via SetFromGcloud.
func (c *Config) CheckFlags(required RequiredFlags) error {
	var missing []string
	if c.CertFile != "" {
		// Build transport credentials from the user-supplied CA bundle.
		b, err := ioutil.ReadFile(c.CertFile)
		if err != nil {
			return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err)
		}
		cp := x509.NewCertPool()
		if !cp.AppendCertsFromPEM(b) {
			return fmt.Errorf("Failed to append certificates from %s", c.CertFile)
		}
		c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp})
	}
	if required != NoneRequired {
		// NOTE(review): the error returned by SetFromGcloud is discarded; a
		// gcloud failure only surfaces indirectly through the missing-flag
		// check below. Confirm whether it should be propagated instead.
		c.SetFromGcloud()
	}
	if required&ProjectRequired != 0 && c.Project == "" {
		missing = append(missing, "-project")
	}
	if required&InstanceRequired != 0 && c.Instance == "" {
		missing = append(missing, "-instance")
	}
	if len(missing) > 0 {
		return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
	}
	return nil
}
// Filename returns the filename consulted for standard configuration:
// ".cbtrc" in the user's home directory.
func Filename() string {
	// os.UserHomeDir resolves the platform-appropriate home directory
	// (%USERPROFILE% on Windows, $HOME elsewhere), addressing the old
	// "might need tweaking for Windows" TODO. Fall back to $HOME so
	// Unix behavior is unchanged if the lookup fails.
	home, err := os.UserHomeDir()
	if err != nil {
		home = os.Getenv("HOME")
	}
	return filepath.Join(home, ".cbtrc")
}
// Load loads a .cbtrc file. | |||
// If the file is not present, an empty config is returned. | |||
func Load() (*Config, error) { | |||
filename := Filename() | |||
data, err := ioutil.ReadFile(filename) | |||
if err != nil { | |||
// silent fail if the file isn't there | |||
if os.IsNotExist(err) { | |||
return &Config{}, nil | |||
} | |||
return nil, fmt.Errorf("Reading %s: %v", filename, err) | |||
} | |||
c := new(Config) | |||
s := bufio.NewScanner(bytes.NewReader(data)) | |||
for s.Scan() { | |||
line := s.Text() | |||
i := strings.Index(line, "=") | |||
if i < 0 { | |||
return nil, fmt.Errorf("Bad line in %s: %q", filename, line) | |||
} | |||
key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) | |||
switch key { | |||
default: | |||
return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) | |||
case "project": | |||
c.Project = val | |||
case "instance": | |||
c.Instance = val | |||
case "creds": | |||
c.Creds = val | |||
case "admin-endpoint": | |||
c.AdminEndpoint = val | |||
case "data-endpoint": | |||
c.DataEndpoint = val | |||
case "cert-file": | |||
c.CertFile = val | |||
case "user-agent": | |||
c.UserAgent = val | |||
} | |||
} | |||
return c, s.Err() | |||
} | |||
// GcloudCredential holds gcloud credential information. | |||
type GcloudCredential struct { | |||
AccessToken string `json:"access_token"` | |||
Expiry time.Time `json:"token_expiry"` | |||
} | |||
// Token creates an oauth2 token using gcloud credentials. | |||
func (cred *GcloudCredential) Token() *oauth2.Token { | |||
return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry} | |||
} | |||
// GcloudConfig holds gcloud configuration values.
// It mirrors the JSON shape produced by `gcloud config config-helper`
// with the format string used in SetFromGcloud.
type GcloudConfig struct {
	Configuration struct {
		Properties struct {
			Core struct {
				Project string `json:"project"` // active gcloud project ID
			} `json:"core"`
		} `json:"properties"`
	} `json:"configuration"`
	Credential GcloudCredential `json:"credential"` // current access token and expiry
}
// GcloudCmdTokenSource holds the comamnd arguments. It is only intended to be set by the program. | |||
// TODO(deklerk) Can this be unexported? | |||
type GcloudCmdTokenSource struct { | |||
Command string | |||
Args []string | |||
} | |||
// Token implements the oauth2.TokenSource interface | |||
func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) { | |||
gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return gcloudConfig.Credential.Token(), nil | |||
} | |||
// LoadGcloudConfig retrieves the gcloud configuration values we need use via the | |||
// 'config-helper' command | |||
func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) { | |||
out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output() | |||
if err != nil { | |||
return nil, fmt.Errorf("Could not retrieve gcloud configuration") | |||
} | |||
var gcloudConfig GcloudConfig | |||
if err := json.Unmarshal(out, &gcloudConfig); err != nil { | |||
return nil, fmt.Errorf("Could not parse gcloud configuration") | |||
} | |||
return &gcloudConfig, nil | |||
} | |||
// SetFromGcloud retrieves and sets any missing config values from the gcloud
// configuration if possible.
// Side effects: it may set the GOOGLE_APPLICATION_CREDENTIALS environment
// variable (when -creds was given) and logs informational messages about
// which values fall back to gcloud.
func (c *Config) SetFromGcloud() error {
	if c.Creds == "" {
		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
		if c.Creds == "" {
			log.Printf("-creds flag unset, will use gcloud credential")
		}
	} else {
		// Propagate the explicit creds file to libraries that read the env var.
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
	}
	if c.Project == "" {
		log.Printf("-project flag unset, will use gcloud active project")
	}
	// Nothing missing: no need to shell out to gcloud at all.
	if c.Creds != "" && c.Project != "" {
		return nil
	}
	gcloudCmd := "gcloud"
	if runtime.GOOS == "windows" {
		gcloudCmd = gcloudCmd + ".cmd"
	}
	gcloudCmdArgs := []string{"config", "config-helper",
		"--format=json(configuration.properties.core.project,credential)"}
	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
	if err != nil {
		return err
	}
	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
		log.Printf("gcloud active project is \"%s\"",
			gcloudConfig.Configuration.Properties.Core.Project)
		c.Project = gcloudConfig.Configuration.Properties.Core.Project
	}
	if c.Creds == "" {
		// No creds file: refresh tokens on demand by re-running gcloud.
		c.TokenSource = oauth2.ReuseTokenSource(
			gcloudConfig.Credential.Token(),
			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
	}
	return nil
}
@@ -1,110 +0,0 @@ | |||
/* | |||
Copyright 2016 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
// Package gax is a snapshot from github.com/googleapis/gax-go/v2 with minor modifications. | |||
package gax | |||
import ( | |||
"time" | |||
"google.golang.org/grpc/codes" | |||
) | |||
// CallOption is a generic interface for modifying the behavior of outbound calls.
type CallOption interface {
	// Resolve applies this option to the given CallSettings.
	Resolve(*CallSettings)
}
type callOptions []CallOption | |||
// Resolve resolves all call options individually. | |||
func (opts callOptions) Resolve(s *CallSettings) *CallSettings { | |||
for _, opt := range opts { | |||
opt.Resolve(s) | |||
} | |||
return s | |||
} | |||
// CallSettings encapsulates the call settings for a particular API call.
type CallSettings struct {
	Timeout       time.Duration
	RetrySettings RetrySettings
}

// RetrySettings are per-call configurable settings for retrying upon transient failure.
type RetrySettings struct {
	// RetryCodes maps each gRPC status code to whether it is retryable.
	RetryCodes      map[codes.Code]bool
	BackoffSettings BackoffSettings
}

// BackoffSettings are parameters to the exponential backoff algorithm for retrying.
type BackoffSettings struct {
	// DelayTimeoutSettings governs the sleep between retry attempts.
	DelayTimeoutSettings MultipliableDuration
	// RPCTimeoutSettings governs per-attempt timeouts.
	RPCTimeoutSettings   MultipliableDuration
}

// MultipliableDuration defines parameters for backoff settings.
type MultipliableDuration struct {
	Initial    time.Duration // starting value
	Max        time.Duration // cap; growth stops here
	Multiplier float64       // growth factor applied after each attempt
}
// Resolve merges the receiver CallSettings into the given CallSettings. | |||
func (w CallSettings) Resolve(s *CallSettings) { | |||
s.Timeout = w.Timeout | |||
s.RetrySettings = w.RetrySettings | |||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) | |||
for key, value := range w.RetrySettings.RetryCodes { | |||
s.RetrySettings.RetryCodes[key] = value | |||
} | |||
} | |||
type withRetryCodes []codes.Code | |||
func (w withRetryCodes) Resolve(s *CallSettings) { | |||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool) | |||
for _, code := range w { | |||
s.RetrySettings.RetryCodes[code] = true | |||
} | |||
} | |||
// WithRetryCodes sets a list of Google API canonical error codes upon which a | |||
// retry should be attempted. | |||
func WithRetryCodes(retryCodes []codes.Code) CallOption { | |||
return withRetryCodes(retryCodes) | |||
} | |||
// withDelayTimeoutSettings is the CallOption form of the retry-delay backoff parameters.
type withDelayTimeoutSettings MultipliableDuration

func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
	s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
}

// WithDelayTimeoutSettings specifies:
// - The initial delay time, between the completion of
//   the first failed request and the initiation of the first retrying
//   request.
// - The multiplier by which to increase the delay time between the
//   completion of failed requests, and the initiation of the subsequent
//   retrying request.
// - The maximum delay time, between requests. When this
//   value is reached, `RetryDelayMultiplier` will no longer be used to
//   increase delay time.
// Note: initial and max are time.Duration values, not raw millisecond counts.
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
}
@@ -1,87 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
// Package gax is a snapshot from github.com/googleapis/gax-go/v2 with minor modifications. | |||
package gax | |||
import ( | |||
"context" | |||
"log" | |||
"math/rand" | |||
"os" | |||
"time" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/codes" | |||
) | |||
// Logger is a logger that logs to stderr. It is consulted by invokeWithRetry
// before each retry; set it to nil to silence retry logging.
var Logger = log.New(os.Stderr, "", log.LstdFlags)
// APICall is a user defined call stub. It receives the call context and
// returns the attempt's error (nil on success).
type APICall func(context.Context) error
// scaleDuration returns the product of a and mult. | |||
func scaleDuration(a time.Duration, mult float64) time.Duration { | |||
ns := float64(a) * mult | |||
return time.Duration(ns) | |||
} | |||
// invokeWithRetry calls stub using an exponential backoff retry mechanism | |||
// based on the values provided in callSettings. | |||
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { | |||
retrySettings := callSettings.RetrySettings | |||
backoffSettings := callSettings.RetrySettings.BackoffSettings | |||
delay := backoffSettings.DelayTimeoutSettings.Initial | |||
for { | |||
// If the deadline is exceeded... | |||
if ctx.Err() != nil { | |||
return ctx.Err() | |||
} | |||
err := stub(ctx) | |||
code := grpc.Code(err) | |||
if code == codes.OK { | |||
return nil | |||
} | |||
if !retrySettings.RetryCodes[code] { | |||
return err | |||
} | |||
// Sleep a random amount up to the current delay | |||
d := time.Duration(rand.Int63n(int64(delay))) | |||
delayCtx, _ := context.WithTimeout(ctx, delay) | |||
if Logger != nil { | |||
Logger.Printf("Retryable error: %v, retrying in %v", err, d) | |||
} | |||
<-delayCtx.Done() | |||
delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) | |||
if delay > backoffSettings.DelayTimeoutSettings.Max { | |||
delay = backoffSettings.DelayTimeoutSettings.Max | |||
} | |||
} | |||
} | |||
// Invoke calls stub with a child of context modified by the specified options. | |||
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { | |||
settings := &CallSettings{} | |||
callOptions(opts).Resolve(settings) | |||
if len(settings.RetrySettings.RetryCodes) > 0 { | |||
return invokeWithRetry(ctx, stub, *settings) | |||
} | |||
return stub(ctx) | |||
} |
@@ -1,49 +0,0 @@ | |||
/* | |||
Copyright 2015 Google LLC | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
*/ | |||
package gax | |||
import ( | |||
"context" | |||
"testing" | |||
"time" | |||
"google.golang.org/grpc/codes" | |||
"google.golang.org/grpc/status" | |||
) | |||
// TestRandomizedDelays drives Invoke with an always-failing stub and observes
// the pacing between retry attempts.
// NOTE(review): the "Slept too long" branch uses t.Logf, not t.Errorf, so an
// overlong sleep never fails the test — confirm whether that is intentional.
// NOTE(review): the cancel func from context.WithDeadline is discarded, which
// `go vet` (lostcancel) flags; harmless here since the test returns quickly.
func TestRandomizedDelays(t *testing.T) {
	max := 200 * time.Millisecond
	settings := []CallOption{
		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5),
	}
	deadline := time.Now().Add(1 * time.Second)
	ctx, _ := context.WithDeadline(context.Background(), deadline)
	var invokeTime time.Time
	_ = Invoke(ctx, func(childCtx context.Context) error {
		// Keep failing, make sure we never slept more than max (plus a fudge factor)
		if !invokeTime.IsZero() {
			if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
				t.Logf("Slept too long. Got: %v, want: %v", got, max)
			}
		}
		invokeTime = time.Now()
		// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
		errf := status.Errorf
		return errf(codes.Unavailable, "")
	}, settings...)
}