// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage

import (
    "context"
    "fmt"
    "time"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
type BigQueryStorageCallOptions struct {
    CreateReadSession             []gax.CallOption
    ReadRows                      []gax.CallOption
    BatchCreateReadSessionStreams []gax.CallOption
    FinalizeStream                []gax.CallOption
    SplitReadStream               []gax.CallOption
}

func defaultBigQueryStorageClientOptions() []option.ClientOption {
    return []option.ClientOption{
        option.WithEndpoint("bigquerystorage.googleapis.com:443"),
        option.WithScopes(DefaultAuthScopes()...),
    }
}

func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
    retry := map[[2]string][]gax.CallOption{
        {"default", "idempotent"}: {
            gax.WithRetry(func() gax.Retryer {
                return gax.OnCodes([]codes.Code{
                    codes.DeadlineExceeded,
                    codes.Unavailable,
                }, gax.Backoff{
                    Initial:    100 * time.Millisecond,
                    Max:        60000 * time.Millisecond,
                    Multiplier: 1.3,
                })
            }),
        },
    }
    return &BigQueryStorageCallOptions{
        CreateReadSession:             retry[[2]string{"default", "idempotent"}],
        ReadRows:                      retry[[2]string{"default", "idempotent"}],
        BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
        FinalizeStream:                retry[[2]string{"default", "idempotent"}],
        SplitReadStream:               retry[[2]string{"default", "idempotent"}],
    }
}

// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
    // The connection to the service.
    conn *grpc.ClientConn

    // The gRPC API client.
    bigQueryStorageClient storagepb.BigQueryStorageClient

    // The call options for this service.
    CallOptions *BigQueryStorageCallOptions

    // The x-goog-* metadata to be sent with each request.
    xGoogMetadata metadata.MD
}

// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
    conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
    if err != nil {
        return nil, err
    }
    c := &BigQueryStorageClient{
        conn:        conn,
        CallOptions: defaultBigQueryStorageCallOptions(),

        bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
    }
    c.setGoogleClientInfo()
    return c, nil
}
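
// Illustrative usage sketch (not generated code): constructing a client from a
// calling package and releasing its gRPC connection when done. It assumes this
// package is imported under the name "storage"; values and error handling are
// placeholders.
//
//   ctx := context.Background()
//   c, err := storage.NewBigQueryStorageClient(ctx)
//   if err != nil {
//       // TODO: handle error.
//   }
//   defer c.Close()
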
// Connection returns the client's connection to the API service.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
    return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryStorageClient) Close() error {
    return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", versionGo()}, keyval...)
    kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
    opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
    var resp *storagepb.ReadSession
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}
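
// Illustrative usage sketch (not generated code), continuing the example above:
// building a CreateReadSessionRequest and opening a session. The project,
// dataset, and table identifiers are placeholders, and RequestedStreams is an
// arbitrary example value; field names follow the v1beta1 request messages.
//
//   req := &storagepb.CreateReadSessionRequest{
//       TableReference: &storagepb.TableReference{
//           ProjectId: "my-project",
//           DatasetId: "my_dataset",
//           TableId:   "my_table",
//       },
//       Parent:           "projects/my-project",
//       RequestedStreams: 1,
//   }
//   session, err := c.CreateReadSession(ctx, req)
//   if err != nil {
//       // TODO: handle error.
//   }
//   // session.Streams lists the streams to read with ReadRows.
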
// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
    opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
    var resp storagepb.BigQueryStorage_ReadRowsClient
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}
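
// Illustrative usage sketch (not generated code), continuing the example above:
// draining one of the session's streams until the server ends it. It assumes a
// "session" from a prior CreateReadSession call and that the caller imports the
// standard library "io" package.
//
//   req := &storagepb.ReadRowsRequest{
//       ReadPosition: &storagepb.StreamPosition{
//           Stream: session.Streams[0],
//       },
//   }
//   stream, err := c.ReadRows(ctx, req)
//   if err != nil {
//       // TODO: handle error.
//   }
//   for {
//       resp, err := stream.Recv()
//       if err == io.EOF {
//           break // All rows assigned to this stream have been read.
//       }
//       if err != nil {
//           // TODO: handle error.
//       }
//       _ = resp // Decode the rows (for example, Avro-encoded blocks) as appropriate.
//   }
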
// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
    opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
    var resp *storagepb.BatchCreateReadSessionStreamsResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
    opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        _, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    return err
}
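
// Illustrative usage sketch (not generated code), continuing the example above:
// finalizing a stream when scaling a job down. The stream must still be drained
// with ReadRows until end of stream, as described in the method comment.
//
//   err := c.FinalizeStream(ctx, &storagepb.FinalizeStreamRequest{
//       Stream: session.Streams[0],
//   })
//   if err != nil {
//       // TODO: handle error.
//   }
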
// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
    opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
    var resp *storagepb.SplitReadStreamResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}
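
// Illustrative usage sketch (not generated code), continuing the example above:
// splitting a stream so two workers can share its remaining rows. The response
// accessors for the primary and remainder streams are assumptions based on the
// v1beta1 response message.
//
//   resp, err := c.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
//       OriginalStream: session.Streams[0],
//   })
//   if err != nil {
//       // TODO: handle error.
//   }
//   _ = resp.GetPrimaryStream()   // Continues from the original stream's read position.
//   _ = resp.GetRemainderStream() // Covers the rows not allocated to the primary stream.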