// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "reflect"

    "cloud.google.com/go/internal/trace"
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
    t *Table

    // SkipInvalidRows causes rows containing invalid data to be silently
    // ignored. The default value is false, which causes the entire request to
    // fail if there is an attempt to insert an invalid row.
    SkipInvalidRows bool

    // IgnoreUnknownValues causes values not matching the schema to be ignored.
    // The default value is false, which causes records containing such values
    // to be treated as invalid records.
    IgnoreUnknownValues bool

    // TableTemplateSuffix allows Uploaders to create tables automatically.
    //
    // Experimental: this option may be modified or removed in future versions,
    // regardless of any other documented package stability guarantees.
    //
    // When you specify a suffix, the table you upload data to
    // will be used as a template for creating a new table, with the same schema,
    // called <table> + <suffix>.
    //
    // More information is available at
    // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
    TableTemplateSuffix string
}

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Uploader() *Uploader {
    return &Uploader{t: t}
}
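
// exampleUploaderPut is an illustrative sketch, not part of the original file:
// it shows the typical way to obtain an Uploader from a table and stream a
// slice of structs with Put. The dataset/table names and the item type are
// assumptions made for the example.
func exampleUploaderPut(ctx context.Context, client *Client) error {
    u := client.Dataset("my_dataset").Table("my_table").Uploader()

    // Rows can be plain structs or struct pointers; a schema is inferred and a
    // StructSaver is created for each element (see Put below).
    type item struct {
        Name  string
        Count int
    }
    rows := []*item{{Name: "a", Count: 1}, {Name: "b", Count: 2}}
    return u.Put(ctx, rows)
}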

// Put uploads one or more rows to the BigQuery service.
//
// If src is a ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) {
    ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put")
    defer func() { trace.EndSpan(ctx, err) }()

    savers, err := valueSavers(src)
    if err != nil {
        return err
    }
    return u.putMulti(ctx, savers)
}
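
// examplePutMultiError is an illustrative sketch, not part of the original
// file: it shows how a caller might inspect the PutMultiError that Put returns
// when some rows fail to insert. The uploader and rows are assumed to come
// from elsewhere; as the doc comment on Put advises, ctx should carry a
// timeout so a persistently failing call does not run indefinitely.
func examplePutMultiError(ctx context.Context, u *Uploader, rows []ValueSaver) {
    if err := u.Put(ctx, rows); err != nil {
        if pme, ok := err.(PutMultiError); ok {
            for _, rie := range pme {
                // Each RowInsertionError reports the failed row's index,
                // its insert ID, and the per-row errors from the service.
                fmt.Printf("row %d (insert ID %q): %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
            }
            return
        }
        fmt.Println("Put failed:", err)
    }
}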

func valueSavers(src interface{}) ([]ValueSaver, error) {
    saver, ok, err := toValueSaver(src)
    if err != nil {
        return nil, err
    }
    if ok {
        return []ValueSaver{saver}, nil
    }
    srcVal := reflect.ValueOf(src)
    if srcVal.Kind() != reflect.Slice {
        return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
    }
    var savers []ValueSaver
    for i := 0; i < srcVal.Len(); i++ {
        s := srcVal.Index(i).Interface()
        saver, ok, err := toValueSaver(s)
        if err != nil {
            return nil, err
        }
        if !ok {
            return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
        }
        savers = append(savers, saver)
    }
    return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
    if _, ok := x.(StructSaver); ok {
        return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
    }
    var insertID string
    // Handle StructSavers specially so we can infer the schema if necessary.
    if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
        x = ss.Struct
        insertID = ss.InsertID
        // Fall through so we can infer the schema.
    }
    if saver, ok := x.(ValueSaver); ok {
        return saver, ok, nil
    }
    v := reflect.ValueOf(x)
    // Support Put with []interface{}
    if v.Kind() == reflect.Interface {
        v = v.Elem()
    }
    if v.Kind() == reflect.Ptr {
        v = v.Elem()
    }
    if v.Kind() != reflect.Struct {
        return nil, false, nil
    }
    schema, err := inferSchemaReflectCached(v.Type())
    if err != nil {
        return nil, false, err
    }
    return &StructSaver{
        Struct:   x,
        InsertID: insertID,
        Schema:   schema,
    }, true, nil
}
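
// exampleStructSaver is an illustrative sketch, not part of the original file:
// it shows passing a *StructSaver to Put when the caller wants to control the
// schema and insert ID explicitly. Note that toValueSaver above rejects a
// StructSaver passed by value; a pointer must be used. The item type and
// insert ID are assumptions made for the example.
func exampleStructSaver(ctx context.Context, u *Uploader) error {
    type item struct {
        Name  string
        Count int
    }
    schema, err := InferSchema(item{})
    if err != nil {
        return err
    }
    saver := &StructSaver{
        Struct:   item{Name: "n1", Count: 7},
        Schema:   schema,
        InsertID: "row-1", // a stable insert ID lets BigQuery deduplicate retried rows
    }
    return u.Put(ctx, saver)
}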

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
    req, err := u.newInsertRequest(src)
    if err != nil {
        return err
    }
    if req == nil {
        return nil
    }
    call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
    call = call.Context(ctx)
    setClientHeader(call.Header())
    var res *bq.TableDataInsertAllResponse
    err = runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    })
    if err != nil {
        return err
    }
    return handleInsertErrors(res.InsertErrors, req.Rows)
}

func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
    if savers == nil { // If there are no rows, do nothing.
        return nil, nil
    }
    req := &bq.TableDataInsertAllRequest{
        TemplateSuffix:      u.TableTemplateSuffix,
        IgnoreUnknownValues: u.IgnoreUnknownValues,
        SkipInvalidRows:     u.SkipInvalidRows,
    }
    for _, saver := range savers {
        row, insertID, err := saver.Save()
        if err != nil {
            return nil, err
        }
        if insertID == "" {
            insertID = randomIDFn()
        }
        m := make(map[string]bq.JsonValue)
        for k, v := range row {
            m[k] = bq.JsonValue(v)
        }
        req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
            InsertId: insertID,
            Json:     m,
        })
    }
    return req, nil
}

func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
    if len(ierrs) == 0 {
        return nil
    }
    var errs PutMultiError
    for _, e := range ierrs {
        // An index equal to len(rows) is also out of range, so reject it here
        // rather than panic on the lookup below.
        if int(e.Index) >= len(rows) {
            return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
        }
        rie := RowInsertionError{
            InsertID: rows[e.Index].InsertId,
            RowIndex: int(e.Index),
        }
        for _, errp := range e.Errors {
            rie.Errors = append(rie.Errors, bqToError(errp))
        }
        errs = append(errs, rie)
    }
    return errs
}