/*
Copyright 2017 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spanner

import (
    "bytes"
    "io"
    "log"
    "sync/atomic"
    "time"

    "cloud.google.com/go/internal/protostruct"
    proto "github.com/golang/protobuf/proto"
    proto3 "github.com/golang/protobuf/ptypes/struct"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    sppb "google.golang.org/genproto/googleapis/spanner/v1"
    "google.golang.org/grpc/codes"
)

// streamingReceiver is the interface for receiving data from a client side
// stream.
type streamingReceiver interface {
    Recv() (*sppb.PartialResultSet, error)
}

// errEarlyReadEnd returns the error for a read that finishes while the gRPC
// stream is still active.
func errEarlyReadEnd() error {
    return spannerErrorf(codes.FailedPrecondition, "read completed with active stream")
}

// stream is the internal fault tolerant method for streaming data from
// Cloud Spanner.
func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), setTimestamp func(time.Time), release func(error)) *RowIterator {
    ctx, cancel := context.WithCancel(ctx)
    ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.RowIterator")
    return &RowIterator{
        streamd:      newResumableStreamDecoder(ctx, rpc),
        rowd:         &partialResultSetDecoder{},
        setTimestamp: setTimestamp,
        release:      release,
        cancel:       cancel,
    }
}

// RowIterator is an iterator over Rows.
type RowIterator struct {
    // The plan for the query. Available after RowIterator.Next returns
    // iterator.Done if QueryWithStats was called.
    QueryPlan *sppb.QueryPlan

    // Execution statistics for the query. Available after RowIterator.Next
    // returns iterator.Done if QueryWithStats was called.
    QueryStats map[string]interface{}

    streamd      *resumableStreamDecoder
    rowd         *partialResultSetDecoder
    setTimestamp func(time.Time)
    release      func(error)
    cancel       func()
    err          error
    rows         []*Row
}

// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls
// will return Done.
func (r *RowIterator) Next() (*Row, error) {
    if r.err != nil {
        return nil, r.err
    }
    for len(r.rows) == 0 && r.streamd.next() {
        prs := r.streamd.get()
        if prs.Stats != nil {
            r.QueryPlan = prs.Stats.QueryPlan
            r.QueryStats = protostruct.DecodeToMap(prs.Stats.QueryStats)
        }
        r.rows, r.err = r.rowd.add(prs)
        if r.err != nil {
            return nil, r.err
        }
        if !r.rowd.ts.IsZero() && r.setTimestamp != nil {
            r.setTimestamp(r.rowd.ts)
            r.setTimestamp = nil
        }
    }
    if len(r.rows) > 0 {
        row := r.rows[0]
        r.rows = r.rows[1:]
        return row, nil
    }
    if err := r.streamd.lastErr(); err != nil {
        r.err = toSpannerError(err)
    } else if !r.rowd.done() {
        r.err = errEarlyReadEnd()
    } else {
        r.err = iterator.Done
    }
    return nil, r.err
}

// Do calls the provided function once in sequence for each row in the
// iteration. If the function returns a non-nil error, Do immediately returns
// that error.
//
// If there are no rows in the iterator, Do will return nil without calling
// the provided function.
//
// Do always calls Stop on the iterator.
func (r *RowIterator) Do(f func(r *Row) error) error {
    defer r.Stop()
    for {
        row, err := r.Next()
        switch err {
        case iterator.Done:
            return nil
        case nil:
            if err = f(row); err != nil {
                return err
            }
        default:
            return err
        }
    }
}
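
// An illustrative sketch of a typical Do call from client code. It assumes a
// *Client named client and a table Singers with a STRING column Name, neither
// of which is defined in this file:
//
//	iter := client.Single().Query(ctx, NewStatement("SELECT Name FROM Singers"))
//	err := iter.Do(func(row *Row) error {
//		var name string
//		if err := row.Column(0, &name); err != nil {
//			return err
//		}
//		log.Println(name)
//		return nil
//	})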

// Stop terminates the iteration. It should be called after you finish using
// the iterator.
func (r *RowIterator) Stop() {
    if r.streamd != nil {
        defer traceEndSpan(r.streamd.ctx, r.err)
    }
    if r.cancel != nil {
        r.cancel()
    }
    if r.release != nil {
        r.release(r.err)
        if r.err == nil {
            r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop")
        }
        r.release = nil
    }
}
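
// For illustration, a sketch of the manual loop that Do wraps: callers who
// prefer an explicit loop pair Next with a deferred Stop and break on
// iterator.Done (client and stmt are assumed, as above):
//
//	iter := client.Single().Query(ctx, stmt)
//	defer iter.Stop()
//	for {
//		row, err := iter.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		// process row
//	}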

// partialResultQueue implements a simple FIFO queue. The zero value is a
// valid queue.
type partialResultQueue struct {
    q     []*sppb.PartialResultSet
    first int
    last  int
    n     int // number of elements in queue
}

// empty reports whether the partialResultQueue is empty.
func (q *partialResultQueue) empty() bool {
    return q.n == 0
}

// errEmptyQueue returns the error for dequeuing an empty queue.
func errEmptyQueue() error {
    return spannerErrorf(codes.OutOfRange, "empty partialResultQueue")
}

// peekLast returns the last item in partialResultQueue; if the queue is
// empty, it returns an error.
func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) {
    if q.empty() {
        return nil, errEmptyQueue()
    }
    return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil
}

// push adds an item to the tail of partialResultQueue.
func (q *partialResultQueue) push(r *sppb.PartialResultSet) {
    if q.q == nil {
        q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */)
    }
    if q.n == cap(q.q) {
        buf := make([]*sppb.PartialResultSet, cap(q.q)*2)
        for i := 0; i < q.n; i++ {
            buf[i] = q.q[(q.first+i)%cap(q.q)]
        }
        q.q = buf
        q.first = 0
        q.last = q.n
    }
    q.q[q.last] = r
    q.last = (q.last + 1) % cap(q.q)
    q.n++
}

// pop removes an item from the head of partialResultQueue and returns it.
func (q *partialResultQueue) pop() *sppb.PartialResultSet {
    if q.n == 0 {
        return nil
    }
    r := q.q[q.first]
    q.q[q.first] = nil
    q.first = (q.first + 1) % cap(q.q)
    q.n--
    return r
}

// clear empties partialResultQueue.
func (q *partialResultQueue) clear() {
    *q = partialResultQueue{}
}

// dump retrieves all items from partialResultQueue and returns them in a
// slice. It is used only in tests.
func (q *partialResultQueue) dump() []*sppb.PartialResultSet {
    var dq []*sppb.PartialResultSet
    for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) {
        dq = append(dq, q.q[i])
    }
    return dq
}
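
// examplePartialResultQueue is an illustrative, hypothetical helper (not used
// by the package) sketching the ring-buffer semantics above: the zero value
// is usable directly, push doubles the backing slice once the initial
// capacity of 8 is full, and pop drains items in insertion order.
func examplePartialResultQueue() {
    var q partialResultQueue // zero value is a valid, empty queue
    for i := 0; i < 10; i++ { // ten pushes force one grow of the ring buffer
        q.push(&sppb.PartialResultSet{})
    }
    for !q.empty() {
        _ = q.pop() // items come back in the order they were pushed
    }
}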

// resumableStreamDecoderState encodes resumableStreamDecoder's status. See
// also the comments for resumableStreamDecoder.next.
type resumableStreamDecoderState int

const (
    unConnected         resumableStreamDecoderState = iota // 0
    queueingRetryable                                      // 1
    queueingUnretryable                                    // 2
    aborted                                                // 3
    finished                                               // 4
)

// resumableStreamDecoder provides a resumable interface for receiving
// sppb.PartialResultSet(s) from a given query wrapped by
// resumableStreamDecoder.rpc().
type resumableStreamDecoder struct {
    // state is the current status of resumableStreamDecoder; see also the
    // comments for resumableStreamDecoder.next.
    state resumableStreamDecoderState

    // stateWitness, when non-nil, is called to observe state changes.
    // It is used for testing.
    stateWitness func(resumableStreamDecoderState)

    // ctx is the caller's context, used to cancel or time out next().
    ctx context.Context

    // rpc is a factory of streamingReceiver, which might resume a previous
    // stream from the point encoded in restartToken. rpc is always a wrapper
    // of a Cloud Spanner query which is resumable.
    rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)

    // stream is the current RPC streaming receiver.
    stream streamingReceiver

    // q buffers received yet undecoded partial results.
    q partialResultQueue

    // bytesBetweenResumeTokens is a proxy for the byte size of the
    // PartialResultSets being queued between two resume tokens. Once
    // bytesBetweenResumeTokens exceeds maxBytesBetweenResumeTokens,
    // resumableStreamDecoder goes into the queueingUnretryable state.
    bytesBetweenResumeTokens int32

    // maxBytesBetweenResumeTokens is the maximum number of bytes that can be
    // buffered between two resume tokens. It is always copied atomically from
    // the global maxBytesBetweenResumeTokens.
    maxBytesBetweenResumeTokens int32

    // np is the next sppb.PartialResultSet ready to be returned to the
    // caller of resumableStreamDecoder.get().
    np *sppb.PartialResultSet

    // resumeToken stores the resume token that resumableStreamDecoder has
    // last revealed to the caller.
    resumeToken []byte

    // retryCount is the number of retries that have been carried out so far.
    retryCount int

    // err is the last error resumableStreamDecoder has encountered so far.
    err error

    // backoff computes delays between retries.
    backoff exponentialBackoff
}

// newResumableStreamDecoder creates a new resumableStreamDecoder instance.
// Parameter rpc should be a function that creates a new stream beginning at
// the restartToken if non-nil.
func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder {
    return &resumableStreamDecoder{
        ctx: ctx,
        rpc: rpc,
        maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
        backoff:                     defaultBackoff,
    }
}

// changeState performs a state transition for resumableStreamDecoder.
func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) {
    if d.state == queueingRetryable && d.state != target {
        // Reset bytesBetweenResumeTokens because it is only meaningful (and
        // only changes) in the queueingRetryable state.
        d.bytesBetweenResumeTokens = 0
    }
    d.state = target
    if d.stateWitness != nil {
        d.stateWitness(target)
    }
}

// isNewResumeToken reports whether the observed resume token differs from
// the one the server returned last time.
func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool {
    if rt == nil {
        return false
    }
    return !bytes.Equal(rt, d.resumeToken)
}

// next advances to the next available partial result set. If there is an
// error or no more results, it returns false; call lastErr to determine
// whether an error was encountered. The state machine below, which next()
// implements, governs resumableStreamDecoder. Note that state transitions
// can be triggered only by RPC activity.
/*
State transitions:

    unConnected         -> unConnected          rpc() fails retryable
    unConnected         -> aborted              rpc() fails unretryable / ctx times out or is cancelled
    unConnected         -> queueingRetryable    rpc() succeeds
    queueingRetryable   -> queueingRetryable    recv() succeeds
    queueingRetryable   -> unConnected          recv() fails retryable with valid ctx
    queueingRetryable   -> queueingUnretryable  queue size exceeds threshold
    queueingRetryable   -> finished             recv() returns EOF
    queueingRetryable   -> aborted              recv() fails unretryable
    queueingUnretryable -> queueingUnretryable  recv() succeeds
    queueingUnretryable -> queueingRetryable    pop() returns resume token
    queueingUnretryable -> finished             recv() returns EOF
    queueingUnretryable -> aborted              recv() fails
*/

var (
    // maxBytesBetweenResumeTokens is the maximum number of bytes that
    // resumableStreamDecoder in queueingRetryable state can use to queue
    // PartialResultSets before entering the queueingUnretryable state.
    maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024)
)
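
// exampleShrinkResumeTokenThreshold is an illustrative, hypothetical helper
// (not used by the package). Because newResumableStreamDecoder reads the
// threshold with atomic.LoadInt32, a test can lower it race-free to force the
// queueingUnretryable path quickly.
func exampleShrinkResumeTokenThreshold() {
    old := atomic.LoadInt32(&maxBytesBetweenResumeTokens)
    // Decoders created after this point exceed the threshold after buffering
    // a single PartialResultSet, since any result is larger than one byte.
    atomic.StoreInt32(&maxBytesBetweenResumeTokens, 1)
    defer atomic.StoreInt32(&maxBytesBetweenResumeTokens, old)
    // ... run a streaming read here ...
}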

func (d *resumableStreamDecoder) next() bool {
    for {
        select {
        case <-d.ctx.Done():
            // Do the context check here so that even if gRPC fails to do
            // so, resumableStreamDecoder can still break the loop as
            // expected.
            d.err = errContextCanceled(d.ctx, d.err)
            d.changeState(aborted)
        default:
        }
        switch d.state {
        case unConnected:
            // If no gRPC stream is available, try to initiate one.
            if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil {
                if isRetryable(d.err) {
                    d.doBackOff()
                    // Be explicit about the state transition, although the
                    // state doesn't actually change. State transitions are
                    // triggered only by RPC activity, regardless of whether
                    // the state actually changes.
                    d.changeState(unConnected)
                    continue
                }
                d.changeState(aborted)
                continue
            }
            d.resetBackOff()
            d.changeState(queueingRetryable)
            continue
        case queueingRetryable:
            fallthrough
        case queueingUnretryable:
            last, err := d.q.peekLast()
            if err != nil {
                // Only an empty receiving queue causes peekLast to return an
                // error; in that case, try to receive from the stream.
                d.tryRecv()
                continue
            }
            if d.isNewResumeToken(last.ResumeToken) {
                // Got a new resume token, so return buffered
                // sppb.PartialResultSets to the caller.
                d.np = d.q.pop()
                if d.q.empty() {
                    d.bytesBetweenResumeTokens = 0
                    // The new resume token was just popped from the queue;
                    // record it.
                    d.resumeToken = d.np.ResumeToken
                    d.changeState(queueingRetryable)
                }
                return true
            }
            if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable {
                d.changeState(queueingUnretryable)
                continue
            }
            if d.state == queueingUnretryable {
                // When no resume token has been observed, yield
                // sppb.PartialResultSets to the caller only in the
                // queueingUnretryable state.
                d.np = d.q.pop()
                return true
            }
            // Needs to receive more from the gRPC stream until a new resume
            // token is observed.
            d.tryRecv()
            continue
        case aborted:
            // Discard all pending items because none of them should be
            // yielded to the caller.
            d.q.clear()
            return false
        case finished:
            // If the query has finished, check whether there are still
            // buffered messages.
            if d.q.empty() {
                // No buffered PartialResultSet.
                return false
            }
            // Although the query has finished, there are still buffered
            // PartialResultSets.
            d.np = d.q.pop()
            return true
        default:
            log.Printf("Unexpected resumableStreamDecoder.state: %v", d.state)
            return false
        }
    }
}

// tryRecv attempts to receive a PartialResultSet from the gRPC stream.
func (d *resumableStreamDecoder) tryRecv() {
    var res *sppb.PartialResultSet
    if res, d.err = d.stream.Recv(); d.err != nil {
        if d.err == io.EOF {
            d.err = nil
            d.changeState(finished)
            return
        }
        if isRetryable(d.err) && d.state == queueingRetryable {
            d.err = nil
            // Discard all queue items (none have resume tokens).
            d.q.clear()
            d.stream = nil
            d.changeState(unConnected)
            d.doBackOff()
            return
        }
        d.changeState(aborted)
        return
    }
    d.q.push(res)
    if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) {
        // Adjust d.bytesBetweenResumeTokens.
        d.bytesBetweenResumeTokens += int32(proto.Size(res))
    }
    d.resetBackOff()
    d.changeState(d.state)
}

// resetBackOff clears the internal retry counter of resumableStreamDecoder
// so that the next exponential backoff will start from a fresh state.
func (d *resumableStreamDecoder) resetBackOff() {
    d.retryCount = 0
}

// doBackOff does an exponential backoff sleep.
func (d *resumableStreamDecoder) doBackOff() {
    delay := d.backoff.delay(d.retryCount)
    tracePrintf(d.ctx, nil, "Backing off stream read for %s", delay)
    ticker := time.NewTicker(delay)
    defer ticker.Stop()
    d.retryCount++
    select {
    case <-d.ctx.Done():
    case <-ticker.C:
    }
}
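
// For illustration only (an assumption, since exponentialBackoff is defined
// elsewhere in this package): the delay sequence produced by an exponential
// backoff of this shape typically grows as
//
//	delay(n) = min(maxBackoff, minBackoff * 2^n) * jitter
//
// where jitter is a random factor that spreads apart retries from concurrent
// streams.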

// get returns the most recent PartialResultSet generated by a call to next.
func (d *resumableStreamDecoder) get() *sppb.PartialResultSet {
    return d.np
}

// lastErr returns the last non-EOF error encountered.
func (d *resumableStreamDecoder) lastErr() error {
    return d.err
}

// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner
// Rows.
type partialResultSetDecoder struct {
    row     Row
    tx      *sppb.Transaction
    chunked bool      // if true, next value should be merged with last values entry
    ts      time.Time // read timestamp
}

// yield checks whether we have a complete row, and if so returns it. A row
// is not complete if it doesn't have enough columns, or if this is a chunked
// response and the current value is the last one, still awaiting its
// continuation.
func (p *partialResultSetDecoder) yield(chunked, last bool) *Row {
    if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) {
        // When partialResultSetDecoder has received enough column values,
        // there are two cases in which a new Row should be yielded:
        //   1. The incoming PartialResultSet is not chunked;
        //   2. The incoming PartialResultSet is chunked, but the
        //      proto3.Value being merged is not the last one in the
        //      PartialResultSet.
        //
        // Use a fresh Row to simplify clients that want to use yielded
        // results after the next row is retrieved. Note that fields is never
        // changed, so it doesn't need to be copied.
        fresh := Row{
            fields: p.row.fields,
            vals:   make([]*proto3.Value, len(p.row.vals)),
        }
        copy(fresh.vals, p.row.vals)
        p.row.vals = p.row.vals[:0] // empty and reuse slice
        return &fresh
    }
    return nil
}

// errChunkedEmptyRow returns the error for a chunked PartialResultSet that
// arrives while no partial row is buffered.
func errChunkedEmptyRow() error {
    return spannerErrorf(codes.FailedPrecondition, "got invalid chunked PartialResultSet with empty Row")
}

// add tries to merge a new PartialResultSet into the buffered Row. It
// returns any rows that have been completed as a result.
func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) {
    var rows []*Row
    if r.Metadata != nil {
        // Metadata should only be returned in the first result.
        if p.row.fields == nil {
            p.row.fields = r.Metadata.RowType.Fields
        }
        if p.tx == nil && r.Metadata.Transaction != nil {
            p.tx = r.Metadata.Transaction
            if p.tx.ReadTimestamp != nil {
                p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos))
            }
        }
    }
    if len(r.Values) == 0 {
        return nil, nil
    }
    if p.chunked {
        p.chunked = false
        // Try to merge the first value in r.Values into the uncompleted row.
        last := len(p.row.vals) - 1
        if last < 0 { // sanity check
            return nil, errChunkedEmptyRow()
        }
        var err error
        // If p is chunked, then we should always try to merge p's last value
        // with r's first value.
        if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil {
            return nil, err
        }
        r.Values = r.Values[1:]
        // The merge is done; try to yield a complete Row.
        if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil {
            rows = append(rows, row)
        }
    }
    for i, v := range r.Values {
        // The remaining values in r can be appended to p directly.
        p.row.vals = append(p.row.vals, v)
        // Again, check whether a complete Row can be yielded because of the
        // newly added value.
        if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil {
            rows = append(rows, row)
        }
    }
    if r.ChunkedValue {
        // After dealing with all values in r: if r is chunked, then p must
        // also be chunked.
        p.chunked = true
    }
    return rows, nil
}
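
// exampleChunkedReassembly is an illustrative, hypothetical helper (not used
// by the package) sketching how add reassembles a STRING value split across
// two chunked PartialResultSets into a single Row.
func exampleChunkedReassembly() {
    p := &partialResultSetDecoder{}
    meta := &sppb.ResultSetMetadata{
        RowType: &sppb.StructType{Fields: []*sppb.StructType_Field{
            {Name: "Name", Type: &sppb.Type{Code: sppb.TypeCode_STRING}},
        }},
    }
    // The first half of the value arrives with ChunkedValue set, so no row
    // is yielded yet.
    rows, _ := p.add(&sppb.PartialResultSet{
        Metadata:     meta,
        Values:       []*proto3.Value{{Kind: &proto3.Value_StringValue{StringValue: "Hel"}}},
        ChunkedValue: true,
    })
    _ = rows // empty here
    // The second half completes the value; add merges "Hel" + "lo" and
    // yields one Row whose Name column is "Hello".
    rows, _ = p.add(&sppb.PartialResultSet{
        Values: []*proto3.Value{{Kind: &proto3.Value_StringValue{StringValue: "lo"}}},
    })
    _ = rows // contains a single completed Row
}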

// isMergeable reports whether a protobuf Value can potentially be merged
// with other protobuf Values.
func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool {
    switch a.Kind.(type) {
    case *proto3.Value_StringValue:
        return true
    case *proto3.Value_ListValue:
        return true
    default:
        return false
    }
}

// errIncompatibleMergeTypes returns the error for incompatible protobuf
// types that cannot be merged by partialResultSetDecoder.
func errIncompatibleMergeTypes(a, b *proto3.Value) error {
    return spannerErrorf(codes.FailedPrecondition, "incompatible type in chunked PartialResultSet. expected (%T), got (%T)", a.Kind, b.Kind)
}

// errUnsupportedMergeType returns the error for a protobuf type that cannot
// be merged with other protobuf Values.
func errUnsupportedMergeType(a *proto3.Value) error {
    return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind)
}

// merge tries to combine two protobuf Values if possible.
func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) {
    var err error
    typeErr := errIncompatibleMergeTypes(a, b)
    switch t := a.Kind.(type) {
    case *proto3.Value_StringValue:
        s, ok := b.Kind.(*proto3.Value_StringValue)
        if !ok {
            return nil, typeErr
        }
        return &proto3.Value{
            Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue},
        }, nil
    case *proto3.Value_ListValue:
        l, ok := b.Kind.(*proto3.Value_ListValue)
        if !ok {
            return nil, typeErr
        }
        if l.ListValue == nil || len(l.ListValue.Values) == 0 {
            // b is an empty list, just return a.
            return a, nil
        }
        if t.ListValue == nil || len(t.ListValue.Values) == 0 {
            // a is an empty list, just return b.
            return b, nil
        }
        if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) {
            // When the last item in a is of type String, List, or Struct
            // (encoded as List by Cloud Spanner), try to merge the last item
            // in a with the first item in b.
            t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0])
            if err != nil {
                return nil, err
            }
            l.ListValue.Values = l.ListValue.Values[1:]
        }
        return &proto3.Value{
            Kind: &proto3.Value_ListValue{
                ListValue: &proto3.ListValue{
                    Values: append(t.ListValue.Values, l.ListValue.Values...),
                },
            },
        }, nil
    default:
        return nil, errUnsupportedMergeType(a)
    }
}
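
// exampleMergeChunkedList is an illustrative, hypothetical helper (not used
// by the package) sketching list merging: concatenation, with the boundary
// element merged recursively when it is itself mergeable (a string here).
func exampleMergeChunkedList() {
    p := &partialResultSetDecoder{}
    a := &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{
        Values: []*proto3.Value{{Kind: &proto3.Value_StringValue{StringValue: "foo"}}},
    }}}
    b := &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{
        Values: []*proto3.Value{{Kind: &proto3.Value_StringValue{StringValue: "bar"}}},
    }}}
    merged, err := p.merge(a, b)
    if err != nil {
        log.Fatal(err)
    }
    _ = merged // a single list whose only element is the string "foobar"
}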

// done reports whether partialResultSetDecoder has finished processing all
// buffered values.
func (p *partialResultSetDecoder) done() bool {
    // There is no explicit end-of-stream marker, but ending partway through
    // a row, or with the last column still awaiting its continuation, is
    // clearly bad.
    return len(p.row.vals) == 0 && !p.chunked
}