/*
Copyright 2017 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spanner

import (
	"fmt"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"golang.org/x/net/context"
	edpb "google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

const (
	retryInfoKey = "google.rpc.retryinfo-bin"
)

// errRetry returns an unavailable error under error namespace EsOther. It is a
// generic retryable error that is used to mask and recover unretryable errors
// in a retry loop.
func errRetry(err error) error {
	if se, ok := err.(*Error); ok {
		return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers}
	}
	return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error())
}

// isErrorClosing reports whether the error is generated by the gRPC layer
// talking to a closed server.
func isErrorClosing(err error) bool {
	if err == nil {
		return false
	}
	if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") {
		// Handle the case when the connection is closed unexpectedly.
		// TODO: once gRPC is able to categorize this as a retryable
		// error, we should stop parsing the error message here.
		return true
	}
	return false
}

// isErrorRST reports whether the error is generated by the gRPC client
// receiving an RST_STREAM frame from the server.
func isErrorRST(err error) bool {
	if err == nil {
		return false
	}
	if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") {
		// TODO: once gRPC is able to categorize this error as "go away"
		// or "retryable", we should stop parsing the error message.
		return true
	}
	return false
}

// isErrorUnexpectedEOF reports whether the error is generated by the gRPC
// layer receiving io.EOF unexpectedly.
func isErrorUnexpectedEOF(err error) bool {
	if err == nil {
		return false
	}
	if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") {
		// An unexpected EOF is a transport-layer issue that can be
		// recovered by retries. The most likely scenario is a flaky
		// RecvMsg() call caused by network issues.
		// TODO: once gRPC is able to categorize this as a retryable
		// error, we should stop parsing the error message here.
		return true
	}
	return false
}

// isErrorUnavailable reports whether the error is about the server being
// unavailable.
func isErrorUnavailable(err error) bool {
	if err == nil {
		return false
	}
	if ErrCode(err) == codes.Unavailable {
		return true
	}
	return false
}

// isRetryable returns true if the Cloud Spanner error being checked is a retryable error.
func isRetryable(err error) bool {
	if isErrorClosing(err) {
		return true
	}
	if isErrorUnexpectedEOF(err) {
		return true
	}
	if isErrorRST(err) {
		return true
	}
	if isErrorUnavailable(err) {
		return true
	}
	return false
}

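// What follows is an illustrative sketch, not part of the original file: it
// shows how the classifiers above sort two representative errors.
// exampleClassify is a hypothetical name used only for this illustration.
func exampleClassify() {
	transient := spannerErrorf(codes.Unavailable, "server temporarily overloaded")
	fatal := spannerErrorf(codes.InvalidArgument, "malformed SQL statement")
	fmt.Println(isRetryable(transient)) // true: Unavailable errors are retried
	fmt.Println(isRetryable(fatal))     // false: surfaced to the caller as-is
}
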
// errContextCanceled returns a *spanner.Error for a canceled context.
func errContextCanceled(ctx context.Context, lastErr error) error {
	if ctx.Err() == context.DeadlineExceeded {
		return spannerErrorf(codes.DeadlineExceeded, "%v, lastErr is <%v>", ctx.Err(), lastErr)
	}
	return spannerErrorf(codes.Canceled, "%v, lastErr is <%v>", ctx.Err(), lastErr)
}

// extractRetryDelay extracts the retry backoff delay from the error's
// trailer metadata, if present.
func extractRetryDelay(err error) (time.Duration, bool) {
	trailers := errTrailers(err)
	if trailers == nil {
		return 0, false
	}
	elem, ok := trailers[retryInfoKey]
	if !ok || len(elem) <= 0 {
		return 0, false
	}
	_, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0])
	if err != nil {
		return 0, false
	}
	var retryInfo edpb.RetryInfo
	if proto.Unmarshal([]byte(b), &retryInfo) != nil {
		return 0, false
	}
	delay, err := ptypes.Duration(retryInfo.RetryDelay)
	if err != nil {
		return 0, false
	}
	return delay, true
}

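// Hedged sketch, not part of the original file: the wire format that
// extractRetryDelay expects is a google.rpc.RetryInfo proto carried in the
// "google.rpc.retryinfo-bin" trailer. exampleRetryInfoTrailer is a
// hypothetical name, and the sketch assumes a grpc-go version whose
// metadata.DecodeKeyValue passes "-bin" values through unchanged (older
// versions base64-decode them instead).
func exampleRetryInfoTrailer() {
	ri := &edpb.RetryInfo{RetryDelay: ptypes.DurationProto(250 * time.Millisecond)}
	raw, _ := proto.Marshal(ri)
	// Attach the marshaled proto as a trailer on a spanner *Error.
	err := &Error{codes.Aborted, "transaction aborted", metadata.MD{retryInfoKey: []string{string(raw)}}}
	if d, ok := extractRetryDelay(err); ok {
		fmt.Printf("server-suggested backoff: %v\n", d) // 250ms
	}
}
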
// runRetryable keeps attempting to run f until one of the following happens:
//     1) f returns a nil error or an unretryable error;
//     2) the context is cancelled or times out.
// TODO: consider using https://github.com/googleapis/gax-go once it
// becomes available internally.
func runRetryable(ctx context.Context, f func(context.Context) error) error {
	return toSpannerError(runRetryableNoWrap(ctx, f))
}

// Like runRetryable, but doesn't wrap the returned error in a spanner.Error.
func runRetryableNoWrap(ctx context.Context, f func(context.Context) error) error {
	var funcErr error
	retryCount := 0
	for {
		select {
		case <-ctx.Done():
			// Do the context check here so that even if f() failed to do
			// so (for example, due to a gRPC implementation bug), the
			// loop still has a chance to exit as expected.
			return errContextCanceled(ctx, funcErr)
		default:
		}
		funcErr = f(ctx)
		if funcErr == nil {
			return nil
		}
		if isRetryable(funcErr) {
			// The error is retryable; do exponential backoff and continue.
			b, ok := extractRetryDelay(funcErr)
			if !ok {
				b = defaultBackoff.delay(retryCount)
			}
			tracePrintf(ctx, nil, "Backing off for %s, then retrying", b)
			select {
			case <-ctx.Done():
				return errContextCanceled(ctx, funcErr)
			case <-time.After(b):
			}
			retryCount++
			continue
		}
		// The error isn't retryable; return it immediately.
		return funcErr
	}
}
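
// A minimal usage sketch, not part of the original file: the callback fails
// twice with a retryable Unavailable error, so runRetryable backs off and
// retries until the third attempt succeeds (or ctx expires first).
// exampleRunRetryable is a hypothetical name used only for illustration.
func exampleRunRetryable(ctx context.Context) error {
	attempts := 0
	return runRetryable(ctx, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			// Simulate a transient failure; isRetryable reports true for
			// codes.Unavailable, so the loop backs off and retries.
			return spannerErrorf(codes.Unavailable, "transient failure on attempt %d", attempts)
		}
		return nil
	})
}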