25'ten fazla konu seçemezsiniz Konular bir harf veya rakamla başlamalı, kısa çizgiler ('-') içerebilir ve en fazla 35 karakter uzunluğunda olabilir.
 
 
 

1487 satır
43 KiB

  1. /*
  2. *
  3. * Copyright 2014 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. package grpc
  19. import (
  20. "context"
  21. "errors"
  22. "io"
  23. "math"
  24. "strconv"
  25. "sync"
  26. "time"
  27. "golang.org/x/net/trace"
  28. "google.golang.org/grpc/balancer"
  29. "google.golang.org/grpc/codes"
  30. "google.golang.org/grpc/connectivity"
  31. "google.golang.org/grpc/encoding"
  32. "google.golang.org/grpc/grpclog"
  33. "google.golang.org/grpc/internal/binarylog"
  34. "google.golang.org/grpc/internal/channelz"
  35. "google.golang.org/grpc/internal/grpcrand"
  36. "google.golang.org/grpc/internal/transport"
  37. "google.golang.org/grpc/metadata"
  38. "google.golang.org/grpc/peer"
  39. "google.golang.org/grpc/stats"
  40. "google.golang.org/grpc/status"
  41. )
// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC. If a StreamHandler returns an error, it
// should be produced by the status package, or else gRPC will use
// codes.Unknown as the status code and err.Error() as the status message
// of the RPC.
//
// srv is the service implementation registered with the server; stream is
// the server's view of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification.
type StreamDesc struct {
	StreamName string        // the name of the method, excluding the service
	Handler    StreamHandler // the handler called for the method

	// At least one of these is true.
	ServerStreams bool // indicates the server can perform streaming sends
	ClientStreams bool // indicates the client can perform streaming sends
}
// Stream defines the common interface a client or server stream has to satisfy.
//
// Deprecated: See ClientStream and ServerStream documentation instead.
type Stream interface {
	// Deprecated: See ClientStream and ServerStream documentation instead.
	Context() context.Context
	// Deprecated: See ClientStream and ServerStream documentation instead.
	SendMsg(m interface{}) error
	// Deprecated: See ClientStream and ServerStream documentation instead.
	RecvMsg(m interface{}) error
}
// ClientStream defines the client-side behavior of a streaming RPC.
//
// All errors returned from ClientStream methods are compatible with the
// status package.
type ClientStream interface {
	// Header returns the header metadata received from the server if there
	// is any. It blocks if the metadata is not ready to read.
	Header() (metadata.MD, error)
	// Trailer returns the trailer metadata from the server, if there is any.
	// It must only be called after stream.CloseAndRecv has returned, or
	// stream.Recv has returned a non-nil error (including io.EOF).
	Trailer() metadata.MD
	// CloseSend closes the send direction of the stream. It closes the stream
	// when non-nil error is met. It is also not safe to call CloseSend
	// concurrently with SendMsg.
	CloseSend() error
	// Context returns the context for this stream.
	//
	// It should not be called until after Header or RecvMsg has returned. Once
	// called, subsequent client-side retries are disabled.
	Context() context.Context
	// SendMsg is generally called by generated code. On error, SendMsg aborts
	// the stream. If the error was generated by the client, the status is
	// returned directly; otherwise, io.EOF is returned and the status of
	// the stream may be discovered using RecvMsg.
	//
	// SendMsg blocks until:
	//   - There is sufficient flow control to schedule m with the transport, or
	//   - The stream is done, or
	//   - The stream breaks.
	//
	// SendMsg does not wait until the message is received by the server. An
	// untimely stream closure may result in lost messages. To ensure delivery,
	// users should ensure the RPC completed successfully using RecvMsg.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines. It is also
	// not safe to call CloseSend concurrently with SendMsg.
	SendMsg(m interface{}) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the stream completes successfully. On
	// any other error, the stream is aborted and the error contains the RPC
	// status.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
}
  117. // NewStream creates a new Stream for the client side. This is typically
  118. // called by generated code. ctx is used for the lifetime of the stream.
  119. //
  120. // To ensure resources are not leaked due to the stream returned, one of the following
  121. // actions must be performed:
  122. //
  123. // 1. Call Close on the ClientConn.
  124. // 2. Cancel the context provided.
  125. // 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
  126. // client-streaming RPC, for instance, might use the helper function
  127. // CloseAndRecv (note that CloseSend does not Recv, therefore is not
  128. // guaranteed to release all resources).
  129. // 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
  130. //
  131. // If none of the above happen, a goroutine and a context will be leaked, and grpc
  132. // will not call the optionally-configured stats handler with a stats.End message.
  133. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
  134. // allow interceptor to see all applicable call options, which means those
  135. // configured as defaults from dial option as well as per-call options
  136. opts = combine(cc.dopts.callOptions, opts)
  137. if cc.dopts.streamInt != nil {
  138. return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
  139. }
  140. return newClientStream(ctx, desc, cc, method, opts...)
  141. }
// NewClientStream is a wrapper for ClientConn.NewStream.
//
// It is kept for backward compatibility; see ClientConn.NewStream for the
// resource-management requirements of the returned ClientStream.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	return cc.NewStream(ctx, desc, method, opts...)
}
// newClientStream constructs the clientStream backing ClientConn.NewStream:
// it resolves the per-method config, sets up deadline/cancelation,
// compression, tracing, stats and binary logging, then creates the first
// transport stream attempt (with retry/buffering support).
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	if channelz.IsOn() {
		cc.incrCallsStarted()
		// Any error returned below counts as a failed call for channelz.
		defer func() {
			if err != nil {
				cc.incrCallsFailed()
			}
		}()
	}
	c := defaultCallInfo()
	// Provide an opportunity for the first RPC to see the first service config
	// provided by the resolver.
	if err := cc.waitForResolvedAddrs(ctx); err != nil {
		return nil, err
	}
	mc := cc.GetMethodConfig(method)
	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
	}
	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	var cancel context.CancelFunc
	if mc.Timeout != nil && *mc.Timeout >= 0 {
		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer func() {
		if err != nil {
			cancel()
		}
	}()
	// Apply CallOptions before consulting the call settings below.
	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}
	callHdr := &transport.CallHdr{
		Host:           cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
	}
	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cp = cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}
	var trInfo traceInfo
	if EnableTracing {
		trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = time.Until(deadline)
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
	}
	ctx = newContextWithRPCInfo(ctx, c.failFast)
	sh := cc.dopts.copts.StatsHandler
	var beginTime time.Time
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
		beginTime = time.Now()
		begin := &stats.Begin{
			Client:    true,
			BeginTime: beginTime,
			FailFast:  c.failFast,
		}
		sh.HandleRPC(ctx, begin)
	}
	cs := &clientStream{
		callHdr:      callHdr,
		ctx:          ctx,
		methodConfig: &mc,
		opts:         opts,
		callInfo:     c,
		cc:           cc,
		desc:         desc,
		codec:        c.codec,
		cp:           cp,
		comp:         comp,
		cancel:       cancel,
		beginTime:    beginTime,
		firstAttempt: true,
	}
	if !cc.dopts.disableRetry {
		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
	}
	cs.binlog = binarylog.GetMethodLogger(method)
	cs.callInfo.stream = cs
	// Only this initial attempt has stats/tracing.
	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
		cs.finish(err)
		return nil, err
	}
	// Create the transport stream; buffer the op so it can be replayed if the
	// attempt is transparently retried.
	op := func(a *csAttempt) error { return a.newStream() }
	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
		cs.finish(err)
		return nil, err
	}
	if cs.binlog != nil {
		md, _ := metadata.FromOutgoingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			OnClientSide: true,
			Header:       md,
			MethodName:   method,
			Authority:    cs.cc.authority,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		cs.binlog.Log(logEntry)
	}
	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-cc.ctx.Done():
				cs.finish(ErrClientConnClosing)
			case <-ctx.Done():
				cs.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return cs, nil
}
  304. func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
  305. cs.attempt = &csAttempt{
  306. cs: cs,
  307. dc: cs.cc.dopts.dc,
  308. statsHandler: sh,
  309. trInfo: trInfo,
  310. }
  311. if err := cs.ctx.Err(); err != nil {
  312. return toRPCErr(err)
  313. }
  314. t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
  315. if err != nil {
  316. return err
  317. }
  318. cs.attempt.t = t
  319. cs.attempt.done = done
  320. return nil
  321. }
// newStream creates the transport-level stream for this attempt and wires a
// parser for reading its messages.
func (a *csAttempt) newStream() error {
	cs := a.cs
	// Tell the server how many prior (non-transparent) attempts were made.
	cs.callHdr.PreviousAttempts = cs.numRetries
	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
	if err != nil {
		return toRPCErr(err)
	}
	// NOTE(review): the stream is stored on cs.attempt rather than on a;
	// presumably a == cs.attempt whenever this runs (withRetry re-checks the
	// attempt identity under cs.mu) — confirm before relying on it.
	cs.attempt.s = s
	cs.attempt.p = &parser{r: s}
	return nil
}
// clientStream implements a client side Stream.
type clientStream struct {
	callHdr  *transport.CallHdr // header passed to the transport for each attempt
	opts     []CallOption       // effective call options; after() hooks run in finish
	callInfo *callInfo          // per-call settings derived from opts and method config
	cc       *ClientConn
	desc     *StreamDesc
	codec    baseCodec           // codec used to marshal/unmarshal messages
	cp       Compressor          // legacy compressor (WithCompressor), may be nil
	comp     encoding.Compressor // registered compressor (UseCompressor), may be nil
	cancel   context.CancelFunc  // cancels all attempts
	sentLast bool                // sent an end stream
	beginTime time.Time          // when the RPC began; used for stats.End
	methodConfig *MethodConfig
	ctx context.Context // the application's context, wrapped by stats/tracing
	retryThrottler *retryThrottler // The throttler active when the RPC began.
	binlog *binarylog.MethodLogger // Binary logger, can be nil.
	// serverHeaderBinlogged is a boolean for whether server header has been
	// logged. Server header will be logged when the first time one of those
	// happens: stream.Header(), stream.Recv().
	//
	// It's only read and used by Recv() and Header(), so it doesn't need to be
	// synchronized.
	serverHeaderBinlogged bool
	mu sync.Mutex // guards the retry state below
	firstAttempt bool // if true, transparent retry is valid
	numRetries int // exclusive of transparent retry attempt(s)
	numRetriesSincePushback int // retries since pushback; to reset backoff
	finished bool // TODO: replace with atomic cmpxchg or sync.Once?
	attempt *csAttempt // the active client stream attempt
	// TODO(hedging): hedging will have multiple attempts simultaneously.
	committed bool // active attempt committed for retry?
	buffer []func(a *csAttempt) error // operations to replay on retry
	bufferSize int // current size of buffer
}
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
	cs       *clientStream            // the parent stream
	t        transport.ClientTransport // transport this attempt runs on
	s        *transport.Stream         // nil until newStream succeeds
	p        *parser                   // message parser reading from s
	done     func(balancer.DoneInfo)   // balancer callback; invoked in finish
	finished bool                      // guards double-finish; protected by mu
	dc       Decompressor              // legacy decompressor (WithDecompressor)
	decomp   encoding.Compressor       // registered decompressor, chosen on first recv
	decompSet bool                     // whether dc/decomp were resolved from headers
	mu sync.Mutex // guards trInfo.tr
	// trInfo.tr is set when created (if EnableTracing is true),
	// and cleared when the finish method is called.
	trInfo traceInfo
	statsHandler stats.Handler // non-nil only for the initial attempt
}
  386. func (cs *clientStream) commitAttemptLocked() {
  387. cs.committed = true
  388. cs.buffer = nil
  389. }
  390. func (cs *clientStream) commitAttempt() {
  391. cs.mu.Lock()
  392. cs.commitAttemptLocked()
  393. cs.mu.Unlock()
  394. }
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
// the error that should be returned by the operation.
//
// It decides among: unconditional retry (stream never created, wait-for-ready),
// transparent retry (first attempt, stream unprocessed), and policy-driven
// retry (retryable status code, throttling, server pushback, backoff).
// NOTE(review): callers hold cs.mu; this function can block on the attempt's
// Done channel and on the backoff timer while that lock is held — confirm
// intended.
func (cs *clientStream) shouldRetry(err error) error {
	if cs.attempt.s == nil && !cs.callInfo.failFast {
		// In the event of any error from NewStream (attempt.s == nil), we
		// never attempted to write anything to the wire, so we can retry
		// indefinitely for non-fail-fast RPCs.
		return nil
	}
	if cs.finished || cs.committed {
		// RPC is finished or committed; cannot retry.
		return err
	}
	// Wait for the trailers.
	if cs.attempt.s != nil {
		<-cs.attempt.s.Done()
	}
	if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
		// First attempt, wait-for-ready, stream unprocessed: transparently retry.
		cs.firstAttempt = false
		return nil
	}
	cs.firstAttempt = false
	if cs.cc.dopts.disableRetry {
		return err
	}
	// From here on, retries are governed by the configured retry policy.
	pushback := 0
	hasPushback := false
	if cs.attempt.s != nil {
		if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
			return err
		}
		// TODO(retry): Move down if the spec changes to not check server pushback
		// before considering this a failure for throttling.
		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
		if len(sps) == 1 {
			var e error
			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
				grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
				cs.retryThrottler.throttle() // This counts as a failure for throttling.
				return err
			}
			hasPushback = true
		} else if len(sps) > 1 {
			grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
			cs.retryThrottler.throttle() // This counts as a failure for throttling.
			return err
		}
	}
	// Determine the status code: prefer the stream's status, falling back to
	// the error when no stream was created.
	var code codes.Code
	if cs.attempt.s != nil {
		code = cs.attempt.s.Status().Code()
	} else {
		code = status.Convert(err).Code()
	}
	rp := cs.methodConfig.retryPolicy
	if rp == nil || !rp.retryableStatusCodes[code] {
		return err
	}
	// Note: the ordering here is important; we count this as a failure
	// only if the code matched a retryable code.
	if cs.retryThrottler.throttle() {
		return err
	}
	if cs.numRetries+1 >= rp.maxAttempts {
		return err
	}
	// Compute the delay: server pushback wins; otherwise exponential backoff
	// with full jitter, capped at maxBackoff.
	var dur time.Duration
	if hasPushback {
		dur = time.Millisecond * time.Duration(pushback)
		cs.numRetriesSincePushback = 0
	} else {
		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
		cur := float64(rp.initialBackoff) * fact
		if max := float64(rp.maxBackoff); cur > max {
			cur = max
		}
		dur = time.Duration(grpcrand.Int63n(int64(cur)))
		cs.numRetriesSincePushback++
	}
	// TODO(dfawley): we could eagerly fail here if dur puts us past the
	// deadline, but unsure if it is worth doing.
	t := time.NewTimer(dur)
	select {
	case <-t.C:
		cs.numRetries++
		return nil
	case <-cs.ctx.Done():
		t.Stop()
		return status.FromContextError(cs.ctx.Err()).Err()
	}
}
// retryLocked finishes the current attempt and, while the retry policy
// permits, creates a new attempt and replays the buffered operations on it.
// Returns nil if a retry was performed and succeeded; error otherwise.
// Requires cs.mu to be held.
func (cs *clientStream) retryLocked(lastErr error) error {
	for {
		// Tear down the failed attempt before deciding whether to retry.
		cs.attempt.finish(lastErr)
		if err := cs.shouldRetry(lastErr); err != nil {
			cs.commitAttemptLocked()
			return err
		}
		// Retries get no stats handler or trace info (initial attempt only).
		if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
			return err
		}
		// Replay buffered ops; on failure, loop to consider another retry.
		if lastErr = cs.replayBufferLocked(); lastErr == nil {
			return nil
		}
	}
}
// Context returns the context of the underlying transport stream. Calling it
// commits the RPC to the current attempt, which disables any further retries
// (shouldRetry returns the error once committed).
func (cs *clientStream) Context() context.Context {
	cs.commitAttempt()
	// No need to lock before using attempt, since we know it is committed and
	// cannot change.
	return cs.attempt.s.Context()
}
// withRetry runs op against the current attempt, transparently switching to
// new attempts created by retryLocked until op succeeds or the RPC can no
// longer be retried. onSuccess is invoked (with cs.mu held) exactly once,
// after op succeeds on the attempt that will be kept — typically to buffer
// the op for replay on a later retry.
func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
	cs.mu.Lock()
	for {
		if cs.committed {
			cs.mu.Unlock()
			// Committed: single attempt; run op directly, no buffering.
			return op(cs.attempt)
		}
		a := cs.attempt
		cs.mu.Unlock()
		// Run op without holding the lock; re-validate the attempt after.
		err := op(a)
		cs.mu.Lock()
		if a != cs.attempt {
			// We started another attempt already.
			continue
		}
		if err == io.EOF {
			// io.EOF from op means the stream ended; wait for its final status.
			<-a.s.Done()
		}
		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
			onSuccess()
			cs.mu.Unlock()
			return err
		}
		if err := cs.retryLocked(err); err != nil {
			cs.mu.Unlock()
			return err
		}
	}
}
// Header returns the header metadata of the stream, retrying the underlying
// attempt if necessary. A successful Header commits the attempt. It also
// binary-logs the server header the first time one of Header/Recv observes it.
func (cs *clientStream) Header() (metadata.MD, error) {
	var m metadata.MD
	err := cs.withRetry(func(a *csAttempt) error {
		var err error
		m, err = a.s.Header()
		return toRPCErr(err)
	}, cs.commitAttemptLocked)
	if err != nil {
		cs.finish(err)
		return nil, err
	}
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Only log if binary log is on and header has not been logged.
		logEntry := &binarylog.ServerHeader{
			OnClientSide: true,
			Header:       m,
			PeerAddr:     nil,
		}
		if peer, ok := peer.FromContext(cs.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		cs.binlog.Log(logEntry)
		cs.serverHeaderBinlogged = true
	}
	return m, err
}
  564. func (cs *clientStream) Trailer() metadata.MD {
  565. // On RPC failure, we never need to retry, because usage requires that
  566. // RecvMsg() returned a non-nil error before calling this function is valid.
  567. // We would have retried earlier if necessary.
  568. //
  569. // Commit the attempt anyway, just in case users are not following those
  570. // directions -- it will prevent races and should not meaningfully impact
  571. // performance.
  572. cs.commitAttempt()
  573. if cs.attempt.s == nil {
  574. return nil
  575. }
  576. return cs.attempt.s.Trailer()
  577. }
  578. func (cs *clientStream) replayBufferLocked() error {
  579. a := cs.attempt
  580. for _, f := range cs.buffer {
  581. if err := f(a); err != nil {
  582. return err
  583. }
  584. }
  585. return nil
  586. }
  587. func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
  588. // Note: we still will buffer if retry is disabled (for transparent retries).
  589. if cs.committed {
  590. return
  591. }
  592. cs.bufferSize += sz
  593. if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
  594. cs.commitAttemptLocked()
  595. return
  596. }
  597. cs.buffer = append(cs.buffer, op)
  598. }
// SendMsg marshals, optionally compresses, and sends m on the stream,
// buffering the send for replay on retry. See ClientStream.SendMsg for the
// user-facing contract.
func (cs *clientStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if err != nil && err != io.EOF {
			// Call finish on the client stream for errors generated by this SendMsg
			// call, as these indicate problems created by this client. (Transport
			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
			// error will be returned from RecvMsg eventually in that case, or be
			// retried.)
			cs.finish(err)
		}
	}()
	if cs.sentLast {
		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
	}
	if !cs.desc.ClientStreams {
		// Unary/server-streaming: the single request is also the last message.
		cs.sentLast = true
	}
	data, err := encode(cs.codec, m)
	if err != nil {
		return err
	}
	compData, err := compress(data, cs.cp, cs.comp)
	if err != nil {
		return err
	}
	hdr, payload := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > *cs.callInfo.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
	}
	msgBytes := data // Store the pointer before setting to nil. For binary logging.
	op := func(a *csAttempt) error {
		err := a.sendMsg(m, hdr, payload, data)
		// nil out the message and uncomp when replaying; they are only needed for
		// stats which is disabled for subsequent attempts.
		m, data = nil, nil
		return err
	}
	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ClientMessage{
			OnClientSide: true,
			Message:      msgBytes,
		})
	}
	return
}
// RecvMsg receives the next message into m, retrying the attempt when
// allowed. For non-server-streaming RPCs (or on any error) it also finishes
// the stream and emits the trailing binary-log entries.
func (cs *clientStream) RecvMsg(m interface{}) error {
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Call Header() to binary log header if it's not already logged.
		cs.Header()
	}
	var recvInfo *payloadInfo
	if cs.binlog != nil {
		recvInfo = &payloadInfo{}
	}
	err := cs.withRetry(func(a *csAttempt) error {
		return a.recvMsg(m, recvInfo)
	}, cs.commitAttemptLocked)
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ServerMessage{
			OnClientSide: true,
			Message:      recvInfo.uncompressedBytes,
		})
	}
	if err != nil || !cs.desc.ServerStreams {
		// err != nil or non-server-streaming indicates end of stream.
		cs.finish(err)
		if cs.binlog != nil {
			// finish will not log Trailer. Log Trailer here.
			logEntry := &binarylog.ServerTrailer{
				OnClientSide: true,
				Trailer:      cs.Trailer(),
				Err:          err,
			}
			if logEntry.Err == io.EOF {
				// io.EOF is a successful end of stream, not an error to log.
				logEntry.Err = nil
			}
			if peer, ok := peer.FromContext(cs.Context()); ok {
				logEntry.PeerAddr = peer.Addr
			}
			cs.binlog.Log(logEntry)
		}
	}
	return err
}
// CloseSend half-closes the stream from the client side by writing an empty
// final frame. It never returns an error; see the comments below.
func (cs *clientStream) CloseSend() error {
	if cs.sentLast {
		// TODO: return an error and finish the stream instead, due to API misuse?
		return nil
	}
	cs.sentLast = true
	op := func(a *csAttempt) error {
		// Error from Write is deliberately ignored; see the comment below.
		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
		// Always return nil; io.EOF is the only error that might make sense
		// instead, but there is no need to signal the client to call RecvMsg
		// as the only use left for the stream after CloseSend is to call
		// RecvMsg. This also matches historical behavior.
		return nil
	}
	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
	if cs.binlog != nil {
		cs.binlog.Log(&binarylog.ClientHalfClose{
			OnClientSide: true,
		})
	}
	// We never returned an error here for reasons.
	return nil
}
  708. func (cs *clientStream) finish(err error) {
  709. if err == io.EOF {
  710. // Ending a stream with EOF indicates a success.
  711. err = nil
  712. }
  713. cs.mu.Lock()
  714. if cs.finished {
  715. cs.mu.Unlock()
  716. return
  717. }
  718. cs.finished = true
  719. cs.commitAttemptLocked()
  720. cs.mu.Unlock()
  721. // For binary logging. only log cancel in finish (could be caused by RPC ctx
  722. // canceled or ClientConn closed). Trailer will be logged in RecvMsg.
  723. //
  724. // Only one of cancel or trailer needs to be logged. In the cases where
  725. // users don't call RecvMsg, users must have already canceled the RPC.
  726. if cs.binlog != nil && status.Code(err) == codes.Canceled {
  727. cs.binlog.Log(&binarylog.Cancel{
  728. OnClientSide: true,
  729. })
  730. }
  731. if err == nil {
  732. cs.retryThrottler.successfulRPC()
  733. }
  734. if channelz.IsOn() {
  735. if err != nil {
  736. cs.cc.incrCallsFailed()
  737. } else {
  738. cs.cc.incrCallsSucceeded()
  739. }
  740. }
  741. if cs.attempt != nil {
  742. cs.attempt.finish(err)
  743. }
  744. // after functions all rely upon having a stream.
  745. if cs.attempt.s != nil {
  746. for _, o := range cs.opts {
  747. o.after(cs.callInfo)
  748. }
  749. }
  750. cs.cancel()
  751. }
// sendMsg writes one framed message (hdr + payld) to the attempt's transport
// stream and records trace/stats/channelz events. data is the uncompressed
// bytes, used only for stats.
func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
	cs := a.cs
	if EnableTracing {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
		if !cs.desc.ClientStreams {
			// For non-client-streaming RPCs, we return nil instead of EOF on error
			// because the generated code requires it. finish is not called; RecvMsg()
			// will call it with the stream's status independently.
			return nil
		}
		// Transport errors surface as io.EOF; the real status is read later.
		return io.EOF
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
	}
	if channelz.IsOn() {
		a.t.IncrMsgSent()
	}
	return nil
}
// recvMsg reads one message into m, resolving the decompressor from the
// received headers on the first call. For non-server-streaming RPCs it
// performs an extra recv that must yield io.EOF, enforcing exactly one
// response message.
func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
	cs := a.cs
	if a.statsHandler != nil && payInfo == nil {
		// Stats needs the payload bytes even when binary logging does not.
		payInfo = &payloadInfo{}
	}
	if !a.decompSet {
		// Block until we receive headers containing received message encoding.
		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
			if a.dc == nil || a.dc.Type() != ct {
				// No configured decompressor, or it does not match the incoming
				// message encoding; attempt to find a registered compressor that does.
				a.dc = nil
				a.decomp = encoding.GetCompressor(ct)
			}
		} else {
			// No compression is used; disable our decompressor.
			a.dc = nil
		}
		// Only initialize this state once per stream.
		a.decompSet = true
	}
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
	if err != nil {
		if err == io.EOF {
			// Prefer the stream's status error over bare EOF when present.
			if statusErr := a.s.Status().Err(); statusErr != nil {
				return statusErr
			}
			return io.EOF // indicates successful end of stream.
		}
		return toRPCErr(err)
	}
	if EnableTracing {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
			Client:   true,
			RecvTime: time.Now(),
			Payload:  m,
			// TODO truncate large payload.
			Data:   payInfo.uncompressedBytes,
			Length: len(payInfo.uncompressedBytes),
		})
	}
	if channelz.IsOn() {
		a.t.IncrMsgRecv()
	}
	if cs.desc.ServerStreams {
		// Subsequent messages should be received by subsequent RecvMsg calls.
		return nil
	}
	// Special handling for non-server-stream rpcs.
	// This recv expects EOF or errors, so we don't collect inPayload.
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
	if err == nil {
		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
	}
	if err == io.EOF {
		return a.s.Status().Err() // non-server streaming Recv returns nil on success
	}
	return toRPCErr(err)
}
// finish closes this attempt's transport stream exactly once and reports the
// outcome to the balancer (done), the stats handler, and the trace, in that
// order. io.EOF is normalized to success.
func (a *csAttempt) finish(err error) {
	a.mu.Lock()
	if a.finished {
		// Idempotent: later calls are no-ops.
		a.mu.Unlock()
		return
	}
	a.finished = true
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	var tr metadata.MD
	if a.s != nil {
		a.t.CloseStream(a.s, err)
		tr = a.s.Trailer()
	}
	if a.done != nil {
		br := false
		if a.s != nil {
			br = a.s.BytesReceived()
		}
		a.done(balancer.DoneInfo{
			Err:           err,
			Trailer:       tr,
			BytesSent:     a.s != nil,
			BytesReceived: br,
		})
	}
	if a.statsHandler != nil {
		end := &stats.End{
			Client:    true,
			BeginTime: a.cs.beginTime,
			EndTime:   time.Now(),
			Trailer:   tr,
			Error:     err,
		}
		a.statsHandler.HandleRPC(a.cs.ctx, end)
	}
	if a.trInfo.tr != nil {
		if err == nil {
			a.trInfo.tr.LazyPrintf("RPC: [OK]")
		} else {
			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
			a.trInfo.tr.SetError()
		}
		a.trInfo.tr.Finish()
		a.trInfo.tr = nil
	}
	a.mu.Unlock()
}
// newClientStream creates a stream directly on this addrConn's transport,
// bypassing the ClientConn's picker/retry machinery. It validates that t is
// still the addrConn's current transport, applies the CallOptions, resolves
// the outgoing compressor, and starts the transport stream. For
// non-unary-style streams it also spawns a goroutine that finishes the
// stream when either the addrConn or the caller's context is done.
func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
	ac.mu.Lock()
	if ac.transport != t {
		// The caller's transport reference is stale (the addrConn has
		// reconnected since); refuse rather than use the wrong transport.
		ac.mu.Unlock()
		return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
	}
	// transition to CONNECTING state when an attempt starts
	if ac.state != connectivity.Connecting {
		ac.updateConnectivityState(connectivity.Connecting)
		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
	}
	ac.mu.Unlock()
	if t == nil {
		// TODO: return RPC error here?
		return nil, errors.New("transport provided is nil")
	}
	// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
	c := &callInfo{}
	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	// NOTE(review): this is a client-side stream but falls back to the
	// *server* send-size default — TODO confirm this is intentional.
	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		// Release the child context if any later setup step fails.
		if err != nil {
			cancel()
		}
	}()
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}
	callHdr := &transport.CallHdr{
		Host:           ac.cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
	}
	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if ac.cc.dopts.cp != nil {
		callHdr.SendCompress = ac.cc.dopts.cp.Type()
		cp = ac.cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}
	as := &addrConnStream{
		callHdr:  callHdr,
		ac:       ac,
		ctx:      ctx,
		cancel:   cancel,
		opts:     opts,
		callInfo: c,
		desc:     desc,
		codec:    c.codec,
		cp:       cp,
		comp:     comp,
		t:        t,
	}
	as.callInfo.stream = as
	s, err := as.t.NewStream(as.ctx, as.callHdr)
	if err != nil {
		err = toRPCErr(err)
		return nil, err
	}
	as.s = s
	as.p = &parser{r: s}
	ac.incrCallsStarted()
	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-ac.ctx.Done():
				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
			case <-ctx.Done():
				as.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return as, nil
}
// addrConnStream implements ClientStream for streams created directly on an
// addrConn via newClientStream (no picker, no retries).
type addrConnStream struct {
	s        *transport.Stream          // underlying transport stream; set after NewStream succeeds
	ac       *addrConn                  // owning subchannel; used for channelz call counters
	callHdr  *transport.CallHdr         // header used to create the stream
	cancel   context.CancelFunc         // cancels ctx; invoked in finish
	opts     []CallOption               // options applied to callInfo
	callInfo *callInfo                  // per-call settings (codec, size limits, ...)
	t        transport.ClientTransport  // transport the stream lives on
	ctx      context.Context            // child context for this stream
	sentLast bool                       // true once the final (Last) frame has been written
	desc     *StreamDesc                // client/server streaming flags for this method
	codec    baseCodec                  // message codec
	cp       Compressor                 // legacy outgoing compressor (WithCompressor)
	comp     encoding.Compressor        // registered outgoing compressor (UseCompressor)
	decompSet bool                      // true after the incoming decompressor has been chosen
	dc       Decompressor               // legacy incoming decompressor
	decomp   encoding.Compressor        // registered incoming decompressor
	p        *parser                    // wire-format parser reading from s
	mu       sync.Mutex                 // guards finished
	finished bool                       // true once finish has run; later calls are no-ops
}
  1019. func (as *addrConnStream) Header() (metadata.MD, error) {
  1020. m, err := as.s.Header()
  1021. if err != nil {
  1022. as.finish(toRPCErr(err))
  1023. }
  1024. return m, err
  1025. }
// Trailer returns the trailer metadata from the underlying transport stream.
func (as *addrConnStream) Trailer() metadata.MD {
	return as.s.Trailer()
}
  1029. func (as *addrConnStream) CloseSend() error {
  1030. if as.sentLast {
  1031. // TODO: return an error and finish the stream instead, due to API misuse?
  1032. return nil
  1033. }
  1034. as.sentLast = true
  1035. as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
  1036. // Always return nil; io.EOF is the only error that might make sense
  1037. // instead, but there is no need to signal the client to call RecvMsg
  1038. // as the only use left for the stream after CloseSend is to call
  1039. // RecvMsg. This also matches historical behavior.
  1040. return nil
  1041. }
// Context returns the context of the underlying transport stream.
func (as *addrConnStream) Context() context.Context {
	return as.s.Context()
}
// SendMsg encodes m, optionally compresses it, enforces the max send size,
// and writes it to the transport. For non-client-streaming methods the
// frame is flagged Last, closing the send side implicitly.
func (as *addrConnStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if err != nil && err != io.EOF {
			// Call finish on the client stream for errors generated by this SendMsg
			// call, as these indicate problems created by this client. (Transport
			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
			// error will be returned from RecvMsg eventually in that case, or be
			// retried.)
			as.finish(err)
		}
	}()
	if as.sentLast {
		// The final frame was already written (CloseSend or a previous
		// non-client-streaming SendMsg); this is API misuse.
		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
	}
	if !as.desc.ClientStreams {
		// Unary request side: this one message is the last.
		as.sentLast = true
	}
	data, err := encode(as.codec, m)
	if err != nil {
		return err
	}
	compData, err := compress(data, as.cp, as.comp)
	if err != nil {
		return err
	}
	hdr, payld := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payld) > *as.callInfo.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
	}
	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
		if !as.desc.ClientStreams {
			// For non-client-streaming RPCs, we return nil instead of EOF on error
			// because the generated code requires it. finish is not called; RecvMsg()
			// will call it with the stream's status independently.
			return nil
		}
		return io.EOF
	}
	if channelz.IsOn() {
		as.t.IncrMsgSent()
	}
	return nil
}
// RecvMsg receives the next message into m. On the first call it selects the
// incoming decompressor based on the server's advertised encoding. For
// non-server-streaming methods it performs a second recv that must yield
// io.EOF, enforcing the one-response protocol.
func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
	defer func() {
		if err != nil || !as.desc.ServerStreams {
			// err != nil or non-server-streaming indicates end of stream.
			as.finish(err)
		}
	}()
	if !as.decompSet {
		// Block until we receive headers containing received message encoding.
		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
			if as.dc == nil || as.dc.Type() != ct {
				// No configured decompressor, or it does not match the incoming
				// message encoding; attempt to find a registered compressor that does.
				as.dc = nil
				as.decomp = encoding.GetCompressor(ct)
			}
		} else {
			// No compression is used; disable our decompressor.
			as.dc = nil
		}
		// Only initialize this state once per stream.
		as.decompSet = true
	}
	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
	if err != nil {
		if err == io.EOF {
			if statusErr := as.s.Status().Err(); statusErr != nil {
				return statusErr
			}
			return io.EOF // indicates successful end of stream.
		}
		return toRPCErr(err)
	}
	if channelz.IsOn() {
		as.t.IncrMsgRecv()
	}
	if as.desc.ServerStreams {
		// Subsequent messages should be received by subsequent RecvMsg calls.
		return nil
	}
	// Special handling for non-server-stream rpcs.
	// This recv expects EOF or errors, so we don't collect inPayload.
	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
	if err == nil {
		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
	}
	if err == io.EOF {
		return as.s.Status().Err() // non-server streaming Recv returns nil on success
	}
	return toRPCErr(err)
}
// finish ends the stream exactly once: it closes the transport stream,
// updates the addrConn's channelz call counters, and releases the stream's
// child context. Subsequent calls are no-ops.
func (as *addrConnStream) finish(err error) {
	as.mu.Lock()
	if as.finished {
		as.mu.Unlock()
		return
	}
	as.finished = true
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	if as.s != nil {
		as.t.CloseStream(as.s, err)
	}
	if err != nil {
		as.ac.incrCallsFailed()
	} else {
		as.ac.incrCallsSucceeded()
	}
	// Release the stream's context to avoid the leak noted in newClientStream.
	as.cancel()
	as.mu.Unlock()
}
// ServerStream defines the server-side behavior of a streaming RPC.
//
// All errors returned from ServerStream methods are compatible with the
// status package.
type ServerStream interface {
	// SetHeader sets the header metadata. It may be called multiple times.
	// When called multiple times, all the provided metadata will be merged.
	// All the metadata will be sent out when one of the following happens:
	//  - ServerStream.SendHeader() is called;
	//  - The first response is sent out;
	//  - An RPC status is sent out (error or success).
	SetHeader(metadata.MD) error
	// SendHeader sends the header metadata.
	// The provided md and headers set by SetHeader() will be sent.
	// It fails if called multiple times.
	SendHeader(metadata.MD) error
	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
	// When called more than once, all the provided metadata will be merged.
	SetTrailer(metadata.MD)
	// Context returns the context for this stream.
	Context() context.Context
	// SendMsg sends a message. On error, SendMsg aborts the stream and the
	// error is returned directly.
	//
	// SendMsg blocks until:
	//  - There is sufficient flow control to schedule m with the transport, or
	//  - The stream is done, or
	//  - The stream breaks.
	//
	// SendMsg does not wait until the message is received by the client. An
	// untimely stream closure may result in lost messages.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines.
	SendMsg(m interface{}) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the client has performed a CloseSend. On
	// any non-EOF error, the stream is aborted and the error contains the
	// RPC status.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
}
// serverStream implements a server side Stream.
type serverStream struct {
	ctx    context.Context           // the stream's context, returned by Context()
	t      transport.ServerTransport // transport used to write frames and status
	s      *transport.Stream         // underlying transport stream
	p      *parser                   // wire-format parser reading from s
	codec  baseCodec                 // message codec
	cp     Compressor                // legacy outgoing compressor
	dc     Decompressor              // legacy incoming decompressor
	comp   encoding.Compressor       // registered outgoing compressor
	decomp encoding.Compressor       // registered incoming decompressor
	maxReceiveMessageSize int        // limit enforced in RecvMsg
	maxSendMessageSize    int        // limit enforced in SendMsg
	trInfo       *traceInfo          // optional trace state; nil when tracing is off
	statsHandler stats.Handler       // optional stats callback; nil when unset
	binlog       *binarylog.MethodLogger // optional binary logger; nil when unset
	// serverHeaderBinlogged indicates whether server header has been logged. It
	// will happen when one of the following two happens: stream.SendHeader(),
	// stream.Send().
	//
	// It's only checked in send and sendHeader, doesn't need to be
	// synchronized.
	serverHeaderBinlogged bool
	mu sync.Mutex // protects trInfo.tr after the service handler runs.
}
// Context returns the context for this stream.
func (ss *serverStream) Context() context.Context {
	return ss.ctx
}
  1236. func (ss *serverStream) SetHeader(md metadata.MD) error {
  1237. if md.Len() == 0 {
  1238. return nil
  1239. }
  1240. return ss.s.SetHeader(md)
  1241. }
  1242. func (ss *serverStream) SendHeader(md metadata.MD) error {
  1243. err := ss.t.WriteHeader(ss.s, md)
  1244. if ss.binlog != nil && !ss.serverHeaderBinlogged {
  1245. h, _ := ss.s.Header()
  1246. ss.binlog.Log(&binarylog.ServerHeader{
  1247. Header: h,
  1248. })
  1249. ss.serverHeaderBinlogged = true
  1250. }
  1251. return err
  1252. }
  1253. func (ss *serverStream) SetTrailer(md metadata.MD) {
  1254. if md.Len() == 0 {
  1255. return
  1256. }
  1257. ss.s.SetTrailer(md)
  1258. }
// SendMsg encodes m, optionally compresses it, enforces the max send size,
// and writes it to the transport. The deferred block records the outcome in
// the trace, writes a non-user status on local errors, and bumps channelz.
func (ss *serverStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if ss.trInfo != nil {
			ss.mu.Lock()
			if ss.trInfo.tr != nil {
				if err == nil {
					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
				} else {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
			}
			ss.mu.Unlock()
		}
		if err != nil && err != io.EOF {
			st, _ := status.FromError(toRPCErr(err))
			ss.t.WriteStatus(ss.s, st)
			// Non-user specified status was sent out. This should be an error
			// case (as a server side Cancel maybe).
			//
			// This is not handled specifically now. User will return a final
			// status from the service handler, we will log that error instead.
			// This behavior is similar to an interceptor.
		}
		if channelz.IsOn() && err == nil {
			ss.t.IncrMsgSent()
		}
	}()
	data, err := encode(ss.codec, m)
	if err != nil {
		return err
	}
	compData, err := compress(data, ss.cp, ss.comp)
	if err != nil {
		return err
	}
	hdr, payload := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > ss.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
	}
	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
		return toRPCErr(err)
	}
	if ss.binlog != nil {
		// Sending the first message implicitly sends the header; make sure it
		// is logged before the message.
		if !ss.serverHeaderBinlogged {
			h, _ := ss.s.Header()
			ss.binlog.Log(&binarylog.ServerHeader{
				Header: h,
			})
			ss.serverHeaderBinlogged = true
		}
		ss.binlog.Log(&binarylog.ServerMessage{
			Message: data,
		})
	}
	if ss.statsHandler != nil {
		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
	}
	return nil
}
// RecvMsg receives the next client message into m. It returns io.EOF when
// the client half-closes. The deferred block records the outcome in the
// trace, writes a non-user status on non-EOF errors, and bumps channelz.
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
	defer func() {
		if ss.trInfo != nil {
			ss.mu.Lock()
			if ss.trInfo.tr != nil {
				if err == nil {
					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
				} else if err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
			}
			ss.mu.Unlock()
		}
		if err != nil && err != io.EOF {
			st, _ := status.FromError(toRPCErr(err))
			ss.t.WriteStatus(ss.s, st)
			// Non-user specified status was sent out. This should be an error
			// case (as a server side Cancel maybe).
			//
			// This is not handled specifically now. User will return a final
			// status from the service handler, we will log that error instead.
			// This behavior is similar to an interceptor.
		}
		if channelz.IsOn() && err == nil {
			ss.t.IncrMsgRecv()
		}
	}()
	var payInfo *payloadInfo
	if ss.statsHandler != nil || ss.binlog != nil {
		// Only collect payload info when someone will consume it.
		payInfo = &payloadInfo{}
	}
	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
		if err == io.EOF {
			// Client half-closed the stream.
			if ss.binlog != nil {
				ss.binlog.Log(&binarylog.ClientHalfClose{})
			}
			return err
		}
		if err == io.ErrUnexpectedEOF {
			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
		}
		return toRPCErr(err)
	}
	if ss.statsHandler != nil {
		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
			RecvTime: time.Now(),
			Payload:  m,
			// TODO truncate large payload.
			Data:   payInfo.uncompressedBytes,
			Length: len(payInfo.uncompressedBytes),
		})
	}
	if ss.binlog != nil {
		ss.binlog.Log(&binarylog.ClientMessage{
			Message: payInfo.uncompressedBytes,
		})
	}
	return nil
}
  1380. // MethodFromServerStream returns the method string for the input stream.
  1381. // The returned string is in the format of "/service/method".
  1382. func MethodFromServerStream(stream ServerStream) (string, bool) {
  1383. return Method(stream.Context())
  1384. }