You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 

2228 lines
63 KiB

  1. /*
  2. *
  3. * Copyright 2014 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. package transport
  19. import (
  20. "bytes"
  21. "context"
  22. "encoding/binary"
  23. "errors"
  24. "fmt"
  25. "io"
  26. "math"
  27. "net"
  28. "reflect"
  29. "runtime"
  30. "strconv"
  31. "strings"
  32. "sync"
  33. "testing"
  34. "time"
  35. "golang.org/x/net/http2"
  36. "golang.org/x/net/http2/hpack"
  37. "google.golang.org/grpc/codes"
  38. "google.golang.org/grpc/internal/leakcheck"
  39. "google.golang.org/grpc/internal/syscall"
  40. "google.golang.org/grpc/keepalive"
  41. "google.golang.org/grpc/status"
  42. )
// server is a test HTTP/2 server: it owns the listener and every server
// transport it has accepted, so tests can start and stop a real server.
type server struct {
	lis        net.Listener
	port       string     // port the listener is bound to, as a string
	startedErr chan error // error (or nil) with server start value
	mu         sync.Mutex // guards conns
	conns      map[ServerTransport]bool // live transports; nil after stop()
	h          *testStreamHandler       // handler attached to the most recent transport
	ready      chan struct{}            // closed once a delayRead server has wired its channels
}
var (
	expectedRequest  = []byte("ping")
	expectedResponse = []byte("pong")
	// The large payloads are twice the initial flow-control window so
	// that sending them forces window updates.
	expectedRequestLarge       = make([]byte, initialWindowSize*2)
	expectedResponseLarge      = make([]byte, initialWindowSize*2)
	expectedInvalidHeaderField = "invalid/content-type"
)
  59. func init() {
  60. expectedRequestLarge[0] = 'g'
  61. expectedRequestLarge[len(expectedRequestLarge)-1] = 'r'
  62. expectedResponseLarge[0] = 'p'
  63. expectedResponseLarge[len(expectedResponseLarge)-1] = 'c'
  64. }
// testStreamHandler serves streams on a single server transport and
// carries the optional channels used by the notify/delay-read handlers.
type testStreamHandler struct {
	t           *http2Server
	notify      chan struct{} // closed by the handler to signal the test/client
	getNotified chan struct{} // closed by the test/client to signal the handler
}
// hType selects which stream handler a test server installs.
type hType int

const (
	normal hType = iota
	suspended              // server never reads or replies to streams
	notifyCall             // closes h.notify when a stream arrives
	misbehaved             // deliberately violates client flow control
	encodingRequiredStatus // writes a status message that needs encoding
	invalidHeaderField     // responds with an invalid content-type header
	delayRead              // delays reads to exercise flow control
	pingpong               // echoes every message back to the client
)
  81. func (h *testStreamHandler) handleStreamAndNotify(s *Stream) {
  82. if h.notify == nil {
  83. return
  84. }
  85. go func() {
  86. select {
  87. case <-h.notify:
  88. default:
  89. close(h.notify)
  90. }
  91. }()
  92. }
  93. func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
  94. req := expectedRequest
  95. resp := expectedResponse
  96. if s.Method() == "foo.Large" {
  97. req = expectedRequestLarge
  98. resp = expectedResponseLarge
  99. }
  100. p := make([]byte, len(req))
  101. _, err := s.Read(p)
  102. if err != nil {
  103. return
  104. }
  105. if !bytes.Equal(p, req) {
  106. t.Errorf("handleStream got %v, want %v", p, req)
  107. h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
  108. return
  109. }
  110. // send a response back to the client.
  111. h.t.Write(s, nil, resp, &Options{})
  112. // send the trailer to end the stream.
  113. h.t.WriteStatus(s, status.New(codes.OK, ""))
  114. }
  115. func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) {
  116. header := make([]byte, 5)
  117. for {
  118. if _, err := s.Read(header); err != nil {
  119. if err == io.EOF {
  120. h.t.WriteStatus(s, status.New(codes.OK, ""))
  121. return
  122. }
  123. t.Errorf("Error on server while reading data header: %v", err)
  124. h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
  125. return
  126. }
  127. sz := binary.BigEndian.Uint32(header[1:])
  128. msg := make([]byte, int(sz))
  129. if _, err := s.Read(msg); err != nil {
  130. t.Errorf("Error on server while reading message: %v", err)
  131. h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
  132. return
  133. }
  134. buf := make([]byte, sz+5)
  135. buf[0] = byte(0)
  136. binary.BigEndian.PutUint32(buf[1:], uint32(sz))
  137. copy(buf[5:], msg)
  138. h.t.Write(s, nil, buf, &Options{})
  139. }
  140. }
// handleStreamMisbehave deliberately violates the client's flow-control
// windows by pushing raw data frames straight into the control buffer,
// bypassing the transport's quota accounting. For method "foo.Connection"
// it overruns only the connection-level window; for any other method the
// final frame is one byte larger than the remaining stream-level window.
func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
	conn, ok := s.st.(*http2Server)
	if !ok {
		t.Errorf("Failed to convert %v to *http2Server", s.st)
		h.t.WriteStatus(s, status.New(codes.Internal, ""))
		return
	}
	var sent int
	p := make([]byte, http2MaxFrameLen)
	for sent < initialWindowSize {
		n := initialWindowSize - sent
		// The last message may be smaller than http2MaxFrameLen
		if n <= http2MaxFrameLen {
			if s.Method() == "foo.Connection" {
				// Violate connection level flow control window of client but do not
				// violate any stream level windows.
				p = make([]byte, n)
			} else {
				// Violate stream level flow control window of client.
				p = make([]byte, n+1)
			}
		}
		conn.controlBuf.put(&dataFrame{
			streamID:    s.id,
			h:           nil,
			d:           p,
			onEachWrite: func() {},
		})
		sent += len(p)
	}
}
// handleStreamEncodingRequiredStatus ends the stream with a status whose
// message contains characters the HTTP/2 framer rejects, forcing the
// transport to percent-encode it on the wire.
func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) {
	// raw newline is not accepted by http2 framer so it must be encoded.
	h.t.WriteStatus(s, encodingTestStatus)
}
  176. func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) {
  177. headerFields := []hpack.HeaderField{}
  178. headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
  179. h.t.controlBuf.put(&headerFrame{
  180. streamID: s.id,
  181. hf: headerFields,
  182. endStream: false,
  183. })
  184. }
// handleStreamDelayRead delays reads so that the other side has to halt on
// stream-level flow control.
// This handler assumes dynamic flow control is turned off and assumes window
// sizes to be set to defaultWindowSize.
func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
	req := expectedRequest
	resp := expectedResponse
	if s.Method() == "foo.Large" {
		req = expectedRequestLarge
		resp = expectedResponseLarge
	}
	var (
		mu    sync.Mutex // guards total
		total int        // bytes replenished to the write quota so far
	)
	// Intercept replenish so we can observe how much window the server
	// side has handed back, while still delegating to the real method.
	s.wq.replenish = func(n int) {
		mu.Lock()
		total += n
		mu.Unlock()
		s.wq.realReplenish(n)
	}
	getTotal := func() int {
		mu.Lock()
		defer mu.Unlock()
		return total
	}
	done := make(chan struct{})
	defer close(done)
	go func() {
		for {
			select {
			// Prevent goroutine from leaking.
			case <-done:
				return
			default:
			}
			if getTotal() == defaultWindowSize {
				// Signal the client to start reading and
				// thereby send window update.
				close(h.notify)
				return
			}
			runtime.Gosched()
		}
	}()
	p := make([]byte, len(req))
	// Let the other side run out of stream-level window before
	// starting to read and thereby sending a window update.
	timer := time.NewTimer(time.Second * 10)
	select {
	case <-h.getNotified:
		timer.Stop()
	case <-timer.C:
		t.Errorf("Server timed-out.")
		return
	}
	_, err := s.Read(p)
	if err != nil {
		t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
		return
	}
	if !bytes.Equal(p, req) {
		t.Errorf("handleStream got %v, want %v", p, req)
		return
	}
	// This write will cause server to run out of stream level,
	// flow control and the other side won't send a window update
	// until that happens.
	if err := h.t.Write(s, nil, resp, &Options{}); err != nil {
		t.Errorf("server Write got %v, want <nil>", err)
		return
	}
	// Read one more time to ensure that everything remains fine and
	// that the goroutine, that we launched earlier to signal client
	// to read, gets enough time to process.
	_, err = s.Read(p)
	if err != nil {
		t.Errorf("s.Read(_) = _, %v, want _, nil", err)
		return
	}
	// send the trailer to end the stream.
	if err := h.t.WriteStatus(s, status.New(codes.OK, "")); err != nil {
		t.Errorf("server WriteStatus got %v, want <nil>", err)
		return
	}
}
// start starts server. Other goroutines should block on s.readyChan for further operations.
// It binds the listener (an ephemeral port when port == 0), reports the
// result on s.startedErr, then loops accepting connections and serving
// each transport's streams with the handler selected by ht.
func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) {
	var err error
	if port == 0 {
		s.lis, err = net.Listen("tcp", "localhost:0")
	} else {
		s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port))
	}
	if err != nil {
		s.startedErr <- fmt.Errorf("failed to listen: %v", err)
		return
	}
	_, p, err := net.SplitHostPort(s.lis.Addr().String())
	if err != nil {
		s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err)
		return
	}
	s.port = p
	s.conns = make(map[ServerTransport]bool)
	// Signal wait() that the server is ready to accept connections.
	s.startedErr <- nil
	for {
		conn, err := s.lis.Accept()
		if err != nil {
			return
		}
		transport, err := NewServerTransport("http2", conn, serverConfig)
		if err != nil {
			return
		}
		s.mu.Lock()
		if s.conns == nil {
			// stop() already ran; discard the new transport.
			s.mu.Unlock()
			transport.Close()
			return
		}
		s.conns[transport] = true
		h := &testStreamHandler{t: transport.(*http2Server)}
		s.h = h
		s.mu.Unlock()
		// Dispatch the stream handler matching the requested hType.
		switch ht {
		case notifyCall:
			go transport.HandleStreams(h.handleStreamAndNotify,
				func(ctx context.Context, _ string) context.Context {
					return ctx
				})
		case suspended:
			go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream.
				func(ctx context.Context, method string) context.Context {
					return ctx
				})
		case misbehaved:
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStreamMisbehave(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		case encodingRequiredStatus:
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStreamEncodingRequiredStatus(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		case invalidHeaderField:
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStreamInvalidHeaderField(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		case delayRead:
			h.notify = make(chan struct{})
			h.getNotified = make(chan struct{})
			s.mu.Lock()
			// Tell the test that the delay-read channels are wired up.
			close(s.ready)
			s.mu.Unlock()
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStreamDelayRead(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		case pingpong:
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStreamPingPong(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		default:
			go transport.HandleStreams(func(s *Stream) {
				go h.handleStream(t, s)
			}, func(ctx context.Context, method string) context.Context {
				return ctx
			})
		}
	}
}
  365. func (s *server) wait(t *testing.T, timeout time.Duration) {
  366. select {
  367. case err := <-s.startedErr:
  368. if err != nil {
  369. t.Fatal(err)
  370. }
  371. case <-time.After(timeout):
  372. t.Fatalf("Timed out after %v waiting for server to be ready", timeout)
  373. }
  374. }
  375. func (s *server) stop() {
  376. s.lis.Close()
  377. s.mu.Lock()
  378. for c := range s.conns {
  379. c.Close()
  380. }
  381. s.conns = nil
  382. s.mu.Unlock()
  383. }
  384. func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server {
  385. server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})}
  386. go server.start(t, port, serverConfig, ht)
  387. server.wait(t, 2*time.Second)
  388. return server
  389. }
  390. func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, *http2Client, func()) {
  391. return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{})
  392. }
  393. func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) {
  394. server := setUpServerOnly(t, port, serverConfig, ht)
  395. addr := "localhost:" + server.port
  396. target := TargetInfo{
  397. Addr: addr,
  398. }
  399. connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  400. ct, connErr := NewClientTransport(connectCtx, context.Background(), target, copts, func() {}, func(GoAwayReason) {}, func() {})
  401. if connErr != nil {
  402. cancel() // Do not cancel in success path.
  403. t.Fatalf("failed to create transport: %v", connErr)
  404. }
  405. return server, ct.(*http2Client), cancel
  406. }
// setUpWithNoPingServer dials a "server" that accepts exactly one raw TCP
// connection and never speaks HTTP/2, so it never answers pings. The
// accepted net.Conn is delivered on done; done is closed if Accept fails.
func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, done chan net.Conn) (*http2Client, func()) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	// Launch a non responsive server.
	go func() {
		defer lis.Close()
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error at server-side while accepting: %v", err)
			close(done)
			return
		}
		done <- conn
	}()
	connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
	tr, err := NewClientTransport(connectCtx, context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {})
	if err != nil {
		cancel() // Do not cancel in success path.
		// Server clean-up: close the listener and any conn already accepted.
		lis.Close()
		if conn, ok := <-done; ok {
			conn.Close()
		}
		t.Fatalf("Failed to dial: %v", err)
	}
	return tr.(*http2Client), cancel
}
  436. // TestInflightStreamClosing ensures that closing in-flight stream
  437. // sends status error to concurrent stream reader.
  438. func TestInflightStreamClosing(t *testing.T) {
  439. serverConfig := &ServerConfig{}
  440. server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
  441. defer cancel()
  442. defer server.stop()
  443. defer client.Close()
  444. stream, err := client.NewStream(context.Background(), &CallHdr{})
  445. if err != nil {
  446. t.Fatalf("Client failed to create RPC request: %v", err)
  447. }
  448. donec := make(chan struct{})
  449. serr := status.Error(codes.Internal, "client connection is closing")
  450. go func() {
  451. defer close(donec)
  452. if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr {
  453. t.Errorf("unexpected Stream error %v, expected %v", err, serr)
  454. }
  455. }()
  456. // should unblock concurrent stream.Read
  457. client.CloseStream(stream, serr)
  458. // wait for stream.Read error
  459. timeout := time.NewTimer(5 * time.Second)
  460. select {
  461. case <-donec:
  462. if !timeout.Stop() {
  463. <-timeout.C
  464. }
  465. case <-timeout.C:
  466. t.Fatalf("Test timed out, expected a status error.")
  467. }
  468. }
// TestMaxConnectionIdle tests that a server will send GoAway to a idle client.
// An idle client is one who doesn't make any RPC calls for a duration of
// MaxConnectionIdle time.
func TestMaxConnectionIdle(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer cancel()
	defer server.stop()
	defer client.Close()
	// Open one stream, then immediately close it so the connection
	// becomes idle from the server's perspective.
	stream, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Client failed to create RPC request: %v", err)
	}
	client.closeStream(stream, io.EOF, true, http2.ErrCodeCancel, nil, nil, false)
	// Wait for the server to see the closed stream and for the max-idle
	// logic to send a GoAway once no new RPCs are made.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.GoAway():
		if !timeout.Stop() {
			<-timeout.C
		}
	case <-timeout.C:
		t.Fatalf("Test timed out, expected a GoAway from the server.")
	}
}
// TestMaxConnectionIdleNegative tests that a server will not send GoAway to a
// non-idle (busy) client — one that keeps a stream open.
func TestMaxConnectionIdleNegative(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer cancel()
	defer server.stop()
	defer client.Close()
	// Keep a stream open so the connection never goes idle.
	_, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Client failed to create RPC request: %v", err)
	}
	// A GoAway within the window is a failure; silence until the timer
	// fires is success.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.GoAway():
		if !timeout.Stop() {
			<-timeout.C
		}
		t.Fatalf("A non-idle client received a GoAway.")
	case <-timeout.C:
	}
}
  523. // TestMaxConnectionAge tests that a server will send GoAway after a duration of MaxConnectionAge.
  524. func TestMaxConnectionAge(t *testing.T) {
  525. serverConfig := &ServerConfig{
  526. KeepaliveParams: keepalive.ServerParameters{
  527. MaxConnectionAge: 2 * time.Second,
  528. },
  529. }
  530. server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
  531. defer cancel()
  532. defer server.stop()
  533. defer client.Close()
  534. _, err := client.NewStream(context.Background(), &CallHdr{})
  535. if err != nil {
  536. t.Fatalf("Client failed to create stream: %v", err)
  537. }
  538. // Wait for max-age logic to send GoAway.
  539. timeout := time.NewTimer(4 * time.Second)
  540. select {
  541. case <-client.GoAway():
  542. if !timeout.Stop() {
  543. <-timeout.C
  544. }
  545. case <-timeout.C:
  546. t.Fatalf("Test timer out, expected a GoAway from the server.")
  547. }
  548. }
// Buffer sizes matching the transport's defaults, used when hand-rolling
// a framer against a raw TCP connection in TestKeepaliveServer.
const (
	defaultWriteBufSize = 32 * 1024
	defaultReadBufSize  = 32 * 1024
)
// TestKeepaliveServer tests that a server closes connection with a client that doesn't respond to keepalive pings.
func TestKeepaliveServer(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			Time:    2 * time.Second,
			Timeout: 1 * time.Second,
		},
	}
	server, c, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer cancel()
	defer server.stop()
	defer c.Close()
	// Dial a second, raw TCP connection that will never answer pings.
	client, err := net.Dial("tcp", server.lis.Addr().String())
	if err != nil {
		t.Fatalf("Failed to dial: %v", err)
	}
	defer client.Close()
	// Set read deadline on client conn so that it doesn't block forever in error cases.
	client.SetDeadline(time.Now().Add(10 * time.Second))
	// Complete just enough of the HTTP/2 handshake (preface + empty
	// SETTINGS) for the server to treat this as a live connection.
	if n, err := client.Write(clientPreface); err != nil || n != len(clientPreface) {
		t.Fatalf("Error writing client preface; n=%v, err=%v", n, err)
	}
	framer := newFramer(client, defaultWriteBufSize, defaultReadBufSize, 0)
	if err := framer.fr.WriteSettings(http2.Setting{}); err != nil {
		t.Fatal("Error writing settings frame:", err)
	}
	framer.writer.Flush()
	// Wait for keepalive logic to close the connection.
	time.Sleep(4 * time.Second)
	b := make([]byte, 24)
	// Drain until the server closes the connection; anything other than
	// io.EOF is a failure.
	for {
		_, err = client.Read(b)
		if err == nil {
			continue
		}
		if err != io.EOF {
			t.Fatalf("client.Read(_) = _,%v, want io.EOF", err)
		}
		break
	}
}
  594. // TestKeepaliveServerNegative tests that a server doesn't close connection with a client that responds to keepalive pings.
  595. func TestKeepaliveServerNegative(t *testing.T) {
  596. serverConfig := &ServerConfig{
  597. KeepaliveParams: keepalive.ServerParameters{
  598. Time: 2 * time.Second,
  599. Timeout: 1 * time.Second,
  600. },
  601. }
  602. server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
  603. defer cancel()
  604. defer server.stop()
  605. defer client.Close()
  606. // Give keepalive logic some time by sleeping.
  607. time.Sleep(4 * time.Second)
  608. // Assert that client is still active.
  609. client.mu.Lock()
  610. defer client.mu.Unlock()
  611. if client.state != reachable {
  612. t.Fatalf("Test failed: Expected server-client connection to be healthy.")
  613. }
  614. }
  615. func TestKeepaliveClientClosesIdleTransport(t *testing.T) {
  616. done := make(chan net.Conn, 1)
  617. tr, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
  618. Time: 2 * time.Second, // Keepalive time = 2 sec.
  619. Timeout: 1 * time.Second, // Keepalive timeout = 1 sec.
  620. PermitWithoutStream: true, // Run keepalive even with no RPCs.
  621. }}, done)
  622. defer cancel()
  623. defer tr.Close()
  624. conn, ok := <-done
  625. if !ok {
  626. t.Fatalf("Server didn't return connection object")
  627. }
  628. defer conn.Close()
  629. // Sleep for keepalive to close the connection.
  630. time.Sleep(4 * time.Second)
  631. // Assert that the connection was closed.
  632. tr.mu.Lock()
  633. defer tr.mu.Unlock()
  634. if tr.state == reachable {
  635. t.Fatalf("Test Failed: Expected client transport to have closed.")
  636. }
  637. }
  638. func TestKeepaliveClientStaysHealthyOnIdleTransport(t *testing.T) {
  639. done := make(chan net.Conn, 1)
  640. tr, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
  641. Time: 2 * time.Second, // Keepalive time = 2 sec.
  642. Timeout: 1 * time.Second, // Keepalive timeout = 1 sec.
  643. }}, done)
  644. defer cancel()
  645. defer tr.Close()
  646. conn, ok := <-done
  647. if !ok {
  648. t.Fatalf("server didn't reutrn connection object")
  649. }
  650. defer conn.Close()
  651. // Give keepalive some time.
  652. time.Sleep(4 * time.Second)
  653. // Assert that connections is still healthy.
  654. tr.mu.Lock()
  655. defer tr.mu.Unlock()
  656. if tr.state != reachable {
  657. t.Fatalf("Test failed: Expected client transport to be healthy.")
  658. }
  659. }
  660. func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
  661. done := make(chan net.Conn, 1)
  662. tr, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
  663. Time: 2 * time.Second, // Keepalive time = 2 sec.
  664. Timeout: 1 * time.Second, // Keepalive timeout = 1 sec.
  665. }}, done)
  666. defer cancel()
  667. defer tr.Close()
  668. conn, ok := <-done
  669. if !ok {
  670. t.Fatalf("Server didn't return connection object")
  671. }
  672. defer conn.Close()
  673. // Create a stream.
  674. _, err := tr.NewStream(context.Background(), &CallHdr{})
  675. if err != nil {
  676. t.Fatalf("Failed to create a new stream: %v", err)
  677. }
  678. // Give keepalive some time.
  679. time.Sleep(4 * time.Second)
  680. // Assert that transport was closed.
  681. tr.mu.Lock()
  682. defer tr.mu.Unlock()
  683. if tr.state == reachable {
  684. t.Fatalf("Test failed: Expected client transport to have closed.")
  685. }
  686. }
  687. func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
  688. s, tr, cancel := setUpWithOptions(t, 0, &ServerConfig{MaxStreams: math.MaxUint32}, normal, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
  689. Time: 2 * time.Second, // Keepalive time = 2 sec.
  690. Timeout: 1 * time.Second, // Keepalive timeout = 1 sec.
  691. PermitWithoutStream: true, // Run keepalive even with no RPCs.
  692. }})
  693. defer cancel()
  694. defer s.stop()
  695. defer tr.Close()
  696. // Give keep alive some time.
  697. time.Sleep(4 * time.Second)
  698. // Assert that transport is healthy.
  699. tr.mu.Lock()
  700. defer tr.mu.Unlock()
  701. if tr.state != reachable {
  702. t.Fatalf("Test failed: Expected client transport to be healthy.")
  703. }
  704. }
  705. func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
  706. serverConfig := &ServerConfig{
  707. KeepalivePolicy: keepalive.EnforcementPolicy{
  708. MinTime: 2 * time.Second,
  709. },
  710. }
  711. clientOptions := ConnectOptions{
  712. KeepaliveParams: keepalive.ClientParameters{
  713. Time: 50 * time.Millisecond,
  714. Timeout: 1 * time.Second,
  715. PermitWithoutStream: true,
  716. },
  717. }
  718. server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
  719. defer cancel()
  720. defer server.stop()
  721. defer client.Close()
  722. timeout := time.NewTimer(10 * time.Second)
  723. select {
  724. case <-client.GoAway():
  725. if !timeout.Stop() {
  726. <-timeout.C
  727. }
  728. case <-timeout.C:
  729. t.Fatalf("Test failed: Expected a GoAway from server.")
  730. }
  731. time.Sleep(500 * time.Millisecond)
  732. client.mu.Lock()
  733. defer client.mu.Unlock()
  734. if client.state == reachable {
  735. t.Fatalf("Test failed: Expected the connection to be closed.")
  736. }
  737. }
  738. func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
  739. serverConfig := &ServerConfig{
  740. KeepalivePolicy: keepalive.EnforcementPolicy{
  741. MinTime: 2 * time.Second,
  742. },
  743. }
  744. clientOptions := ConnectOptions{
  745. KeepaliveParams: keepalive.ClientParameters{
  746. Time: 50 * time.Millisecond,
  747. Timeout: 1 * time.Second,
  748. },
  749. }
  750. server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
  751. defer cancel()
  752. defer server.stop()
  753. defer client.Close()
  754. if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
  755. t.Fatalf("Client failed to create stream.")
  756. }
  757. timeout := time.NewTimer(10 * time.Second)
  758. select {
  759. case <-client.GoAway():
  760. if !timeout.Stop() {
  761. <-timeout.C
  762. }
  763. case <-timeout.C:
  764. t.Fatalf("Test failed: Expected a GoAway from server.")
  765. }
  766. time.Sleep(500 * time.Millisecond)
  767. client.mu.Lock()
  768. defer client.mu.Unlock()
  769. if client.state == reachable {
  770. t.Fatalf("Test failed: Expected the connection to be closed.")
  771. }
  772. }
  773. func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
  774. serverConfig := &ServerConfig{
  775. KeepalivePolicy: keepalive.EnforcementPolicy{
  776. MinTime: 100 * time.Millisecond,
  777. PermitWithoutStream: true,
  778. },
  779. }
  780. clientOptions := ConnectOptions{
  781. KeepaliveParams: keepalive.ClientParameters{
  782. Time: 101 * time.Millisecond,
  783. Timeout: 1 * time.Second,
  784. PermitWithoutStream: true,
  785. },
  786. }
  787. server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
  788. defer cancel()
  789. defer server.stop()
  790. defer client.Close()
  791. // Give keepalive enough time.
  792. time.Sleep(3 * time.Second)
  793. // Assert that connection is healthy.
  794. client.mu.Lock()
  795. defer client.mu.Unlock()
  796. if client.state != reachable {
  797. t.Fatalf("Test failed: Expected connection to be healthy.")
  798. }
  799. }
  800. func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
  801. serverConfig := &ServerConfig{
  802. KeepalivePolicy: keepalive.EnforcementPolicy{
  803. MinTime: 100 * time.Millisecond,
  804. },
  805. }
  806. clientOptions := ConnectOptions{
  807. KeepaliveParams: keepalive.ClientParameters{
  808. Time: 101 * time.Millisecond,
  809. Timeout: 1 * time.Second,
  810. },
  811. }
  812. server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
  813. defer cancel()
  814. defer server.stop()
  815. defer client.Close()
  816. if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
  817. t.Fatalf("Client failed to create stream.")
  818. }
  819. // Give keepalive enough time.
  820. time.Sleep(3 * time.Second)
  821. // Assert that connection is healthy.
  822. client.mu.Lock()
  823. defer client.mu.Unlock()
  824. if client.state != reachable {
  825. t.Fatalf("Test failed: Expected connection to be healthy.")
  826. }
  827. }
  828. func TestClientSendAndReceive(t *testing.T) {
  829. server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
  830. defer cancel()
  831. callHdr := &CallHdr{
  832. Host: "localhost",
  833. Method: "foo.Small",
  834. }
  835. s1, err1 := ct.NewStream(context.Background(), callHdr)
  836. if err1 != nil {
  837. t.Fatalf("failed to open stream: %v", err1)
  838. }
  839. if s1.id != 1 {
  840. t.Fatalf("wrong stream id: %d", s1.id)
  841. }
  842. s2, err2 := ct.NewStream(context.Background(), callHdr)
  843. if err2 != nil {
  844. t.Fatalf("failed to open stream: %v", err2)
  845. }
  846. if s2.id != 3 {
  847. t.Fatalf("wrong stream id: %d", s2.id)
  848. }
  849. opts := Options{Last: true}
  850. if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF {
  851. t.Fatalf("failed to send data: %v", err)
  852. }
  853. p := make([]byte, len(expectedResponse))
  854. _, recvErr := s1.Read(p)
  855. if recvErr != nil || !bytes.Equal(p, expectedResponse) {
  856. t.Fatalf("Error: %v, want <nil>; Result: %v, want %v", recvErr, p, expectedResponse)
  857. }
  858. _, recvErr = s1.Read(p)
  859. if recvErr != io.EOF {
  860. t.Fatalf("Error: %v; want <EOF>", recvErr)
  861. }
  862. ct.Close()
  863. server.stop()
  864. }
  865. func TestClientErrorNotify(t *testing.T) {
  866. server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
  867. defer cancel()
  868. go server.stop()
  869. // ct.reader should detect the error and activate ct.Error().
  870. <-ct.Error()
  871. ct.Close()
  872. }
  873. func performOneRPC(ct ClientTransport) {
  874. callHdr := &CallHdr{
  875. Host: "localhost",
  876. Method: "foo.Small",
  877. }
  878. s, err := ct.NewStream(context.Background(), callHdr)
  879. if err != nil {
  880. return
  881. }
  882. opts := Options{Last: true}
  883. if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF {
  884. time.Sleep(5 * time.Millisecond)
  885. // The following s.Recv()'s could error out because the
  886. // underlying transport is gone.
  887. //
  888. // Read response
  889. p := make([]byte, len(expectedResponse))
  890. s.Read(p)
  891. // Read io.EOF
  892. s.Read(p)
  893. }
  894. }
  895. func TestClientMix(t *testing.T) {
  896. s, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
  897. defer cancel()
  898. go func(s *server) {
  899. time.Sleep(5 * time.Second)
  900. s.stop()
  901. }(s)
  902. go func(ct ClientTransport) {
  903. <-ct.Error()
  904. ct.Close()
  905. }(ct)
  906. for i := 0; i < 1000; i++ {
  907. time.Sleep(10 * time.Millisecond)
  908. go performOneRPC(ct)
  909. }
  910. }
  911. func TestLargeMessage(t *testing.T) {
  912. server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
  913. defer cancel()
  914. callHdr := &CallHdr{
  915. Host: "localhost",
  916. Method: "foo.Large",
  917. }
  918. var wg sync.WaitGroup
  919. for i := 0; i < 2; i++ {
  920. wg.Add(1)
  921. go func() {
  922. defer wg.Done()
  923. s, err := ct.NewStream(context.Background(), callHdr)
  924. if err != nil {
  925. t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
  926. }
  927. if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil && err != io.EOF {
  928. t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
  929. }
  930. p := make([]byte, len(expectedResponseLarge))
  931. if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
  932. t.Errorf("s.Read(%v) = _, %v, want %v, <nil>", err, p, expectedResponse)
  933. }
  934. if _, err = s.Read(p); err != io.EOF {
  935. t.Errorf("Failed to complete the stream %v; want <EOF>", err)
  936. }
  937. }()
  938. }
  939. wg.Wait()
  940. ct.Close()
  941. server.stop()
  942. }
// TestLargeMessageWithDelayRead verifies stream-level flow control against
// a server handler (delayRead) that delays its reads: the client must stall
// once it exhausts its stream-level send window and resume only after the
// server reads and thereby grants a window update.
func TestLargeMessageWithDelayRead(t *testing.T) {
	// Disable dynamic flow control.
	sc := &ServerConfig{
		InitialWindowSize:     defaultWindowSize,
		InitialConnWindowSize: defaultWindowSize,
	}
	co := ConnectOptions{
		InitialWindowSize:     defaultWindowSize,
		InitialConnWindowSize: defaultWindowSize,
	}
	server, ct, cancel := setUpWithOptions(t, 0, sc, delayRead, co)
	defer cancel()
	defer server.stop()
	defer ct.Close()
	server.mu.Lock()
	ready := server.ready
	server.mu.Unlock()
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	// Overall deadline for the whole exchange.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
	defer cancel()
	s, err := ct.NewStream(ctx, callHdr)
	if err != nil {
		t.Fatalf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
		return
	}
	// Wait for server's handler to be initialized.
	select {
	case <-ready:
	case <-ctx.Done():
		t.Fatalf("Client timed out waiting for server handler to be initialized.")
	}
	server.mu.Lock()
	serviceHandler := server.h
	server.mu.Unlock()
	var (
		mu    sync.Mutex
		total int
	)
	// Intercept the stream's write-quota replenishment so the test can
	// observe exactly how much window the server has granted back.
	s.wq.replenish = func(n int) {
		mu.Lock()
		total += n
		mu.Unlock()
		s.wq.realReplenish(n)
	}
	getTotal := func() int {
		mu.Lock()
		defer mu.Unlock()
		return total
	}
	done := make(chan struct{})
	defer close(done)
	// Watcher goroutine: once the client has been granted a full window's
	// worth of quota back, let the server handler proceed.
	go func() {
		for {
			select {
			// Prevent goroutine from leaking in case of error.
			case <-done:
				return
			default:
			}
			if getTotal() == defaultWindowSize {
				// unblock server to be able to read and
				// thereby send stream level window update.
				close(serviceHandler.getNotified)
				return
			}
			runtime.Gosched()
		}
	}()
	// This write will cause client to run out of stream level,
	// flow control and the other side won't send a window update
	// until that happens.
	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{}); err != nil {
		t.Fatalf("write(_, _, _) = %v, want <nil>", err)
	}
	p := make([]byte, len(expectedResponseLarge))
	// Wait for the other side to run out of stream level flow control before
	// reading and thereby sending a window update.
	select {
	case <-serviceHandler.notify:
	case <-ctx.Done():
		t.Fatalf("Client timed out")
	}
	if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
		t.Fatalf("s.Read(_) = _, %v, want _, <nil>", err)
	}
	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil {
		t.Fatalf("Write(_, _, _) = %v, want <nil>", err)
	}
	if _, err = s.Read(p); err != io.EOF {
		t.Fatalf("Failed to complete the stream %v; want <EOF>", err)
	}
}
// TestGracefulClose verifies that GracefulClose lets an already-established
// stream run to completion while all streams created afterwards fail with
// ErrConnClosing/errStreamDrain.
func TestGracefulClose(t *testing.T) {
	server, ct, cancel := setUp(t, 0, math.MaxUint32, pingpong)
	defer cancel()
	defer func() {
		// Stop the server's listener to make the server's goroutines terminate
		// (after the last active stream is done).
		server.lis.Close()
		// Check for goroutine leaks (i.e. GracefulClose with an active stream
		// doesn't eventually close the connection when that stream completes).
		leakcheck.Check(t)
		// Correctly clean up the server
		server.stop()
	}()
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
	defer cancel()
	s, err := ct.NewStream(ctx, &CallHdr{})
	if err != nil {
		t.Fatalf("NewStream(_, _) = _, %v, want _, <nil>", err)
	}
	// Send one framed message (5-byte header + 1024-byte payload) and read
	// back the pingpong echo before initiating the graceful close.
	msg := make([]byte, 1024)
	outgoingHeader := make([]byte, 5)
	outgoingHeader[0] = byte(0) // compression flag unset
	binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(len(msg)))
	incomingHeader := make([]byte, 5)
	if err := ct.Write(s, outgoingHeader, msg, &Options{}); err != nil {
		t.Fatalf("Error while writing: %v", err)
	}
	if _, err := s.Read(incomingHeader); err != nil {
		t.Fatalf("Error while reading: %v", err)
	}
	sz := binary.BigEndian.Uint32(incomingHeader[1:])
	recvMsg := make([]byte, int(sz))
	if _, err := s.Read(recvMsg); err != nil {
		t.Fatalf("Error while reading: %v", err)
	}
	if err = ct.GracefulClose(); err != nil {
		t.Fatalf("GracefulClose() = %v, want <nil>", err)
	}
	var wg sync.WaitGroup
	// Expect the failure for all the follow-up streams because ct has been closed gracefully.
	for i := 0; i < 200; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			str, err := ct.NewStream(context.Background(), &CallHdr{})
			if err == ErrConnClosing {
				return
			} else if err != nil {
				t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing)
				return
			}
			ct.Write(str, nil, nil, &Options{Last: true})
			if _, err := str.Read(make([]byte, 8)); err != errStreamDrain && err != ErrConnClosing {
				t.Errorf("_.Read(_) = _, %v, want _, %v or %v", err, errStreamDrain, ErrConnClosing)
			}
		}()
	}
	// Finish the pre-existing stream: half-close it and expect a clean EOF.
	ct.Write(s, nil, nil, &Options{Last: true})
	if _, err := s.Read(incomingHeader); err != io.EOF {
		t.Fatalf("Client expected EOF from the server. Got: %v", err)
	}
	// The stream which was created before graceful close can still proceed.
	wg.Wait()
}
  1102. func TestLargeMessageSuspension(t *testing.T) {
  1103. server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended)
  1104. defer cancel()
  1105. callHdr := &CallHdr{
  1106. Host: "localhost",
  1107. Method: "foo.Large",
  1108. }
  1109. // Set a long enough timeout for writing a large message out.
  1110. ctx, cancel := context.WithTimeout(context.Background(), time.Second)
  1111. defer cancel()
  1112. s, err := ct.NewStream(ctx, callHdr)
  1113. if err != nil {
  1114. t.Fatalf("failed to open stream: %v", err)
  1115. }
  1116. // Launch a goroutine simillar to the stream monitoring goroutine in
  1117. // stream.go to keep track of context timeout and call CloseStream.
  1118. go func() {
  1119. <-ctx.Done()
  1120. ct.CloseStream(s, ContextErr(ctx.Err()))
  1121. }()
  1122. // Write should not be done successfully due to flow control.
  1123. msg := make([]byte, initialWindowSize*8)
  1124. ct.Write(s, nil, msg, &Options{})
  1125. err = ct.Write(s, nil, msg, &Options{Last: true})
  1126. if err != errStreamDone {
  1127. t.Fatalf("Write got %v, want io.EOF", err)
  1128. }
  1129. expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
  1130. if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() {
  1131. t.Fatalf("Read got %v of type %T, want %v", err, err, expectedErr)
  1132. }
  1133. ct.Close()
  1134. server.stop()
  1135. }
// TestMaxStreams verifies that the client honors the server's
// MaxStreams(=1) setting: additional NewStream calls block until an
// existing stream is closed.
func TestMaxStreams(t *testing.T) {
	serverConfig := &ServerConfig{
		MaxStreams: 1,
	}
	server, ct, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer cancel()
	defer ct.Close()
	defer server.stop()
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	// First stream; it occupies the single slot allowed by the server.
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		t.Fatalf("Failed to open stream: %v", err)
	}
	// Keep creating streams until one fails with deadline exceeded, marking the application
	// of server settings on client.
	slist := []*Stream{}
	pctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	timer := time.NewTimer(time.Second * 10)
	expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
	for {
		select {
		case <-timer.C:
			t.Fatalf("Test timeout: client didn't receive server settings.")
		default:
		}
		ctx, cancel := context.WithDeadline(pctx, time.Now().Add(time.Second))
		// This is only to get rid of govet. All these context are based on a base
		// context which is canceled at the end of the test.
		defer cancel()
		if str, err := ct.NewStream(ctx, callHdr); err == nil {
			slist = append(slist, str)
			continue
		} else if err.Error() != expectedErr.Error() {
			t.Fatalf("ct.NewStream(_,_) = _, %v, want _, %v", err, expectedErr)
		}
		timer.Stop()
		break
	}
	done := make(chan struct{})
	// Try and create a new stream.
	go func() {
		defer close(done)
		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
		defer cancel()
		if _, err := ct.NewStream(ctx, callHdr); err != nil {
			t.Errorf("Failed to open stream: %v", err)
		}
	}()
	// Close all the extra streams created and make sure the new stream is not created.
	for _, str := range slist {
		ct.CloseStream(str, nil)
	}
	select {
	case <-done:
		t.Fatalf("Test failed: didn't expect new stream to be created just yet.")
	default:
	}
	// Close the first stream created so that the new stream can finally be created.
	ct.CloseStream(s, nil)
	<-done
	ct.Close()
	<-ct.writerDone
	// The client must have recorded the server-advertised limit.
	if ct.maxConcurrentStreams != 1 {
		t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams)
	}
}
  1206. func TestServerContextCanceledOnClosedConnection(t *testing.T) {
  1207. server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended)
  1208. defer cancel()
  1209. callHdr := &CallHdr{
  1210. Host: "localhost",
  1211. Method: "foo",
  1212. }
  1213. var sc *http2Server
  1214. // Wait until the server transport is setup.
  1215. for {
  1216. server.mu.Lock()
  1217. if len(server.conns) == 0 {
  1218. server.mu.Unlock()
  1219. time.Sleep(time.Millisecond)
  1220. continue
  1221. }
  1222. for k := range server.conns {
  1223. var ok bool
  1224. sc, ok = k.(*http2Server)
  1225. if !ok {
  1226. t.Fatalf("Failed to convert %v to *http2Server", k)
  1227. }
  1228. }
  1229. server.mu.Unlock()
  1230. break
  1231. }
  1232. s, err := ct.NewStream(context.Background(), callHdr)
  1233. if err != nil {
  1234. t.Fatalf("Failed to open stream: %v", err)
  1235. }
  1236. ct.controlBuf.put(&dataFrame{
  1237. streamID: s.id,
  1238. endStream: false,
  1239. h: nil,
  1240. d: make([]byte, http2MaxFrameLen),
  1241. onEachWrite: func() {},
  1242. })
  1243. // Loop until the server side stream is created.
  1244. var ss *Stream
  1245. for {
  1246. time.Sleep(time.Second)
  1247. sc.mu.Lock()
  1248. if len(sc.activeStreams) == 0 {
  1249. sc.mu.Unlock()
  1250. continue
  1251. }
  1252. ss = sc.activeStreams[s.id]
  1253. sc.mu.Unlock()
  1254. break
  1255. }
  1256. ct.Close()
  1257. select {
  1258. case <-ss.Context().Done():
  1259. if ss.Context().Err() != context.Canceled {
  1260. t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled)
  1261. }
  1262. case <-time.After(5 * time.Second):
  1263. t.Fatalf("Failed to cancel the context of the sever side stream.")
  1264. }
  1265. server.stop()
  1266. }
// TestClientConnDecoupledFromApplicationRead verifies that connection-level
// flow control on the client is decoupled from application reads: after the
// server exhausts the client's connection window on one stream, data can
// still flow on a second stream before the first is read.
func TestClientConnDecoupledFromApplicationRead(t *testing.T) {
	connectOptions := ConnectOptions{
		InitialWindowSize:     defaultWindowSize,
		InitialConnWindowSize: defaultWindowSize,
	}
	server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions)
	defer cancel()
	defer server.stop()
	defer client.Close()
	// Wait for the server transport to come up.
	waitWhileTrue(t, func() (bool, error) {
		server.mu.Lock()
		defer server.mu.Unlock()
		if len(server.conns) == 0 {
			return true, fmt.Errorf("timed-out while waiting for connection to be created on the server")
		}
		return false, nil
	})
	var st *http2Server
	server.mu.Lock()
	for k := range server.conns {
		st = k.(*http2Server)
	}
	notifyChan := make(chan struct{})
	server.h.notify = notifyChan
	server.mu.Unlock()
	cstream1, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Client failed to create first stream. Err: %v", err)
	}
	// Wait until the server handler has seen the first stream.
	<-notifyChan
	var sstream1 *Stream
	// Access stream on the server.
	st.mu.Lock()
	for _, v := range st.activeStreams {
		if v.id == cstream1.id {
			sstream1 = v
		}
	}
	st.mu.Unlock()
	if sstream1 == nil {
		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id)
	}
	// Exhaust client's connection window.
	if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
		t.Fatalf("Server failed to write data. Err: %v", err)
	}
	notifyChan = make(chan struct{})
	server.mu.Lock()
	server.h.notify = notifyChan
	server.mu.Unlock()
	// Create another stream on client.
	cstream2, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Client failed to create second stream. Err: %v", err)
	}
	<-notifyChan
	var sstream2 *Stream
	st.mu.Lock()
	for _, v := range st.activeStreams {
		if v.id == cstream2.id {
			sstream2 = v
		}
	}
	st.mu.Unlock()
	if sstream2 == nil {
		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id)
	}
	// Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream.
	if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
		t.Fatalf("Server failed to write data. Err: %v", err)
	}
	// Client should be able to read data on second stream.
	if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil {
		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
	}
	// Client should be able to read data on first stream.
	if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil {
		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
	}
}
// TestServerConnDecoupledFromApplicationRead is the mirror of the client
// test above: after the client exhausts the server's connection window on
// one stream, it can still create a second stream and send data on it, and
// the server can read the first stream afterwards.
func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
	serverConfig := &ServerConfig{
		InitialWindowSize:     defaultWindowSize,
		InitialConnWindowSize: defaultWindowSize,
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer cancel()
	defer server.stop()
	defer client.Close()
	// Wait for the server transport to come up.
	waitWhileTrue(t, func() (bool, error) {
		server.mu.Lock()
		defer server.mu.Unlock()
		if len(server.conns) == 0 {
			return true, fmt.Errorf("timed-out while waiting for connection to be created on the server")
		}
		return false, nil
	})
	var st *http2Server
	server.mu.Lock()
	for k := range server.conns {
		st = k.(*http2Server)
	}
	server.mu.Unlock()
	cstream1, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Failed to create 1st stream. Err: %v", err)
	}
	// Exhaust server's connection window.
	if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil {
		t.Fatalf("Client failed to write data. Err: %v", err)
	}
	// Client should be able to create another stream and send data on it.
	cstream2, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("Failed to create 2nd stream. Err: %v", err)
	}
	if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil {
		t.Fatalf("Client failed to write data. Err: %v", err)
	}
	// Get the streams on server.
	waitWhileTrue(t, func() (bool, error) {
		st.mu.Lock()
		defer st.mu.Unlock()
		if len(st.activeStreams) != 2 {
			return true, fmt.Errorf("timed-out while waiting for server to have created the streams")
		}
		return false, nil
	})
	var sstream1 *Stream
	st.mu.Lock()
	for _, v := range st.activeStreams {
		if v.id == 1 {
			sstream1 = v
		}
	}
	st.mu.Unlock()
	// Reading from the stream on server should succeed.
	if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil {
		t.Fatalf("_.Read(_) = %v, want <nil>", err)
	}
	// The client half-closed stream 1 (Last: true), so a further read sees EOF.
	if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF {
		t.Fatalf("_.Read(_) = %v, want io.EOF", err)
	}
}
  1411. func TestServerWithMisbehavedClient(t *testing.T) {
  1412. server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  1413. defer server.stop()
  1414. // Create a client that can override server stream quota.
  1415. mconn, err := net.Dial("tcp", server.lis.Addr().String())
  1416. if err != nil {
  1417. t.Fatalf("Clent failed to dial:%v", err)
  1418. }
  1419. defer mconn.Close()
  1420. if err := mconn.SetWriteDeadline(time.Now().Add(time.Second * 10)); err != nil {
  1421. t.Fatalf("Failed to set write deadline: %v", err)
  1422. }
  1423. if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) {
  1424. t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, <nil>", n, err, len(clientPreface))
  1425. }
  1426. // success chan indicates that reader received a RSTStream from server.
  1427. success := make(chan struct{})
  1428. var mu sync.Mutex
  1429. framer := http2.NewFramer(mconn, mconn)
  1430. if err := framer.WriteSettings(); err != nil {
  1431. t.Fatalf("Error while writing settings: %v", err)
  1432. }
  1433. go func() { // Launch a reader for this misbehaving client.
  1434. for {
  1435. frame, err := framer.ReadFrame()
  1436. if err != nil {
  1437. return
  1438. }
  1439. switch frame := frame.(type) {
  1440. case *http2.PingFrame:
  1441. // Write ping ack back so that server's BDP estimation works right.
  1442. mu.Lock()
  1443. framer.WritePing(true, frame.Data)
  1444. mu.Unlock()
  1445. case *http2.RSTStreamFrame:
  1446. if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1447. t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1448. }
  1449. close(success)
  1450. return
  1451. default:
  1452. // Do nothing.
  1453. }
  1454. }
  1455. }()
  1456. // Create a stream.
  1457. var buf bytes.Buffer
  1458. henc := hpack.NewEncoder(&buf)
  1459. // TODO(mmukhi): Remove unnecessary fields.
  1460. if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}); err != nil {
  1461. t.Fatalf("Error while encoding header: %v", err)
  1462. }
  1463. if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil {
  1464. t.Fatalf("Error while encoding header: %v", err)
  1465. }
  1466. if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil {
  1467. t.Fatalf("Error while encoding header: %v", err)
  1468. }
  1469. if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil {
  1470. t.Fatalf("Error while encoding header: %v", err)
  1471. }
  1472. mu.Lock()
  1473. if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  1474. mu.Unlock()
  1475. t.Fatalf("Error while writing headers: %v", err)
  1476. }
  1477. mu.Unlock()
  1478. // Test server behavior for violation of stream flow control window size restriction.
  1479. timer := time.NewTimer(time.Second * 5)
  1480. dbuf := make([]byte, http2MaxFrameLen)
  1481. for {
  1482. select {
  1483. case <-timer.C:
  1484. t.Fatalf("Test timed out.")
  1485. case <-success:
  1486. return
  1487. default:
  1488. }
  1489. mu.Lock()
  1490. if err := framer.WriteData(1, false, dbuf); err != nil {
  1491. mu.Unlock()
  1492. // Error here means the server could have closed the connection due to flow control
  1493. // violation. Make sure that is the case by waiting for success chan to be closed.
  1494. select {
  1495. case <-timer.C:
  1496. t.Fatalf("Error while writing data: %v", err)
  1497. case <-success:
  1498. return
  1499. }
  1500. }
  1501. mu.Unlock()
  1502. // This for loop is capable of hogging the CPU and cause starvation
  1503. // in Go versions prior to 1.9,
  1504. // in single CPU environment. Explicitly relinquish processor.
  1505. runtime.Gosched()
  1506. }
  1507. }
  1508. func TestClientWithMisbehavedServer(t *testing.T) {
  1509. // Create a misbehaving server.
  1510. lis, err := net.Listen("tcp", "localhost:0")
  1511. if err != nil {
  1512. t.Fatalf("Error while listening: %v", err)
  1513. }
  1514. defer lis.Close()
  1515. // success chan indicates that the server received
  1516. // RSTStream from the client.
  1517. success := make(chan struct{})
  1518. go func() { // Launch the misbehaving server.
  1519. sconn, err := lis.Accept()
  1520. if err != nil {
  1521. t.Errorf("Error while accepting: %v", err)
  1522. return
  1523. }
  1524. defer sconn.Close()
  1525. if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil {
  1526. t.Errorf("Error while reading clieng preface: %v", err)
  1527. return
  1528. }
  1529. sfr := http2.NewFramer(sconn, sconn)
  1530. if err := sfr.WriteSettingsAck(); err != nil {
  1531. t.Errorf("Error while writing settings: %v", err)
  1532. return
  1533. }
  1534. var mu sync.Mutex
  1535. for {
  1536. frame, err := sfr.ReadFrame()
  1537. if err != nil {
  1538. return
  1539. }
  1540. switch frame := frame.(type) {
  1541. case *http2.HeadersFrame:
  1542. // When the client creates a stream, violate the stream flow control.
  1543. go func() {
  1544. buf := make([]byte, http2MaxFrameLen)
  1545. for {
  1546. mu.Lock()
  1547. if err := sfr.WriteData(1, false, buf); err != nil {
  1548. mu.Unlock()
  1549. return
  1550. }
  1551. mu.Unlock()
  1552. // This for loop is capable of hogging the CPU and cause starvation
  1553. // in Go versions prior to 1.9,
  1554. // in single CPU environment. Explicitly relinquish processor.
  1555. runtime.Gosched()
  1556. }
  1557. }()
  1558. case *http2.RSTStreamFrame:
  1559. if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1560. t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1561. }
  1562. close(success)
  1563. return
  1564. case *http2.PingFrame:
  1565. mu.Lock()
  1566. sfr.WritePing(true, frame.Data)
  1567. mu.Unlock()
  1568. default:
  1569. }
  1570. }
  1571. }()
  1572. connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  1573. defer cancel()
  1574. ct, err := NewClientTransport(connectCtx, context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{}, func() {}, func(GoAwayReason) {}, func() {})
  1575. if err != nil {
  1576. t.Fatalf("Error while creating client transport: %v", err)
  1577. }
  1578. defer ct.Close()
  1579. str, err := ct.NewStream(context.Background(), &CallHdr{})
  1580. if err != nil {
  1581. t.Fatalf("Error while creating stream: %v", err)
  1582. }
  1583. timer := time.NewTimer(time.Second * 5)
  1584. go func() { // This go routine mimics the one in stream.go to call CloseStream.
  1585. <-str.Done()
  1586. ct.CloseStream(str, nil)
  1587. }()
  1588. select {
  1589. case <-timer.C:
  1590. t.Fatalf("Test timed-out.")
  1591. case <-success:
  1592. }
  1593. }
// encodingTestStatus carries a message ("\n") that needs percent-encoding
// on the wire; used by TestEncodingRequiredStatus below.
var encodingTestStatus = status.New(codes.Internal, "\n")
  1595. func TestEncodingRequiredStatus(t *testing.T) {
  1596. server, ct, cancel := setUp(t, 0, math.MaxUint32, encodingRequiredStatus)
  1597. defer cancel()
  1598. callHdr := &CallHdr{
  1599. Host: "localhost",
  1600. Method: "foo",
  1601. }
  1602. s, err := ct.NewStream(context.Background(), callHdr)
  1603. if err != nil {
  1604. return
  1605. }
  1606. opts := Options{Last: true}
  1607. if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != errStreamDone {
  1608. t.Fatalf("Failed to write the request: %v", err)
  1609. }
  1610. p := make([]byte, http2MaxFrameLen)
  1611. if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF {
  1612. t.Fatalf("Read got error %v, want %v", err, io.EOF)
  1613. }
  1614. if !reflect.DeepEqual(s.Status(), encodingTestStatus) {
  1615. t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus)
  1616. }
  1617. ct.Close()
  1618. server.stop()
  1619. }
  1620. func TestInvalidHeaderField(t *testing.T) {
  1621. server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField)
  1622. defer cancel()
  1623. callHdr := &CallHdr{
  1624. Host: "localhost",
  1625. Method: "foo",
  1626. }
  1627. s, err := ct.NewStream(context.Background(), callHdr)
  1628. if err != nil {
  1629. return
  1630. }
  1631. p := make([]byte, http2MaxFrameLen)
  1632. _, err = s.trReader.(*transportReader).Read(p)
  1633. if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) {
  1634. t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.Internal, expectedInvalidHeaderField)
  1635. }
  1636. ct.Close()
  1637. server.stop()
  1638. }
  1639. func TestIsReservedHeader(t *testing.T) {
  1640. tests := []struct {
  1641. h string
  1642. want bool
  1643. }{
  1644. {"", false}, // but should be rejected earlier
  1645. {"foo", false},
  1646. {"content-type", true},
  1647. {"user-agent", true},
  1648. {":anything", true},
  1649. {"grpc-message-type", true},
  1650. {"grpc-encoding", true},
  1651. {"grpc-message", true},
  1652. {"grpc-status", true},
  1653. {"grpc-timeout", true},
  1654. {"te", true},
  1655. }
  1656. for _, tt := range tests {
  1657. got := isReservedHeader(tt.h)
  1658. if got != tt.want {
  1659. t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want)
  1660. }
  1661. }
  1662. }
  1663. func TestContextErr(t *testing.T) {
  1664. for _, test := range []struct {
  1665. // input
  1666. errIn error
  1667. // outputs
  1668. errOut error
  1669. }{
  1670. {context.DeadlineExceeded, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())},
  1671. {context.Canceled, status.Error(codes.Canceled, context.Canceled.Error())},
  1672. } {
  1673. err := ContextErr(test.errIn)
  1674. if err.Error() != test.errOut.Error() {
  1675. t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut)
  1676. }
  1677. }
  1678. }
// windowSizeConfig bundles the initial HTTP/2 flow-control window sizes
// used to configure both ends of a test connection. A zero value lets the
// transport use its dynamic (BDP-based) defaults — see the callers.
type windowSizeConfig struct {
	serverStream int32 // server per-stream initial window
	serverConn   int32 // server connection-level initial window
	clientStream int32 // client per-stream initial window
	clientConn   int32 // client connection-level initial window
}
  1685. func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
  1686. wc := windowSizeConfig{
  1687. serverStream: 10 * 1024 * 1024,
  1688. serverConn: 12 * 1024 * 1024,
  1689. clientStream: 6 * 1024 * 1024,
  1690. clientConn: 8 * 1024 * 1024,
  1691. }
  1692. testFlowControlAccountCheck(t, 1024*1024, wc)
  1693. }
  1694. func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
  1695. wc := windowSizeConfig{
  1696. serverStream: defaultWindowSize,
  1697. // Note this is smaller than initialConnWindowSize which is the current default.
  1698. serverConn: defaultWindowSize,
  1699. clientStream: defaultWindowSize,
  1700. clientConn: defaultWindowSize,
  1701. }
  1702. testFlowControlAccountCheck(t, 1024*1024, wc)
  1703. }
// TestAccountCheckDynamicWindowSmallMessage runs the flow-control accounting
// check with zero-valued window config (dynamic window sizing) and 1KB messages.
func TestAccountCheckDynamicWindowSmallMessage(t *testing.T) {
	testFlowControlAccountCheck(t, 1024, windowSizeConfig{})
}
// TestAccountCheckDynamicWindowLargeMessage runs the flow-control accounting
// check with zero-valued window config (dynamic window sizing) and 1MB messages.
func TestAccountCheckDynamicWindowLargeMessage(t *testing.T) {
	testFlowControlAccountCheck(t, 1024*1024, windowSizeConfig{})
}
// testFlowControlAccountCheck opens several concurrent pingpong streams with
// the given window-size configuration, drives traffic over them, shuts both
// transports down, and then verifies that client-side and server-side
// flow-control accounting balance: for every stream, one side's inflow
// bookkeeping must mirror the peer's outstanding-bytes bookkeeping, and the
// transport-level receive windows must equal the peer's remaining send quota.
func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) {
	sc := &ServerConfig{
		InitialWindowSize:     wc.serverStream,
		InitialConnWindowSize: wc.serverConn,
	}
	co := ConnectOptions{
		InitialWindowSize:     wc.clientStream,
		InitialConnWindowSize: wc.clientConn,
	}
	server, client, cancel := setUpWithOptions(t, 0, sc, pingpong, co)
	defer cancel()
	defer server.stop()
	defer client.Close()
	// Block until the server has accepted the client's connection.
	waitWhileTrue(t, func() (bool, error) {
		server.mu.Lock()
		defer server.mu.Unlock()
		if len(server.conns) == 0 {
			return true, fmt.Errorf("timed out while waiting for server transport to be created")
		}
		return false, nil
	})
	// Grab the (single) server-side transport so its internals can be
	// inspected after shutdown.
	var st *http2Server
	server.mu.Lock()
	for k := range server.conns {
		st = k.(*http2Server)
	}
	server.mu.Unlock()
	const numStreams = 10
	clientStreams := make([]*Stream, numStreams)
	for i := 0; i < numStreams; i++ {
		var err error
		clientStreams[i], err = client.NewStream(context.Background(), &CallHdr{})
		if err != nil {
			t.Fatalf("Failed to create stream. Err: %v", err)
		}
	}
	var wg sync.WaitGroup
	// For each stream send pingpong messages to the server.
	for _, stream := range clientStreams {
		wg.Add(1)
		go func(stream *Stream) {
			defer wg.Done()
			// gRPC wire format: 1-byte compression flag + 4-byte big-endian
			// length, followed by msgSize payload bytes.
			buf := make([]byte, msgSize+5)
			buf[0] = byte(0)
			binary.BigEndian.PutUint32(buf[1:], uint32(msgSize))
			opts := Options{}
			header := make([]byte, 5)
			// Ten request/response round trips per stream.
			for i := 1; i <= 10; i++ {
				if err := client.Write(stream, nil, buf, &opts); err != nil {
					t.Errorf("Error on client while writing message: %v", err)
					return
				}
				if _, err := stream.Read(header); err != nil {
					t.Errorf("Error on client while reading data frame header: %v", err)
					return
				}
				sz := binary.BigEndian.Uint32(header[1:])
				recvMsg := make([]byte, int(sz))
				if _, err := stream.Read(recvMsg); err != nil {
					t.Errorf("Error on client while reading data: %v", err)
					return
				}
				if len(recvMsg) != msgSize {
					t.Errorf("Length of message received by client: %v, want: %v", len(recvMsg), msgSize)
					return
				}
			}
		}(stream)
	}
	wg.Wait()
	serverStreams := map[uint32]*Stream{}
	loopyClientStreams := map[uint32]*outStream{}
	loopyServerStreams := map[uint32]*outStream{}
	// Get all the streams from server reader and writer and client writer.
	st.mu.Lock()
	for _, stream := range clientStreams {
		id := stream.id
		serverStreams[id] = st.activeStreams[id]
		loopyServerStreams[id] = st.loopy.estdStreams[id]
		loopyClientStreams[id] = client.loopy.estdStreams[id]
	}
	st.mu.Unlock()
	// Close all streams
	for _, stream := range clientStreams {
		client.Write(stream, nil, nil, &Options{Last: true})
		if _, err := stream.Read(make([]byte, 5)); err != io.EOF {
			t.Fatalf("Client expected an EOF from the server. Got: %v", err)
		}
	}
	// Close down both server and client so that their internals can be read without data
	// races.
	client.Close()
	st.Close()
	<-st.readerDone
	<-st.writerDone
	<-client.readerDone
	<-client.writerDone
	for _, cstream := range clientStreams {
		id := cstream.id
		sstream := serverStreams[id]
		loopyServerStream := loopyServerStreams[id]
		loopyClientStream := loopyClientStreams[id]
		// Check stream flow control: each side's inflow window (limit plus
		// delta, minus data received but not yet consumed or acknowledged)
		// must equal the peer's send window minus its unacknowledged bytes.
		if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding {
			t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding)
		}
		if int(sstream.fc.limit+sstream.fc.delta-sstream.fc.pendingData-sstream.fc.pendingUpdate) != int(client.loopy.oiws)-loopyClientStream.bytesOutStanding {
			t.Fatalf("Account mismatch: server stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != client outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", sstream.fc.limit, sstream.fc.delta, sstream.fc.pendingData, sstream.fc.pendingUpdate, client.loopy.oiws, loopyClientStream.bytesOutStanding)
		}
	}
	// Check transport flow control: the connection-level receive window must
	// equal unacked bytes on this side plus the peer's remaining send quota.
	if client.fc.limit != client.fc.unacked+st.loopy.sendQuota {
		t.Fatalf("Account mismatch: client transport inflow(%d) != client unacked(%d) + server sendQuota(%d)", client.fc.limit, client.fc.unacked, st.loopy.sendQuota)
	}
	if st.fc.limit != st.fc.unacked+client.loopy.sendQuota {
		t.Fatalf("Account mismatch: server transport inflow(%d) != server unacked(%d) + client sendQuota(%d)", st.fc.limit, st.fc.unacked, client.loopy.sendQuota)
	}
}
  1828. func waitWhileTrue(t *testing.T, condition func() (bool, error)) {
  1829. var (
  1830. wait bool
  1831. err error
  1832. )
  1833. timer := time.NewTimer(time.Second * 5)
  1834. for {
  1835. wait, err = condition()
  1836. if wait {
  1837. select {
  1838. case <-timer.C:
  1839. t.Fatalf(err.Error())
  1840. default:
  1841. time.Sleep(50 * time.Millisecond)
  1842. continue
  1843. }
  1844. }
  1845. if !timer.Stop() {
  1846. <-timer.C
  1847. }
  1848. break
  1849. }
  1850. }
// If any error occurs on a call to Stream.Read, future calls
// should continue to return that same error.
func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
	// Build a minimal Stream wired to an in-memory receive buffer; no real
	// transport is involved.
	testRecvBuffer := newRecvBuffer()
	s := &Stream{
		ctx:         context.Background(),
		buf:         testRecvBuffer,
		requestRead: func(int) {},
	}
	s.trReader = &transportReader{
		reader: &recvBufferReader{
			ctx:     s.ctx,
			ctxDone: s.ctx.Done(),
			recv:    s.buf,
		},
		windowHandler: func(int) {},
	}
	testData := make([]byte, 1)
	testData[0] = 5
	testErr := errors.New("test error")
	// First message carries an error, so the first Read must fail with it.
	s.write(recvMsg{data: testData, err: testErr})
	inBuf := make([]byte, 1)
	actualCount, actualErr := s.Read(inBuf)
	if actualCount != 0 {
		t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount)
	}
	if actualErr.Error() != testErr.Error() {
		t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
	}
	// Even after queuing good data and a different error, every subsequent
	// Read must keep returning the original error.
	s.write(recvMsg{data: testData, err: nil})
	s.write(recvMsg{data: testData, err: errors.New("different error from first")})
	for i := 0; i < 2; i++ {
		inBuf := make([]byte, 1)
		actualCount, actualErr := s.Read(inBuf)
		if actualCount != 0 {
			t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount)
		}
		if actualErr.Error() != testErr.Error() {
			t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
		}
	}
}
  1893. func TestPingPong1B(t *testing.T) {
  1894. runPingPongTest(t, 1)
  1895. }
  1896. func TestPingPong1KB(t *testing.T) {
  1897. runPingPongTest(t, 1024)
  1898. }
  1899. func TestPingPong64KB(t *testing.T) {
  1900. runPingPongTest(t, 65536)
  1901. }
  1902. func TestPingPong1MB(t *testing.T) {
  1903. runPingPongTest(t, 1048576)
  1904. }
  1905. //This is a stress-test of flow control logic.
  1906. func runPingPongTest(t *testing.T, msgSize int) {
  1907. server, client, cancel := setUp(t, 0, 0, pingpong)
  1908. defer cancel()
  1909. defer server.stop()
  1910. defer client.Close()
  1911. waitWhileTrue(t, func() (bool, error) {
  1912. server.mu.Lock()
  1913. defer server.mu.Unlock()
  1914. if len(server.conns) == 0 {
  1915. return true, fmt.Errorf("timed out while waiting for server transport to be created")
  1916. }
  1917. return false, nil
  1918. })
  1919. stream, err := client.NewStream(context.Background(), &CallHdr{})
  1920. if err != nil {
  1921. t.Fatalf("Failed to create stream. Err: %v", err)
  1922. }
  1923. msg := make([]byte, msgSize)
  1924. outgoingHeader := make([]byte, 5)
  1925. outgoingHeader[0] = byte(0)
  1926. binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize))
  1927. opts := &Options{}
  1928. incomingHeader := make([]byte, 5)
  1929. done := make(chan struct{})
  1930. go func() {
  1931. timer := time.NewTimer(time.Second * 5)
  1932. <-timer.C
  1933. close(done)
  1934. }()
  1935. for {
  1936. select {
  1937. case <-done:
  1938. client.Write(stream, nil, nil, &Options{Last: true})
  1939. if _, err := stream.Read(incomingHeader); err != io.EOF {
  1940. t.Fatalf("Client expected EOF from the server. Got: %v", err)
  1941. }
  1942. return
  1943. default:
  1944. if err := client.Write(stream, outgoingHeader, msg, opts); err != nil {
  1945. t.Fatalf("Error on client while writing message. Err: %v", err)
  1946. }
  1947. if _, err := stream.Read(incomingHeader); err != nil {
  1948. t.Fatalf("Error on client while reading data header. Err: %v", err)
  1949. }
  1950. sz := binary.BigEndian.Uint32(incomingHeader[1:])
  1951. recvMsg := make([]byte, int(sz))
  1952. if _, err := stream.Read(recvMsg); err != nil {
  1953. t.Fatalf("Error on client while reading data. Err: %v", err)
  1954. }
  1955. }
  1956. }
  1957. }
  1958. type tableSizeLimit struct {
  1959. mu sync.Mutex
  1960. limits []uint32
  1961. }
  1962. func (t *tableSizeLimit) add(limit uint32) {
  1963. t.mu.Lock()
  1964. t.limits = append(t.limits, limit)
  1965. t.mu.Unlock()
  1966. }
  1967. func (t *tableSizeLimit) getLen() int {
  1968. t.mu.Lock()
  1969. defer t.mu.Unlock()
  1970. return len(t.limits)
  1971. }
  1972. func (t *tableSizeLimit) getIndex(i int) uint32 {
  1973. t.mu.Lock()
  1974. defer t.mu.Unlock()
  1975. return t.limits[i]
  1976. }
  1977. func TestHeaderTblSize(t *testing.T) {
  1978. limits := &tableSizeLimit{}
  1979. updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  1980. e.SetMaxDynamicTableSizeLimit(v)
  1981. limits.add(v)
  1982. }
  1983. defer func() {
  1984. updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  1985. e.SetMaxDynamicTableSizeLimit(v)
  1986. }
  1987. }()
  1988. server, ct, cancel := setUp(t, 0, math.MaxUint32, normal)
  1989. defer cancel()
  1990. defer ct.Close()
  1991. defer server.stop()
  1992. _, err := ct.NewStream(context.Background(), &CallHdr{})
  1993. if err != nil {
  1994. t.Fatalf("failed to open stream: %v", err)
  1995. }
  1996. var svrTransport ServerTransport
  1997. var i int
  1998. for i = 0; i < 1000; i++ {
  1999. server.mu.Lock()
  2000. if len(server.conns) != 0 {
  2001. server.mu.Unlock()
  2002. break
  2003. }
  2004. server.mu.Unlock()
  2005. time.Sleep(10 * time.Millisecond)
  2006. continue
  2007. }
  2008. if i == 1000 {
  2009. t.Fatalf("unable to create any server transport after 10s")
  2010. }
  2011. for st := range server.conns {
  2012. svrTransport = st
  2013. break
  2014. }
  2015. svrTransport.(*http2Server).controlBuf.put(&outgoingSettings{
  2016. ss: []http2.Setting{
  2017. {
  2018. ID: http2.SettingHeaderTableSize,
  2019. Val: uint32(100),
  2020. },
  2021. },
  2022. })
  2023. for i = 0; i < 1000; i++ {
  2024. if limits.getLen() != 1 {
  2025. time.Sleep(10 * time.Millisecond)
  2026. continue
  2027. }
  2028. if val := limits.getIndex(0); val != uint32(100) {
  2029. t.Fatalf("expected limits[0] = 100, got %d", val)
  2030. }
  2031. break
  2032. }
  2033. if i == 1000 {
  2034. t.Fatalf("expected len(limits) = 1 within 10s, got != 1")
  2035. }
  2036. ct.controlBuf.put(&outgoingSettings{
  2037. ss: []http2.Setting{
  2038. {
  2039. ID: http2.SettingHeaderTableSize,
  2040. Val: uint32(200),
  2041. },
  2042. },
  2043. })
  2044. for i := 0; i < 1000; i++ {
  2045. if limits.getLen() != 2 {
  2046. time.Sleep(10 * time.Millisecond)
  2047. continue
  2048. }
  2049. if val := limits.getIndex(1); val != uint32(200) {
  2050. t.Fatalf("expected limits[1] = 200, got %d", val)
  2051. }
  2052. break
  2053. }
  2054. if i == 1000 {
  2055. t.Fatalf("expected len(limits) = 2 within 10s, got != 2")
  2056. }
  2057. }
  2058. // TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to the
  2059. // keepalive timeout, as detailed in proposal A18
  2060. func TestTCPUserTimeout(t *testing.T) {
  2061. tests := []struct {
  2062. time time.Duration
  2063. timeout time.Duration
  2064. }{
  2065. {
  2066. 10 * time.Second,
  2067. 10 * time.Second,
  2068. },
  2069. {
  2070. 0,
  2071. 0,
  2072. },
  2073. }
  2074. for _, tt := range tests {
  2075. server, client, cancel := setUpWithOptions(
  2076. t,
  2077. 0,
  2078. &ServerConfig{
  2079. KeepaliveParams: keepalive.ServerParameters{
  2080. Time: tt.timeout,
  2081. Timeout: tt.timeout,
  2082. },
  2083. },
  2084. normal,
  2085. ConnectOptions{
  2086. KeepaliveParams: keepalive.ClientParameters{
  2087. Time: tt.time,
  2088. Timeout: tt.timeout,
  2089. },
  2090. },
  2091. )
  2092. defer cancel()
  2093. defer server.stop()
  2094. defer client.Close()
  2095. stream, err := client.NewStream(context.Background(), &CallHdr{})
  2096. if err != nil {
  2097. t.Fatalf("Client failed to create RPC request: %v", err)
  2098. }
  2099. client.closeStream(stream, io.EOF, true, http2.ErrCodeCancel, nil, nil, false)
  2100. opt, err := syscall.GetTCPUserTimeout(client.conn)
  2101. if err != nil {
  2102. t.Fatalf("GetTCPUserTimeout error: %v", err)
  2103. }
  2104. if opt < 0 {
  2105. t.Skipf("skipping test on unsupported environment")
  2106. }
  2107. if timeoutMS := int(tt.timeout / time.Millisecond); timeoutMS != opt {
  2108. t.Fatalf("wrong TCP_USER_TIMEOUT set on conn. expected %d. got %d",
  2109. timeoutMS, opt)
  2110. }
  2111. }
  2112. }