/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
Package main provides a client used for benchmarking. Before running the
client, the user must launch the gRPC server. The command for starting the
server can be found in the following file:

	benchmark/server/main.go

After the server has been started, the client can be run. For example:

	go run benchmark/client/main.go -test_name=grpc_test

If the server is listening on a port other than the default 50051, pass the
-port flag so the client dials the correct port:

	go run benchmark/client/main.go -test_name=grpc_test -port=8080

By default the client issues unary RPCs; pass -rpc_type=streaming to
benchmark streaming RPCs instead.
*/
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/syscall"
)

var (
	port      = flag.String("port", "50051", "Localhost port to connect to.")
	numRPC    = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
	numConn   = flag.Int("c", 1, "The number of parallel connections.")
	warmupDur = flag.Int("w", 10, "Warm-up duration in seconds.")
	duration  = flag.Int("d", 60, "Benchmark duration in seconds.")
	rqSize    = flag.Int("req", 1, "Request message size in bytes.")
	rspSize   = flag.Int("resp", 1, "Response message size in bytes.")
	rpcType   = flag.String("rpc_type", "unary",
		`Configure different client rpc type. Valid options are:
		   unary;
		   streaming.`)
	testName = flag.String("test_name", "", "Name of the test used for creating profiles.")

	wg    sync.WaitGroup
	hopts = stats.HistogramOptions{
		NumBuckets:   2495,
		GrowthFactor: .01,
	}
	mu    sync.Mutex
	hists []*stats.Histogram
)
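
// main parses the flags, dials the server, drives RPCs for the warm-up and
// benchmark periods, and then reports QPS, latency percentiles, CPU usage,
// and the locations of the CPU and memory profiles it wrote under /tmp.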
func main() {
	flag.Parse()
	if *testName == "" {
		grpclog.Fatalf("test_name not set")
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(*rspSize),
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: make([]byte, *rqSize),
		},
	}
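	// Dial all connections up front, before the clock starts, so that
	// connection setup is not measured as part of the benchmark. WithBlock
	// makes each dial wait until the connection is ready or the five-second
	// deadline expires.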
	connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer connectCancel()
	ccs := buildConnections(connectCtx)

	warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
	endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
	cf, err := os.Create("/tmp/" + *testName + ".cpu")
	if err != nil {
		grpclog.Fatalf("Error creating file: %v", err)
	}
	defer cf.Close()
	if err := pprof.StartCPUProfile(cf); err != nil {
		grpclog.Fatalf("Error starting CPU profile: %v", err)
	}
	cpuBeg := syscall.GetCPUTime()
	for _, cc := range ccs {
		runWithConn(cc, req, warmDeadline, endDeadline)
	}
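	// Wait for every RPC goroutine to finish and publish its histogram.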
	wg.Wait()
	cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
	pprof.StopCPUProfile()
	mf, err := os.Create("/tmp/" + *testName + ".mem")
	if err != nil {
		grpclog.Fatalf("Error creating file: %v", err)
	}
	defer mf.Close()
	runtime.GC() // materialize all statistics
	if err := pprof.WriteHeapProfile(mf); err != nil {
		grpclog.Fatalf("Error writing memory profile: %v", err)
	}
	hist := stats.NewHistogram(hopts)
	for _, h := range hists {
		hist.Merge(h)
	}
	parseHist(hist)
	fmt.Println("Client CPU utilization:", cpu)
	fmt.Println("Client CPU profile:", cf.Name())
	fmt.Println("Client Mem Profile:", mf.Name())
}
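
// buildConnections dials numConn insecure connections to the server on
// localhost and returns them. Each dial blocks until the connection is
// ready or ctx expires.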
func buildConnections(ctx context.Context) []*grpc.ClientConn {
	ccs := make([]*grpc.ClientConn, *numConn)
	for i := range ccs {
		ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock())
	}
	return ccs
}
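
// runWithConn launches numRPC goroutines on the given connection. Each
// goroutine issues RPCs back to back until endDeadline, records the latency
// of every RPC that starts after warmDeadline in a private histogram, and
// appends that histogram to hists before exiting.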
func runWithConn(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
	for i := 0; i < *numRPC; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			caller := makeCaller(cc, req)
			hist := stats.NewHistogram(hopts)
			for {
				start := time.Now()
				if start.After(endDeadline) {
					mu.Lock()
					hists = append(hists, hist)
					mu.Unlock()
					return
				}
				caller()
				elapsed := time.Since(start)
				if start.After(warmDeadline) {
					hist.Add(elapsed.Nanoseconds())
				}
			}
		}()
	}
}
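
// makeCaller returns a closure that performs a single RPC. For unary, each
// invocation issues a fresh UnaryCall; for streaming, one stream is opened
// up front and each invocation sends one request and reads one response on
// it. Any RPC error aborts the benchmark.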
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
	client := testpb.NewBenchmarkServiceClient(cc)
	if *rpcType == "unary" {
		return func() {
			if _, err := client.UnaryCall(context.Background(), req); err != nil {
				grpclog.Fatalf("RPC failed: %v", err)
			}
		}
	}
	stream, err := client.StreamingCall(context.Background())
	if err != nil {
		grpclog.Fatalf("RPC failed: %v", err)
	}
	return func() {
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("Streaming RPC failed to send: %v", err)
		}
		if _, err := stream.Recv(); err != nil {
			grpclog.Fatalf("Streaming RPC failed to read: %v", err)
		}
	}
}
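
// parseHist prints the merged benchmark results: queries per second over
// the benchmark duration and the 50th/90th/99th percentile latencies.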
func parseHist(hist *stats.Histogram) {
	fmt.Println("qps:", float64(hist.Count)/float64(*duration))
	fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
		time.Duration(median(.5, hist)),
		time.Duration(median(.9, hist)),
		time.Duration(median(.99, hist)))
}
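
// median returns the latency, in nanoseconds, at the given percentile of
// the histogram (despite its name, it handles any percentile). It scans
// buckets until the cumulative count reaches the target, then interpolates
// linearly between that bucket's lower bound and the next bucket's lower
// bound.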
func median(percentile float64, h *stats.Histogram) int64 {
	need := int64(float64(h.Count) * percentile)
	have := int64(0)
	for _, bucket := range h.Buckets {
		count := bucket.Count
		if have+count >= need {
			percent := float64(need-have) / float64(count)
			return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
		}
		have += bucket.Count
	}
	panic("should have found a bound")
}