/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
Package main provides a benchmark that is configurable through command-line flags.

An example run with profiling enabled:

go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \
  -compression=on -maxConcurrentCalls=1 -trace=off \
  -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \
  -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result

As a suggested workflow, run this benchmark when creating a branch and save the result
with "-resultFile=basePerf"; later, while the work is in progress or once it is finished,
run the benchmark again and compare the new result with the base at any time.

Assume two result files named "basePerf" and "curPerf" were created by passing
-resultFile=basePerf and -resultFile=curPerf.

To format curPerf, run:
go run benchmark/benchresult/main.go curPerf

To see how performance changed relative to the base result, run:
go run benchmark/benchresult/main.go basePerf curPerf
*/
package main

import (
	"context"
	"encoding/gob"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"google.golang.org/grpc"
	bm "google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/benchmark/latency"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/test/bufconn"
)

const (
	modeOn   = "on"
	modeOff  = "off"
	modeBoth = "both"
)

var allCompressionModes = []string{modeOn, modeOff, modeBoth}
var allTraceModes = []string{modeOn, modeOff, modeBoth}

const (
	workloadsUnary         = "unary"
	workloadsStreaming     = "streaming"
	workloadsUnconstrained = "unconstrained"
	workloadsAll           = "all"
)

var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsUnconstrained, workloadsAll}

var (
	runMode = []bool{true, true, true} // {runUnary, runStream, runUnconstrained}
	// When latency is set to 0 (no delay), the result is still slower than a true
	// no-delay run because the latency simulation layer adds extra operations.
	ltc                 = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
	kbps                = []int{0, 10240}                           // if non-positive, infinite
	mtu                 = []int{0}                                  // if non-positive, infinite
	maxConcurrentCalls  = []int{1, 8, 64, 512}
	reqSizeBytes        = []int{1, 1024, 1024 * 1024}
	respSizeBytes       = []int{1, 1024, 1024 * 1024}
	enableTrace         []bool
	benchtime           time.Duration
	memProfile, cpuProfile string
	memProfileRate      int
	enableCompressor    []bool
	enableChannelz      []bool
	networkMode         string
	benchmarkResultFile string
	networks            = map[string]latency.Network{
		"Local":    latency.Local,
		"LAN":      latency.LAN,
		"WAN":      latency.WAN,
		"Longhaul": latency.Longhaul,
	}
)

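// unaryBenchmark runs unary RPCs under the given feature settings for the
// requested benchtime, recording per-call latency in s, and returns the
// number of completed calls.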
func unaryBenchmark(startTimer func(), stopTimer func(uint64), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) uint64 {
	caller, cleanup := makeFuncUnary(benchFeatures)
	defer cleanup()
	return runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
}

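// streamBenchmark is the streaming counterpart of unaryBenchmark: each call
// is one request/response round trip on a pre-established stream.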
func streamBenchmark(startTimer func(), stopTimer func(uint64), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) uint64 {
	caller, cleanup := makeFuncStream(benchFeatures)
	defer cleanup()
	return runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
}

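// unconstrainedStreamBenchmark opens MaxConcurrentCalls streams and, for each,
// sends and receives in separate goroutines without pairing requests with
// responses. The counters are reset after warmuptime; the totals accumulated
// over benchtime are returned as (requests sent, responses received).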
func unconstrainedStreamBenchmark(benchFeatures stats.Features, warmuptime, benchtime time.Duration) (uint64, uint64) {
	sender, recver, cleanup := makeFuncUnconstrainedStream(benchFeatures)
	defer cleanup()

	var (
		wg            sync.WaitGroup
		requestCount  uint64
		responseCount uint64
	)
	wg.Add(2 * benchFeatures.MaxConcurrentCalls)

	// Resets the counters once warmed up
	go func() {
		<-time.NewTimer(warmuptime).C
		atomic.StoreUint64(&requestCount, 0)
		atomic.StoreUint64(&responseCount, 0)
	}()

	bmEnd := time.Now().Add(benchtime + warmuptime)
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		go func(pos int) {
			for {
				t := time.Now()
				if t.After(bmEnd) {
					break
				}
				sender(pos)
				atomic.AddUint64(&requestCount, 1)
			}
			wg.Done()
		}(i)
		go func(pos int) {
			for {
				t := time.Now()
				if t.After(bmEnd) {
					break
				}
				recver(pos)
				atomic.AddUint64(&responseCount, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	return requestCount, responseCount
}

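// makeClient starts a benchmark server behind the simulated network described
// by benchFeatures and returns a client connected to it, together with a
// cleanup function that closes the connection and stops the server.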
func makeClient(benchFeatures stats.Features) (testpb.BenchmarkServiceClient, func()) {
	nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
	opts := []grpc.DialOption{}
	sopts := []grpc.ServerOption{}
	if benchFeatures.EnableCompressor {
		sopts = append(sopts,
			grpc.RPCCompressor(nopCompressor{}),
			grpc.RPCDecompressor(nopDecompressor{}),
		)
		opts = append(opts,
			grpc.WithCompressor(nopCompressor{}),
			grpc.WithDecompressor(nopDecompressor{}),
		)
	}
	sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
	opts = append(opts, grpc.WithInsecure())

	var lis net.Listener
	if *useBufconn {
		bcLis := bufconn.Listen(256 * 1024)
		lis = bcLis
		opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, address string) (net.Conn, error) {
			return nw.ContextDialer(func(context.Context, string, string) (net.Conn, error) {
				return bcLis.Dial()
			})(ctx, "", "")
		}))
	} else {
		var err error
		lis, err = net.Listen("tcp", "localhost:0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, address string) (net.Conn, error) {
			return nw.ContextDialer((&net.Dialer{}).DialContext)(ctx, "tcp", lis.Addr().String())
		}))
	}
	lis = nw.Listener(lis)
	stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
	conn := bm.NewClientConn("" /* target not used */, opts...)
	return testpb.NewBenchmarkServiceClient(conn), func() {
		conn.Close()
		stopper()
	}
}

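// makeFuncUnary returns a caller that issues one unary RPC with the configured
// request/response sizes, plus the client's cleanup function.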
func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) {
	tc, cleanup := makeClient(benchFeatures)
	return func(int) {
		unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
	}, cleanup
}

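// makeFuncStream pre-opens MaxConcurrentCalls streams and returns a caller
// that performs one round trip on the stream selected by pos, plus the
// client's cleanup function.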
func makeFuncStream(benchFeatures stats.Features) (func(int), func()) {
	tc, cleanup := makeClient(benchFeatures)
	streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls)
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		stream, err := tc.StreamingCall(context.Background())
		if err != nil {
			grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
		}
		streams[i] = stream
	}
	return func(pos int) {
		streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
	}, cleanup
}

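// makeFuncUnconstrainedStream pre-opens MaxConcurrentCalls streams and returns
// independent send and receive functions for the stream selected by pos, plus
// the client's cleanup function.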
func makeFuncUnconstrainedStream(benchFeatures stats.Features) (func(int), func(int), func()) {
	tc, cleanup := makeClient(benchFeatures)
	streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls)
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		stream, err := tc.UnconstrainedStreamingCall(context.Background())
		if err != nil {
			grpclog.Fatalf("%v.UnconstrainedStreamingCall(_) = _, %v", tc, err)
		}
		streams[i] = stream
	}

	pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, benchFeatures.ReqSizeBytes)
	req := &testpb.SimpleRequest{
		ResponseType: pl.Type,
		ResponseSize: int32(benchFeatures.RespSizeBytes),
		Payload:      pl,
	}

	return func(pos int) {
		streams[pos].Send(req)
	}, func(pos int) {
		streams[pos].Recv()
	}, cleanup
}

func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) {
	if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil {
		grpclog.Fatalf("DoUnaryCall failed: %v", err)
	}
}

func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) {
	if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil {
		grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
	}
}

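// runBenchmark warms up the connection, then invokes caller from
// MaxConcurrentCalls goroutines until benchtime elapses, adding each call's
// latency to s and returning the total number of completed calls.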
func runBenchmark(caller func(int), startTimer func(), stopTimer func(uint64), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) uint64 {
	// Warm up connection.
	for i := 0; i < 10; i++ {
		caller(0)
	}
	// Run benchmark.
	startTimer()
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(benchFeatures.MaxConcurrentCalls)
	bmEnd := time.Now().Add(benchtime)
	var count uint64
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		go func(pos int) {
			for {
				t := time.Now()
				if t.After(bmEnd) {
					break
				}
				start := time.Now()
				caller(pos)
				elapse := time.Since(start)
				atomic.AddUint64(&count, 1)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	stopTimer(count)
	return count
}

var useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")

// init parses the command-line flags and translates them into the feature
// settings that main iterates over.
func init() {
	var (
		workloads, traceMode, compressorMode, readLatency, channelzOn string
		readKbps, readMtu, readMaxConcurrentCalls                     intSliceType
		readReqSizeBytes, readRespSizeBytes                           intSliceType
	)
	flag.StringVar(&workloads, "workloads", workloadsAll,
		fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")))
	flag.StringVar(&traceMode, "trace", modeOff,
		fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", ")))
	flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list")
	flag.StringVar(&channelzOn, "channelz", modeOff, "whether channelz should be turned on")
	flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark")
	flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list")
	flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list")
	flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks")
	flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list")
	flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list")
	flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.")
	flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+
		"memProfile should be set before setting profile rate. To include every allocated block in the profile, "+
		"set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.")
	flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided")
	flag.StringVar(&compressorMode, "compression", modeOff,
		fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", ")))
	flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file")
	flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul")
	flag.Parse()
	if flag.NArg() != 0 {
		log.Fatal("Error: unparsed arguments: ", flag.Args())
	}
	switch workloads {
	case workloadsUnary:
		runMode[0] = true
		runMode[1] = false
		runMode[2] = false
	case workloadsStreaming:
		runMode[0] = false
		runMode[1] = true
		runMode[2] = false
	case workloadsUnconstrained:
		runMode[0] = false
		runMode[1] = false
		runMode[2] = true
	case workloadsAll:
		runMode[0] = true
		runMode[1] = true
		runMode[2] = true
	default:
		log.Fatalf("Unknown workloads setting: %v (want one of: %v)",
			workloads, strings.Join(allWorkloads, ", "))
	}
	enableCompressor = setMode(compressorMode)
	enableTrace = setMode(traceMode)
	enableChannelz = setMode(channelzOn)
	// Time input formats as (time + unit).
	readTimeFromInput(&ltc, readLatency)
	readIntFromIntSlice(&kbps, readKbps)
	readIntFromIntSlice(&mtu, readMtu)
	readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls)
	readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes)
	readIntFromIntSlice(&respSizeBytes, readRespSizeBytes)
	// Re-write latency, kbps and mtu if network mode is set.
	if network, ok := networks[networkMode]; ok {
		ltc = []time.Duration{network.Latency}
		kbps = []int{network.Kbps}
		mtu = []int{network.MTU}
	}
}

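// setMode translates an on/off/both flag value into the slice of boolean
// settings the benchmark loop iterates over.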
func setMode(name string) []bool {
	switch name {
	case modeOn:
		return []bool{true}
	case modeOff:
		return []bool{false}
	case modeBoth:
		return []bool{false, true}
	default:
		log.Fatalf("Unknown %s setting: %v (want one of: %v)",
			name, name, strings.Join(allCompressionModes, ", "))
		return []bool{}
	}
}

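// intSliceType implements flag.Value for a comma-separated list of integers.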
type intSliceType []int

func (intSlice *intSliceType) String() string {
	return fmt.Sprintf("%v", *intSlice)
}

func (intSlice *intSliceType) Set(value string) error {
	if len(*intSlice) > 0 {
		return errors.New("interval flag already set")
	}
	for _, num := range strings.Split(value, ",") {
		next, err := strconv.Atoi(num)
		if err != nil {
			return err
		}
		*intSlice = append(*intSlice, next)
	}
	return nil
}

func readIntFromIntSlice(values *[]int, replace intSliceType) {
	// If the corresponding flag was not set, keep the default settings.
	if len(replace) == 0 {
		return
	}
	*values = replace
}

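// readTimeFromInput replaces values with the durations parsed from a
// comma-separated list; an empty string leaves the defaults untouched.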
func readTimeFromInput(values *[]time.Duration, replace string) {
	if strings.Compare(replace, "") != 0 {
		*values = []time.Duration{}
		for _, ltc := range strings.Split(replace, ",") {
			duration, err := time.ParseDuration(ltc)
			if err != nil {
				log.Fatal(err.Error())
			}
			*values = append(*values, duration)
		}
	}
}

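// printThroughput reports request and response throughput in bits per second,
// computed over the configured benchtime.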
func printThroughput(requestCount uint64, requestSize int, responseCount uint64, responseSize int) {
	requestThroughput := float64(requestCount) * float64(requestSize) * 8 / benchtime.Seconds()
	responseThroughput := float64(responseCount) * float64(responseSize) * 8 / benchtime.Seconds()
	fmt.Printf("Number of requests: %v\tRequest throughput: %v bit/s\n", requestCount, requestThroughput)
	fmt.Printf("Number of responses: %v\tResponse throughput: %v bit/s\n", responseCount, responseThroughput)
	fmt.Println()
}

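// main iterates over every combination of the configured feature values,
// running the selected workloads for each combination and collecting the
// results for the optional result file.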
func main() {
	before()
	featuresPos := make([]int, 9)
	// 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize 7:enableCompressor 8:enableChannelz
	featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu),
		len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor), len(enableChannelz)}
	initalPos := make([]int, len(featuresPos))
	s := stats.NewStats(10)
	s.SortLatency()
	var memStats runtime.MemStats
	var results testing.BenchmarkResult
	var startAllocs, startBytes uint64
	var startTime time.Time
	start := true
	var startTimer = func() {
		runtime.ReadMemStats(&memStats)
		startAllocs = memStats.Mallocs
		startBytes = memStats.TotalAlloc
		startTime = time.Now()
	}
	var stopTimer = func(count uint64) {
		runtime.ReadMemStats(&memStats)
		results = testing.BenchmarkResult{N: int(count), T: time.Since(startTime),
			Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes}
	}
	sharedPos := make([]bool, len(featuresPos))
	for i := 0; i < len(featuresPos); i++ {
		if featuresNum[i] <= 1 {
			sharedPos[i] = true
		}
	}

	// Run benchmarks
	resultSlice := []stats.BenchResults{}
	for !reflect.DeepEqual(featuresPos, initalPos) || start {
		start = false
		benchFeature := stats.Features{
			NetworkMode:        networkMode,
			EnableTrace:        enableTrace[featuresPos[0]],
			Latency:            ltc[featuresPos[1]],
			Kbps:               kbps[featuresPos[2]],
			Mtu:                mtu[featuresPos[3]],
			MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]],
			ReqSizeBytes:       reqSizeBytes[featuresPos[5]],
			RespSizeBytes:      respSizeBytes[featuresPos[6]],
			EnableCompressor:   enableCompressor[featuresPos[7]],
			EnableChannelz:     enableChannelz[featuresPos[8]],
		}

		grpc.EnableTracing = enableTrace[featuresPos[0]]
		if enableChannelz[featuresPos[8]] {
			channelz.TurnOn()
		}
		if runMode[0] {
			count := unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
			s.SetBenchmarkResult("Unary", benchFeature, results.N,
				results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
			fmt.Println(s.BenchString())
			fmt.Println(s.String())
			printThroughput(count, benchFeature.ReqSizeBytes, count, benchFeature.RespSizeBytes)
			resultSlice = append(resultSlice, s.GetBenchmarkResults())
			s.Clear()
		}
		if runMode[1] {
			count := streamBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
			s.SetBenchmarkResult("Stream", benchFeature, results.N,
				results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
			fmt.Println(s.BenchString())
			fmt.Println(s.String())
			printThroughput(count, benchFeature.ReqSizeBytes, count, benchFeature.RespSizeBytes)
			resultSlice = append(resultSlice, s.GetBenchmarkResults())
			s.Clear()
		}
		if runMode[2] {
			requestCount, responseCount := unconstrainedStreamBenchmark(benchFeature, time.Second, benchtime)
			fmt.Printf("Unconstrained Stream-%v\n", benchFeature)
			printThroughput(requestCount, benchFeature.ReqSizeBytes, responseCount, benchFeature.RespSizeBytes)
		}
		bm.AddOne(featuresPos, featuresNum)
	}
	after(resultSlice)
}

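// before applies the memory profiling rate and starts CPU profiling when the
// corresponding flags are set.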
func before() {
	if memProfile != "" {
		runtime.MemProfileRate = memProfileRate
	}
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
			return
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err)
			f.Close()
			return
		}
	}
}

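// after stops CPU profiling, writes the heap profile, and gob-encodes the
// collected benchmark results to the result file when those flags are set.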
func after(data []stats.BenchResults) {
	if cpuProfile != "" {
		pprof.StopCPUProfile() // flushes profile to disk
	}
	if memProfile != "" {
		f, err := os.Create(memProfile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
			os.Exit(2)
		}
		runtime.GC() // materialize all statistics
		if err = pprof.WriteHeapProfile(f); err != nil {
			fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err)
			os.Exit(2)
		}
		f.Close()
	}
	if benchmarkResultFile != "" {
		f, err := os.Create(benchmarkResultFile)
		if err != nil {
			log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err)
		}
		dataEncoder := gob.NewEncoder(f)
		dataEncoder.Encode(data)
		f.Close()
	}
}

// nopCompressor is a compressor that just copies data.
type nopCompressor struct{}

func (nopCompressor) Do(w io.Writer, p []byte) error {
	n, err := w.Write(p)
	if err != nil {
		return err
	}
	if n != len(p) {
		return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p))
	}
	return nil
}

func (nopCompressor) Type() string { return "nop" }

// nopDecompressor is a decompressor that just copies data.
type nopDecompressor struct{}

func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) }

func (nopDecompressor) Type() string { return "nop" }