You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 

1500 lines
62 KiB

  1. // Copyright 2015 The Prometheus Authors
  2. // Licensed under the Apache License, Version 2.0 (the "License");
  3. // you may not use this file except in compliance with the License.
  4. // You may obtain a copy of the License at
  5. //
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. package prometheus
  14. import (
  15. "fmt"
  16. "math"
  17. "runtime"
  18. "sort"
  19. "sync"
  20. "sync/atomic"
  21. "time"
  22. dto "github.com/prometheus/client_model/go"
  23. "google.golang.org/protobuf/proto"
  24. )
// nativeHistogramBounds for the frac of observed values. Only relevant for
// schema > 0. The position in the slice is the schema. (0 is never used, just
// here for convenience of using the schema directly as the index.)
//
// The slice for schema n holds 2^n fractional bucket boundaries in [0.5, 1);
// every second entry of schema n+1 repeats an entry of schema n (see the
// generator sketch below this table).
//
// TODO(beorn7): Currently, we do a binary search into these slices. There are
// ways to turn it into a small number of simple array lookups. It probably only
// matters for schema 5 and beyond, but should be investigated. See this comment
// as a starting point:
// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
var nativeHistogramBounds = [][]float64{
	// Schema "0":
	{0.5},
	// Schema 1:
	{0.5, 0.7071067811865475},
	// Schema 2:
	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
	// Schema 3:
	{
		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
	},
	// Schema 4:
	{
		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
	},
	// Schema 5:
	{
		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
	},
	// Schema 6:
	{
		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
	},
	// Schema 7:
	{
		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
	},
	// Schema 8:
	{
		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
	},
}
  186. // The nativeHistogramBounds above can be generated with the code below.
  187. //
  188. // TODO(beorn7): It's tempting to actually use `go generate` to generate the
  189. // code above. However, this could lead to slightly different numbers on
  190. // different architectures. We still need to come to terms if we are fine with
  191. // that, or if we might prefer to specify precise numbers in the standard.
  192. //
  193. // var nativeHistogramBounds [][]float64 = make([][]float64, 9)
  194. //
  195. // func init() {
  196. // // Populate nativeHistogramBounds.
  197. // numBuckets := 1
  198. // for i := range nativeHistogramBounds {
  199. // bounds := []float64{0.5}
  200. // factor := math.Exp2(math.Exp2(float64(-i)))
  201. // for j := 0; j < numBuckets-1; j++ {
  202. // var bound float64
  203. // if (j+1)%2 == 0 {
  204. // // Use previously calculated value for increased precision.
  205. // bound = nativeHistogramBounds[i-1][j/2+1]
  206. // } else {
  207. // bound = bounds[j] * factor
  208. // }
  209. // bounds = append(bounds, bound)
  210. // }
  211. // numBuckets *= 2
  212. // nativeHistogramBounds[i] = bounds
  213. // }
  214. // }
// A Histogram counts individual observations from an event or sample stream in
// configurable static buckets (or in dynamic sparse buckets as part of the
// experimental Native Histograms, see below for more details). Similar to a
// Summary, it also provides a sum of observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile PromQL function.
//
// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
// (see the documentation for detailed procedures). However, Histograms require
// the user to pre-define suitable buckets, and they are in general less
// accurate. (Both problems are addressed by the experimental Native
// Histograms. To use them, configure a NativeHistogramBucketFactor in the
// HistogramOpts. They also require a Prometheus server v2.40+ with the
// corresponding feature flag enabled.)
//
// The Observe method of a Histogram has a very low performance overhead in
// comparison with the Observe method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
	// Embedded interfaces: every Histogram is also a Metric and acts as its
	// own Collector.
	Metric
	Collector

	// Observe adds a single observation to the histogram. Observations are
	// usually positive or zero. Negative observations are accepted but
	// prevent current versions of Prometheus from properly detecting
	// counter resets in the sum of observations. (The experimental Native
	// Histograms handle negative observations properly.) See
	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
	// for details.
	Observe(float64)
}
// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"

// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}

// DefNativeHistogramZeroThreshold is the default value for
// NativeHistogramZeroThreshold in the HistogramOpts.
//
// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
// which is a bucket boundary at all possible resolutions.
const DefNativeHistogramZeroThreshold = 2.938735877055719e-39

// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
// bucket that only receives observations of precisely zero.
const NativeHistogramZeroThresholdZero = -1

// errBucketLabelNotAllowed is the panic value used when a histogram is
// constructed with a variable or const label named "le", which would clash
// with the bucket label added during exposition.
var errBucketLabelNotAllowed = fmt.Errorf(
	"%q is not allowed as label name in histograms", bucketLabel,
)
  268. // LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
  269. // lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
  270. // counted and not included in the returned slice. The returned slice is meant
  271. // to be used for the Buckets field of HistogramOpts.
  272. //
  273. // The function panics if 'count' is zero or negative.
  274. func LinearBuckets(start, width float64, count int) []float64 {
  275. if count < 1 {
  276. panic("LinearBuckets needs a positive count")
  277. }
  278. buckets := make([]float64, count)
  279. for i := range buckets {
  280. buckets[i] = start
  281. start += width
  282. }
  283. return buckets
  284. }
  285. // ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
  286. // has an upper bound of 'start' and each following bucket's upper bound is
  287. // 'factor' times the previous bucket's upper bound. The final +Inf bucket is
  288. // not counted and not included in the returned slice. The returned slice is
  289. // meant to be used for the Buckets field of HistogramOpts.
  290. //
  291. // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
  292. // or if 'factor' is less than or equal 1.
  293. func ExponentialBuckets(start, factor float64, count int) []float64 {
  294. if count < 1 {
  295. panic("ExponentialBuckets needs a positive count")
  296. }
  297. if start <= 0 {
  298. panic("ExponentialBuckets needs a positive start value")
  299. }
  300. if factor <= 1 {
  301. panic("ExponentialBuckets needs a factor greater than 1")
  302. }
  303. buckets := make([]float64, count)
  304. for i := range buckets {
  305. buckets[i] = start
  306. start *= factor
  307. }
  308. return buckets
  309. }
  310. // ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
  311. // 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
  312. // and not included in the returned slice. The returned slice is meant to be
  313. // used for the Buckets field of HistogramOpts.
  314. //
  315. // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
  316. func ExponentialBucketsRange(min, max float64, count int) []float64 {
  317. if count < 1 {
  318. panic("ExponentialBucketsRange count needs a positive count")
  319. }
  320. if min <= 0 {
  321. panic("ExponentialBucketsRange min needs to be greater than 0")
  322. }
  323. // Formula for exponential buckets.
  324. // max = min*growthFactor^(bucketCount-1)
  325. // We know max/min and highest bucket. Solve for growthFactor.
  326. growthFactor := math.Pow(max/min, 1.0/float64(count-1))
  327. // Now that we know growthFactor, solve for each bucket.
  328. buckets := make([]float64, count)
  329. for i := 1; i <= count; i++ {
  330. buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
  331. }
  332. return buckets
  333. }
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Histogram must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Histogram.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels Labels

	// Buckets defines the buckets into which observations are counted. Each
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
	// implicitly. If Buckets is left as nil or set to a slice of length
	// zero, it is replaced by default buckets. The default buckets are
	// DefBuckets if no buckets for a native histogram (see below) are used,
	// otherwise the default is no buckets. (In other words, if you want to
	// use both regular buckets and buckets for a native histogram, you have
	// to define the regular buckets here explicitly.)
	Buckets []float64

	// If NativeHistogramBucketFactor is greater than one, so-called sparse
	// buckets are used (in addition to the regular buckets, if defined
	// above). A Histogram with sparse buckets will be ingested as a Native
	// Histogram by a Prometheus server with that feature enabled (requires
	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
	// the whole float64 range (with the exception of the “zero” bucket, see
	// NativeHistogramZeroThreshold below). From any one bucket to the next,
	// the width of the bucket grows by a constant
	// factor. NativeHistogramBucketFactor provides an upper bound for this
	// factor (exception see below). The smaller
	// NativeHistogramBucketFactor, the more buckets will be used and thus
	// the more costly the histogram will become. A generally good trade-off
	// between cost and accuracy is a value of 1.1 (each bucket is at most
	// 10% wider than the previous one), which will result in each power of
	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
	// and 2, same as between 2 and 4, and 4 and 8, etc.).
	//
	// Details about the actually used factor: The factor is calculated as
	// 2^(2^n), where n is an integer number between (and including) -8 and
	// 4. n is chosen so that the resulting factor is the largest that is
	// still smaller or equal to NativeHistogramBucketFactor. Note that the
	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
	// it is larger than the provided NativeHistogramBucketFactor.
	//
	// NOTE: Native Histograms are still an experimental feature. Their
	// behavior might still change without a major version
	// bump. Subsequently, all NativeHistogram... options here might still
	// change their behavior or name (or might completely disappear) without
	// a major version bump.
	NativeHistogramBucketFactor float64
	// All observations with an absolute value of less or equal
	// NativeHistogramZeroThreshold are accumulated into a “zero”
	// bucket. For best results, this should be close to a bucket
	// boundary. This is usually the case if picking a power of two. If
	// NativeHistogramZeroThreshold is left at zero,
	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
	// a zero bucket with an actual threshold of zero (i.e. only
	// observations of precisely zero will go into the zero bucket), set
	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
	// constant (or any negative float value).
	NativeHistogramZeroThreshold float64

	// The remaining fields define a strategy to limit the number of
	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
	// at zero, the number of buckets is not limited. (Note that this might
	// lead to unbounded memory consumption if the values observed by the
	// Histogram are sufficiently wide-spread. In particular, this could be
	// used as a DoS attack vector. Where the observed values depend on
	// external inputs, it is highly recommended to set a
	// NativeHistogramMaxBucketNumber.) Once the set
	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
	// enacted: First, if the last reset (or the creation) of the histogram
	// is at least NativeHistogramMinResetDuration ago, then the whole
	// histogram is reset to its initial state (including regular
	// buckets). If less time has passed, or if
	// NativeHistogramMinResetDuration is zero, no reset is
	// performed. Instead, the zero threshold is increased sufficiently to
	// reduce the number of buckets to or below
	// NativeHistogramMaxBucketNumber, but not to more than
	// NativeHistogramMaxZeroThreshold. Thus, if
	// NativeHistogramMaxZeroThreshold is already at or below the current
	// zero threshold, nothing happens at this step. After that, if the
	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
	// resolution of the histogram is reduced by doubling the width of the
	// sparse buckets (up to a growth factor between one bucket to the next
	// of 2^(2^4) = 65536, see above).
	NativeHistogramMaxBucketNumber  uint32
	NativeHistogramMinResetDuration time.Duration
	NativeHistogramMaxZeroThreshold float64
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type HistogramVecOpts struct {
	HistogramOpts

	// VariableLabels are used to partition the metric vector by the given set
	// of labels. Each label value will be constrained with the optional Constraint
	// function, if provided.
	VariableLabels ConstrainableLabels
}
  455. // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
  456. // panics if the buckets in HistogramOpts are not in strictly increasing order.
  457. //
  458. // The returned implementation also implements ExemplarObserver. It is safe to
  459. // perform the corresponding type assertion. Exemplars are tracked separately
  460. // for each bucket.
  461. func NewHistogram(opts HistogramOpts) Histogram {
  462. return newHistogram(
  463. NewDesc(
  464. BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
  465. opts.Help,
  466. nil,
  467. opts.ConstLabels,
  468. ),
  469. opts,
  470. )
  471. }
// newHistogram is the shared constructor behind NewHistogram (and vector
// variants). It validates the label values against desc, forbids the reserved
// "le" label, resolves default/native-histogram bucket configuration, and sets
// up the two histogramCounts instances plus per-bucket exemplar slots.
// It panics on inconsistent label cardinality, on use of the "le" label, and
// (below) on non-increasing buckets.
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
	if len(desc.variableLabels) != len(labelValues) {
		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
	}
	// "le" is reserved for the bucket label added at exposition time, so it
	// may appear neither as a variable nor as a const label.
	for _, n := range desc.variableLabels {
		if n.Name == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	h := &histogram{
		desc:                            desc,
		upperBounds:                     opts.Buckets,
		labelPairs:                      MakeLabelPairs(desc, labelValues),
		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
		lastResetTime:                   time.Now(),
		now:                             time.Now,
	}
	// Default buckets only apply when no native (sparse) histogram is
	// configured; with a native histogram, "no regular buckets" is valid.
	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
		h.upperBounds = DefBuckets
	}
	if opts.NativeHistogramBucketFactor <= 1 {
		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
	} else {
		switch {
		case opts.NativeHistogramZeroThreshold > 0:
			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
		case opts.NativeHistogramZeroThreshold == 0:
			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
	}
	// Validate strict ordering of the regular buckets and strip a trailing
	// +Inf bucket (it is always added implicitly at exposition).
	for i, upperBound := range h.upperBounds {
		if i < len(h.upperBounds)-1 {
			if upperBound >= h.upperBounds[i+1] {
				panic(fmt.Errorf(
					"histogram buckets must be in increasing order: %f >= %f",
					upperBound, h.upperBounds[i+1],
				))
			}
		} else {
			if math.IsInf(upperBound, +1) {
				// The +Inf bucket is implicit. Remove it here.
				h.upperBounds = h.upperBounds[:i]
			}
		}
	}
	// Finally we know the final length of h.upperBounds and can make buckets
	// for both counts as well as exemplars:
	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)

	h.init(h) // Init self-collection.
	return h
}
// histogramCounts holds the mutable counters of a histogram. Two instances are
// created per histogram (see h.counts[0] and h.counts[1] in newHistogram); all
// counter fields are manipulated with atomic operations only.
type histogramCounts struct {
	// Order in this struct matters for the alignment required by atomic
	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG

	// sumBits contains the bits of the float64 representing the sum of all
	// observations.
	sumBits uint64
	// count is the total number of observations.
	count uint64

	// nativeHistogramZeroBucket counts all (positive and negative)
	// observations in the zero bucket (with an absolute value less or equal
	// the current threshold, see next field).
	nativeHistogramZeroBucket uint64
	// nativeHistogramZeroThresholdBits is the bit pattern of the current
	// threshold for the zero bucket. It's initially equal to
	// nativeHistogramZeroThreshold but may change according to the bucket
	// count limitation strategy.
	nativeHistogramZeroThresholdBits uint64
	// nativeHistogramSchema may change over time according to the bucket
	// count limitation strategy and therefore has to be saved here.
	nativeHistogramSchema int32
	// Number of (positive and negative) sparse buckets.
	nativeHistogramBucketsNumber uint32

	// Regular buckets.
	buckets []uint64

	// The sparse buckets for native histograms are implemented with a
	// sync.Map for now. A dedicated data structure will likely be more
	// efficient. There are separate maps for negative and positive
	// observations. The map's value is an *int64, counting observations in
	// that bucket. (Note that we don't use uint64 as an int64 won't
	// overflow in practice, and working with signed numbers from the
	// beginning simplifies the handling of deltas.) The map's key is the
	// index of the bucket according to the used
	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
}
  571. // observe manages the parts of observe that only affects
  572. // histogramCounts. doSparse is true if sparse buckets should be done,
  573. // too.
  574. func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
  575. if bucket < len(hc.buckets) {
  576. atomic.AddUint64(&hc.buckets[bucket], 1)
  577. }
  578. atomicAddFloat(&hc.sumBits, v)
  579. if doSparse && !math.IsNaN(v) {
  580. var (
  581. key int
  582. schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
  583. zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
  584. bucketCreated, isInf bool
  585. )
  586. if math.IsInf(v, 0) {
  587. // Pretend v is MaxFloat64 but later increment key by one.
  588. if math.IsInf(v, +1) {
  589. v = math.MaxFloat64
  590. } else {
  591. v = -math.MaxFloat64
  592. }
  593. isInf = true
  594. }
  595. frac, exp := math.Frexp(math.Abs(v))
  596. if schema > 0 {
  597. bounds := nativeHistogramBounds[schema]
  598. key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
  599. } else {
  600. key = exp
  601. if frac == 0.5 {
  602. key--
  603. }
  604. div := 1 << -schema
  605. key = (key + div - 1) / div
  606. }
  607. if isInf {
  608. key++
  609. }
  610. switch {
  611. case v > zeroThreshold:
  612. bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
  613. case v < -zeroThreshold:
  614. bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
  615. default:
  616. atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
  617. }
  618. if bucketCreated {
  619. atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
  620. }
  621. }
  622. // Increment count last as we take it as a signal that the observation
  623. // is complete.
  624. atomic.AddUint64(&hc.count, 1)
  625. }
// histogram implements a lock-free-on-the-write-path Histogram using two
// alternating histogramCounts halves.
type histogram struct {
	// countAndHotIdx enables lock-free writes with use of atomic updates.
	// The most significant bit is the hot index [0 or 1] of the count field
	// below. Observe calls update the hot one. All remaining bits count the
	// number of Observe calls. Observe starts by incrementing this counter,
	// and finish by incrementing the count field in the respective
	// histogramCounts, as a marker for completion.
	//
	// Calls of the Write method (which are non-mutating reads from the
	// perspective of the histogram) swap the hot–cold under the writeMtx
	// lock. A cooldown is awaited (while locked) by comparing the number of
	// observations with the initiation count. Once they match, then the
	// last observation on the now cool one has completed. All cold fields must
	// be merged into the new hot before releasing writeMtx.
	//
	// Fields with atomic access first! See alignment constraint:
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc *Desc

	// Only used in the Write method and for sparse bucket management.
	mtx sync.Mutex

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts

	// upperBounds holds the (sorted) explicit bucket boundaries, excluding
	// the implicit +Inf bucket.
	upperBounds []float64
	labelPairs  []*dto.LabelPair
	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.

	nativeHistogramSchema           int32   // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
	nativeHistogramZeroThreshold    float64 // The initial zero threshold.
	nativeHistogramMaxZeroThreshold float64
	nativeHistogramMaxBuckets       uint32
	nativeHistogramMinResetDuration time.Duration
	lastResetTime                   time.Time // Protected by mtx.

	now func() time.Time // To mock out time.Now() for testing.
}
// Desc returns the descriptor of the histogram.
func (h *histogram) Desc() *Desc {
	return h.desc
}
  667. func (h *histogram) Observe(v float64) {
  668. h.observe(v, h.findBucket(v))
  669. }
  670. func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
  671. i := h.findBucket(v)
  672. h.observe(v, i)
  673. h.updateExemplar(v, i, e)
  674. }
// Write serializes the histogram's current state into out. It swaps the
// hot/cold counts, waits for in-flight observations on the now-cold half to
// complete, reads the cold half, and finally merges it back into the hot half.
func (h *histogram) Write(out *dto.Metric) error {
	// For simplicity, we protect this whole method by a mutex. It is not in
	// the hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it, if possible at
	// all.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
	// without touching the count bits. See the struct comments for a full
	// description of the algorithm.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	// count is contained unchanged in the lower 63 bits.
	count := n & ((1 << 63) - 1)
	// The most significant bit tells us which counts is hot. The complement
	// is thus the cold one.
	hotCounts := h.counts[n>>63]
	coldCounts := h.counts[(^n)>>63]

	// Block until the last observation initiated before the swap has
	// completed its writes to the cold half.
	waitForCooldown(count, coldCounts)

	his := &dto.Histogram{
		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
		SampleCount: proto.Uint64(count),
		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
	}
	out.Histogram = his
	out.Label = h.labelPairs

	// Emit the conventional buckets with cumulative counts.
	var cumCount uint64
	for i, upperBound := range h.upperBounds {
		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
		his.Bucket[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(cumCount),
			UpperBound:      proto.Float64(upperBound),
		}
		if e := h.exemplars[i].Load(); e != nil {
			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
		}
	}
	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
		b := &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(math.Inf(1)),
			Exemplar:        e.(*dto.Exemplar),
		}
		his.Bucket = append(his.Bucket, b)
	}
	if h.nativeHistogramSchema > math.MinInt32 {
		// Native histogram enabled: emit zero bucket, schema, and the
		// sparse positive/negative buckets in span/delta form.
		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)

		// Deferred so that the cold sparse buckets are merged into the
		// hot ones (and reset) only after makeBuckets below has read
		// them.
		defer func() {
			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
		}()

		his.ZeroCount = proto.Uint64(zeroBucket)
		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
	}
	// Merge the regular cold counts into the hot counts and reset them so
	// the cold half starts clean for the next swap.
	addAndResetCounts(hotCounts, coldCounts)
	return nil
}
  735. // findBucket returns the index of the bucket for the provided value, or
  736. // len(h.upperBounds) for the +Inf bucket.
  737. func (h *histogram) findBucket(v float64) int {
  738. // TODO(beorn7): For small numbers of buckets (<30), a linear search is
  739. // slightly faster than the binary search. If we really care, we could
  740. // switch from one search strategy to the other depending on the number
  741. // of buckets.
  742. //
  743. // Microbenchmarks (BenchmarkHistogramNoLabels):
  744. // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
  745. // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
  746. // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
  747. return sort.SearchFloat64s(h.upperBounds, v)
  748. }
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
	// Do not add to sparse buckets for NaN observations.
	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
	// We increment h.countAndHotIdx so that the counter in the lower
	// 63 bits gets incremented. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1)
	hotCounts := h.counts[n>>63]
	hotCounts.observe(v, bucket, doSparse)
	if doSparse {
		// The sparse observation may have created a new bucket;
		// enforce the configured bucket limit, if any.
		h.limitBuckets(hotCounts, v, bucket)
	}
}
// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
// by a concurrently happening Write call).
func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
	if h.nativeHistogramMaxBuckets == 0 {
		return // No limit configured.
	}
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded yet.
	}

	h.mtx.Lock()
	defer h.mtx.Unlock()

	// The hot counts might have been swapped just before we acquired the
	// lock. Re-fetch the hot counts first...
	n := atomic.LoadUint64(&h.countAndHotIdx)
	hotIdx := n >> 63
	coldIdx := (^n) >> 63
	hotCounts := h.counts[hotIdx]
	coldCounts := h.counts[coldIdx]
	// ...and then check again if we really have to reduce the bucket count.
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded after all.
	}
	// Try the various strategies in order: full reset (if the reset period
	// allows it), widening the zero bucket, and finally halving the
	// resolution.
	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
		return
	}
	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
		return
	}
	h.doubleBucketWidth(hotCounts, coldCounts)
}
// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx.
func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
	// We are using the possibly mocked h.now() rather than
	// time.Since(h.lastResetTime) to enable testing.
	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
		return false
	}
	// Completely reset coldCounts.
	h.resetCounts(cold)
	// Repeat the latest observation to not lose it completely.
	cold.observe(value, bucket, true)
	// Make coldCounts the new hot counts while resetting countAndHotIdx.
	// (Count restarts at 1 to account for the observation repeated above.)
	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
	count := n & ((1 << 63) - 1)
	waitForCooldown(count, hot)
	// Finally, reset the formerly hot counts, too.
	h.resetCounts(hot)
	h.lastResetTime = h.now()
	return true
}
// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
// limits how far the zero bucket can be extended, and if that's not enough to
// include an existing bucket, the method returns false. The caller must have
// locked h.mtx.
func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
		return false
	}
	// Find the key of the bucket closest to zero.
	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
	if smallestNegativeKey < smallestKey {
		smallestKey = smallestNegativeKey
	}
	if smallestKey == math.MaxInt32 {
		// No sparse buckets populated at all; nothing to merge.
		return false
	}
	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
		return false // New threshold would exceed the max threshold.
	}
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// Remove applicable buckets.
	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	// Make cold counts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the new zero threshold in the cold counts, too...
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// ...and then merge the newly deleted buckets into the wider zero
	// bucket.
	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			if key == smallestKey {
				// Merge into hot zero bucket...
				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
				// ...and delete from cold counts.
				coldBuckets.Delete(key)
				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
			} else {
				// Add to corresponding hot bucket...
				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
				}
				// ...and reset cold bucket.
				atomic.StoreInt64(bucket, 0)
			}
			return true
		}
	}
	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
	return true
}
// doubleBucketWidth doubles the bucket width (by decrementing the schema
// number). Note that very sparse buckets could lead to a low reduction of the
// bucket count (or even no reduction at all). The method does nothing if the
// schema is already -4.
func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
	if coldSchema == -4 {
		return // Already at lowest resolution.
	}
	coldSchema--
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// Play it simple and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
	// Make coldCounts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the schema in the cold counts, too...
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// ...and then merge the cold buckets into the wider hot buckets.
	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			// Adjust key to match the bucket to merge into. This
			// computes the ceiling of key/2 (truncating division
			// already rounds towards zero, i.e. up, for negative
			// keys).
			if key > 0 {
				key++
			}
			key /= 2
			// Add to corresponding hot bucket.
			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
			}
			return true
		}
	}
	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
	// Play it simple again and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
}
  940. func (h *histogram) resetCounts(counts *histogramCounts) {
  941. atomic.StoreUint64(&counts.sumBits, 0)
  942. atomic.StoreUint64(&counts.count, 0)
  943. atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
  944. atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
  945. atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
  946. atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
  947. for i := range h.upperBounds {
  948. atomic.StoreUint64(&counts.buckets[i], 0)
  949. }
  950. deleteSyncMap(&counts.nativeHistogramBucketsNegative)
  951. deleteSyncMap(&counts.nativeHistogramBucketsPositive)
  952. }
  953. // updateExemplar replaces the exemplar for the provided bucket. With empty
  954. // labels, it's a no-op. It panics if any of the labels is invalid.
  955. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
  956. if l == nil {
  957. return
  958. }
  959. e, err := newExemplar(v, h.now(), l)
  960. if err != nil {
  961. panic(err)
  962. }
  963. h.exemplars[bucket].Store(e)
  964. }
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	*MetricVec
}
  973. // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
  974. // partitioned by the given label names.
  975. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
  976. return V2.NewHistogramVec(HistogramVecOpts{
  977. HistogramOpts: opts,
  978. VariableLabels: UnconstrainedLabels(labelNames),
  979. })
  980. }
  981. // NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
  982. func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
  983. desc := V2.NewDesc(
  984. BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
  985. opts.Help,
  986. opts.VariableLabels,
  987. opts.ConstLabels,
  988. )
  989. return &HistogramVec{
  990. MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
  991. return newHistogram(desc, opts.HistogramOpts, lvs...)
  992. }),
  993. }
  994. }
  995. // GetMetricWithLabelValues returns the Histogram for the given slice of label
  996. // values (same order as the variable labels in Desc). If that combination of
  997. // label values is accessed for the first time, a new Histogram is created.
  998. //
  999. // It is possible to call this method without using the returned Histogram to only
  1000. // create the new Histogram but leave it at its starting value, a Histogram without
  1001. // any observations.
  1002. //
  1003. // Keeping the Histogram for later use is possible (and should be considered if
  1004. // performance is critical), but keep in mind that Reset, DeleteLabelValues and
  1005. // Delete can be used to delete the Histogram from the HistogramVec. In that case, the
  1006. // Histogram will still exist, but it will not be exported anymore, even if a
  1007. // Histogram with the same label values is created later. See also the CounterVec
  1008. // example.
  1009. //
  1010. // An error is returned if the number of label values is not the same as the
  1011. // number of variable labels in Desc (minus any curried labels).
  1012. //
  1013. // Note that for more than one label value, this method is prone to mistakes
  1014. // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
  1015. // an alternative to avoid that type of mistake. For higher label numbers, the
  1016. // latter has a much more readable (albeit more verbose) syntax, but it comes
  1017. // with a performance overhead (for creating and processing the Labels map).
  1018. // See also the GaugeVec example.
  1019. func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
  1020. metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
  1021. if metric != nil {
  1022. return metric.(Observer), err
  1023. }
  1024. return nil, err
  1025. }
  1026. // GetMetricWith returns the Histogram for the given Labels map (the label names
  1027. // must match those of the variable labels in Desc). If that label map is
  1028. // accessed for the first time, a new Histogram is created. Implications of
  1029. // creating a Histogram without using it and keeping the Histogram for later use
  1030. // are the same as for GetMetricWithLabelValues.
  1031. //
  1032. // An error is returned if the number and names of the Labels are inconsistent
  1033. // with those of the variable labels in Desc (minus any curried labels).
  1034. //
  1035. // This method is used for the same purpose as
  1036. // GetMetricWithLabelValues(...string). See there for pros and cons of the two
  1037. // methods.
  1038. func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
  1039. metric, err := v.MetricVec.GetMetricWith(labels)
  1040. if metric != nil {
  1041. return metric.(Observer), err
  1042. }
  1043. return nil, err
  1044. }
  1045. // WithLabelValues works as GetMetricWithLabelValues, but panics where
  1046. // GetMetricWithLabelValues would have returned an error. Not returning an
  1047. // error allows shortcuts like
  1048. //
  1049. // myVec.WithLabelValues("404", "GET").Observe(42.21)
  1050. func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
  1051. h, err := v.GetMetricWithLabelValues(lvs...)
  1052. if err != nil {
  1053. panic(err)
  1054. }
  1055. return h
  1056. }
  1057. // With works as GetMetricWith but panics where GetMetricWithLabels would have
  1058. // returned an error. Not returning an error allows shortcuts like
  1059. //
  1060. // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
  1061. func (v *HistogramVec) With(labels Labels) Observer {
  1062. h, err := v.GetMetricWith(labels)
  1063. if err != nil {
  1064. panic(err)
  1065. }
  1066. return h
  1067. }
  1068. // CurryWith returns a vector curried with the provided labels, i.e. the
  1069. // returned vector has those labels pre-set for all labeled operations performed
  1070. // on it. The cardinality of the curried vector is reduced accordingly. The
  1071. // order of the remaining labels stays the same (just with the curried labels
  1072. // taken out of the sequence – which is relevant for the
  1073. // (GetMetric)WithLabelValues methods). It is possible to curry a curried
  1074. // vector, but only with labels not yet used for currying before.
  1075. //
  1076. // The metrics contained in the HistogramVec are shared between the curried and
  1077. // uncurried vectors. They are just accessed differently. Curried and uncurried
  1078. // vectors behave identically in terms of collection. Only one must be
  1079. // registered with a given registry (usually the uncurried version). The Reset
  1080. // method deletes all metrics, even if called on a curried vector.
  1081. func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
  1082. vec, err := v.MetricVec.CurryWith(labels)
  1083. if vec != nil {
  1084. return &HistogramVec{vec}, err
  1085. }
  1086. return nil, err
  1087. }
  1088. // MustCurryWith works as CurryWith but panics where CurryWith would have
  1089. // returned an error.
  1090. func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
  1091. vec, err := v.CurryWith(labels)
  1092. if err != nil {
  1093. panic(err)
  1094. }
  1095. return vec
  1096. }
// constHistogram backs the metrics returned by NewConstHistogram. All values
// are fixed at creation time and written out verbatim by Write.
type constHistogram struct {
	desc       *Desc
	count      uint64             // Total observation count (also the implicit +Inf bucket count).
	sum        float64            // Sum of all observations.
	buckets    map[float64]uint64 // Upper bound -> cumulative count, excluding +Inf.
	labelPairs []*dto.LabelPair
}
// Desc returns the descriptor of the histogram.
func (h *constHistogram) Desc() *Desc {
	return h.desc
}
  1107. func (h *constHistogram) Write(out *dto.Metric) error {
  1108. his := &dto.Histogram{}
  1109. buckets := make([]*dto.Bucket, 0, len(h.buckets))
  1110. his.SampleCount = proto.Uint64(h.count)
  1111. his.SampleSum = proto.Float64(h.sum)
  1112. for upperBound, count := range h.buckets {
  1113. buckets = append(buckets, &dto.Bucket{
  1114. CumulativeCount: proto.Uint64(count),
  1115. UpperBound: proto.Float64(upperBound),
  1116. })
  1117. }
  1118. if len(buckets) > 0 {
  1119. sort.Sort(buckSort(buckets))
  1120. }
  1121. his.Bucket = buckets
  1122. out.Histogram = his
  1123. out.Label = h.labelPairs
  1124. return nil
  1125. }
  1126. // NewConstHistogram returns a metric representing a Prometheus histogram with
  1127. // fixed values for the count, sum, and bucket counts. As those parameters
  1128. // cannot be changed, the returned value does not implement the Histogram
  1129. // interface (but only the Metric interface). Users of this package will not
  1130. // have much use for it in regular operations. However, when implementing custom
  1131. // Collectors, it is useful as a throw-away metric that is generated on the fly
  1132. // to send it to Prometheus in the Collect method.
  1133. //
  1134. // buckets is a map of upper bounds to cumulative counts, excluding the +Inf
  1135. // bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
  1136. //
  1137. // NewConstHistogram returns an error if the length of labelValues is not
  1138. // consistent with the variable labels in Desc or if Desc is invalid.
  1139. func NewConstHistogram(
  1140. desc *Desc,
  1141. count uint64,
  1142. sum float64,
  1143. buckets map[float64]uint64,
  1144. labelValues ...string,
  1145. ) (Metric, error) {
  1146. if desc.err != nil {
  1147. return nil, desc.err
  1148. }
  1149. if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
  1150. return nil, err
  1151. }
  1152. return &constHistogram{
  1153. desc: desc,
  1154. count: count,
  1155. sum: sum,
  1156. buckets: buckets,
  1157. labelPairs: MakeLabelPairs(desc, labelValues),
  1158. }, nil
  1159. }
  1160. // MustNewConstHistogram is a version of NewConstHistogram that panics where
  1161. // NewConstHistogram would have returned an error.
  1162. func MustNewConstHistogram(
  1163. desc *Desc,
  1164. count uint64,
  1165. sum float64,
  1166. buckets map[float64]uint64,
  1167. labelValues ...string,
  1168. ) Metric {
  1169. m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
  1170. if err != nil {
  1171. panic(err)
  1172. }
  1173. return m
  1174. }
// buckSort implements sort.Interface to order dto.Buckets by ascending upper
// bound.
type buckSort []*dto.Bucket

func (s buckSort) Len() int {
	return len(s)
}

func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}
  1185. // pickSchema returns the largest number n between -4 and 8 such that
  1186. // 2^(2^-n) is less or equal the provided bucketFactor.
  1187. //
  1188. // Special cases:
  1189. // - bucketFactor <= 1: panics.
  1190. // - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
  1191. func pickSchema(bucketFactor float64) int32 {
  1192. if bucketFactor <= 1 {
  1193. panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
  1194. }
  1195. floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
  1196. switch {
  1197. case floor <= -8:
  1198. return 8
  1199. case floor >= 4:
  1200. return -4
  1201. default:
  1202. return -int32(floor)
  1203. }
  1204. }
// makeBuckets converts the sparse buckets in the provided sync.Map (keyed by
// int bucket index, with *int64 counts as values) into the span/delta
// representation used by dto.Histogram. It returns nil slices if the map is
// empty.
func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
	// Collect and sort all populated bucket indices.
	var ii []int
	buckets.Range(func(k, v interface{}) bool {
		ii = append(ii, k.(int))
		return true
	})
	sort.Ints(ii)

	if len(ii) == 0 {
		return nil, nil
	}

	var (
		spans     []*dto.BucketSpan
		deltas    []int64
		prevCount int64
		nextI     int
	)

	// appendDelta records one bucket count (as a delta to the previous
	// count) and extends the current (last) span by one.
	appendDelta := func(count int64) {
		*spans[len(spans)-1].Length++
		deltas = append(deltas, count-prevCount)
		prevCount = count
	}

	for n, i := range ii {
		v, _ := buckets.Load(i)
		count := atomic.LoadInt64(v.(*int64))
		// Multiple spans with only small gaps in between are probably
		// encoded more efficiently as one larger span with a few empty
		// buckets. Needs some research to find the sweet spot. For now,
		// we assume that gaps of one or two buckets should not create
		// a new span.
		iDelta := int32(i - nextI)
		if n == 0 || iDelta > 2 {
			// We have to create a new span, either because we are
			// at the very beginning, or because we have found a gap
			// of more than two buckets.
			spans = append(spans, &dto.BucketSpan{
				Offset: proto.Int32(iDelta),
				Length: proto.Uint32(0),
			})
		} else {
			// We have found a small gap (or no gap at all).
			// Insert empty buckets as needed.
			for j := int32(0); j < iDelta; j++ {
				appendDelta(0)
			}
		}
		appendDelta(count)
		nextI = i + 1
	}
	return spans, deltas
}
  1255. // addToBucket increments the sparse bucket at key by the provided amount. It
  1256. // returns true if a new sparse bucket had to be created for that.
  1257. func addToBucket(buckets *sync.Map, key int, increment int64) bool {
  1258. if existingBucket, ok := buckets.Load(key); ok {
  1259. // Fast path without allocation.
  1260. atomic.AddInt64(existingBucket.(*int64), increment)
  1261. return false
  1262. }
  1263. // Bucket doesn't exist yet. Slow path allocating new counter.
  1264. newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
  1265. if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
  1266. // The bucket was created concurrently in another goroutine.
  1267. // Have to increment after all.
  1268. atomic.AddInt64(actualBucket.(*int64), increment)
  1269. return false
  1270. }
  1271. return true
  1272. }
  1273. // addAndReset returns a function to be used with sync.Map.Range of spare
  1274. // buckets in coldCounts. It increments the buckets in the provided hotBuckets
  1275. // according to the buckets ranged through. It then resets all buckets ranged
  1276. // through to 0 (but leaves them in place so that they don't need to get
  1277. // recreated on the next scrape).
  1278. func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
  1279. return func(k, v interface{}) bool {
  1280. bucket := v.(*int64)
  1281. if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
  1282. atomic.AddUint32(bucketNumber, 1)
  1283. }
  1284. atomic.StoreInt64(bucket, 0)
  1285. return true
  1286. }
  1287. }
  1288. func deleteSyncMap(m *sync.Map) {
  1289. m.Range(func(k, v interface{}) bool {
  1290. m.Delete(k)
  1291. return true
  1292. })
  1293. }
  1294. func findSmallestKey(m *sync.Map) int {
  1295. result := math.MaxInt32
  1296. m.Range(func(k, v interface{}) bool {
  1297. key := k.(int)
  1298. if key < result {
  1299. result = key
  1300. }
  1301. return true
  1302. })
  1303. return result
  1304. }
// getLe returns the upper bound ("less or equal") of the native-histogram
// bucket identified by key for the given schema.
func getLe(key int, schema int32) float64 {
	// Here a bit of context about the behavior for the last bucket counting
	// regular numbers (called simply "last bucket" below) and the bucket
	// counting observations of ±Inf (called "inf bucket" below, with a key
	// one higher than that of the "last bucket"):
	//
	// If we apply the usual formula to the last bucket, its upper bound
	// would be calculated as +Inf. The reason is that the max possible
	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
	// the calculated bucket boundaries. So the calculated boundary has to
	// be larger than math.MaxFloat64, and the only float64 larger than
	// math.MaxFloat64 is +Inf. However, we want to count actual
	// observations of ±Inf in the inf bucket. Therefore, we have to treat
	// the upper bound of the last bucket specially and set it to
	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
	// being one higher than that of the last bucket, naturally comes out as
	// +Inf by the usual formula. So that's fine.)
	//
	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
	// 1024. If there were a float64 number following math.MaxFloat64, it
	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
	// of 0.5 and an exp of 1025. However, since frac must be smaller than
	// 1, and exp must be smaller than 1025, either representation overflows
	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
	// largest possible float64. Q.E.D.) However, the formula for
	// calculating the upper bound from the idx and schema of the last
	// bucket results in precisely that. It is either frac=1.0 & exp=1024
	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
	// by the way, a power of two where the exponent itself is a power of
	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
	// schemas.) So these are the special cases we have to catch below.
	if schema < 0 {
		exp := key << -schema
		if exp == 1024 {
			// This is the last bucket before the overflow bucket
			// (for ±Inf observations). Return math.MaxFloat64 as
			// explained above.
			return math.MaxFloat64
		}
		return math.Ldexp(1, exp)
	}
	// For schema >= 0, the key splits into a fractional part (looked up in
	// the precomputed nativeHistogramBounds) and an exponent part.
	fracIdx := key & ((1 << schema) - 1)
	frac := nativeHistogramBounds[schema][fracIdx]
	exp := (key >> schema) + 1
	if frac == 0.5 && exp == 1025 {
		// This is the last bucket before the overflow bucket (for ±Inf
		// observations). Return math.MaxFloat64 as explained above.
		return math.MaxFloat64
	}
	return math.Ldexp(frac, exp)
}
  1356. // waitForCooldown returns after the count field in the provided histogramCounts
  1357. // has reached the provided count value.
  1358. func waitForCooldown(count uint64, counts *histogramCounts) {
  1359. for count != atomic.LoadUint64(&counts.count) {
  1360. runtime.Gosched() // Let observations get work done.
  1361. }
  1362. }
  1363. // atomicAddFloat adds the provided float atomically to another float
  1364. // represented by the bit pattern the bits pointer is pointing to.
  1365. func atomicAddFloat(bits *uint64, v float64) {
  1366. for {
  1367. loadedBits := atomic.LoadUint64(bits)
  1368. newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
  1369. if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
  1370. break
  1371. }
  1372. }
  1373. }
  1374. // atomicDecUint32 atomically decrements the uint32 p points to. See
  1375. // https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
  1376. func atomicDecUint32(p *uint32) {
  1377. atomic.AddUint32(p, ^uint32(0))
  1378. }
  1379. // addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
  1380. // bucket) from the cold counts to the corresponding fields in the hot
  1381. // counts. Those fields are then reset to 0 in the cold counts.
  1382. func addAndResetCounts(hot, cold *histogramCounts) {
  1383. atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
  1384. atomic.StoreUint64(&cold.count, 0)
  1385. coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
  1386. atomicAddFloat(&hot.sumBits, coldSum)
  1387. atomic.StoreUint64(&cold.sumBits, 0)
  1388. for i := range hot.buckets {
  1389. atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
  1390. atomic.StoreUint64(&cold.buckets[i], 0)
  1391. }
  1392. atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
  1393. atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
  1394. }