You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 

655 lines
19 KiB

  1. // Copyright 2022 The Prometheus Authors
  2. // Licensed under the Apache License, Version 2.0 (the "License");
  3. // you may not use this file except in compliance with the License.
  4. // You may obtain a copy of the License at
  5. //
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. //
  14. // It provides tools to compare sequences of strings and generate textual diffs.
  15. //
  16. // Maintaining `GetUnifiedDiffString` here because original repository
  17. // (https://github.com/pmezard/go-difflib) is no longer maintained.
  18. package internal
  19. import (
  20. "bufio"
  21. "bytes"
  22. "fmt"
  23. "io"
  24. "strings"
  25. )
  26. func min(a, b int) int {
  27. if a < b {
  28. return a
  29. }
  30. return b
  31. }
  32. func max(a, b int) int {
  33. if a > b {
  34. return a
  35. }
  36. return b
  37. }
  38. func calculateRatio(matches, length int) float64 {
  39. if length > 0 {
  40. return 2.0 * float64(matches) / float64(length)
  41. }
  42. return 1.0
  43. }
// Match describes a matching run of elements: a[A:A+Size] == b[B:B+Size].
type Match struct {
	A    int // start index of the run in sequence a
	B    int // start index of the run in sequence b
	Size int // number of matching elements (0 only for the sentinel match)
}
// OpCode describes a single edit operation that turns a[I1:I2] into
// b[J1:J2]. Tag is one of 'r' (replace), 'd' (delete), 'i' (insert) or
// 'e' (equal) — see GetOpCodes for the exact semantics of each tag.
type OpCode struct {
	Tag byte // operation tag: 'r', 'd', 'i' or 'e'
	I1  int  // start of the affected range in a
	I2  int  // end (exclusive) of the affected range in a
	J1  int  // start of the affected range in b
	J2  int  // end (exclusive) of the affected range in b
}
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches peoples' eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
	a              []string            // first sequence
	b              []string            // second sequence
	b2j            map[string][]int    // line in b -> ascending indices of its occurrences (junk/popular lines purged by chainB)
	IsJunk         func(string) bool   // optional predicate classifying a line as junk
	autoJunk       bool                // when true, treat overly popular lines of long sequences as junk
	bJunk          map[string]struct{} // lines of b that IsJunk classified as junk
	matchingBlocks []Match             // cached result of GetMatchingBlocks
	fullBCount     map[string]int      // line -> occurrence count in b; lazily built by QuickRatio
	bPopular       map[string]struct{} // lines purged from b2j as "popular" by chainB
	opCodes        []OpCode            // cached result of GetOpCodes
}
  94. func NewMatcher(a, b []string) *SequenceMatcher {
  95. m := SequenceMatcher{autoJunk: true}
  96. m.SetSeqs(a, b)
  97. return &m
  98. }
  99. func NewMatcherWithJunk(a, b []string, autoJunk bool,
  100. isJunk func(string) bool,
  101. ) *SequenceMatcher {
  102. m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
  103. m.SetSeqs(a, b)
  104. return &m
  105. }
// SetSeqs sets the two sequences to be compared. Equivalent to calling
// SetSeq1(a) followed by SetSeq2(b).
func (m *SequenceMatcher) SetSeqs(a, b []string) {
	m.SetSeq1(a)
	m.SetSeq2(b)
}
  111. // Set the first sequence to be compared. The second sequence to be compared is
  112. // not changed.
  113. //
  114. // SequenceMatcher computes and caches detailed information about the second
  115. // sequence, so if you want to compare one sequence S against many sequences,
  116. // use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
  117. // sequences.
  118. //
  119. // See also SetSeqs() and SetSeq2().
  120. func (m *SequenceMatcher) SetSeq1(a []string) {
  121. if &a == &m.a {
  122. return
  123. }
  124. m.a = a
  125. m.matchingBlocks = nil
  126. m.opCodes = nil
  127. }
  128. // Set the second sequence to be compared. The first sequence to be compared is
  129. // not changed.
  130. func (m *SequenceMatcher) SetSeq2(b []string) {
  131. if &b == &m.b {
  132. return
  133. }
  134. m.b = b
  135. m.matchingBlocks = nil
  136. m.opCodes = nil
  137. m.fullBCount = nil
  138. m.chainB()
  139. }
// chainB builds the b-side index used by findLongestMatch. After the call,
// m.b2j maps each line of b to the ascending list of indices at which it
// occurs, with junk lines (per IsJunk) and — for long sequences — overly
// popular lines removed; m.bJunk and m.bPopular record what was purged.
func (m *SequenceMatcher) chainB() {
	// Populate line -> index mapping
	b2j := map[string][]int{}
	for i, s := range m.b {
		indices := b2j[s]
		indices = append(indices, i)
		b2j[s] = indices
	}
	// Purge junk elements
	m.bJunk = map[string]struct{}{}
	if m.IsJunk != nil {
		junk := m.bJunk
		for s := range b2j {
			if m.IsJunk(s) {
				junk[s] = struct{}{}
			}
		}
		for s := range junk {
			delete(b2j, s)
		}
	}
	// Purge remaining popular elements: with autoJunk enabled and at least
	// 200 lines, any line occurring in more than ~1% of b is dropped from
	// the index so it cannot anchor a match.
	popular := map[string]struct{}{}
	n := len(m.b)
	if m.autoJunk && n >= 200 {
		ntest := n/100 + 1
		for s, indices := range b2j {
			if len(indices) > ntest {
				popular[s] = struct{}{}
			}
		}
		for s := range popular {
			delete(b2j, s)
		}
	}
	m.bPopular = popular
	m.b2j = b2j
}
  178. func (m *SequenceMatcher) isBJunk(s string) bool {
  179. _, ok := m.bJunk[s]
  180. return ok
  181. }
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
//
//	alo <= i <= i+k <= ahi
//	blo <= j <= j+k <= bhi
//
// and for all (i',j',k') meeting those conditions,
//
//	k >= k'
//	i <= i'
//	and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
	// CAUTION: stripping common prefix or suffix would be incorrect.
	// E.g.,
	//    ab
	//    acab
	// Longest matching block is "ab", but if common prefix is
	// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
	// strip, so ends up claiming that ab is changed to acab by
	// inserting "ca" in the middle. That's minimal but unintuitive:
	// "it's obvious" that someone inserted "ac" at the front.
	// Windiff ends up at the same place as diff, but by pairing up
	// the unique 'b's and then matching the first two 'a's.
	besti, bestj, bestsize := alo, blo, 0
	// find longest junk-free match
	// during an iteration of the loop, j2len[j] = length of longest
	// junk-free match ending with a[i-1] and b[j]
	j2len := map[int]int{}
	for i := alo; i != ahi; i++ {
		// look at all instances of a[i] in b; note that because
		// b2j has no junk keys, the loop is skipped if a[i] is junk
		newj2len := map[int]int{}
		for _, j := range m.b2j[m.a[i]] {
			// a[i] matches b[j]
			if j < blo {
				continue
			}
			if j >= bhi {
				// indices are ascending, so everything after is out of range too
				break
			}
			k := j2len[j-1] + 1
			newj2len[j] = k
			if k > bestsize {
				besti, bestj, bestsize = i-k+1, j-k+1, k
			}
		}
		j2len = newj2len
	}
	// Extend the best by non-junk elements on each end. In particular,
	// "popular" non-junk elements aren't in b2j, which greatly speeds
	// the inner loop above, but also means "the best" match so far
	// doesn't contain any junk *or* popular non-junk elements.
	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		!m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize++
	}
	// Now that we have a wholly interesting match (albeit possibly
	// empty!), we may as well suck up the matching junk on each
	// side of it too. Can't think of a good reason not to, and it
	// saves post-processing the (possibly considerable) expense of
	// figuring out what to do with it. In the case of an empty
	// interesting match, this is clearly the right thing to do,
	// because no other kind of match is possible in the regions.
	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize++
	}
	return Match{A: besti, B: bestj, Size: bestsize}
}
// GetMatchingBlocks returns a list of triples describing matching
// subsequences. The result is cached on the matcher.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
	if m.matchingBlocks != nil {
		return m.matchingBlocks
	}
	// Recursively split around the longest match: everything to its left
	// is matched first (preserving ascending order), then the match
	// itself, then everything to its right.
	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
		match := m.findLongestMatch(alo, ahi, blo, bhi)
		i, j, k := match.A, match.B, match.Size
		if match.Size > 0 {
			if alo < i && blo < j {
				matched = matchBlocks(alo, i, blo, j, matched)
			}
			matched = append(matched, match)
			if i+k < ahi && j+k < bhi {
				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
			}
		}
		return matched
	}
	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
	// It's possible that we have adjacent equal blocks in the
	// matching_blocks list now.
	nonAdjacent := []Match{}
	i1, j1, k1 := 0, 0, 0
	for _, b := range matched {
		// Is this block adjacent to i1, j1, k1?
		i2, j2, k2 := b.A, b.B, b.Size
		if i1+k1 == i2 && j1+k1 == j2 {
			// Yes, so collapse them -- this just increases the length of
			// the first block by the length of the second, and the first
			// block so lengthened remains the block to compare against.
			k1 += k2
		} else {
			// Not adjacent. Remember the first block (k1==0 means it's
			// the dummy we started with), and make the second block the
			// new block to compare against.
			if k1 > 0 {
				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
			}
			i1, j1, k1 = i2, j2, k2
		}
	}
	if k1 > 0 {
		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
	}
	// Terminating sentinel: the only triple with Size == 0.
	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
	m.matchingBlocks = nonAdjacent
	return m.matchingBlocks
}
  337. // Return list of 5-tuples describing how to turn a into b.
  338. //
  339. // Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
  340. // has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
  341. // tuple preceding it, and likewise for j1 == the previous j2.
  342. //
  343. // The tags are characters, with these meanings:
  344. //
  345. // 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
  346. //
  347. // 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
  348. //
  349. // 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
  350. //
  351. // 'e' (equal): a[i1:i2] == b[j1:j2]
  352. func (m *SequenceMatcher) GetOpCodes() []OpCode {
  353. if m.opCodes != nil {
  354. return m.opCodes
  355. }
  356. i, j := 0, 0
  357. matching := m.GetMatchingBlocks()
  358. opCodes := make([]OpCode, 0, len(matching))
  359. for _, m := range matching {
  360. // invariant: we've pumped out correct diffs to change
  361. // a[:i] into b[:j], and the next matching block is
  362. // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
  363. // out a diff to change a[i:ai] into b[j:bj], pump out
  364. // the matching block, and move (i,j) beyond the match
  365. ai, bj, size := m.A, m.B, m.Size
  366. tag := byte(0)
  367. if i < ai && j < bj {
  368. tag = 'r'
  369. } else if i < ai {
  370. tag = 'd'
  371. } else if j < bj {
  372. tag = 'i'
  373. }
  374. if tag > 0 {
  375. opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
  376. }
  377. i, j = ai+size, bj+size
  378. // the list of matching blocks is terminated by a
  379. // sentinel with size 0
  380. if size > 0 {
  381. opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
  382. }
  383. }
  384. m.opCodes = opCodes
  385. return m.opCodes
  386. }
// GetGroupedOpCodes isolates change clusters by eliminating ranges with no
// changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
// A negative n selects the conventional default of 3 context lines.
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
	if n < 0 {
		n = 3
	}
	codes := m.GetOpCodes()
	if len(codes) == 0 {
		// No opcodes at all; synthesize a single trivial 'equal' so the
		// fix-up logic below has something to work with.
		codes = []OpCode{{'e', 0, 1, 0, 1}}
	}
	// Fixup leading and trailing groups if they show no changes.
	if codes[0].Tag == 'e' {
		c := codes[0]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
	}
	if codes[len(codes)-1].Tag == 'e' {
		c := codes[len(codes)-1]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
	}
	nn := n + n
	groups := [][]OpCode{}
	group := []OpCode{}
	for _, c := range codes {
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		// End the current group and start a new one whenever
		// there is a large range with no changes.
		if c.Tag == 'e' && i2-i1 > nn {
			group = append(group, OpCode{
				c.Tag, i1, min(i2, i1+n),
				j1, min(j2, j1+n),
			})
			groups = append(groups, group)
			group = []OpCode{}
			i1, j1 = max(i1, i2-n), max(j1, j2-n)
		}
		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
	}
	// Flush the final group unless it is a lone 'equal' (pure context).
	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
		groups = append(groups, group)
	}
	return groups
}
  433. // Return a measure of the sequences' similarity (float in [0,1]).
  434. //
  435. // Where T is the total number of elements in both sequences, and
  436. // M is the number of matches, this is 2.0*M / T.
  437. // Note that this is 1 if the sequences are identical, and 0 if
  438. // they have nothing in common.
  439. //
  440. // .Ratio() is expensive to compute if you haven't already computed
  441. // .GetMatchingBlocks() or .GetOpCodes(), in which case you may
  442. // want to try .QuickRatio() or .RealQuickRation() first to get an
  443. // upper bound.
  444. func (m *SequenceMatcher) Ratio() float64 {
  445. matches := 0
  446. for _, m := range m.GetMatchingBlocks() {
  447. matches += m.Size
  448. }
  449. return calculateRatio(matches, len(m.a)+len(m.b))
  450. }
  451. // Return an upper bound on ratio() relatively quickly.
  452. //
  453. // This isn't defined beyond that it is an upper bound on .Ratio(), and
  454. // is faster to compute.
  455. func (m *SequenceMatcher) QuickRatio() float64 {
  456. // viewing a and b as multisets, set matches to the cardinality
  457. // of their intersection; this counts the number of matches
  458. // without regard to order, so is clearly an upper bound
  459. if m.fullBCount == nil {
  460. m.fullBCount = map[string]int{}
  461. for _, s := range m.b {
  462. m.fullBCount[s]++
  463. }
  464. }
  465. // avail[x] is the number of times x appears in 'b' less the
  466. // number of times we've seen it in 'a' so far ... kinda
  467. avail := map[string]int{}
  468. matches := 0
  469. for _, s := range m.a {
  470. n, ok := avail[s]
  471. if !ok {
  472. n = m.fullBCount[s]
  473. }
  474. avail[s] = n - 1
  475. if n > 0 {
  476. matches++
  477. }
  478. }
  479. return calculateRatio(matches, len(m.a)+len(m.b))
  480. }
  481. // Return an upper bound on ratio() very quickly.
  482. //
  483. // This isn't defined beyond that it is an upper bound on .Ratio(), and
  484. // is faster to compute than either .Ratio() or .QuickRatio().
  485. func (m *SequenceMatcher) RealQuickRatio() float64 {
  486. la, lb := len(m.a), len(m.b)
  487. return calculateRatio(min(la, lb), la+lb)
  488. }
  489. // Convert range to the "ed" format
  490. func formatRangeUnified(start, stop int) string {
  491. // Per the diff spec at http://www.unix.org/single_unix_specification/
  492. beginning := start + 1 // lines start numbering with one
  493. length := stop - start
  494. if length == 1 {
  495. return fmt.Sprintf("%d", beginning)
  496. }
  497. if length == 0 {
  498. beginning-- // empty ranges begin at line just before the range
  499. }
  500. return fmt.Sprintf("%d,%d", beginning, length)
  501. }
// UnifiedDiff holds the parameters for producing a unified diff between two
// line sequences. See WriteUnifiedDiff for how each field is used.
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name
	FromDate string   // First file time
	B        []string // Second sequence lines
	ToFile   string   // Second file name
	ToDate   string   // Second file time
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines
}
// WriteUnifiedDiff compares two sequences of lines and writes the delta as
// a unified diff to writer.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context,
// which defaults to three when negative.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the Eol
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// FromFile, ToFile, FromDate, and ToDate.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	// wf writes a formatted string; ws writes a raw string.
	wf := func(format string, args ...interface{}) error {
		_, err := buf.WriteString(fmt.Sprintf(format, args...))
		return err
	}
	ws := func(s string) error {
		_, err := buf.WriteString(s)
		return err
	}
	if len(diff.Eol) == 0 {
		diff.Eol = "\n"
	}
	started := false
	m := NewMatcher(diff.A, diff.B)
	for _, g := range m.GetGroupedOpCodes(diff.Context) {
		// The "---"/"+++" file header is emitted once, before the first hunk.
		if !started {
			started = true
			fromDate := ""
			if len(diff.FromDate) > 0 {
				fromDate = "\t" + diff.FromDate
			}
			toDate := ""
			if len(diff.ToDate) > 0 {
				toDate = "\t" + diff.ToDate
			}
			if diff.FromFile != "" || diff.ToFile != "" {
				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
				if err != nil {
					return err
				}
				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
				if err != nil {
					return err
				}
			}
		}
		// "@@ -a,b +c,d @@" hunk header spanning the whole group.
		first, last := g[0], g[len(g)-1]
		range1 := formatRangeUnified(first.I1, last.I2)
		range2 := formatRangeUnified(first.J1, last.J2)
		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
			return err
		}
		for _, c := range g {
			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
			// Context lines are prefixed with a space.
			if c.Tag == 'e' {
				for _, line := range diff.A[i1:i2] {
					if err := ws(" " + line); err != nil {
						return err
					}
				}
				continue
			}
			// Removed lines ("-") for replace/delete.
			if c.Tag == 'r' || c.Tag == 'd' {
				for _, line := range diff.A[i1:i2] {
					if err := ws("-" + line); err != nil {
						return err
					}
				}
			}
			// Added lines ("+") for replace/insert.
			if c.Tag == 'r' || c.Tag == 'i' {
				for _, line := range diff.B[j1:j2] {
					if err := ws("+" + line); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
  604. // Like WriteUnifiedDiff but returns the diff a string.
  605. func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
  606. w := &bytes.Buffer{}
  607. err := WriteUnifiedDiff(w, diff)
  608. return w.String(), err
  609. }
  610. // Split a string on "\n" while preserving them. The output can be used
  611. // as input for UnifiedDiff and ContextDiff structures.
  612. func SplitLines(s string) []string {
  613. lines := strings.SplitAfter(s, "\n")
  614. lines[len(lines)-1] += "\n"
  615. return lines
  616. }