gopls: migrate internal/lsp to gopls/internal/lsp

This CL was created using the following commands:

    ./gopls/internal/migrate.sh
    git add .
    git codereview gofmt

For golang/go#54509

Change-Id: Iceeec602748a5e6f609c3ceda8d19157e5c94009
Reviewed-on: https://go-review.googlesource.com/c/tools/+/426796
gopls-CI: kokoro <noreply+kokoro@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
Reviewed-by: Peter Weinberger <pjw@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
diff --git a/internal/diff/diff.go b/internal/diff/diff.go
new file mode 100644
index 0000000..8fd6824
--- /dev/null
+++ b/internal/diff/diff.go
@@ -0,0 +1,159 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff supports a pluggable diff algorithm.
+package diff
+
+import (
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/internal/span"
+)
+
+// TextEdit represents a change to a section of a document.
+// The text within the specified span should be replaced by the supplied new text.
+type TextEdit struct {
+	Span    span.Span
+	NewText string
+}
+
+// ComputeEdits is the type for a function that produces a set of edits that
+// convert from the before content to the after content.
+type ComputeEdits func(uri span.URI, before, after string) ([]TextEdit, error)
+
+// SortTextEdits orders all edits by their starting points.
+// The sort is stable so that edits with the same starting point will not
+// be reordered.
+func SortTextEdits(d []TextEdit) {
+	// Use a stable sort to maintain the order of edits inserted at the same position.
+	sort.SliceStable(d, func(i int, j int) bool {
+		return span.Compare(d[i].Span, d[j].Span) < 0
+	})
+}
+
+// ApplyEdits applies the set of edits to the before content and returns the
+// resulting content.
+// It may panic or produce garbage if the edits are not valid for the provided
+// before content.
+func ApplyEdits(before string, edits []TextEdit) string {
+	// Preconditions:
+	//   - all of the edits apply to before
+	//   - and all the spans for each TextEdit have the same URI
+	if len(edits) == 0 {
+		return before
+	}
+	edits, _ = prepareEdits(before, edits)
+	after := strings.Builder{}
+	last := 0
+	for _, edit := range edits {
+		start := edit.Span.Start().Offset()
+		if start > last {
+			after.WriteString(before[last:start])
+			last = start
+		}
+		after.WriteString(edit.NewText)
+		last = edit.Span.End().Offset()
+	}
+	if last < len(before) {
+		after.WriteString(before[last:])
+	}
+	return after.String()
+}
+
+// LineEdits takes a set of edits and expands and merges them as necessary
+// to ensure that there are only full line edits left when it is done.
+func LineEdits(before string, edits []TextEdit) []TextEdit {
+	if len(edits) == 0 {
+		return nil
+	}
+	edits, partial := prepareEdits(before, edits)
+	if partial {
+		edits = lineEdits(before, edits)
+	}
+	return edits
+}
+
+// prepareEdits returns a sorted copy of the edits, and reports whether any of them requires expansion to full lines.
+func prepareEdits(before string, edits []TextEdit) ([]TextEdit, bool) {
+	partial := false
+	tf := span.NewTokenFile("", []byte(before))
+	copied := make([]TextEdit, len(edits))
+	for i, edit := range edits {
+		edit.Span, _ = edit.Span.WithAll(tf)
+		copied[i] = edit
+		partial = partial ||
+			edit.Span.Start().Offset() >= len(before) ||
+			edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1
+	}
+	SortTextEdits(copied)
+	return copied, partial
+}
+
+// lineEdits rewrites the edits to always be full line edits
+func lineEdits(before string, edits []TextEdit) []TextEdit {
+	adjusted := make([]TextEdit, 0, len(edits))
+	current := TextEdit{Span: span.Invalid}
+	for _, edit := range edits {
+		if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() {
+			// overlaps with the current edit, need to combine
+			// first get the gap from the previous edit
+			gap := before[current.Span.End().Offset():edit.Span.Start().Offset()]
+			// now add the text of this edit
+			current.NewText += gap + edit.NewText
+			// and then adjust the end position
+			current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End())
+		} else {
+			// does not overlap, add previous run (if there is one)
+			adjusted = addEdit(before, adjusted, current)
+			// and then remember this edit as the start of the next run
+			current = edit
+		}
+	}
+	// add the current pending run if there is one
+	return addEdit(before, adjusted, current)
+}
+
+func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit {
+	if !edit.Span.IsValid() {
+		return edits
+	}
+	// if edit is partial, expand it to full line now
+	start := edit.Span.Start()
+	end := edit.Span.End()
+	if start.Column() > 1 {
+		// prepend the text and adjust to start of line
+		delta := start.Column() - 1
+		start = span.NewPoint(start.Line(), 1, start.Offset()-delta)
+		edit.Span = span.New(edit.Span.URI(), start, end)
+		edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
+	}
+	if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' {
+		// after end of file that does not end in eol, so join to last line of file
+		// to do this we need to know where the start of the last line was
+		eol := strings.LastIndex(before, "\n")
+		if eol < 0 {
+			// file is one non terminated line
+			eol = 0
+		}
+		delta := len(before) - eol
+		start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta)
+		edit.Span = span.New(edit.Span.URI(), start, end)
+		edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
+	}
+	if end.Column() > 1 {
+		remains := before[end.Offset():]
+		eol := strings.IndexRune(remains, '\n')
+		if eol < 0 {
+			eol = len(remains)
+		} else {
+			eol++
+		}
+		end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol)
+		edit.Span = span.New(edit.Span.URI(), start, end)
+		edit.NewText = edit.NewText + remains[:eol]
+	}
+	edits = append(edits, edit)
+	return edits
+}
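
For reference, a minimal sketch (not part of this CL) of how a client might construct and apply edits with the package above; it uses offset-only span points, as difftest below does, which ApplyEdits completes internally via span.NewTokenFile:

    package main

    import (
        "fmt"

        "golang.org/x/tools/internal/diff"
        "golang.org/x/tools/internal/span"
    )

    func main() {
        before := "fruit\n"
        // Replace bytes [0,5) ("fruit") with "cheese". Only offsets are
        // supplied; line and column are filled in by ApplyEdits.
        edits := []diff.TextEdit{{
            Span:    span.New("", span.NewPoint(0, 0, 0), span.NewPoint(0, 0, 5)),
            NewText: "cheese",
        }}
        fmt.Printf("%q\n", diff.ApplyEdits(before, edits)) // "cheese\n"
    }
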
diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go
new file mode 100644
index 0000000..1bf03ea
--- /dev/null
+++ b/internal/diff/diff_test.go
@@ -0,0 +1,205 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff_test
+
+import (
+	"fmt"
+	"math/rand"
+	"strings"
+	"testing"
+
+	"golang.org/x/tools/internal/diff"
+	"golang.org/x/tools/internal/diff/difftest"
+	"golang.org/x/tools/internal/span"
+)
+
+func TestApplyEdits(t *testing.T) {
+	for _, tc := range difftest.TestCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			t.Helper()
+			if got := diff.ApplyEdits(tc.In, tc.Edits); got != tc.Out {
+				t.Errorf("ApplyEdits edits got %q, want %q", got, tc.Out)
+			}
+			if tc.LineEdits != nil {
+				if got := diff.ApplyEdits(tc.In, tc.LineEdits); got != tc.Out {
+					t.Errorf("ApplyEdits lineEdits got %q, want %q", got, tc.Out)
+				}
+			}
+		})
+	}
+}
+
+func TestNEdits(t *testing.T) {
+	for i, tc := range difftest.TestCases {
+		sp := fmt.Sprintf("file://%s.%d", tc.Name, i)
+		edits, err := diff.NComputeEdits(span.URI(sp), tc.In, tc.Out)
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := diff.ApplyEdits(tc.In, edits)
+		if got != tc.Out {
+			t.Fatalf("%s: got %q wanted %q", tc.Name, got, tc.Out)
+		}
+		if len(edits) < len(tc.Edits) { // should find subline edits
+			t.Errorf("got %v, expected %v for %#v", edits, tc.Edits, tc)
+		}
+	}
+}
+
+func TestNRandom(t *testing.T) {
+	rand.Seed(1)
+	for i := 0; i < 1000; i++ {
+		fname := fmt.Sprintf("file://%x", i)
+		a := randstr("abω", 16)
+		b := randstr("abωc", 16)
+		edits, err := diff.NComputeEdits(span.URI(fname), a, b)
+		if err != nil {
+			t.Fatalf("%q,%q %v", a, b, err)
+		}
+		got := diff.ApplyEdits(a, edits)
+		if got != b {
+			t.Fatalf("%d: got %q, wanted %q, starting with %q", i, got, b, a)
+		}
+	}
+}
+
+func TestNLinesRandom(t *testing.T) {
+	rand.Seed(2)
+	for i := 0; i < 1000; i++ {
+		fname := fmt.Sprintf("file://%x", i)
+		x := randlines("abω", 4) // avg line length is 6, want a change every 3rd line or so
+		v := []rune(x)
+		for i := 0; i < len(v); i++ {
+			if rand.Float64() < .05 {
+				v[i] = 'N'
+			}
+		}
+		y := string(v)
+		// occasionally remove the trailing \n
+		if rand.Float64() < .1 {
+			x = x[:len(x)-1]
+		}
+		if rand.Float64() < .1 {
+			y = y[:len(y)-1]
+		}
+		a, b := strings.SplitAfter(x, "\n"), strings.SplitAfter(y, "\n")
+		edits, err := diff.NComputeLineEdits(span.URI(fname), a, b)
+		if err != nil {
+			t.Fatalf("%q,%q %v", a, b, err)
+		}
+		got := diff.ApplyEdits(x, edits)
+		if got != y {
+			t.Fatalf("%d: got\n%q, wanted\n%q, starting with %q", i, got, y, a)
+		}
+	}
+}
+
+func TestLineEdits(t *testing.T) {
+	for _, tc := range difftest.TestCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			t.Helper()
+			// if line edits not specified, it is the same as edits
+			edits := tc.LineEdits
+			if edits == nil {
+				edits = tc.Edits
+			}
+			if got := diff.LineEdits(tc.In, tc.Edits); diffEdits(got, edits) {
+				t.Errorf("LineEdits got %q, want %q", got, edits)
+			}
+		})
+	}
+}
+
+func TestUnified(t *testing.T) {
+	for _, tc := range difftest.TestCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			t.Helper()
+			unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.Edits))
+			if unified != tc.Unified {
+				t.Errorf("edits got diff:\n%v\nexpected:\n%v", unified, tc.Unified)
+			}
+			if tc.LineEdits != nil {
+				unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.LineEdits))
+				if unified != tc.Unified {
+					t.Errorf("lineEdits got diff:\n%v\nexpected:\n%v", unified, tc.Unified)
+				}
+			}
+		})
+	}
+}
+
+func TestRegressionOld001(t *testing.T) {
+	a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+
+	b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+	diffs, err := diff.NComputeEdits(span.URI("file://one"), a, b)
+	if err != nil {
+		t.Error(err)
+	}
+	got := diff.ApplyEdits(a, diffs)
+	if got != b {
+		i := 0
+		for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+		}
+		t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+		t.Errorf("\n%q\n%q", got[i:], b[i:])
+	}
+}
+
+func TestRegressionOld002(t *testing.T) {
+	a := "n\"\n)\n"
+	b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+	diffs, err := diff.NComputeEdits(span.URI("file://two"), a, b)
+	if err != nil {
+		t.Error(err)
+	}
+	got := diff.ApplyEdits(a, diffs)
+	if got != b {
+		i := 0
+		for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+		}
+		t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+		t.Errorf("\n%q\n%q", got[i:], b[i:])
+	}
+}
+
+func diffEdits(got, want []diff.TextEdit) bool {
+	if len(got) != len(want) {
+		return true
+	}
+	for i, w := range want {
+		g := got[i]
+		if span.Compare(w.Span, g.Span) != 0 {
+			return true
+		}
+		if w.NewText != g.NewText {
+			return true
+		}
+	}
+	return false
+}
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+	src := []rune(s)
+	x := make([]rune, n)
+	for i := 0; i < n; i++ {
+		x[i] = src[rand.Intn(len(src))]
+	}
+	return string(x)
+}
+
+// return some random lines, all ending with \n
+func randlines(s string, n int) string {
+	src := []rune(s)
+	var b strings.Builder
+	for i := 0; i < n; i++ {
+		for j := 0; j < 4+rand.Intn(4); j++ {
+			b.WriteRune(src[rand.Intn(len(src))])
+		}
+		b.WriteByte('\n')
+	}
+	return b.String()
+}
diff --git a/internal/diff/difftest/difftest.go b/internal/diff/difftest/difftest.go
new file mode 100644
index 0000000..6161256
--- /dev/null
+++ b/internal/diff/difftest/difftest.go
@@ -0,0 +1,243 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package difftest supplies a set of tests that will operate on any
+// implementation of a diff algorithm as exposed by
+// "golang.org/x/tools/internal/diff"
+package difftest
+
+import (
+	"fmt"
+	"testing"
+
+	"golang.org/x/tools/internal/diff"
+	"golang.org/x/tools/internal/span"
+)
+
+const (
+	FileA         = "from"
+	FileB         = "to"
+	UnifiedPrefix = "--- " + FileA + "\n+++ " + FileB + "\n"
+)
+
+var TestCases = []struct {
+	Name, In, Out, Unified string
+	Edits, LineEdits       []diff.TextEdit
+	NoDiff                 bool
+}{{
+	Name: "empty",
+	In:   "",
+	Out:  "",
+}, {
+	Name: "no_diff",
+	In:   "gargantuan\n",
+	Out:  "gargantuan\n",
+}, {
+	Name: "replace_all",
+	In:   "fruit\n",
+	Out:  "cheese\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-fruit
++cheese
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(0, 5), NewText: "cheese"}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "cheese\n"}},
+}, {
+	Name: "insert_rune",
+	In:   "gord\n",
+	Out:  "gourd\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-gord
++gourd
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(2, 2), NewText: "u"}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "gourd\n"}},
+}, {
+	Name: "delete_rune",
+	In:   "groat\n",
+	Out:  "goat\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-groat
++goat
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(1, 2), NewText: ""}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "goat\n"}},
+}, {
+	Name: "replace_rune",
+	In:   "loud\n",
+	Out:  "lord\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-loud
++lord
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(2, 3), NewText: "r"}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "lord\n"}},
+}, {
+	Name: "replace_partials",
+	In:   "blanket\n",
+	Out:  "bunker\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-blanket
++bunker
+`[1:],
+	Edits: []diff.TextEdit{
+		{Span: newSpan(1, 3), NewText: "u"},
+		{Span: newSpan(6, 7), NewText: "r"},
+	},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 8), NewText: "bunker\n"}},
+}, {
+	Name: "insert_line",
+	In:   "1: one\n3: three\n",
+	Out:  "1: one\n2: two\n3: three\n",
+	Unified: UnifiedPrefix + `
+@@ -1,2 +1,3 @@
+ 1: one
++2: two
+ 3: three
+`[1:],
+	Edits: []diff.TextEdit{{Span: newSpan(7, 7), NewText: "2: two\n"}},
+}, {
+	Name: "replace_no_newline",
+	In:   "A",
+	Out:  "B",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++B
+\ No newline at end of file
+`[1:],
+	Edits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "B"}},
+}, {
+	Name: "add_end",
+	In:   "A",
+	Out:  "AB",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++AB
+\ No newline at end of file
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(1, 1), NewText: "B"}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "AB"}},
+}, {
+	Name: "add_newline",
+	In:   "A",
+	Out:  "A\n",
+	Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++A
+`[1:],
+	Edits:     []diff.TextEdit{{Span: newSpan(1, 1), NewText: "\n"}},
+	LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "A\n"}},
+}, {
+	Name: "delete_front",
+	In:   "A\nB\nC\nA\nB\nB\nA\n",
+	Out:  "C\nB\nA\nB\nA\nC\n",
+	Unified: UnifiedPrefix + `
+@@ -1,7 +1,6 @@
+-A
+-B
+ C
++B
+ A
+ B
+-B
+ A
++C
+`[1:],
+	Edits: []diff.TextEdit{
+		{Span: newSpan(0, 4), NewText: ""},
+		{Span: newSpan(6, 6), NewText: "B\n"},
+		{Span: newSpan(10, 12), NewText: ""},
+		{Span: newSpan(14, 14), NewText: "C\n"},
+	},
+	NoDiff: true, // diff algorithm produces different delete/insert pattern
+},
+	{
+		Name: "replace_last_line",
+		In:   "A\nB\n",
+		Out:  "A\nC\n\n",
+		Unified: UnifiedPrefix + `
+@@ -1,2 +1,3 @@
+ A
+-B
++C
++
+`[1:],
+		Edits:     []diff.TextEdit{{Span: newSpan(2, 3), NewText: "C\n"}},
+		LineEdits: []diff.TextEdit{{Span: newSpan(2, 4), NewText: "C\n\n"}},
+	},
+	{
+		Name: "multiple_replace",
+		In:   "A\nB\nC\nD\nE\nF\nG\n",
+		Out:  "A\nH\nI\nJ\nE\nF\nK\n",
+		Unified: UnifiedPrefix + `
+@@ -1,7 +1,7 @@
+ A
+-B
+-C
+-D
++H
++I
++J
+ E
+ F
+-G
++K
+`[1:],
+		Edits: []diff.TextEdit{
+			{Span: newSpan(2, 8), NewText: "H\nI\nJ\n"},
+			{Span: newSpan(12, 14), NewText: "K\n"},
+		},
+		NoDiff: true, // diff algorithm produces different delete/insert pattern
+	},
+}
+
+func init() {
+	// expand all the spans to full versions
+	// we need them all to have their line number and column
+	for _, tc := range TestCases {
+		tf := span.NewTokenFile("", []byte(tc.In))
+		for i := range tc.Edits {
+			tc.Edits[i].Span, _ = tc.Edits[i].Span.WithAll(tf)
+		}
+		for i := range tc.LineEdits {
+			tc.LineEdits[i].Span, _ = tc.LineEdits[i].Span.WithAll(tf)
+		}
+	}
+}
+
+func DiffTest(t *testing.T, compute diff.ComputeEdits) {
+	t.Helper()
+	for _, test := range TestCases {
+		t.Run(test.Name, func(t *testing.T) {
+			t.Helper()
+			edits, err := compute(span.URIFromPath("/"+test.Name), test.In, test.Out)
+			if err != nil {
+				t.Fatal(err)
+			}
+			got := diff.ApplyEdits(test.In, edits)
+			unified := fmt.Sprint(diff.ToUnified(FileA, FileB, test.In, edits))
+			if got != test.Out {
+				t.Errorf("got patched:\n%v\nfrom diff:\n%v\nexpected:\n%v", got, unified, test.Out)
+			}
+			if !test.NoDiff && unified != test.Unified {
+				t.Errorf("got diff:\n%v\nexpected:\n%v", unified, test.Unified)
+			}
+		})
+	}
+}
+
+func newSpan(start, end int) span.Span {
+	return span.New("", span.NewPoint(0, 0, start), span.NewPoint(0, 0, end))
+}
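
As an illustrative sketch (not part of this CL), a diff implementation plugs into these shared tests from its own test file; the package and function names below are hypothetical:

    package myimpl_test

    import (
        "testing"

        "golang.org/x/tools/internal/diff"
        "golang.org/x/tools/internal/diff/difftest"
        "golang.org/x/tools/internal/span"
    )

    // computeEdits stands in for a real algorithm with the diff.ComputeEdits
    // signature. This naive placeholder replaces the entire content: it patches
    // correctly but would not reproduce the expected unified diffs in TestCases.
    func computeEdits(uri span.URI, before, after string) ([]diff.TextEdit, error) {
        return []diff.TextEdit{{
            Span:    span.New(uri, span.NewPoint(0, 0, 0), span.NewPoint(0, 0, len(before))),
            NewText: after,
        }}, nil
    }

    func TestDiff(t *testing.T) {
        difftest.DiffTest(t, computeEdits)
    }
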
diff --git a/internal/diff/difftest/difftest_test.go b/internal/diff/difftest/difftest_test.go
new file mode 100644
index 0000000..d070d32
--- /dev/null
+++ b/internal/diff/difftest/difftest_test.go
@@ -0,0 +1,84 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package difftest supplies a set of tests that will operate on any
+// implementation of a diff algorithm as exposed by
+// "golang.org/x/tools/internal/diff"
+package difftest_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+
+	"golang.org/x/tools/internal/diff/difftest"
+	"golang.org/x/tools/internal/testenv"
+)
+
+func TestVerifyUnified(t *testing.T) {
+	testenv.NeedsTool(t, "diff")
+	for _, test := range difftest.TestCases {
+		t.Run(test.Name, func(t *testing.T) {
+			t.Helper()
+			if test.NoDiff {
+				t.Skip("diff tool produces expected different results")
+			}
+			diff, err := getDiffOutput(test.In, test.Out)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if len(diff) > 0 {
+				diff = difftest.UnifiedPrefix + diff
+			}
+			if diff != test.Unified {
+				t.Errorf("unified:\n%q\ndiff -u:\n%q", test.Unified, diff)
+			}
+		})
+	}
+}
+
+func getDiffOutput(a, b string) (string, error) {
+	fileA, err := ioutil.TempFile("", "myers.in")
+	if err != nil {
+		return "", err
+	}
+	defer os.Remove(fileA.Name())
+	if _, err := fileA.Write([]byte(a)); err != nil {
+		return "", err
+	}
+	if err := fileA.Close(); err != nil {
+		return "", err
+	}
+	fileB, err := ioutil.TempFile("", "myers.in")
+	if err != nil {
+		return "", err
+	}
+	defer os.Remove(fileB.Name())
+	if _, err := fileB.Write([]byte(b)); err != nil {
+		return "", err
+	}
+	if err := fileB.Close(); err != nil {
+		return "", err
+	}
+	cmd := exec.Command("diff", "-u", fileA.Name(), fileB.Name())
+	cmd.Env = append(cmd.Env, "LANG=en_US.UTF-8")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		if _, ok := err.(*exec.ExitError); !ok {
+			return "", fmt.Errorf("failed to run diff -u %v %v: %v\n%v", fileA.Name(), fileB.Name(), err, string(out))
+		}
+	}
+	diff := string(out)
+	if len(diff) <= 0 {
+		return diff, nil
+	}
+	bits := strings.SplitN(diff, "\n", 3)
+	if len(bits) != 3 {
+		return "", fmt.Errorf("diff output did not have file prefix:\n%s", diff)
+	}
+	return bits[2], nil
+}
diff --git a/internal/diff/lcs/common.go b/internal/diff/lcs/common.go
new file mode 100644
index 0000000..e5d0801
--- /dev/null
+++ b/internal/diff/lcs/common.go
@@ -0,0 +1,184 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+	X, Y int
+	Len  int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+	sort.Slice(l, func(i, j int) bool {
+		if l[i].X != l[j].X {
+			return l[i].X < l[j].X
+		}
+		return l[i].Len > l[j].Len
+	})
+	return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+	for i := 1; i < len(l); i++ {
+		if l[i-1].X+l[i-1].Len > l[i].X {
+			return false
+		}
+		if l[i-1].Y+l[i-1].Len > l[i].Y {
+			return false
+		}
+	}
+	return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+	// from the set of diagonals in l, find a maximal non-conflicting set
+	// this problem may be NP-complete, but we use a greedy heuristic,
+	// which is quadratic, but with a better data structure, could be D log D.
+	// independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+	// which has to have monotone x and y
+	if len(l) == 0 {
+		return nil
+	}
+	sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+	tmp := make(lcs, 0, len(l))
+	tmp = append(tmp, l[0])
+	for i := 1; i < len(l); i++ {
+		var dir direction
+		nxt := l[i]
+		for _, in := range tmp {
+			if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+				break
+			}
+		}
+		if nxt.Len > 0 && dir != bad {
+			tmp = append(tmp, nxt)
+		}
+	}
+	tmp.sort()
+	if false && !tmp.valid() { // debug checking
+		log.Fatalf("here %d", len(tmp))
+	}
+	return tmp
+}
+
+type direction int
+
+const (
+	empty    direction = iota // diag is empty (so not in lcs)
+	leftdown                  // proposed diag is acceptably to the left and below
+	rightup                   // proposed diag is acceptably to the right and above
+	bad                       // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
+func overlap(exist, prop diag) (direction, diag) {
+	if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+		// remove the end of prop where it overlaps with the X end of exist
+		delta := prop.X + prop.Len - exist.X
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+	}
+	if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+		// remove the beginning of prop where it overlaps with exist
+		delta := exist.X + exist.Len - prop.X
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+		prop.X += delta
+		prop.Y += delta
+	}
+	if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+		// remove the end of prop that overlaps (in Y) with exist
+		delta := prop.Y + prop.Len - exist.Y
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+	}
+	if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+		// remove the beginning of prop that overlaps with exist
+		delta := exist.Y + exist.Len - prop.Y
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+		prop.X += delta // no test reaches this code
+		prop.Y += delta
+	}
+	if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+		return leftdown, prop
+	}
+	if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+		return rightup, prop
+	}
+	// prop can't be in an lcs that contains exist
+	return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prependlcs prepends a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first diag. prependlcs is only called when extending diagonals
+// in the backward direction.
+func prependlcs(lcs lcs, x, y int) lcs {
+	if len(lcs) > 0 {
+		d := &lcs[0]
+		if int(d.X) == x+1 && int(d.Y) == y+1 {
+			// extend the diagonal down and to the left
+			d.X, d.Y = int(x), int(y)
+			d.Len++
+			return lcs
+		}
+	}
+
+	r := diag{X: int(x), Y: int(y), Len: 1}
+	lcs = append([]diag{r}, lcs...)
+	return lcs
+}
+
+// appendlcs appends a diagonal, or extends the existing one,
+// by adding the edge (x,y)-(x+1,y+1). appendlcs is only called
+// while extending diagonals in the forward direction.
+func appendlcs(lcs lcs, x, y int) lcs {
+	if len(lcs) > 0 {
+		last := &lcs[len(lcs)-1]
+		// Expand last element if adjoining.
+		if last.X+last.Len == x && last.Y+last.Len == y {
+			last.Len++
+			return lcs
+		}
+	}
+
+	return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// ok reports whether (d, k) satisfies the constraint d >= 0 && -d <= k <= d
+func ok(d, k int) bool {
+	return d >= 0 && -d <= k && k <= d
+}
+
+type Diff struct {
+	Start, End int    // offsets in A
+	Text       string // replacement text
+}
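
The Diff records above describe a transformation of A: each one replaces A[Start:End] with Text, applied in order. A small sketch (not part of this CL, mirroring checkDiffs in common_test.go):

    package lcs

    import "strings"

    // applyDiffs converts a into the new text described by diffs.
    func applyDiffs(a string, diffs []Diff) string {
        var b strings.Builder
        last := 0
        for _, d := range diffs {
            b.WriteString(a[last:d.Start]) // unchanged text before this diff
            b.WriteString(d.Text)          // replacement for a[d.Start:d.End]
            last = d.End
        }
        b.WriteString(a[last:]) // unchanged suffix
        return b.String()
    }
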
diff --git a/internal/diff/lcs/common_test.go b/internal/diff/lcs/common_test.go
new file mode 100644
index 0000000..4aa36ab
--- /dev/null
+++ b/internal/diff/lcs/common_test.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"math/rand"
+	"strings"
+	"testing"
+)
+
+type Btest struct {
+	a, b string
+	lcs  []string
+}
+
+var Btests = []Btest{
+	{"aaabab", "abaab", []string{"abab", "aaab"}},
+	{"aabbba", "baaba", []string{"aaba"}},
+	{"cabbx", "cbabx", []string{"cabx", "cbbx"}},
+	{"c", "cb", []string{"c"}},
+	{"aaba", "bbb", []string{"b"}},
+	{"bbaabb", "b", []string{"b"}},
+	{"baaabb", "bbaba", []string{"bbb", "baa", "bab"}},
+	{"baaabb", "abbab", []string{"abb", "bab", "aab"}},
+	{"baaba", "aaabba", []string{"aaba"}},
+	{"ca", "cba", []string{"ca"}},
+	{"ccbcbc", "abba", []string{"bb"}},
+	{"ccbcbc", "aabba", []string{"bb"}},
+	{"ccb", "cba", []string{"cb"}},
+	{"caef", "axe", []string{"ae"}},
+	{"bbaabb", "baabb", []string{"baabb"}},
+	// Example from Myers:
+	{"abcabba", "cbabac", []string{"caba", "baba", "cbba"}},
+	{"3456aaa", "aaa", []string{"aaa"}},
+	{"aaa", "aaa123", []string{"aaa"}},
+	{"aabaa", "aacaa", []string{"aaaa"}},
+	{"1a", "a", []string{"a"}},
+	{"abab", "bb", []string{"bb"}},
+	{"123", "ab", []string{""}},
+	{"a", "b", []string{""}},
+	{"abc", "123", []string{""}},
+	{"aa", "aa", []string{"aa"}},
+	{"abcde", "12345", []string{""}},
+	{"aaa3456", "aaa", []string{"aaa"}},
+	{"abcde", "12345a", []string{"a"}},
+	{"ab", "123", []string{""}},
+	{"1a2", "a", []string{"a"}},
+	// for two-sided
+	{"babaab", "cccaba", []string{"aba"}},
+	{"aabbab", "cbcabc", []string{"bab"}},
+	{"abaabb", "bcacab", []string{"baab"}},
+	{"abaabb", "abaaaa", []string{"abaa"}},
+	{"bababb", "baaabb", []string{"baabb"}},
+	{"abbbaa", "cabacc", []string{"aba"}},
+	{"aabbaa", "aacaba", []string{"aaaa", "aaba"}},
+}
+
+func init() {
+	log.SetFlags(log.Lshortfile)
+}
+
+func check(t *testing.T, str string, lcs lcs, want []string) {
+	t.Helper()
+	if !lcs.valid() {
+		t.Errorf("bad lcs %v", lcs)
+	}
+	var got strings.Builder
+	for _, dd := range lcs {
+		got.WriteString(str[dd.X : dd.X+dd.Len])
+	}
+	ans := got.String()
+	for _, w := range want {
+		if ans == w {
+			return
+		}
+	}
+	t.Fatalf("str=%q lcs=%v want=%q got=%q", str, lcs, want, ans)
+}
+
+func checkDiffs(t *testing.T, before string, diffs []Diff, after string) {
+	t.Helper()
+	var ans strings.Builder
+	sofar := 0 // index of position in before
+	for _, d := range diffs {
+		if sofar < d.Start {
+			ans.WriteString(before[sofar:d.Start])
+		}
+		ans.WriteString(d.Text)
+		sofar = d.End
+	}
+	ans.WriteString(before[sofar:])
+	if ans.String() != after {
+		t.Fatalf("diff %v took %q to %q, not to %q", diffs, before, ans.String(), after)
+	}
+}
+
+func lcslen(l lcs) int {
+	ans := 0
+	for _, d := range l {
+		ans += int(d.Len)
+	}
+	return ans
+}
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+	src := []rune(s)
+	x := make([]rune, n)
+	for i := 0; i < n; i++ {
+		x[i] = src[rand.Intn(len(src))]
+	}
+	return string(x)
+}
+
+func TestLcsFix(t *testing.T) {
+	tests := []struct{ before, after lcs }{
+		{lcs{diag{0, 0, 3}, diag{2, 2, 5}, diag{3, 4, 5}, diag{8, 9, 4}}, lcs{diag{0, 0, 2}, diag{2, 2, 1}, diag{3, 4, 5}, diag{8, 9, 4}}},
+		{lcs{diag{1, 1, 6}, diag{6, 12, 3}}, lcs{diag{1, 1, 5}, diag{6, 12, 3}}},
+		{lcs{diag{0, 0, 4}, diag{3, 5, 4}}, lcs{diag{0, 0, 3}, diag{3, 5, 4}}},
+		{lcs{diag{0, 20, 1}, diag{0, 0, 3}, diag{1, 20, 4}}, lcs{diag{0, 0, 3}, diag{3, 22, 2}}},
+		{lcs{diag{0, 0, 4}, diag{1, 1, 2}}, lcs{diag{0, 0, 4}}},
+		{lcs{diag{0, 0, 4}}, lcs{diag{0, 0, 4}}},
+		{lcs{}, lcs{}},
+		{lcs{diag{0, 0, 4}, diag{1, 1, 6}, diag{3, 3, 2}}, lcs{diag{0, 0, 1}, diag{1, 1, 6}}},
+	}
+	for n, x := range tests {
+		got := x.before.fix()
+		if len(got) != len(x.after) {
+			t.Errorf("got %v, expected %v, for %v", got, x.after, x.before)
+		}
+		olen := lcslen(x.after)
+		glen := lcslen(got)
+		if olen != glen {
+			t.Errorf("%d: lens(%d,%d) differ, %v, %v, %v", n, glen, olen, got, x.after, x.before)
+		}
+	}
+}
diff --git a/internal/diff/lcs/doc.go b/internal/diff/lcs/doc.go
new file mode 100644
index 0000000..dc779f3
--- /dev/null
+++ b/internal/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lcs contains code to find longest-common-subsequences
+// (and diffs).
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   b      |             |             |   ___/‾‾‾   |   ___/‾‾‾   |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   c      |             |             |             |             |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+                 a             a             b             b             a             a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba"
+          ⊙   -------   ⊙   -------   ⊙   -------(3/3,6)-------   ⊙   -------(3/5,6)-------(4/6,6)
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------(2/3,5)-------   ⊙   -------   ⊙   -------   ⊙
+   b      |             |             |   ___/‾‾‾   |   ___/‾‾‾   |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------(3/5,4)-------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------(1/2,3)-------(2/3,3)-------   ⊙   -------   ⊙   -------   ⊙
+   c      |             |             |             |             |             |             |
+          ⊙   -------   ⊙   -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2)
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+                 a             a             b             b             a             a
+
+The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical
+to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected,
+there are 4 non-diagonal steps, and the diagonals form an LCS.
+
+There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon):
+A:"aabbaa", B:"aacaba"
+            ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+            ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------(:0/5,5)--------    ⊙
+    b       |               |               |   ____/‾‾‾    |   ____/‾‾‾    |               |               |
+            ⊙   --------    ⊙   --------    ⊙   --------(:1/3,4)--------    ⊙   --------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:3/0,3)--------(:2/1,3)--------    ⊙   --------(:2/3,3)--------(:1/4,3)--------    ⊙   --------    ⊙
+    c       |               |               |               |               |               |               |
+            ⊙   --------    ⊙   --------    ⊙   --------(:3/3,2)--------(:2/4,2)--------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:3/0,1)--------    ⊙   --------    ⊙   --------    ⊙   --------(:3/4,1)--------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:4/0,0)--------    ⊙   --------    ⊙   --------    ⊙   --------(:4/4,0)--------    ⊙   --------    ⊙
+                    a               a               b               b               a               a
+
+Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the
+front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short.
+We want to control how big D can be, by stopping when it gets too large. The forward algorithm then
+privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable
+asymmetry.
+
+Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in
+the edit graph look like.
+A:"aabbaa", B:"aacaba"
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    --------- (2/3,5) ---------    ⊙    --------- (:0/5,5)---------    ⊙
+    b        |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |
+             ⊙    ---------    ⊙    ---------    ⊙    --------- (:1/3,4)---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)---------    ⊙    ---------    ⊙
+    c        |                 |                 |                 |                 |                 |                 |
+             ⊙    ---------    ⊙    --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+                      a                 a                 b                 b                 a                 a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edit graph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edit graph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers' paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
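
As a concrete sketch of the entry point (Compute is defined in old.go below; this example is not part of the CL), the worked A:"aabbaa", B:"aacaba" case above can be run like so:

    package lcs

    import "fmt"

    // exampleCompute runs the two-sided algorithm on the example from the
    // text above and prints the resulting Diffs.
    func exampleCompute() {
        a, b := []byte("aabbaa"), []byte("aacaba")
        diffs, _ := Compute(a, b, 0) // limit <= 0 means no limit on D
        for _, d := range diffs {
            // Each Diff replaces a[d.Start:d.End] with d.Text.
            fmt.Printf("a[%d:%d] -> %q\n", d.Start, d.End, d.Text)
        }
    }
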
diff --git a/internal/diff/lcs/git.sh b/internal/diff/lcs/git.sh
new file mode 100644
index 0000000..caa4c42
--- /dev/null
+++ b/internal/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
+
+set -eu
+
+# WARNING: This script will install the latest version of $file,
+# the largest real source file in the x/tools repo.
+# file=internal/lsp/source/completion/completion.go
+# file=internal/lsp/source/diagnostics.go
+file=internal/lsp/protocol/tsprotocol.go
+
+tmp=$(mktemp -d)
+git log $file |
+  awk '/^commit / {print $2}' |
+  nl -ba -nrz |
+  while read n hash; do
+    git checkout --quiet $hash $file
+    cp -f $file $tmp/$n
+  done
+(cd $tmp && zip -q - *) > testdata.zip
+rm -fr $tmp
+git restore --staged $file
+git restore $file
+echo "Created testdata.zip"
diff --git a/internal/diff/lcs/labels.go b/internal/diff/lcs/labels.go
new file mode 100644
index 0000000..0689f1e
--- /dev/null
+++ b/internal/diff/lcs/labels.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"fmt"
+)
+
+// A label stores the x coordinates of the ends of D-paths: for each D, vec[D] has length D+1,
+// and the label for (D, k) is stored in vec[D][(D+k)/2].
+type label struct {
+	vec [][]int
+}
+
+// debug enables temporary checking. DO NOT COMMIT true TO PRODUCTION CODE
+const debug = false
+
+// debugging. check that the (d,k) pair is valid
+// (that is, -d<=k<=d and d+k even)
+func checkDK(D, k int) {
+	if k >= -D && k <= D && (D+k)%2 == 0 {
+		return
+	}
+	panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k))
+}
+
+func (t *label) set(D, k, x int) {
+	if debug {
+		checkDK(D, k)
+	}
+	for len(t.vec) <= D {
+		t.vec = append(t.vec, nil)
+	}
+	if t.vec[D] == nil {
+		t.vec[D] = make([]int, D+1)
+	}
+	t.vec[D][(D+k)/2] = x // known that D+k is even
+}
+
+func (t *label) get(d, k int) int {
+	if debug {
+		checkDK(d, k)
+	}
+	return int(t.vec[d][(d+k)/2])
+}
+
+func newtriang(limit int) label {
+	if limit < 100 {
+		// Preallocate if limit is not large.
+		return label{vec: make([][]int, limit)}
+	}
+	return label{}
+}
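
A small sketch (not part of this CL) of the (D, k) indexing described above:

    package lcs

    // labelSketch stores and retrieves a few labels; the entry for (D, k)
    // lives at vec[D][(D+k)/2], so D+k must be even and -D <= k <= D.
    func labelSketch() {
        var t label
        t.set(2, -2, 5) // stored in t.vec[2][0]
        t.set(2, 0, 7)  // stored in t.vec[2][1]
        t.set(2, 2, 9)  // stored in t.vec[2][2]
        _ = t.get(2, 0) // 7
    }
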
diff --git a/internal/diff/lcs/old.go b/internal/diff/lcs/old.go
new file mode 100644
index 0000000..a091edd
--- /dev/null
+++ b/internal/diff/lcs/old.go
@@ -0,0 +1,530 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Non-generic code. The names have Old at the end to indicate they are
+// the implementation that doesn't use generics.
+
+// Compute computes the Diffs and the lcs; limit bounds D (limit <= 0 means no limit).
+func Compute(a, b interface{}, limit int) ([]Diff, lcs) {
+	var ans lcs
+	g := newegraph(a, b, limit)
+	ans = g.twosided()
+	diffs := g.fromlcs(ans)
+	return diffs, ans
+}
+
+// editGraph carries the information for computing the lcs for []byte, []rune, or []string.
+type editGraph struct {
+	eq     eq    // how to compare elements of A, B, and convert slices to strings
+	vf, vb label // forward and backward labels
+
+	limit int // maximal value of D
+	// the bounding rectangle of the current edit graph
+	lx, ly, ux, uy int
+	delta          int // common subexpression: (ux-lx)-(uy-ly)
+}
+
+// abstraction in place of generic
+type eq interface {
+	eq(i, j int) bool
+	substr(i, j int) string // string from b[i:j]
+	lena() int
+	lenb() int
+}
+
+type byteeq struct {
+	a, b []byte // the input was ascii. perhaps these could be strings
+}
+
+func (x *byteeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *byteeq) substr(i, j int) string { return string(x.b[i:j]) }
+func (x *byteeq) lena() int              { return int(len(x.a)) }
+func (x *byteeq) lenb() int              { return int(len(x.b)) }
+
+type runeeq struct {
+	a, b []rune
+}
+
+func (x *runeeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *runeeq) substr(i, j int) string { return string(x.b[i:j]) }
+func (x *runeeq) lena() int              { return int(len(x.a)) }
+func (x *runeeq) lenb() int              { return int(len(x.b)) }
+
+type lineeq struct {
+	a, b []string
+}
+
+func (x *lineeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *lineeq) substr(i, j int) string { return strings.Join(x.b[i:j], "") }
+func (x *lineeq) lena() int              { return int(len(x.a)) }
+func (x *lineeq) lenb() int              { return int(len(x.b)) }
+
+func neweq(a, b interface{}) eq {
+	switch x := a.(type) {
+	case []byte:
+		return &byteeq{a: x, b: b.([]byte)}
+	case []rune:
+		return &runeeq{a: x, b: b.([]rune)}
+	case []string:
+		return &lineeq{a: x, b: b.([]string)}
+	default:
+		panic(fmt.Sprintf("unexpected type %T in neweq", x))
+	}
+}
+
+func (g *editGraph) fromlcs(lcs lcs) []Diff {
+	var ans []Diff
+	var pa, pb int // offsets in a, b
+	for _, l := range lcs {
+		if pa < l.X && pb < l.Y {
+			ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)})
+		} else if pa < l.X {
+			ans = append(ans, Diff{pa, l.X, ""})
+		} else if pb < l.Y {
+			ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)})
+		}
+		pa = l.X + l.Len
+		pb = l.Y + l.Len
+	}
+	if pa < g.eq.lena() && pb < g.eq.lenb() {
+		ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())})
+	} else if pa < g.eq.lena() {
+		ans = append(ans, Diff{pa, g.eq.lena(), ""})
+	} else if pb < g.eq.lenb() {
+		ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())})
+	}
+	return ans
+}
+
+func newegraph(a, b interface{}, limit int) *editGraph {
+	if limit <= 0 {
+		limit = 1 << 25 // effectively infinity
+	}
+	var alen, blen int
+	switch a := a.(type) {
+	case []byte:
+		alen, blen = len(a), len(b.([]byte))
+	case []rune:
+		alen, blen = len(a), len(b.([]rune))
+	case []string:
+		alen, blen = len(a), len(b.([]string))
+	default:
+		panic(fmt.Sprintf("unexpected type %T in newegraph", a))
+	}
+	ans := &editGraph{eq: neweq(a, b), vf: newtriang(limit), vb: newtriang(limit), limit: int(limit),
+		ux: alen, uy: blen, delta: alen - blen}
+	return ans
+}
+
+// --- FORWARD ---
+// fdone decides if the forward path has reached the upper right
+// corner of the rectangle. If so, it also returns the computed lcs.
+func (e *editGraph) fdone(D, k int) (bool, lcs) {
+	// x, y, k are relative to the rectangle
+	x := e.vf.get(D, k)
+	y := x - k
+	if x == e.ux && y == e.uy {
+		return true, e.forwardlcs(D, k)
+	}
+	return false, nil
+}
+
+// run the forward algorithm, until success or up to the limit on D.
+func (e *editGraph) forward() lcs {
+	e.setForward(0, 0, e.lx)
+	if ok, ans := e.fdone(0, 0); ok {
+		return ans
+	}
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+		if ok, ans := e.fdone(D+1, -(D + 1)); ok {
+			return ans
+		}
+		e.setForward(D+1, D+1, e.getForward(D, D)+1)
+		if ok, ans := e.fdone(D+1, D+1); ok {
+			return ans
+		}
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get backwards
+			lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+			lookh := e.lookForward(k, e.getForward(D, k+1))
+			if lookv > lookh {
+				e.setForward(D+1, k, lookv)
+			} else {
+				e.setForward(D+1, k, lookh)
+			}
+			if ok, ans := e.fdone(D+1, k); ok {
+				return ans
+			}
+		}
+	}
+	// D is too large
+	// find the D path with maximal x+y inside the rectangle and
+	// use that to compute the found part of the lcs
+	kmax := -e.limit - 1
+	diagmax := -1
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getForward(e.limit, k)
+		y := x - k
+		if x+y > diagmax && x <= e.ux && y <= e.uy {
+			diagmax, kmax = x+y, k
+		}
+	}
+	return e.forwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking from the farthest point reached
+func (e *editGraph) forwardlcs(D, k int) lcs {
+	var ans lcs
+	for x := e.getForward(D, k); x != 0 || x-k != 0; {
+		if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) {
+			// if (x-1,y) is labelled D-1, x--,D--,k--,continue
+			D, k, x = D-1, k-1, x-1
+			continue
+		} else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) {
+			// if (x,y-1) is labelled D-1, x, D--,k++, continue
+			D, k = D-1, k+1
+			continue
+		}
+		// if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue
+		y := x - k
+		realx, realy := x+e.lx, y+e.ly
+		if e.eq.eq(realx-1, realy-1) {
+			ans = prependlcs(ans, realx-1, realy-1)
+			x--
+		} else {
+			panic("broken path")
+		}
+	}
+	return ans
+}
+
+// start at (x,y), go up the diagonal as far as possible,
+// and label the result with d
+func (e *editGraph) lookForward(k, relx int) int {
+	rely := relx - k
+	x, y := relx+e.lx, rely+e.ly
+	for x < e.ux && y < e.uy && e.eq.eq(x, y) {
+		x++
+		y++
+	}
+	return x
+}
+
+func (e *editGraph) setForward(d, k, relx int) {
+	x := e.lookForward(k, relx)
+	e.vf.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getForward(d, k int) int {
+	x := e.vf.get(d, k)
+	return x
+}
+
+// --- BACKWARD ---
+// bdone decides if the backward path has reached the lower left corner
+func (e *editGraph) bdone(D, k int) (bool, lcs) {
+	// x, y, k are relative to the rectangle
+	x := e.vb.get(D, k)
+	y := x - (k + e.delta)
+	if x == 0 && y == 0 {
+		return true, e.backwardlcs(D, k)
+	}
+	return false, nil
+}
+
+// run the backward algorithm, until success or up to the limit on D.
+func (e *editGraph) backward() lcs {
+	e.setBackward(0, 0, e.ux)
+	if ok, ans := e.bdone(0, 0); ok {
+		return ans
+	}
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		if ok, ans := e.bdone(D+1, -(D + 1)); ok {
+			return ans
+		}
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		if ok, ans := e.bdone(D+1, D+1); ok {
+			return ans
+		}
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+			if ok, ans := e.bdone(D+1, k); ok {
+				return ans
+			}
+		}
+	}
+
+	// D is too large
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the part of the lcs found
+	kmax := -e.limit - 1
+	diagmin := 1 << 25
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no paths when limit=%d?", e.limit))
+	}
+	return e.backwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking
+func (e *editGraph) backwardlcs(D, k int) lcs {
+	var ans lcs
+	for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; {
+		if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) {
+			// D--, k--, x unchanged
+			D, k = D-1, k-1
+			continue
+		} else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) {
+			// D--, k++, x++
+			D, k, x = D-1, k+1, x+1
+			continue
+		}
+		y := x - (k + e.delta)
+		realx, realy := x+e.lx, y+e.ly
+		if e.eq.eq(realx, realy) {
+			ans = appendlcs(ans, realx, realy)
+			x++
+		} else {
+			panic("broken path")
+		}
+	}
+	return ans
+}
+
+// start at (x,y), go down the diagonal as far as possible.
+func (e *editGraph) lookBackward(k, relx int) int {
+	rely := relx - (k + e.delta) // forward k = k + e.delta
+	x, y := relx+e.lx, rely+e.ly
+	for x > 0 && y > 0 && e.eq.eq(x-1, y-1) {
+		x--
+		y--
+	}
+	return x
+}
+
+// convert to rectangle, and label the result with d
+func (e *editGraph) setBackward(d, k, relx int) {
+	x := e.lookBackward(k, relx)
+	e.vb.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getBackward(d, k int) int {
+	x := e.vb.get(d, k)
+	return x
+}
+
+// -- TWOSIDED ---
+
+func (e *editGraph) twosided() lcs {
+	// The termination condition could be improved, as either the forward
+	// or backward pass could succeed before Myers' Lemma applies.
+// Aside from questions of efficiency (is the extra testing cost-effective?),
+	// this is more likely to matter when e.limit is reached.
+	e.setForward(0, 0, e.lx)
+	e.setBackward(0, 0, e.ux)
+
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		// just finished a backwards pass, so check
+		if got, ok := e.twoDone(D, D); ok {
+			return e.twolcs(D, D, got)
+		}
+		// do a forwards pass (D to D+1)
+		e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+		e.setForward(D+1, D+1, e.getForward(D, D)+1)
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get backwards
+			lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+			lookh := e.lookForward(k, e.getForward(D, k+1))
+			if lookv > lookh {
+				e.setForward(D+1, k, lookv)
+			} else {
+				e.setForward(D+1, k, lookh)
+			}
+		}
+		// just did a forward pass, so check
+		if got, ok := e.twoDone(D+1, D); ok {
+			return e.twolcs(D+1, D, got)
+		}
+		// do a backward pass, D to D+1
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+		}
+	}
+
+	// D too large. combine a forward and backward partial lcs
+	// first, a forward one
+	kmax := -e.limit - 1
+	diagmax := -1
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getForward(e.limit, k)
+		y := x - k
+		if x+y > diagmax && x <= e.ux && y <= e.uy {
+			diagmax, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit))
+	}
+	lcs := e.forwardlcs(e.limit, kmax)
+	// now a backward one
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the lcs
+	diagmin := 1 << 25 // infinity
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit))
+	}
+	lcs = append(lcs, e.backwardlcs(e.limit, kmax)...)
+	// These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs)
+	ans := lcs.fix()
+	return ans
+}
+
+// Does Myers' Lemma apply?
+func (e *editGraph) twoDone(df, db int) (int, bool) {
+	if (df+db+e.delta)%2 != 0 {
+		return 0, false // diagonals cannot overlap
+	}
+	kmin := -db + e.delta
+	if -df > kmin {
+		kmin = -df
+	}
+	kmax := db + e.delta
+	if df < kmax {
+		kmax = df
+	}
+	for k := kmin; k <= kmax; k += 2 {
+		x := e.vf.get(df, k)
+		u := e.vb.get(db, k-e.delta)
+		if u <= x {
+			// is it worth looking at all the other k?
+			for l := k; l <= kmax; l += 2 {
+				x := e.vf.get(df, l)
+				y := x - l
+				u := e.vb.get(db, l-e.delta)
+				v := u - l
+				if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux {
+					return l, true
+				}
+			}
+			return k, true
+		}
+	}
+	return 0, false
+}
+
+func (e *editGraph) twolcs(df, db, kf int) lcs {
+	// db==df || db+1==df
+	x := e.vf.get(df, kf)
+	y := x - kf
+	kb := kf - e.delta
+	u := e.vb.get(db, kb)
+	v := u - kf
+
+	// Myers proved there is a df-path from (0,0) to (u,v)
+	// and a db-path from (x,y) to (N,M).
+	// In the first case the overall path is the forward path
+	// to (u,v) followed by the backward path to (N,M).
+	// In the second case the overall path is the forward path from (0,0)
+	// to (x,y) followed by the backward path from (x,y) to (N,M).
+
+	// Look for some special cases to avoid computing either of these paths.
+	if x == u {
+		// "babaab" "cccaba"
+		// already patched together
+		lcs := e.forwardlcs(df, kf)
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+
+	// is (u-1,v) or (u,v-1) labelled df-1?
+	// if so, that forward df-1-path plus a horizontal or vertical edge
+	// is the df-path to (u,v), then plus the db-path to (N,M)
+	if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 {
+		//  "aabbab" "cbcabc"
+		lcs := e.forwardlcs(df-1, u-1-v)
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+	if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u {
+		//  "abaabb" "bcacab"
+		lcs := e.forwardlcs(df-1, u-(v-1))
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+
+	// The path can't possibly contribute to the lcs because it
+	// is all horizontal or vertical edges
+	if u == 0 || v == 0 || x == e.ux || y == e.uy {
+		// "abaabb" "abaaaa"
+		if u == 0 || v == 0 {
+			return e.backwardlcs(db, kb)
+		}
+		return e.forwardlcs(df, kf)
+	}
+
+	// is (x+1,y) or (x,y+1) labelled db-1?
+	if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 {
+		// "bababb" "baaabb"
+		lcs := e.backwardlcs(db-1, kb+1)
+		lcs = append(lcs, e.forwardlcs(df, kf)...)
+		return lcs.sort()
+	}
+	if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x {
+		// "abbbaa" "cabacc"
+		lcs := e.backwardlcs(db-1, kb-1)
+		lcs = append(lcs, e.forwardlcs(df, kf)...)
+		return lcs.sort()
+	}
+
+	// need to compute another path
+	// "aabbaa" "aacaba"
+	lcs := e.backwardlcs(db, kb)
+	oldx, oldy := e.ux, e.uy
+	e.ux = u
+	e.uy = v
+	lcs = append(lcs, e.forward()...)
+	e.ux, e.uy = oldx, oldy
+	return lcs.sort()
+}
diff --git a/internal/diff/lcs/old_test.go b/internal/diff/lcs/old_test.go
new file mode 100644
index 0000000..828a9d9
--- /dev/null
+++ b/internal/diff/lcs/old_test.go
@@ -0,0 +1,203 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func TestForwardOld(t *testing.T) {
+	for _, tx := range Btests {
+		lim := len(tx.a) + len(tx.b)
+		left, right := []byte(tx.a), []byte(tx.b)
+		g := newegraph(left, right, lim)
+		lcs := g.forward()
+		diffs := g.fromlcs(lcs)
+		check(t, tx.a, lcs, tx.lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+
+		g = newegraph(right, left, lim)
+		lcs = g.forward()
+		diffs = g.fromlcs(lcs)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestBackwardOld(t *testing.T) {
+	for _, tx := range Btests {
+		lim := len(tx.a) + len(tx.b)
+		left, right := []byte(tx.a), []byte(tx.b)
+		g := newegraph(left, right, lim)
+		lcs := g.backward()
+		check(t, tx.a, lcs, tx.lcs)
+		diffs := g.fromlcs(lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+
+		g = newegraph(right, left, lim)
+		lcs = g.backward()
+		diffs = g.fromlcs(lcs)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestTwosidedOld(t *testing.T) {
+	// test both (a,b) and (b,a)
+	for _, tx := range Btests {
+		left, right := []byte(tx.a), []byte(tx.b)
+		lim := len(tx.a) + len(tx.b)
+		diffs, lcs := Compute(left, right, lim)
+		check(t, tx.a, lcs, tx.lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+		diffs, lcs = Compute(right, left, lim)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestIntOld(t *testing.T) {
+	// the fill strings must avoid any characters that appear in Btests
+	lfill, rfill := "AAAAAAAAAAAA", "BBBBBBBBBBBB"
+	for _, tx := range Btests {
+		if len(tx.a) < 2 || len(tx.b) < 2 {
+			continue
+		}
+		left := []byte(tx.a + lfill)
+		right := []byte(tx.b + rfill)
+		lim := len(tx.a) + len(tx.b)
+		diffs, lcs := Compute(left, right, lim)
+		check(t, string(left), lcs, tx.lcs)
+		checkDiffs(t, string(left), diffs, string(right))
+		diffs, lcs = Compute(right, left, lim)
+		check(t, string(right), lcs, tx.lcs)
+		checkDiffs(t, string(right), diffs, string(left))
+
+		left = []byte(lfill + tx.a)
+		right = []byte(rfill + tx.b)
+		diffs, lcs = Compute(left, right, lim)
+		check(t, string(left), lcs, tx.lcs)
+		checkDiffs(t, string(left), diffs, string(right))
+		diffs, lcs = Compute(right, left, lim)
+		check(t, string(right), lcs, tx.lcs)
+		checkDiffs(t, string(right), diffs, string(left))
+	}
+}
+
+func TestSpecialOld(t *testing.T) { // needs lcs.fix
+	a := []byte("golang.org/x/tools/intern")
+	b := []byte("github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/intern")
+	diffs, lcs := Compute(a, b, 4)
+	if !lcs.valid() {
+		t.Errorf("%d,%v", len(diffs), lcs)
+	}
+}
+
+func TestRegressionOld001(t *testing.T) {
+	a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+
+	b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+	for i := 1; i < len(b); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i)) // 14 from gopls
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRegressionOld002(t *testing.T) {
+	a := "n\"\n)\n"
+	b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+	for i := 1; i <= len(b); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i))
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRegressionOld003(t *testing.T) {
+	a := "golang.org/x/hello v1.0.0\nrequire golang.org/x/unused v1"
+	b := "golang.org/x/hello v1"
+	for i := 1; i <= len(a); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i))
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRandOld(t *testing.T) {
+	rand.Seed(1)
+	for i := 0; i < 1000; i++ {
+		a := []rune(randstr("abω", 16))
+		b := []rune(randstr("abωc", 16))
+		g := newegraph(a, b, 24) // large enough to get true lcs
+		two := g.twosided()
+		forw := g.forward()
+		back := g.backward()
+		if lcslen(two) != lcslen(forw) || lcslen(forw) != lcslen(back) {
+			t.Logf("\n%v\n%v\n%v", forw, back, two)
+			t.Fatalf("%d forw:%d back:%d two:%d", i, lcslen(forw), lcslen(back), lcslen(two))
+		}
+		if !two.valid() || !forw.valid() || !back.valid() {
+			t.Errorf("check failure")
+		}
+	}
+}
+
+func BenchmarkTwoOld(b *testing.B) {
+	tests := genBench("abc", 96)
+	for i := 0; i < b.N; i++ {
+		for _, tt := range tests {
+			_, two := Compute([]byte(tt.before), []byte(tt.after), 100)
+			if !two.valid() {
+				b.Error("check failed")
+			}
+		}
+	}
+}
+
+func BenchmarkForwOld(b *testing.B) {
+	tests := genBench("abc", 96)
+	for i := 0; i < b.N; i++ {
+		for _, tt := range tests {
+			g := newegraph([]byte(tt.before), []byte(tt.after), 100)
+			forw := g.forward()
+			if !forw.valid() {
+				b.Error("check failed")
+			}
+		}
+	}
+}
+
+func genBench(set string, n int) []struct{ before, after string } {
+	// genBench returns before/after pairs for benchmarks: 24 strings of
+	// length n over set, with before and after differing in at least one
+	// position and in about 5% of positions on average.
+	rand.Seed(3)
+	var ans []struct{ before, after string }
+	for i := 0; i < 24; i++ {
+		// maybe b should have an approximately known number of diffs
+		a := randstr(set, n)
+		cnt := 0
+		bb := make([]rune, 0, n)
+		for _, r := range a {
+			if rand.Float64() < .05 {
+				cnt++
+				r = 'N'
+			}
+			bb = append(bb, r)
+		}
+		if cnt == 0 {
+			// avoid == shortcut
+			bb[n/2] = 'N'
+		}
+		ans = append(ans, struct{ before, after string }{a, string(bb)})
+	}
+	return ans
+}
diff --git a/internal/diff/myers/diff.go b/internal/diff/myers/diff.go
new file mode 100644
index 0000000..07e4028
--- /dev/null
+++ b/internal/diff/myers/diff.go
@@ -0,0 +1,205 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package myers implements the Myers diff algorithm.
+package myers
+
+import (
+	"strings"
+
+	"golang.org/x/tools/internal/diff"
+	"golang.org/x/tools/internal/span"
+)
+
+// Sources:
+// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/
+// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2
+
+func ComputeEdits(uri span.URI, before, after string) ([]diff.TextEdit, error) {
+	ops := operations(splitLines(before), splitLines(after))
+	edits := make([]diff.TextEdit, 0, len(ops))
+	for _, op := range ops {
+		s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0))
+		switch op.Kind {
+		case diff.Delete:
+			// Delete: unformatted[i1:i2] is deleted.
+			edits = append(edits, diff.TextEdit{Span: s})
+		case diff.Insert:
+			// Insert: formatted[j1:j2] is inserted at unformatted[i1:i1].
+			if content := strings.Join(op.Content, ""); content != "" {
+				edits = append(edits, diff.TextEdit{Span: s, NewText: content})
+			}
+		}
+	}
+	return edits, nil
+}
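+
+// A minimal usage sketch (hypothetical caller; the file names are assumptions):
+//
+//	uri := span.URIFromPath("before.go")
+//	edits, _ := ComputeEdits(uri, before, after)
+//	fmt.Print(diff.ToUnified("before.go", "after.go", before, edits))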
+
+type operation struct {
+	Kind    diff.OpKind
+	Content []string // content from b
+	I1, I2  int      // indices of the line in a
+	J1      int      // indices of the line in b, J2 implied by len(Content)
+}
+
+// operations returns the list of operations to convert a into b, consolidating
+// operations for multiple lines and not including equal lines.
+func operations(a, b []string) []*operation {
+	if len(a) == 0 && len(b) == 0 {
+		return nil
+	}
+
+	trace, offset := shortestEditSequence(a, b)
+	snakes := backtrack(trace, len(a), len(b), offset)
+
+	M, N := len(a), len(b)
+
+	var i int
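+	// solution is sized for the worst case, in which every line of a is
+	// deleted and every line of b is inserted.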
+	solution := make([]*operation, len(a)+len(b))
+
+	add := func(op *operation, i2, j2 int) {
+		if op == nil {
+			return
+		}
+		op.I2 = i2
+		if op.Kind == diff.Insert {
+			op.Content = b[op.J1:j2]
+		}
+		solution[i] = op
+		i++
+	}
+	x, y := 0, 0
+	for _, snake := range snakes {
+		if len(snake) < 2 {
+			continue
+		}
+		var op *operation
+		// delete (horizontal)
+		for snake[0]-snake[1] > x-y {
+			if op == nil {
+				op = &operation{
+					Kind: diff.Delete,
+					I1:   x,
+					J1:   y,
+				}
+			}
+			x++
+			if x == M {
+				break
+			}
+		}
+		add(op, x, y)
+		op = nil
+		// insert (vertical)
+		for snake[0]-snake[1] < x-y {
+			if op == nil {
+				op = &operation{
+					Kind: diff.Insert,
+					I1:   x,
+					J1:   y,
+				}
+			}
+			y++
+		}
+		add(op, x, y)
+		op = nil
+		// equal (diagonal)
+		for x < snake[0] {
+			x++
+			y++
+		}
+		if x >= M && y >= N {
+			break
+		}
+	}
+	return solution[:i]
+}
+
+// backtrack uses the trace for the edit sequence computation and returns the
+// "snakes" that make up the solution. A "snake" is a single deletion or
+// insertion followed by zero or more diagonals.
+func backtrack(trace [][]int, x, y, offset int) [][]int {
+	snakes := make([][]int, len(trace))
+	d := len(trace) - 1
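+	// Walk the trace backwards from the final d, recording one snake
+	// endpoint per edit distance.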
+	for ; x > 0 && y > 0 && d > 0; d-- {
+		V := trace[d]
+		if len(V) == 0 {
+			continue
+		}
+		snakes[d] = []int{x, y}
+
+		k := x - y
+
+		var kPrev int
+		if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+			kPrev = k + 1
+		} else {
+			kPrev = k - 1
+		}
+
+		x = V[kPrev+offset]
+		y = x - kPrev
+	}
+	if x < 0 || y < 0 {
+		return snakes
+	}
+	snakes[d] = []int{x, y}
+	return snakes
+}
+
+// shortestEditSequence returns the shortest edit sequence that converts a into b.
+func shortestEditSequence(a, b []string) ([][]int, int) {
+	M, N := len(a), len(b)
+	V := make([]int, 2*(N+M)+1)
+	offset := N + M
+	trace := make([][]int, N+M+1)
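+	// V is indexed by k+offset so that diagonals k in [-(N+M), N+M] map to
+	// non-negative slice indexes; trace stores a snapshot of V for each d.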
+
+	// Iterate through the maximum possible length of the SES (N+M).
+	for d := 0; d <= N+M; d++ {
+		copyV := make([]int, len(V))
+		// k lines are represented by the equation y = x - k. We move in
+		// increments of 2 because end points for even d are on even k lines.
+		for k := -d; k <= d; k += 2 {
+			// At each point, we either go down or to the right. We go down if
+			// k == -d, and we go to the right if k == d. We also prioritize
+			// the maximum x value, because we prefer deletions to insertions.
+			var x int
+			if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+				x = V[k+1+offset] // down
+			} else {
+				x = V[k-1+offset] + 1 // right
+			}
+
+			y := x - k
+
+			// Diagonal moves while we have equal contents.
+			for x < M && y < N && a[x] == b[y] {
+				x++
+				y++
+			}
+
+			V[k+offset] = x
+
+			// Return once we've reached the ends of both inputs.
+			if x == M && y == N {
+				// Make sure to save the state of the array before returning.
+				copy(copyV, V)
+				trace[d] = copyV
+				return trace, offset
+			}
+		}
+
+		// Save the state of the array.
+		copy(copyV, V)
+		trace[d] = copyV
+	}
+	return nil, 0
+}
+
+func splitLines(text string) []string {
+	lines := strings.SplitAfter(text, "\n")
+	if lines[len(lines)-1] == "" {
+		lines = lines[:len(lines)-1]
+	}
+	return lines
+}
diff --git a/internal/diff/myers/diff_test.go b/internal/diff/myers/diff_test.go
new file mode 100644
index 0000000..f244455
--- /dev/null
+++ b/internal/diff/myers/diff_test.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package myers_test
+
+import (
+	"testing"
+
+	"golang.org/x/tools/internal/diff/difftest"
+	"golang.org/x/tools/internal/diff/myers"
+)
+
+func TestDiff(t *testing.T) {
+	difftest.DiffTest(t, myers.ComputeEdits)
+}
diff --git a/internal/diff/ndiff.go b/internal/diff/ndiff.go
new file mode 100644
index 0000000..e537b72
--- /dev/null
+++ b/internal/diff/ndiff.go
@@ -0,0 +1,130 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/tools/internal/diff/lcs"
+	"golang.org/x/tools/internal/span"
+)
+
+// maxDiffs is a limit on how deeply the lcs algorithm should search;
+// the value is just a guess.
+const maxDiffs = 30
+
+// NComputeEdits computes TextEdits for strings.
+// (Both it and the implementation in the myers package have type ComputeEdits,
+// which is why the arguments are strings, not []byte.)
+func NComputeEdits(uri span.URI, before, after string) ([]TextEdit, error) {
+	if before == after {
+		// very frequently true
+		return nil, nil
+	}
+	// The diffs returned by the lcs package use indexes into whatever slice
+	// was passed in. TextEdits need a span.Span, which is computed with
+	// byte offsets, so rune or line offsets need to be converted.
+	if needrunes(before) || needrunes(after) {
+		diffs, _ := lcs.Compute([]rune(before), []rune(after), maxDiffs/2)
+		diffs = runeOffsets(diffs, []rune(before))
+		ans, err := convertDiffs(uri, diffs, []byte(before))
+		return ans, err
+	} else {
+		diffs, _ := lcs.Compute([]byte(before), []byte(after), maxDiffs/2)
+		ans, err := convertDiffs(uri, diffs, []byte(before))
+		return ans, err
+	}
+}
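+
+// A minimal usage sketch (hypothetical caller; the file name is an assumption):
+//
+//	uri := span.URIFromPath("f.go")
+//	edits, err := NComputeEdits(uri, before, after)
+//	if err == nil {
+//		fmt.Print(ToUnified("f.go", "f.go", before, edits))
+//	}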
+
+// NComputeLineEdits computes TextEdits for slices of lines ([]string).
+func NComputeLineEdits(uri span.URI, before, after []string) ([]TextEdit, error) {
+	diffs, _ := lcs.Compute(before, after, maxDiffs/2)
+	diffs = lineOffsets(diffs, before)
+	ans, err := convertDiffs(uri, diffs, []byte(strJoin(before)))
+	// Note: the code does not cope with a possible missing \n at the ends of the inputs.
+	return ans, err
+}
+
+// convertDiffs converts diffs with byte offsets into TextEdits with line and column positions.
+func convertDiffs(uri span.URI, diffs []lcs.Diff, src []byte) ([]TextEdit, error) {
+	ans := make([]TextEdit, len(diffs))
+	tf := span.NewTokenFile(uri.Filename(), src)
+	for i, d := range diffs {
+		s := newSpan(uri, d.Start, d.End)
+		s, err := s.WithPosition(tf)
+		if err != nil {
+			return nil, err
+		}
+		ans[i] = TextEdit{s, d.Text}
+	}
+	return ans, nil
+}
+
+// convert diffs with rune offsets into diffs with byte offsets
+func runeOffsets(diffs []lcs.Diff, src []rune) []lcs.Diff {
+	var idx int
+	var tmp strings.Builder // string because []byte([]rune) is illegal
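+	// tmp accumulates the UTF-8 encoding of the prefix of src seen so far,
+	// so tmp.Len() is the byte offset corresponding to a rune offset.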
+	for i, d := range diffs {
+		tmp.WriteString(string(src[idx:d.Start]))
+		v := tmp.Len()
+		tmp.WriteString(string(src[d.Start:d.End]))
+		d.Start = v
+		idx = d.End
+		d.End = tmp.Len()
+		diffs[i] = d
+	}
+	return diffs
+}
+
+// convert diffs with line offsets into diffs with byte offsets
+func lineOffsets(diffs []lcs.Diff, src []string) []lcs.Diff {
+	var idx int
+	var tmp strings.Builder // accumulates the joined lines as bytes
+	for i, d := range diffs {
+		tmp.WriteString(strJoin(src[idx:d.Start]))
+		v := tmp.Len()
+		tmp.WriteString(strJoin(src[d.Start:d.End]))
+		d.Start = v
+		idx = d.End
+		d.End = tmp.Len()
+		diffs[i] = d
+	}
+	return diffs
+}
+
+// strJoin joins the lines into one string. (strings.Join is not used because
+// it doesn't add a trailing separator.)
+func strJoin(elems []string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+	n := 0
+	for i := 0; i < len(elems); i++ {
+		n += len(elems[i])
+	}
+
+	var b strings.Builder
+	b.Grow(n)
+	for _, s := range elems {
+		b.WriteString(s)
+		//b.WriteByte('\n')
+	}
+	return b.String()
+}
+
+func newSpan(uri span.URI, left, right int) span.Span {
+	return span.New(uri, span.NewPoint(0, 0, left), span.NewPoint(0, 0, right))
+}
+
+// needrunes reports whether the string must be converted to []rune
+// for random access, i.e. whether it contains any non-ASCII bytes.
+func needrunes(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/diff/unified.go b/internal/diff/unified.go
new file mode 100644
index 0000000..f9eaf4c
--- /dev/null
+++ b/internal/diff/unified.go
@@ -0,0 +1,222 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Unified represents a set of edits as a unified diff.
+type Unified struct {
+	// From is the name of the original file.
+	From string
+	// To is the name of the modified file.
+	To string
+	// Hunks is the set of edit hunks needed to transform the file content.
+	Hunks []*Hunk
+}
+
+// Hunk represents a contiguous set of line edits to apply.
+type Hunk struct {
+	// The line in the original source where the hunk starts.
+	FromLine int
+	// The line in the modified source where the hunk starts.
+	ToLine int
+	// The set of line based edits to apply.
+	Lines []Line
+}
+
+// Line represents a single line operation to apply as part of a Hunk.
+type Line struct {
+	// Kind is the type of line this represents: deletion, insertion, or copy.
+	Kind OpKind
+	// Content is the content of this line.
+	// For deletion it is the line being removed, for all others it is the line
+	// to put in the output.
+	Content string
+}
+
+// OpKind is used to denote the type of operation a line represents.
+type OpKind int
+
+const (
+	// Delete is the operation kind for a line that is present in the input
+	// but not in the output.
+	Delete OpKind = iota
+	// Insert is the operation kind for a line that is new in the output.
+	Insert
+	// Equal is the operation kind for a line that is the same in the input and
+	// output, often used to provide context around edited lines.
+	Equal
+)
+
+// String returns a human readable representation of an OpKind. It is not
+// intended for machine processing.
+func (k OpKind) String() string {
+	switch k {
+	case Delete:
+		return "delete"
+	case Insert:
+		return "insert"
+	case Equal:
+		return "equal"
+	default:
+		panic("unknown operation kind")
+	}
+}
+
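+// edge is the number of unchanged (equal) context lines shown at the edges of
+// a hunk; edits separated by at most gap unchanged lines are merged into a
+// single hunk.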
+const (
+	edge = 3
+	gap  = edge * 2
+)
+
+// ToUnified takes the contents of a file and a sequence of edits, and
+// calculates a unified diff that represents those edits.
+func ToUnified(fromName, toName string, content string, edits []TextEdit) Unified {
+	u := Unified{
+		From: fromName,
+		To:   toName,
+	}
+	if len(edits) == 0 {
+		return u
+	}
+	edits, partial := prepareEdits(content, edits)
+	if partial {
+		edits = lineEdits(content, edits)
+	}
+	lines := splitLines(content)
+	var h *Hunk
+	last := 0
+	toLine := 0
+	for _, edit := range edits {
+		start := edit.Span.Start().Line() - 1
+		end := edit.Span.End().Line() - 1
+		switch {
+		case h != nil && start == last:
+			// direct extension
+		case h != nil && start <= last+gap:
+			// within range of previous lines, add the joiners
+			addEqualLines(h, lines, last, start)
+		default:
+			// need to start a new hunk
+			if h != nil {
+				// add the edge to the previous hunk
+				addEqualLines(h, lines, last, last+edge)
+				u.Hunks = append(u.Hunks, h)
+			}
+			toLine += start - last
+			h = &Hunk{
+				FromLine: start + 1,
+				ToLine:   toLine + 1,
+			}
+			// add the edge to the new hunk
+			delta := addEqualLines(h, lines, start-edge, start)
+			h.FromLine -= delta
+			h.ToLine -= delta
+		}
+		last = start
+		for i := start; i < end; i++ {
+			h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]})
+			last++
+		}
+		if edit.NewText != "" {
+			for _, line := range splitLines(edit.NewText) {
+				h.Lines = append(h.Lines, Line{Kind: Insert, Content: line})
+				toLine++
+			}
+		}
+	}
+	if h != nil {
+		// add the edge to the final hunk
+		addEqualLines(h, lines, last, last+edge)
+		u.Hunks = append(u.Hunks, h)
+	}
+	return u
+}
+
+func splitLines(text string) []string {
+	lines := strings.SplitAfter(text, "\n")
+	if lines[len(lines)-1] == "" {
+		lines = lines[:len(lines)-1]
+	}
+	return lines
+}
+
+func addEqualLines(h *Hunk, lines []string, start, end int) int {
+	delta := 0
+	for i := start; i < end; i++ {
+		if i < 0 {
+			continue
+		}
+		if i >= len(lines) {
+			return delta
+		}
+		h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]})
+		delta++
+	}
+	return delta
+}
+
+// Format writes a textual representation of u to f (see the String method).
+//
+// TODO(rfindley): investigate (and possibly remove) this method. It's not
+// clear why Unified implements fmt.Formatter, since the formatting rune is not
+// used. Probably it is sufficient to only implement Stringer, but this method
+// was left here defensively.
+func (u Unified) Format(f fmt.State, r rune) {
+	fmt.Fprintf(f, "%s", u.String())
+}
+
+// String converts a unified diff to the standard textual form for that diff.
+// The output of this function can be passed to tools like patch.
+func (u Unified) String() string {
+	if len(u.Hunks) == 0 {
+		return ""
+	}
+	b := new(strings.Builder)
+	fmt.Fprintf(b, "--- %s\n", u.From)
+	fmt.Fprintf(b, "+++ %s\n", u.To)
+	for _, hunk := range u.Hunks {
+		fromCount, toCount := 0, 0
+		for _, l := range hunk.Lines {
+			switch l.Kind {
+			case Delete:
+				fromCount++
+			case Insert:
+				toCount++
+			default:
+				fromCount++
+				toCount++
+			}
+		}
+		fmt.Fprint(b, "@@")
+		if fromCount > 1 {
+			fmt.Fprintf(b, " -%d,%d", hunk.FromLine, fromCount)
+		} else {
+			fmt.Fprintf(b, " -%d", hunk.FromLine)
+		}
+		if toCount > 1 {
+			fmt.Fprintf(b, " +%d,%d", hunk.ToLine, toCount)
+		} else {
+			fmt.Fprintf(b, " +%d", hunk.ToLine)
+		}
+		fmt.Fprint(b, " @@\n")
+		for _, l := range hunk.Lines {
+			switch l.Kind {
+			case Delete:
+				fmt.Fprintf(b, "-%s", l.Content)
+			case Insert:
+				fmt.Fprintf(b, "+%s", l.Content)
+			default:
+				fmt.Fprintf(b, " %s", l.Content)
+			}
+			if !strings.HasSuffix(l.Content, "\n") {
+				fmt.Fprintf(b, "\n\\ No newline at end of file\n")
+			}
+		}
+	}
+	return b.String()
+}
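+
+// For illustration (content invented), replacing the middle line of a
+// three-line file produces output of the form:
+//
+//	--- old
+//	+++ new
+//	@@ -1,3 +1,3 @@
+//	 a
+//	-b
+//	+B
+//	 c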