modfile: copy from cmd/go/internal/modfile
Copied from 4be6b4a73d (CL 202565). No changes other than import paths.
Updates golang/go#34924
Updates golang/go#31761
Change-Id: Ic25cb983f6641045fc24edf76953b06d4aa5cd43
Reviewed-on: https://go-review.googlesource.com/c/mod/+/202543
Reviewed-by: Bryan C. Mills <bcmills@google.com>
diff --git a/modfile/print.go b/modfile/print.go
new file mode 100644
index 0000000..3bbea38
--- /dev/null
+++ b/modfile/print.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file printer.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// Format returns a go.mod file as a byte slice, formatted in standard style.
+func Format(f *FileSyntax) []byte {
+ pr := &printer{}
+ pr.file(f)
+ return pr.Bytes()
+}
+
// A printer collects the state during printing of a file or expression.
type printer struct {
	bytes.Buffer           // output buffer; printer writes directly into it
	comment      []Comment // pending end-of-line comments, flushed by newline
	margin       int       // left margin (indent), a number of tabs
}

// printf prints to the buffer.
// Output goes through the embedded Buffer's Write method.
func (p *printer) printf(format string, args ...interface{}) {
	fmt.Fprintf(p, format, args...)
}
+
+// indent returns the position on the current line, in bytes, 0-indexed.
+func (p *printer) indent() int {
+ b := p.Bytes()
+ n := 0
+ for n < len(b) && b[len(b)-1-n] != '\n' {
+ n++
+ }
+ return n
+}
+
// newline ends the current line, flushing end-of-line comments.
func (p *printer) newline() {
	if len(p.comment) > 0 {
		// Flush pending suffix comments: the first shares this line;
		// each additional comment gets its own line at the current margin.
		p.printf(" ")
		for i, com := range p.comment {
			if i > 0 {
				p.trim()
				p.printf("\n")
				for i := 0; i < p.margin; i++ {
					p.printf("\t")
				}
			}
			p.printf("%s", strings.TrimSpace(com.Token))
		}
		p.comment = p.comment[:0]
	}

	p.trim()
	p.printf("\n")
	// Pre-indent the next line to the current margin.
	for i := 0; i < p.margin; i++ {
		p.printf("\t")
	}
}
+
+// trim removes trailing spaces and tabs from the current line.
+func (p *printer) trim() {
+ // Remove trailing spaces and tabs from line we're about to end.
+ b := p.Bytes()
+ n := len(b)
+ for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
+ n--
+ }
+ p.Truncate(n)
+}
+
// file formats the given file into the print buffer.
func (p *printer) file(f *FileSyntax) {
	// Whole-file leading comments come first, one per line.
	for _, com := range f.Before {
		p.printf("%s", strings.TrimSpace(com.Token))
		p.newline()
	}

	for i, stmt := range f.Stmt {
		switch x := stmt.(type) {
		case *CommentBlock:
			// comments already handled
			p.expr(x)

		default:
			p.expr(x)
			p.newline()
		}

		// Whole-line comments attached after the statement.
		for _, com := range stmt.Comment().After {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}

		// Blank line between consecutive top-level statements.
		if i+1 < len(f.Stmt) {
			p.newline()
		}
	}
}
+
// expr formats a single expression (line, block, or paren) into the
// print buffer, emitting its leading comments first and queuing its
// suffix comments for the enclosing newline call.
func (p *printer) expr(x Expr) {
	// Emit line-comments preceding this expression.
	if before := x.Comment().Before; len(before) > 0 {
		// Want to print a line comment.
		// Line comments must be at the current margin.
		p.trim()
		if p.indent() > 0 {
			// There's other text on the line. Start a new line.
			p.printf("\n")
		}
		// Re-indent to margin.
		for i := 0; i < p.margin; i++ {
			p.printf("\t")
		}
		for _, com := range before {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}
	}

	switch x := x.(type) {
	default:
		// Programmer error: an Expr type this printer does not know about.
		panic(fmt.Errorf("printer: unexpected type %T", x))

	case *CommentBlock:
		// done

	case *LParen:
		p.printf("(")
	case *RParen:
		p.printf(")")

	case *Line:
		// Tokens on one line, space-separated.
		sep := ""
		for _, tok := range x.Token {
			p.printf("%s%s", sep, tok)
			sep = " "
		}

	case *LineBlock:
		// Verb tokens, then "(", indented lines, then ")".
		for _, tok := range x.Token {
			p.printf("%s ", tok)
		}
		p.expr(&x.LParen)
		p.margin++
		for _, l := range x.Line {
			p.newline()
			p.expr(l)
		}
		p.margin--
		p.newline()
		p.expr(&x.RParen)
	}

	// Queue end-of-line comments for printing when we
	// reach the end of the line.
	p.comment = append(p.comment, x.Comment().Suffix...)
}
diff --git a/modfile/read.go b/modfile/read.go
new file mode 100644
index 0000000..bfa90a5
--- /dev/null
+++ b/modfile/read.go
@@ -0,0 +1,870 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file parser.
+// This is a simplified copy of Google's buildifier parser.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
// A Position describes an arbitrary source position in a file, including the
// file, line, column, and byte offset.
type Position struct {
	Line     int // line in input (starting at 1)
	LineRune int // rune in line (starting at 1)
	Byte     int // byte in input (starting at 0)
}

// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
	p.Byte += len(s)
	if n := strings.Count(s, "\n"); n > 0 {
		p.Line += n
		// Only text after the final newline contributes to the column.
		s = s[strings.LastIndex(s, "\n")+1:]
		p.LineRune = 1
	}
	p.LineRune += utf8.RuneCountInString(s)
	return p
}
+
// An Expr represents an input element.
type Expr interface {
	// Span returns the start and end position of the expression,
	// excluding leading or trailing comments.
	Span() (start, end Position)

	// Comment returns the comments attached to the expression.
	// This method would normally be named 'Comments' but that
	// would interfere with embedding a type of the same name.
	Comment() *Comments
}

// A Comment represents a single // comment.
type Comment struct {
	Start  Position
	Token  string // without trailing newline
	Suffix bool   // an end of line (not whole line) comment
}

// Comments collects the comments associated with an expression.
type Comments struct {
	Before []Comment // whole-line comments before this expression
	Suffix []Comment // end-of-line comments after this expression

	// For top-level expressions only, After lists whole-line
	// comments following the expression.
	After []Comment
}

// Comment returns the receiver. This isn't useful by itself, but
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {
	return c
}
+
// A FileSyntax represents an entire go.mod file.
type FileSyntax struct {
	Name string // file path
	Comments
	Stmt []Expr
}

// Span returns the file's extent: from the start of its first statement
// to the end of its last. An empty file yields two zero Positions.
func (x *FileSyntax) Span() (start, end Position) {
	if len(x.Stmt) == 0 {
		return
	}
	start, _ = x.Stmt[0].Span()
	_, end = x.Stmt[len(x.Stmt)-1].Span()
	return start, end
}
+
// addLine appends a new line with the given tokens, placing it near hint
// when possible. If hint is nil, the last statement beginning with the
// same verb (tokens[0]) is used as the hint. When the hint is a plain
// Line it is first converted into a parenthesized LineBlock; when it is
// a LineBlock (or a Line inside one) the new line is inserted into that
// block. Otherwise the line is appended as a new top-level statement.
func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
	if hint == nil {
		// If no hint given, add to the last statement of the given type.
	Loop:
		for i := len(x.Stmt) - 1; i >= 0; i-- {
			stmt := x.Stmt[i]
			switch stmt := stmt.(type) {
			case *Line:
				if stmt.Token != nil && stmt.Token[0] == tokens[0] {
					hint = stmt
					break Loop
				}
			case *LineBlock:
				if stmt.Token[0] == tokens[0] {
					hint = stmt
					break Loop
				}
			}
		}
	}

	if hint != nil {
		for i, stmt := range x.Stmt {
			switch stmt := stmt.(type) {
			case *Line:
				if stmt == hint {
					// Convert line to line block.
					// The verb token stays on the block; the line keeps
					// only its remaining tokens.
					stmt.InBlock = true
					block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
					stmt.Token = stmt.Token[1:]
					x.Stmt[i] = block
					new := &Line{Token: tokens[1:], InBlock: true}
					block.Line = append(block.Line, new)
					return new
				}
			case *LineBlock:
				if stmt == hint {
					new := &Line{Token: tokens[1:], InBlock: true}
					stmt.Line = append(stmt.Line, new)
					return new
				}
				for j, line := range stmt.Line {
					if line == hint {
						// Add new line after hint.
						// Grow by one, shift the tail right, insert at j+1.
						stmt.Line = append(stmt.Line, nil)
						copy(stmt.Line[j+2:], stmt.Line[j+1:])
						new := &Line{Token: tokens[1:], InBlock: true}
						stmt.Line[j+1] = new
						return new
					}
				}
			}
		}
	}

	// No usable hint: append as a fresh top-level line.
	new := &Line{Token: tokens}
	x.Stmt = append(x.Stmt, new)
	return new
}
+
// updateLine replaces line's tokens with the given tokens.
// For a line inside a parenthesized block the leading verb
// (e.g. "require") is implied by the block, so it is dropped.
func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
	if line.InBlock {
		tokens = tokens[1:]
	}
	line.Token = tokens
}

// removeLine marks line as dead by clearing its tokens. The line is
// not spliced out of its slice here; Cleanup sweeps dead lines later
// to avoid quadratic removal cost.
func (x *FileSyntax) removeLine(line *Line) {
	line.Token = nil
}
+
// Cleanup cleans up the file syntax x after any edit operations.
// To avoid quadratic behavior, removeLine marks the line as dead
// by setting line.Token = nil but does not remove it from the slice
// in which it appears. After edits have all been indicated,
// calling Cleanup cleans out the dead lines.
func (x *FileSyntax) Cleanup() {
	// In-place filter: w is the write index for surviving statements.
	w := 0
	for _, stmt := range x.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if stmt.Token == nil {
				// Dead line: drop it.
				continue
			}
		case *LineBlock:
			// Filter dead lines inside the block, also in place.
			ww := 0
			for _, line := range stmt.Line {
				if line.Token != nil {
					stmt.Line[ww] = line
					ww++
				}
			}
			if ww == 0 {
				// Block emptied out entirely: drop it.
				continue
			}
			if ww == 1 {
				// Collapse block into single line.
				// Merge the block's verb token and comments with the
				// surviving line's tokens and comments.
				line := &Line{
					Comments: Comments{
						Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
						Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
						After:  commentsAdd(stmt.Line[0].After, stmt.After),
					},
					Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
				}
				x.Stmt[w] = line
				w++
				continue
			}
			stmt.Line = stmt.Line[:ww]
		}
		x.Stmt[w] = stmt
		w++
	}
	x.Stmt = x.Stmt[:w]
}
+
+func commentsAdd(x, y []Comment) []Comment {
+ return append(x[:len(x):len(x)], y...)
+}
+
+func stringsAdd(x, y []string) []string {
+ return append(x[:len(x):len(x)], y...)
+}
+
// A CommentBlock represents a top-level block of comments separate
// from any rule.
type CommentBlock struct {
	Comments
	Start Position
}

// Span returns the block's position. A comment block has no token
// extent of its own, so start and end coincide.
func (x *CommentBlock) Span() (start, end Position) {
	return x.Start, x.Start
}

// A Line is a single line of tokens.
type Line struct {
	Comments
	Start   Position
	Token   []string
	InBlock bool // line appears inside a parenthesized LineBlock
	End     Position
}

// Span returns the start and end positions of the line's tokens.
func (x *Line) Span() (start, end Position) {
	return x.Start, x.End
}
+
// A LineBlock is a factored block of lines, like
//
//	require (
//		"x"
//		"y"
//	)
//
type LineBlock struct {
	Comments
	Start  Position
	LParen LParen
	Token  []string
	Line   []*Line
	RParen RParen
}

// Span runs from the block's verb to just past the closing paren.
func (x *LineBlock) Span() (start, end Position) {
	return x.Start, x.RParen.Pos.add(")")
}

// An LParen represents the beginning of a parenthesized line block.
// It is a place to store suffix comments.
type LParen struct {
	Comments
	Pos Position
}

// Span covers the single paren character.
func (x *LParen) Span() (start, end Position) {
	return x.Pos, x.Pos.add(")")
}

// An RParen represents the end of a parenthesized line block.
// It is a place to store whole-line (before) comments.
type RParen struct {
	Comments
	Pos Position
}

// Span covers the single paren character.
func (x *RParen) Span() (start, end Position) {
	return x.Pos, x.Pos.add(")")
}
+
// An input represents a single input file being parsed.
type input struct {
	// Lexing state.
	filename  string    // name of input file, for errors
	complete  []byte    // entire input
	remaining []byte    // remaining input
	token     []byte    // token being scanned
	lastToken string    // most recently returned token, for error messages
	pos       Position  // current input position
	comments  []Comment // accumulated comments
	endRule   int       // position of end of current rule
	// NOTE(review): endRule is never referenced elsewhere in this file;
	// it looks like a holdover from the buildifier parser this was
	// copied from — confirm before removing.

	// Parser state.
	file       *FileSyntax // returned top-level syntax tree
	parseError error       // error encountered during parsing

	// Comment assignment state.
	pre  []Expr // all expressions, in preorder traversal
	post []Expr // all expressions, in postorder traversal
}

// newInput returns an input primed to lex data as the named file.
func newInput(filename string, data []byte) *input {
	return &input{
		filename:  filename,
		complete:  data,
		remaining: data,
		pos:       Position{Line: 1, LineRune: 1, Byte: 0},
	}
}
+
// parse parses the input file.
func parse(file string, data []byte) (f *FileSyntax, err error) {
	in := newInput(file, data)
	// The parser panics for both routine errors like syntax errors
	// and for programmer bugs like array index errors.
	// Turn both into error returns. Catching bug panics is
	// especially important when processing many files.
	defer func() {
		if e := recover(); e != nil {
			if e == in.parseError {
				// Routine error raised via in.Error: return it as-is.
				err = in.parseError
			} else {
				// Unexpected panic: wrap with position for debugging.
				err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
			}
		}
	}()

	// Invoke the parser.
	in.parseFile()
	if in.parseError != nil {
		return nil, in.parseError
	}
	in.file.Name = in.filename

	// Assign comments to nearby syntax.
	in.assignComments()

	return in.file, nil
}

// Error is called to report an error.
// The reason s is often "syntax error".
// Error does not return: it panics.
func (in *input) Error(s string) {
	if s == "syntax error" && in.lastToken != "" {
		s += " near " + in.lastToken
	}
	in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
	panic(in.parseError)
}
+
+// eof reports whether the input has reached end of file.
+func (in *input) eof() bool {
+ return len(in.remaining) == 0
+}
+
+// peekRune returns the next rune in the input without consuming it.
+func (in *input) peekRune() int {
+ if len(in.remaining) == 0 {
+ return 0
+ }
+ r, _ := utf8.DecodeRune(in.remaining)
+ return int(r)
+}
+
+// peekPrefix reports whether the remaining input begins with the given prefix.
+func (in *input) peekPrefix(prefix string) bool {
+ // This is like bytes.HasPrefix(in.remaining, []byte(prefix))
+ // but without the allocation of the []byte copy of prefix.
+ for i := 0; i < len(prefix); i++ {
+ if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
+ return false
+ }
+ }
+ return true
+}
+
// readRune consumes and returns the next rune in the input,
// updating the line/column/byte position as it goes.
func (in *input) readRune() int {
	if len(in.remaining) == 0 {
		in.Error("internal lexer error: readRune at EOF")
	}
	r, size := utf8.DecodeRune(in.remaining)
	in.remaining = in.remaining[size:]
	if r == '\n' {
		in.pos.Line++
		in.pos.LineRune = 1
	} else {
		in.pos.LineRune++
	}
	in.pos.Byte += size
	return int(r)
}

// symType carries the position and text of a single lexed token.
type symType struct {
	pos    Position // position where the token begins
	endPos Position // position just past the token
	text   string   // token text
}

// startToken marks the beginning of the next input token.
// It must be followed by a call to endToken, once the token has
// been consumed using readRune.
func (in *input) startToken(sym *symType) {
	in.token = in.remaining
	sym.text = ""
	sym.pos = in.pos
}

// endToken marks the end of an input token.
// It records the actual token string in sym.text if the caller
// has not done that already.
func (in *input) endToken(sym *symType) {
	if sym.text == "" {
		// Token text is everything consumed since startToken.
		tok := string(in.token[:len(in.token)-len(in.remaining)])
		sym.text = tok
		in.lastToken = sym.text
	}
	sym.endPos = in.pos
}
+
+// lex is called from the parser to obtain the next input token.
+// It returns the token value (either a rune like '+' or a symbolic token _FOR)
+// and sets val to the data associated with the token.
+// For all our input tokens, the associated data is
+// val.Pos (the position where the token begins)
+// and val.Token (the input string corresponding to the token).
+func (in *input) lex(sym *symType) int {
+ // Skip past spaces, stopping at non-space or EOF.
+ countNL := 0 // number of newlines we've skipped past
+ for !in.eof() {
+ // Skip over spaces. Count newlines so we can give the parser
+ // information about where top-level blank lines are,
+ // for top-level comment assignment.
+ c := in.peekRune()
+ if c == ' ' || c == '\t' || c == '\r' {
+ in.readRune()
+ continue
+ }
+
+ // Comment runs to end of line.
+ if in.peekPrefix("//") {
+ in.startToken(sym)
+
+ // Is this comment the only thing on its line?
+ // Find the last \n before this // and see if it's all
+ // spaces from there to here.
+ i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
+ suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
+ in.readRune()
+ in.readRune()
+
+ // Consume comment.
+ for len(in.remaining) > 0 && in.readRune() != '\n' {
+ }
+ in.endToken(sym)
+
+ sym.text = strings.TrimRight(sym.text, "\n")
+ in.lastToken = "comment"
+
+ // If we are at top level (not in a statement), hand the comment to
+ // the parser as a _COMMENT token. The grammar is written
+ // to handle top-level comments itself.
+ if !suffix {
+ // Not in a statement. Tell parser about top-level comment.
+ return _COMMENT
+ }
+
+ // Otherwise, save comment for later attachment to syntax tree.
+ if countNL > 1 {
+ in.comments = append(in.comments, Comment{sym.pos, "", false})
+ }
+ in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
+ countNL = 1
+ return _EOL
+ }
+
+ if in.peekPrefix("/*") {
+ in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
+ }
+
+ // Found non-space non-comment.
+ break
+ }
+
+ // Found the beginning of the next token.
+ in.startToken(sym)
+ defer in.endToken(sym)
+
+ // End of file.
+ if in.eof() {
+ in.lastToken = "EOF"
+ return _EOF
+ }
+
+ // Punctuation tokens.
+ switch c := in.peekRune(); c {
+ case '\n':
+ in.readRune()
+ return c
+
+ case '(':
+ in.readRune()
+ return c
+
+ case ')':
+ in.readRune()
+ return c
+
+ case '"', '`': // quoted string
+ quote := c
+ in.readRune()
+ for {
+ if in.eof() {
+ in.pos = sym.pos
+ in.Error("unexpected EOF in string")
+ }
+ if in.peekRune() == '\n' {
+ in.Error("unexpected newline in string")
+ }
+ c := in.readRune()
+ if c == quote {
+ break
+ }
+ if c == '\\' && quote != '`' {
+ if in.eof() {
+ in.pos = sym.pos
+ in.Error("unexpected EOF in string")
+ }
+ in.readRune()
+ }
+ }
+ in.endToken(sym)
+ return _STRING
+ }
+
+ // Checked all punctuation. Must be identifier token.
+ if c := in.peekRune(); !isIdent(c) {
+ in.Error(fmt.Sprintf("unexpected input character %#q", c))
+ }
+
+ // Scan over identifier.
+ for isIdent(in.peekRune()) {
+ if in.peekPrefix("//") {
+ break
+ }
+ if in.peekPrefix("/*") {
+ in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
+ }
+ in.readRune()
+ }
+ return _IDENT
+}
+
// isIdent reports whether c is an identifier rune.
// We treat nearly all runes as identifier runes: anything
// that is not EOF (0) and not white space qualifies.
func isIdent(c int) bool {
	if c == 0 {
		return false
	}
	return !unicode.IsSpace(rune(c))
}
+
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.

// order walks the expression adding it and its subexpressions to the
// preorder and postorder lists.
func (in *input) order(x Expr) {
	if x != nil {
		in.pre = append(in.pre, x)
	}
	switch x := x.(type) {
	default:
		// Programmer error: an Expr type this walker does not know about.
		panic(fmt.Errorf("order: unexpected type %T", x))
	case nil:
		// nothing
	case *LParen, *RParen:
		// nothing
	case *CommentBlock:
		// nothing
	case *Line:
		// nothing
	case *FileSyntax:
		for _, stmt := range x.Stmt {
			in.order(stmt)
		}
	case *LineBlock:
		// Parens bracket the block's lines in traversal order.
		in.order(&x.LParen)
		for _, l := range x.Line {
			in.order(l)
		}
		in.order(&x.RParen)
	}
	if x != nil {
		in.post = append(in.post, x)
	}
}
+
+// assignComments attaches comments to nearby syntax.
+func (in *input) assignComments() {
+ const debug = false
+
+ // Generate preorder and postorder lists.
+ in.order(in.file)
+
+ // Split into whole-line comments and suffix comments.
+ var line, suffix []Comment
+ for _, com := range in.comments {
+ if com.Suffix {
+ suffix = append(suffix, com)
+ } else {
+ line = append(line, com)
+ }
+ }
+
+ if debug {
+ for _, c := range line {
+ fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign line comments to syntax immediately following.
+ for _, x := range in.pre {
+ start, _ := x.Span()
+ if debug {
+ fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
+ }
+ xcom := x.Comment()
+ for len(line) > 0 && start.Byte >= line[0].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
+ }
+ xcom.Before = append(xcom.Before, line[0])
+ line = line[1:]
+ }
+ }
+
+ // Remaining line comments go at end of file.
+ in.file.After = append(in.file.After, line...)
+
+ if debug {
+ for _, c := range suffix {
+ fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign suffix comments to syntax immediately before.
+ for i := len(in.post) - 1; i >= 0; i-- {
+ x := in.post[i]
+
+ start, end := x.Span()
+ if debug {
+ fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
+ }
+
+ // Do not assign suffix comments to end of line block or whole file.
+ // Instead assign them to the last element inside.
+ switch x.(type) {
+ case *FileSyntax:
+ continue
+ }
+
+ // Do not assign suffix comments to something that starts
+ // on an earlier line, so that in
+ //
+ // x ( y
+ // z ) // comment
+ //
+ // we assign the comment to z and not to x ( ... ).
+ if start.Line != end.Line {
+ continue
+ }
+ xcom := x.Comment()
+ for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
+ }
+ xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
+ suffix = suffix[:len(suffix)-1]
+ }
+ }
+
+ // We assigned suffix comments in reverse.
+ // If multiple suffix comments were appended to the same
+ // expression node, they are now in reverse. Fix that.
+ for _, x := range in.post {
+ reverseComments(x.Comment().Suffix)
+ }
+
+ // Remaining suffix comments go at beginning of file.
+ in.file.Before = append(in.file.Before, suffix...)
+}
+
+// reverseComments reverses the []Comment list.
+func reverseComments(list []Comment) {
+ for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+ list[i], list[j] = list[j], list[i]
+ }
+}
+
// parseFile is the top-level parser loop: it consumes tokens from lex,
// grouping consecutive top-level comments into CommentBlocks and handing
// everything else to parseStmt. Comments immediately preceding a
// statement become that statement's Before comments.
func (in *input) parseFile() {
	in.file = new(FileSyntax)
	var sym symType
	var cb *CommentBlock
	for {
		tok := in.lex(&sym)
		switch tok {
		case '\n':
			// Blank line ends a pending comment block.
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
				cb = nil
			}
		case _COMMENT:
			if cb == nil {
				cb = &CommentBlock{Start: sym.pos}
			}
			com := cb.Comment()
			com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
		case _EOF:
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
			}
			return
		default:
			in.parseStmt(&sym)
			// A pending comment block directly above a statement becomes
			// that statement's leading comments rather than a free block.
			if cb != nil {
				in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
				cb = nil
			}
		}
	}
}
+
// parseStmt parses one top-level statement starting at the token in sym:
// either a single Line of tokens or, when a '(' appears, a parenthesized
// LineBlock.
func (in *input) parseStmt(sym *symType) {
	start := sym.pos
	end := sym.endPos
	token := []string{sym.text}
	for {
		tok := in.lex(sym)
		switch tok {
		case '\n', _EOF, _EOL:
			// End of statement: record the accumulated tokens as a Line.
			in.file.Stmt = append(in.file.Stmt, &Line{
				Start: start,
				Token: token,
				End:   end,
			})
			return
		case '(':
			// Statement continues as a factored block.
			in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
			return
		default:
			token = append(token, sym.text)
			end = sym.endPos
		}
	}
}

// parseLineBlock parses the interior of a parenthesized block that began
// with the given verb tokens, collecting its lines until the closing ')'.
// Comments seen between lines are attached to the following line (or to
// the closing paren when no line follows).
func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
	x := &LineBlock{
		Start:  start,
		Token:  token,
		LParen: LParen{Pos: sym.pos},
	}
	var comments []Comment
	for {
		tok := in.lex(sym)
		switch tok {
		case _EOL:
			// ignore
		case '\n':
			// Record blank lines between comments/lines as empty Comments
			// so formatting can preserve the gap.
			if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
				comments = append(comments, Comment{})
			}
		case _COMMENT:
			comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
		case _EOF:
			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
		case ')':
			x.RParen.Before = comments
			x.RParen.Pos = sym.pos
			tok = in.lex(sym)
			if tok != '\n' && tok != _EOF && tok != _EOL {
				in.Error("syntax error (expected newline after closing paren)")
			}
			return x
		default:
			l := in.parseLine(sym)
			x.Line = append(x.Line, l)
			l.Comment().Before = comments
			comments = nil
		}
	}
}

// parseLine parses a single line of tokens inside a block, starting at
// the token in sym and ending at the next newline or EOF.
func (in *input) parseLine(sym *symType) *Line {
	start := sym.pos
	end := sym.endPos
	token := []string{sym.text}
	for {
		tok := in.lex(sym)
		switch tok {
		case '\n', _EOF, _EOL:
			return &Line{
				Start:   start,
				Token:   token,
				End:     end,
				InBlock: true,
			}
		default:
			token = append(token, sym.text)
			end = sym.endPos
		}
	}
}
+
// Symbolic token codes returned by lex. The values are negative so they
// can never collide with the rune values (like '(' or '\n') that lex
// also returns.
const (
	_EOF = -(1 + iota)
	_EOL
	_IDENT
	_STRING
	_COMMENT
)

// Byte-slice literals used by ModulePath to scan go.mod text without
// re-allocating on every call.
var (
	slashSlash = []byte("//")
	moduleStr  = []byte("module")
)
+
// ModulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
func ModulePath(mod []byte) string {
	rest := mod
	for len(rest) > 0 {
		// Split off the next line.
		line := rest
		rest = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, rest = line[:i], line[i+1:]
		}
		// Discard any trailing // comment.
		if i := bytes.Index(line, []byte("//")); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		keyword := []byte("module")
		if !bytes.HasPrefix(line, keyword) {
			continue
		}
		// Require whitespace between "module" and its argument, and a
		// non-empty argument; otherwise the token is something else
		// entirely (e.g. "modulefoo").
		arg := line[len(keyword):]
		trimmed := bytes.TrimSpace(arg)
		if len(trimmed) == len(arg) || len(trimmed) == 0 {
			continue
		}

		if trimmed[0] == '"' || trimmed[0] == '`' {
			p, err := strconv.Unquote(string(trimmed))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(trimmed)
	}
	return "" // missing module path
}
diff --git a/modfile/read_test.go b/modfile/read_test.go
new file mode 100644
index 0000000..3240130
--- /dev/null
+++ b/modfile/read_test.go
@@ -0,0 +1,388 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+)
+
// exists reports whether the named file exists.
func exists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		return false
	}
	return true
}
+
// Test that reading and then writing the golden files
// does not change their output.
func TestPrintGolden(t *testing.T) {
	outs, err := filepath.Glob("testdata/*.golden")
	if err != nil {
		t.Fatal(err)
	}
	// Each golden file must round-trip to itself (in == out).
	for _, out := range outs {
		testPrint(t, out, out)
	}
}
+
// testPrint is a helper for testing the printer.
// It reads the file named in, reformats it, and compares
// the result to the file named out.
func testPrint(t *testing.T, in, out string) {
	data, err := ioutil.ReadFile(in)
	if err != nil {
		t.Error(err)
		return
	}

	golden, err := ioutil.ReadFile(out)
	if err != nil {
		t.Error(err)
		return
	}

	// Normalized name used in failure messages.
	base := "testdata/" + filepath.Base(in)
	f, err := parse(in, data)
	if err != nil {
		t.Error(err)
		return
	}

	ndata := Format(f)

	if !bytes.Equal(ndata, golden) {
		t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base)
		tdiff(t, string(golden), string(ndata))
		return
	}
}
+
// TestParseLax checks that ParseLax tolerates statements it does not
// understand (unknown verbs, stray punctuation) instead of failing.
func TestParseLax(t *testing.T) {
	badFile := []byte(`module m
		surprise attack
		x y (
			z
		)
		exclude v1.2.3
		replace <-!!!
	`)
	_, err := ParseLax("file", badFile, nil)
	if err != nil {
		t.Fatalf("ParseLax did not ignore irrelevant errors: %v", err)
	}
}
+
// Test that when files in the testdata directory are parsed
// and printed and parsed again, we get the same parse tree
// both times.
func TestPrintParse(t *testing.T) {
	outs, err := filepath.Glob("testdata/*")
	if err != nil {
		t.Fatal(err)
	}
	for _, out := range outs {
		data, err := ioutil.ReadFile(out)
		if err != nil {
			t.Error(err)
			continue
		}

		base := "testdata/" + filepath.Base(out)
		f, err := parse(base, data)
		if err != nil {
			t.Errorf("parsing original: %v", err)
			continue
		}

		// Round trip 1: low-level parse -> Format -> parse.
		ndata := Format(f)
		f2, err := parse(base, ndata)
		if err != nil {
			t.Errorf("parsing reformatted: %v", err)
			continue
		}

		eq := eqchecker{file: base}
		if err := eq.check(f, f2); err != nil {
			t.Errorf("not equal (parse/Format/parse): %v", err)
		}

		// Round trip 2: high-level Parse -> Format -> Parse.
		// Parse may legitimately reject some test inputs; only the
		// listed files are required to succeed.
		pf1, err := Parse(base, data, nil)
		if err != nil {
			switch base {
			case "testdata/replace2.in", "testdata/gopkg.in.golden":
				t.Errorf("should parse %v: %v", base, err)
			}
		}
		if err == nil {
			pf2, err := Parse(base, ndata, nil)
			if err != nil {
				t.Errorf("Parsing reformatted: %v", err)
				continue
			}
			eq := eqchecker{file: base}
			if err := eq.check(pf1, pf2); err != nil {
				t.Errorf("not equal (parse/Format/Parse): %v", err)
			}

			// Round trip 3: the File's own Format method.
			ndata2, err := pf1.Format()
			if err != nil {
				t.Errorf("reformat: %v", err)
			}
			pf3, err := Parse(base, ndata2, nil)
			if err != nil {
				t.Errorf("Parsing reformatted2: %v", err)
				continue
			}
			eq = eqchecker{file: base}
			if err := eq.check(pf1, pf3); err != nil {
				t.Errorf("not equal (Parse/Format/Parse): %v", err)
			}
			ndata = ndata2
		}

		// For .in files, the final output must match the .golden file.
		if strings.HasSuffix(out, ".in") {
			golden, err := ioutil.ReadFile(strings.TrimSuffix(out, ".in") + ".golden")
			if err != nil {
				t.Error(err)
				continue
			}
			if !bytes.Equal(ndata, golden) {
				t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base)
				tdiff(t, string(golden), string(ndata))
				return
			}
		}
	}
}
+
// An eqchecker holds state for checking the equality of two parse trees.
type eqchecker struct {
	file string   // file name reported in error messages
	pos  Position // position of the most recently visited Expr
}

// errorf returns an error described by the printf-style format and arguments,
// inserting the current file position before the error text.
func (eq *eqchecker) errorf(format string, args ...interface{}) error {
	return fmt.Errorf("%s:%d: %s", eq.file, eq.pos.Line,
		fmt.Sprintf(format, args...))
}

// check checks that v and w represent the same parse tree.
// If not, it returns an error describing the first difference.
func (eq *eqchecker) check(v, w interface{}) error {
	return eq.checkValue(reflect.ValueOf(v), reflect.ValueOf(w))
}

// Reflected types that checkValue skips: positions and comment
// attachments are allowed to differ between round trips.
var (
	posType      = reflect.TypeOf(Position{})
	commentsType = reflect.TypeOf(Comments{})
)
+
// checkValue checks that v and w represent the same parse tree.
// If not, it returns an error describing the first difference.
func (eq *eqchecker) checkValue(v, w reflect.Value) error {
	// inner returns the innermost expression for v.
	// if v is a non-nil interface value, it returns the concrete
	// value in the interface.
	inner := func(v reflect.Value) reflect.Value {
		for {
			if v.Kind() == reflect.Interface && !v.IsNil() {
				v = v.Elem()
				continue
			}
			break
		}
		return v
	}

	v = inner(v)
	w = inner(w)
	if v.Kind() == reflect.Invalid && w.Kind() == reflect.Invalid {
		return nil
	}
	if v.Kind() == reflect.Invalid {
		return eq.errorf("nil interface became %s", w.Type())
	}
	if w.Kind() == reflect.Invalid {
		return eq.errorf("%s became nil interface", v.Type())
	}

	if v.Type() != w.Type() {
		return eq.errorf("%s became %s", v.Type(), w.Type())
	}

	// Track the position of the expression being compared so errorf
	// can report where a mismatch occurred.
	if p, ok := v.Interface().(Expr); ok {
		eq.pos, _ = p.Span()
	}

	switch v.Kind() {
	default:
		return eq.errorf("unexpected type %s", v.Type())

	case reflect.Bool, reflect.Int, reflect.String:
		vi := v.Interface()
		wi := w.Interface()
		if vi != wi {
			return eq.errorf("%v became %v", vi, wi)
		}

	case reflect.Slice:
		// Element-wise comparison; length mismatches are reported as
		// unexpected/missing elements.
		vl := v.Len()
		wl := w.Len()
		for i := 0; i < vl || i < wl; i++ {
			if i >= vl {
				return eq.errorf("unexpected %s", w.Index(i).Type())
			}
			if i >= wl {
				return eq.errorf("missing %s", v.Index(i).Type())
			}
			if err := eq.checkValue(v.Index(i), w.Index(i)); err != nil {
				return err
			}
		}

	case reflect.Struct:
		// Fields in struct must match.
		t := v.Type()
		n := t.NumField()
		for i := 0; i < n; i++ {
			tf := t.Field(i)
			switch {
			default:
				if err := eq.checkValue(v.Field(i), w.Field(i)); err != nil {
					return err
				}

			case tf.Type == posType: // ignore positions
			case tf.Type == commentsType: // ignore comment assignment
			}
		}

	case reflect.Ptr, reflect.Interface:
		if v.IsNil() != w.IsNil() {
			if v.IsNil() {
				return eq.errorf("unexpected %s", w.Elem().Type())
			}
			return eq.errorf("missing %s", v.Elem().Type())
		}
		if err := eq.checkValue(v.Elem(), w.Elem()); err != nil {
			return err
		}
	}
	return nil
}
+
// diff returns the output of running the system "diff -u" tool on b1 and b2.
// A non-zero diff exit status (files differ) is not treated as an error as
// long as diff produced output.
func diff(b1, b2 []byte) (data []byte, err error) {
	f1, err := ioutil.TempFile("", "testdiff")
	if err != nil {
		return nil, err
	}
	defer os.Remove(f1.Name())
	defer f1.Close()

	f2, err := ioutil.TempFile("", "testdiff")
	if err != nil {
		return nil, err
	}
	defer os.Remove(f2.Name())
	defer f2.Close()

	// Check the write errors: a short write would silently diff
	// truncated files and report a bogus result.
	if _, err := f1.Write(b1); err != nil {
		return nil, err
	}
	if _, err := f2.Write(b2); err != nil {
		return nil, err
	}

	data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
	if len(data) > 0 {
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		err = nil
	}
	return
}
+
+// tdiff logs the diff output to t.Error.
+func tdiff(t *testing.T, a, b string) {
+ data, err := diff([]byte(a), []byte(b))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ t.Error(string(data))
+}
+
// modulePathTests lists go.mod module-directive inputs and the module path
// ModulePath should extract from each; expected is "" when the input is
// malformed and no path can be recovered. Exact duplicates have been removed
// (they added no coverage and produced colliding subtest names).
var modulePathTests = []struct {
	input    []byte
	expected string
}{
	{input: []byte("module \"github.com/rsc/vgotest\""), expected: "github.com/rsc/vgotest"},
	{input: []byte("module github.com/rsc/vgotest"), expected: "github.com/rsc/vgotest"},
	{input: []byte("module `github.com/rsc/vgotest`"), expected: "github.com/rsc/vgotest"},
	{input: []byte("module \"github.com/rsc/vgotest/v2\""), expected: "github.com/rsc/vgotest/v2"},
	{input: []byte("module github.com/rsc/vgotest/v2"), expected: "github.com/rsc/vgotest/v2"},
	{input: []byte("module \"gopkg.in/yaml.v2\""), expected: "gopkg.in/yaml.v2"},
	{input: []byte("module gopkg.in/yaml.v2"), expected: "gopkg.in/yaml.v2"},
	{input: []byte("module \"gopkg.in/check.v1\"\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module \"gopkg.in/check.v1\n\""), expected: ""},
	{input: []byte("module gopkg.in/check.v1\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module \"gopkg.in/check.v1\"\r\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module gopkg.in/check.v1\r\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module \"gopkg.in/check.v1\"\n\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module gopkg.in/check.v1\n\n"), expected: "gopkg.in/check.v1"},
	{input: []byte("module \n\"gopkg.in/check.v1\"\n\n"), expected: ""},
	{input: []byte("module \ngopkg.in/check.v1\n\n"), expected: ""},
	{input: []byte("module \"gopkg.in/check.v1\"asd"), expected: ""},
	{input: []byte("module \nmodule a/b/c "), expected: "a/b/c"},
	{input: []byte("module \" \""), expected: " "},
	{input: []byte("module "), expected: ""},
	{input: []byte("module \" a/b/c \""), expected: " a/b/c "},
	{input: []byte("module \"github.com/rsc/vgotest1\" // with a comment"), expected: "github.com/rsc/vgotest1"},
}
+
+func TestModulePath(t *testing.T) {
+ for _, test := range modulePathTests {
+ t.Run(string(test.input), func(t *testing.T) {
+ result := ModulePath(test.input)
+ if result != test.expected {
+ t.Fatalf("ModulePath(%q): %s, want %s", string(test.input), result, test.expected)
+ }
+ })
+ }
+}
+
+func TestGoVersion(t *testing.T) {
+ for _, test := range []struct {
+ desc, input string
+ ok bool
+ }{
+ {desc: "empty", input: "module m\ngo \n", ok: false},
+ {desc: "one", input: "module m\ngo 1\n", ok: false},
+ {desc: "two", input: "module m\ngo 1.22\n", ok: true},
+ {desc: "three", input: "module m\ngo 1.22.333", ok: false},
+ {desc: "before", input: "module m\ngo v1.2\n", ok: false},
+ {desc: "after", input: "module m\ngo 1.2rc1\n", ok: false},
+ {desc: "space", input: "module m\ngo 1.2 3.4\n", ok: false},
+ } {
+ t.Run(test.desc, func(t *testing.T) {
+ if _, err := Parse("go.mod", []byte(test.input), nil); err == nil && !test.ok {
+ t.Error("unexpected success")
+ } else if err != nil && test.ok {
+ t.Errorf("unexpected error: %v", err)
+ }
+ })
+ }
+}
diff --git a/modfile/rule.go b/modfile/rule.go
new file mode 100644
index 0000000..95fefec
--- /dev/null
+++ b/modfile/rule.go
@@ -0,0 +1,766 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "golang.org/x/mod/internal/lazyregexp"
+ "golang.org/x/mod/module"
+)
+
// A File is the parsed, interpreted form of a go.mod file.
type File struct {
	Module  *Module    // module directive, or nil if none
	Go      *Go        // go directive, or nil if none
	Require []*Require // require directives, in file order
	Exclude []*Exclude // exclude directives, in file order
	Replace []*Replace // replace directives, in file order

	Syntax *FileSyntax // underlying syntax tree, kept for printing and edits
}

// A Module is the module statement.
type Module struct {
	Mod    module.Version // module path (the parser leaves Version empty)
	Syntax *Line          // corresponding line in the syntax tree
}

// A Go is the go statement.
type Go struct {
	Version string // "1.23"
	Syntax  *Line  // corresponding line in the syntax tree
}

// A Require is a single require statement.
type Require struct {
	Mod      module.Version // required module path and version
	Indirect bool           // has "// indirect" comment
	Syntax   *Line          // corresponding line in the syntax tree
}

// An Exclude is a single exclude statement.
type Exclude struct {
	Mod    module.Version // excluded module path and version
	Syntax *Line          // corresponding line in the syntax tree
}

// A Replace is a single replace statement.
type Replace struct {
	Old    module.Version // module being replaced; Version may be empty
	New    module.Version // replacement module (or directory path, with empty Version)
	Syntax *Line          // corresponding line in the syntax tree
}
+
+func (f *File) AddModuleStmt(path string) error {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ if f.Module == nil {
+ f.Module = &Module{
+ Mod: module.Version{Path: path},
+ Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
+ }
+ } else {
+ f.Module.Mod.Path = path
+ f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
+ }
+ return nil
+}
+
+func (f *File) AddComment(text string) {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
+ Comments: Comments{
+ Before: []Comment{
+ {
+ Token: text,
+ },
+ },
+ },
+ })
+}
+
// A VersionFixer is applied to each (module path, version) pair found
// while parsing and returns the version to record in its place.
// Parse uses it to canonicalize module versions.
type VersionFixer func(path, version string) (string, error)
+
// Parse parses the data, reported in errors as being from file,
// into a File struct. It applies fix, if non-nil, to canonicalize all
// module versions found. Unknown directives are reported as errors;
// use ParseLax to ignore them.
func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
	return parseToFile(file, data, fix, true)
}
+
// ParseLax is like Parse but ignores unknown statements.
// It is used when parsing go.mod files other than the main module,
// under the theory that most statement types we add in the future will
// only apply in the main module, like exclude and replace,
// and so we get better gradual deployments if old go commands
// simply ignore those statements when found in go.mod files
// in dependencies.
// In lax mode only module, go, and require directives are interpreted.
func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
	return parseToFile(file, data, fix, false)
}
+
// parseToFile turns raw go.mod text into a *File. When strict is true,
// unknown block types and directives are reported as errors; when false
// (ParseLax) they are skipped. All errors are collected and reported
// together, one per line, with the trailing newline trimmed.
func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) {
	fs, err := parse(file, data)
	if err != nil {
		return nil, err
	}
	f := &File{
		Syntax: fs,
	}

	var errs bytes.Buffer
	for _, x := range fs.Stmt {
		switch x := x.(type) {
		case *Line:
			// Single-line directive: verb followed by its arguments.
			f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict)

		case *LineBlock:
			// Parenthesized block "verb ( ... )": the block token list
			// must be exactly one verb.
			if len(x.Token) > 1 {
				if strict {
					fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
				}
				continue
			}
			switch x.Token[0] {
			default:
				if strict {
					fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
				}
				continue
			case "module", "require", "exclude", "replace":
				// Each line inside the block is handled as if it were
				// "verb <line tokens>".
				for _, l := range x.Line {
					f.add(&errs, l, x.Token[0], l.Token, fix, strict)
				}
			}
		}
	}

	if errs.Len() > 0 {
		return nil, errors.New(strings.TrimRight(errs.String(), "\n"))
	}
	return f, nil
}
+
// GoVersionRE matches the version argument of a go directive: "N.M"
// where N and M are decimal numbers without leading zeros (N nonzero).
var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+
// add interprets a single directive (verb plus its argument tokens) and
// records the result in f, appending any problems to errs as
// "file:line: message" text. fix, if non-nil, canonicalizes versions.
// When strict is false, only module, require, and go are interpreted.
func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
	// If strict is false, this module is a dependency.
	// We ignore all unknown directives as well as main-module-only
	// directives like replace and exclude. It will work better for
	// forward compatibility if we can depend on modules that have unknown
	// statements (presumed relevant only when acting as the main module)
	// and simply ignore those statements.
	if !strict {
		switch verb {
		case "module", "require", "go":
			// want these even for dependency go.mods
		default:
			return
		}
	}

	switch verb {
	default:
		fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb)

	case "go":
		if f.Go != nil {
			fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line)
			return
		}
		// Exactly one argument, matching GoVersionRE ("1.23").
		if len(args) != 1 || !GoVersionRE.MatchString(args[0]) {
			fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line)
			return
		}
		f.Go = &Go{Syntax: line}
		f.Go.Version = args[0]
	case "module":
		if f.Module != nil {
			fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line)
			return
		}
		f.Module = &Module{Syntax: line}
		if len(args) != 1 {

			fmt.Fprintf(errs, "%s:%d: usage: module module/path\n", f.Syntax.Name, line.Start.Line)
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		f.Module.Mod = module.Version{Path: s}
	case "require", "exclude":
		// Both verbs take "module/path v1.2.3".
		if len(args) != 2 {
			fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb)
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		v, err := parseVersion(verb, s, &args[1], fix)
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		// The version's major number must agree with the path's /vN suffix.
		pathMajor, err := modulePathMajor(s)
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		if err := module.CheckPathMajor(v, pathMajor); err != nil {
			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
			return
		}
		if verb == "require" {
			f.Require = append(f.Require, &Require{
				Mod:      module.Version{Path: s, Version: v},
				Syntax:   line,
				Indirect: isIndirect(line),
			})
		} else {
			f.Exclude = append(f.Exclude, &Exclude{
				Mod:    module.Version{Path: s, Version: v},
				Syntax: line,
			})
		}
	case "replace":
		// Accept "replace old v1.2.3 => ..." and "replace old => ...";
		// arrow is the index of the "=>" token.
		arrow := 2
		if len(args) >= 2 && args[1] == "=>" {
			arrow = 1
		}
		if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
			fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb)
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		pathMajor, err := modulePathMajor(s)
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		var v string
		if arrow == 2 {
			// Left side carries a version; validate it against the path.
			v, err = parseVersion(verb, s, &args[1], fix)
			if err != nil {
				fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
				return
			}
			if err := module.CheckPathMajor(v, pathMajor); err != nil {
				fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
				return
			}
		}
		ns, err := parseString(&args[arrow+1])
		if err != nil {
			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
			return
		}
		nv := ""
		if len(args) == arrow+2 {
			// No version on the right side: the target must be a directory.
			if !IsDirectoryPath(ns) {
				fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line)
				return
			}
			if filepath.Separator == '/' && strings.Contains(ns, `\`) {
				fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line)
				return
			}
		}
		if len(args) == arrow+3 {
			// Version on the right side: the target must be a module path.
			nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
			if err != nil {
				fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
				return
			}
			if IsDirectoryPath(ns) {
				fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns)
				return
			}
		}
		f.Replace = append(f.Replace, &Replace{
			Old:    module.Version{Path: s, Version: v},
			New:    module.Version{Path: ns, Version: nv},
			Syntax: line,
		})
	}
}
+
+// isIndirect reports whether line has a "// indirect" comment,
+// meaning it is in go.mod only for its effect on indirect dependencies,
+// so that it can be dropped entirely once the effective version of the
+// indirect dependency reaches the given minimum version.
+func isIndirect(line *Line) bool {
+ if len(line.Suffix) == 0 {
+ return false
+ }
+ f := strings.Fields(line.Suffix[0].Token)
+ return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//"
+}
+
+// setIndirect sets line to have (or not have) a "// indirect" comment.
+func setIndirect(line *Line, indirect bool) {
+ if isIndirect(line) == indirect {
+ return
+ }
+ if indirect {
+ // Adding comment.
+ if len(line.Suffix) == 0 {
+ // New comment.
+ line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
+ return
+ }
+ // Insert at beginning of existing comment.
+ com := &line.Suffix[0]
+ space := " "
+ if len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t' {
+ space = ""
+ }
+ com.Token = "// indirect;" + space + com.Token[2:]
+ return
+ }
+
+ // Removing comment.
+ f := strings.Fields(line.Suffix[0].Token)
+ if len(f) == 2 {
+ // Remove whole comment.
+ line.Suffix = nil
+ return
+ }
+
+ // Remove comment prefix.
+ com := &line.Suffix[0]
+ i := strings.Index(com.Token, "indirect;")
+ com.Token = "//" + com.Token[i+len("indirect;"):]
+}
+
// IsDirectoryPath reports whether the given path should be interpreted
// as a directory path. Just like on the go command line, relative paths
// and rooted paths are directory paths; the rest are module paths.
func IsDirectoryPath(ns string) bool {
	// Because go.mod files can move from one system to another,
	// we check all known path syntaxes, both Unix and Windows.
	for _, prefix := range []string{"./", "../", "/", `.\`, `..\`, `\`} {
		if strings.HasPrefix(ns, prefix) {
			return true
		}
	}
	// Windows drive-letter paths such as "c:\foo" or "c:/foo".
	if len(ns) >= 2 && ns[1] == ':' {
		c := ns[0]
		return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
	}
	return false
}
+
// MustQuote reports whether s must be quoted in order to appear as
// a single token in a go.mod line.
func MustQuote(s string) bool {
	if s == "" {
		return true
	}
	if strings.ContainsAny(s, " \"'`") {
		return true
	}
	for _, r := range s {
		if !unicode.IsPrint(r) {
			return true
		}
	}
	// "//" and "/*" would start a comment in the middle of the token.
	return strings.Contains(s, "//") || strings.Contains(s, "/*")
}

// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
// the quotation of s.
func AutoQuote(s string) string {
	if !MustQuote(s) {
		return s
	}
	return strconv.Quote(s)
}
+
+func parseString(s *string) (string, error) {
+ t := *s
+ if strings.HasPrefix(t, `"`) {
+ var err error
+ if t, err = strconv.Unquote(t); err != nil {
+ return "", err
+ }
+ } else if strings.ContainsAny(t, "\"'`") {
+ // Other quotes are reserved both for possible future expansion
+ // and to avoid confusion. For example if someone types 'x'
+ // we want that to be a syntax error and not a literal x in literal quotation marks.
+ return "", fmt.Errorf("unquoted string cannot contain quote")
+ }
+ *s = AutoQuote(t)
+ return t, nil
+}
+
// An Error is an error produced while interpreting a go.mod directive,
// annotated with the directive verb and the module path involved.
type Error struct {
	Verb    string // directive verb, e.g. "require" or "replace"
	ModPath string // module path the error applies to
	Err     error  // underlying cause
}

// Error formats the error as "<verb> <path>: <err>".
func (e *Error) Error() string {
	return fmt.Sprintf("%s %s: %v", e.Verb, e.ModPath, e.Err)
}

// Unwrap returns the underlying error, for use with errors.Is and errors.As.
func (e *Error) Unwrap() error { return e.Err }
+
// parseVersion validates and canonicalizes the version token *s for the
// module with the given path, applying fix first if non-nil. On success
// *s is rewritten to the canonical form and that version is returned;
// verb and path are used only for error reporting.
func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) {
	t, err := parseString(s)
	if err != nil {
		return "", &Error{
			Verb:    verb,
			ModPath: path,
			Err: &module.InvalidVersionError{
				Version: *s,
				Err:     err,
			},
		}
	}
	if fix != nil {
		var err error
		t, err = fix(path, t)
		if err != nil {
			// Unwrap a ModuleError so the module path is not reported twice.
			if err, ok := err.(*module.ModuleError); ok {
				return "", &Error{
					Verb:    verb,
					ModPath: path,
					Err:     err.Err,
				}
			}
			return "", err
		}
	}
	if v := module.CanonicalVersion(t); v != "" {
		*s = v
		return *s, nil
	}
	return "", &Error{
		Verb:    verb,
		ModPath: path,
		Err: &module.InvalidVersionError{
			Version: t,
			Err:     errors.New("must be of the form v1.2.3"),
		},
	}
}
+
+func modulePathMajor(path string) (string, error) {
+ _, major, ok := module.SplitPathVersion(path)
+ if !ok {
+ return "", fmt.Errorf("invalid module path")
+ }
+ return major, nil
+}
+
// Format returns f's go.mod syntax tree printed in standard style.
// The returned error is always nil.
func (f *File) Format() ([]byte, error) {
	return Format(f.Syntax), nil
}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropRequire
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *File) Cleanup() {
+ w := 0
+ for _, r := range f.Require {
+ if r.Mod.Path != "" {
+ f.Require[w] = r
+ w++
+ }
+ }
+ f.Require = f.Require[:w]
+
+ w = 0
+ for _, x := range f.Exclude {
+ if x.Mod.Path != "" {
+ f.Exclude[w] = x
+ w++
+ }
+ }
+ f.Exclude = f.Exclude[:w]
+
+ w = 0
+ for _, r := range f.Replace {
+ if r.Old.Path != "" {
+ f.Replace[w] = r
+ w++
+ }
+ }
+ f.Replace = f.Replace[:w]
+
+ f.Syntax.Cleanup()
+}
+
+func (f *File) AddGoStmt(version string) error {
+ if !GoVersionRE.MatchString(version) {
+ return fmt.Errorf("invalid language version string %q", version)
+ }
+ if f.Go == nil {
+ f.Go = &Go{
+ Version: version,
+ Syntax: f.Syntax.addLine(nil, "go", version),
+ }
+ } else {
+ f.Go.Version = version
+ f.Syntax.updateLine(f.Go.Syntax, "go", version)
+ }
+ return nil
+}
+
+func (f *File) AddRequire(path, vers string) error {
+ need := true
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ if need {
+ r.Mod.Version = vers
+ f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
+ need = false
+ } else {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ }
+
+ if need {
+ f.AddNewRequire(path, vers, false)
+ }
+ return nil
+}
+
+func (f *File) AddNewRequire(path, vers string, indirect bool) {
+ line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
+ setIndirect(line, indirect)
+ f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line})
+}
+
// SetRequire rewrites the requirements of f to contain exactly req,
// preserving existing grouping and comments where possible: kept lines
// are updated in place, lines for modules not in req are dropped, and
// missing modules are appended as new lines before re-sorting blocks.
func (f *File) SetRequire(req []*Require) {
	// Desired version and indirect flag, keyed by module path.
	need := make(map[string]string)
	indirect := make(map[string]bool)
	for _, r := range req {
		need[r.Mod.Path] = r.Mod.Version
		indirect[r.Mod.Path] = r.Indirect
	}

	// Update the interpreted entries in place.
	for _, r := range f.Require {
		if v, ok := need[r.Mod.Path]; ok {
			r.Mod.Version = v
			r.Indirect = indirect[r.Mod.Path]
		}
	}

	// Walk the syntax tree: rewrite require lines that survive and drop
	// the rest (as each path is consumed it is deleted from need).
	var newStmts []Expr
	for _, stmt := range f.Syntax.Stmt {
		switch stmt := stmt.(type) {
		case *LineBlock:
			if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
				var newLines []*Line
				for _, line := range stmt.Line {
					if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
						// Drop a leading blank separator so kept lines pack together.
						if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 {
							line.Comments.Before = line.Comments.Before[:0]
						}
						line.Token[1] = need[p]
						delete(need, p)
						setIndirect(line, indirect[p])
						newLines = append(newLines, line)
					}
				}
				if len(newLines) == 0 {
					continue // drop stmt
				}
				stmt.Line = newLines
			}

		case *Line:
			// NOTE(review): assumes a require line has at least three
			// tokens ("require path version"); a shorter line would index
			// out of range below — confirm the parser guarantees this.
			if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
				if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" {
					stmt.Token[2] = need[p]
					delete(need, p)
					setIndirect(stmt, indirect[p])
				} else {
					continue // drop stmt
				}
			}
		}
		newStmts = append(newStmts, stmt)
	}
	f.Syntax.Stmt = newStmts

	// Whatever remains in need was not present in the file: add it.
	for path, vers := range need {
		f.AddNewRequire(path, vers, indirect[path])
	}
	f.SortBlocks()
}
+
+func (f *File) DropRequire(path string) error {
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddExclude(path, vers string) error {
+ var hint *Line
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ return nil
+ }
+ if x.Mod.Path == path {
+ hint = x.Syntax
+ }
+ }
+
+ f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
+ return nil
+}
+
+func (f *File) DropExclude(path, vers string) error {
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ f.Syntax.removeLine(x.Syntax)
+ *x = Exclude{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+ need := true
+ old := module.Version{Path: oldPath, Version: oldVers}
+ new := module.Version{Path: newPath, Version: newVers}
+ tokens := []string{"replace", AutoQuote(oldPath)}
+ if oldVers != "" {
+ tokens = append(tokens, oldVers)
+ }
+ tokens = append(tokens, "=>", AutoQuote(newPath))
+ if newVers != "" {
+ tokens = append(tokens, newVers)
+ }
+
+ var hint *Line
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
+ if need {
+ // Found replacement for old; update to use new.
+ r.New = new
+ f.Syntax.updateLine(r.Syntax, tokens...)
+ need = false
+ continue
+ }
+ // Already added; delete other replacements for same.
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ if r.Old.Path == oldPath {
+ hint = r.Syntax
+ }
+ }
+ if need {
+ f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)})
+ }
+ return nil
+}
+
+func (f *File) DropReplace(oldPath, oldVers string) error {
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && r.Old.Version == oldVers {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ }
+ return nil
+}
+
+func (f *File) SortBlocks() {
+ f.removeDups() // otherwise sorting is unsafe
+
+ for _, stmt := range f.Syntax.Stmt {
+ block, ok := stmt.(*LineBlock)
+ if !ok {
+ continue
+ }
+ sort.Slice(block.Line, func(i, j int) bool {
+ li := block.Line[i]
+ lj := block.Line[j]
+ for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
+ if li.Token[k] != lj.Token[k] {
+ return li.Token[k] < lj.Token[k]
+ }
+ }
+ return len(li.Token) < len(lj.Token)
+ })
+ }
+}
+
// removeDups removes duplicate exclude and replace directives — keeping
// the first of duplicate excludes and the last of duplicate replaces —
// and deletes the corresponding lines (and any emptied blocks) from the
// syntax tree.
func (f *File) removeDups() {
	have := make(map[module.Version]bool)
	kill := make(map[*Line]bool)
	// Excludes: first occurrence wins.
	for _, x := range f.Exclude {
		if have[x.Mod] {
			kill[x.Syntax] = true
			continue
		}
		have[x.Mod] = true
	}
	var excl []*Exclude
	for _, x := range f.Exclude {
		if !kill[x.Syntax] {
			excl = append(excl, x)
		}
	}
	f.Exclude = excl

	have = make(map[module.Version]bool)
	// Later replacements take priority over earlier ones.
	for i := len(f.Replace) - 1; i >= 0; i-- {
		x := f.Replace[i]
		if have[x.Old] {
			kill[x.Syntax] = true
			continue
		}
		have[x.Old] = true
	}
	var repl []*Replace
	for _, x := range f.Replace {
		if !kill[x.Syntax] {
			repl = append(repl, x)
		}
	}
	f.Replace = repl

	// Rewrite the syntax tree without the killed lines.
	var stmts []Expr
	for _, stmt := range f.Syntax.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if kill[stmt] {
				continue
			}
		case *LineBlock:
			var lines []*Line
			for _, line := range stmt.Line {
				if !kill[line] {
					lines = append(lines, line)
				}
			}
			stmt.Line = lines
			if len(lines) == 0 {
				continue
			}
		}
		stmts = append(stmts, stmt)
	}
	f.Syntax.Stmt = stmts
}
diff --git a/modfile/rule_test.go b/modfile/rule_test.go
new file mode 100644
index 0000000..73e3386
--- /dev/null
+++ b/modfile/rule_test.go
@@ -0,0 +1,163 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "golang.org/x/mod/module"
+)
+
// addRequireTests holds go.mod inputs, a module path/version to add via
// AddRequire, and the expected formatted output.
var addRequireTests = []struct {
	in   string
	path string
	vers string
	out  string
}{
	{
		`
		module m
		require x.y/z v1.2.3
		`,
		"x.y/z", "v1.5.6",
		`
		module m
		require x.y/z v1.5.6
		`,
	},
	{
		`
		module m
		require x.y/z v1.2.3
		`,
		"x.y/w", "v1.5.6",
		`
		module m
		require (
			x.y/z v1.2.3
			x.y/w v1.5.6
		)
		`,
	},
	{
		`
		module m
		require x.y/z v1.2.3
		require x.y/q/v2 v2.3.4
		`,
		"x.y/w", "v1.5.6",
		`
		module m
		require x.y/z v1.2.3
		require (
			x.y/q/v2 v2.3.4
			x.y/w v1.5.6
		)
		`,
	},
}
+
// setRequireTests holds a go.mod input, the full requirement list to
// install via SetRequire, and the expected formatted output.
var setRequireTests = []struct {
	in   string
	mods []struct {
		path string
		vers string
	}
	out string
}{
	{
		`module m
		require (
			x.y/b v1.2.3

			x.y/a v1.2.3
		)
		`,
		[]struct {
			path string
			vers string
		}{
			{"x.y/a", "v1.2.3"},
			{"x.y/b", "v1.2.3"},
			{"x.y/c", "v1.2.3"},
		},
		`module m
		require (
			x.y/a v1.2.3
			x.y/b v1.2.3
			x.y/c v1.2.3
		)
		`,
	},
}
+
+func TestAddRequire(t *testing.T) {
+ for i, tt := range addRequireTests {
+ t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+ f, err := Parse("in", []byte(tt.in), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ g, err := Parse("out", []byte(tt.out), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ golden, err := g.Format()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := f.AddRequire(tt.path, tt.vers); err != nil {
+ t.Fatal(err)
+ }
+ out, err := f.Format()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, golden) {
+ t.Errorf("have:\n%s\nwant:\n%s", out, golden)
+ }
+ })
+ }
+}
+
+func TestSetRequire(t *testing.T) {
+ for i, tt := range setRequireTests {
+ t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+ f, err := Parse("in", []byte(tt.in), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ g, err := Parse("out", []byte(tt.out), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ golden, err := g.Format()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var mods []*Require
+ for _, mod := range tt.mods {
+ mods = append(mods, &Require{
+ Mod: module.Version{
+ Path: mod.path,
+ Version: mod.vers,
+ },
+ })
+ }
+
+ f.SetRequire(mods)
+ out, err := f.Format()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, golden) {
+ t.Errorf("have:\n%s\nwant:\n%s", out, golden)
+ }
+ })
+ }
+}
diff --git a/modfile/testdata/block.golden b/modfile/testdata/block.golden
new file mode 100644
index 0000000..4aa2d63
--- /dev/null
+++ b/modfile/testdata/block.golden
@@ -0,0 +1,29 @@
+// comment
+x "y" z
+
+// block
+block ( // block-eol
+ // x-before-line
+
+ "x" ( y // x-eol
+ "x1"
+ "x2"
+ // line
+ "x3"
+ "x4"
+
+ "x5"
+
+ // y-line
+ "y" // y-eol
+
+ "z" // z-eol
+) // block-eol2
+
+block2 (
+ x
+ y
+ z
+)
+
+// eof
diff --git a/modfile/testdata/block.in b/modfile/testdata/block.in
new file mode 100644
index 0000000..1dfae65
--- /dev/null
+++ b/modfile/testdata/block.in
@@ -0,0 +1,29 @@
+// comment
+x "y" z
+
+// block
+block ( // block-eol
+ // x-before-line
+
+ "x" ( y // x-eol
+ "x1"
+ "x2"
+ // line
+ "x3"
+ "x4"
+
+ "x5"
+
+ // y-line
+ "y" // y-eol
+
+ "z" // z-eol
+) // block-eol2
+
+
+block2 (x
+ y
+ z
+)
+
+// eof
diff --git a/modfile/testdata/comment.golden b/modfile/testdata/comment.golden
new file mode 100644
index 0000000..75f3b84
--- /dev/null
+++ b/modfile/testdata/comment.golden
@@ -0,0 +1,10 @@
+// comment
+module "x" // eol
+
+// mid comment
+
+// comment 2
+// comment 2 line 2
+module "y" // eoy
+
+// comment 3
diff --git a/modfile/testdata/comment.in b/modfile/testdata/comment.in
new file mode 100644
index 0000000..bfc2492
--- /dev/null
+++ b/modfile/testdata/comment.in
@@ -0,0 +1,8 @@
+// comment
+module "x" // eol
+// mid comment
+
+// comment 2
+// comment 2 line 2
+module "y" // eoy
+// comment 3
diff --git a/modfile/testdata/empty.golden b/modfile/testdata/empty.golden
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/modfile/testdata/empty.golden
diff --git a/modfile/testdata/empty.in b/modfile/testdata/empty.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/modfile/testdata/empty.in
diff --git a/modfile/testdata/gopkg.in.golden b/modfile/testdata/gopkg.in.golden
new file mode 100644
index 0000000..41669b3
--- /dev/null
+++ b/modfile/testdata/gopkg.in.golden
@@ -0,0 +1,6 @@
+module x
+
+require (
+ gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528
+ gopkg.in/yaml.v2 v2.2.1
+)
diff --git a/modfile/testdata/module.golden b/modfile/testdata/module.golden
new file mode 100644
index 0000000..78ba943
--- /dev/null
+++ b/modfile/testdata/module.golden
@@ -0,0 +1 @@
+module abc
diff --git a/modfile/testdata/module.in b/modfile/testdata/module.in
new file mode 100644
index 0000000..08f3836
--- /dev/null
+++ b/modfile/testdata/module.in
@@ -0,0 +1 @@
+module "abc"
diff --git a/modfile/testdata/replace.golden b/modfile/testdata/replace.golden
new file mode 100644
index 0000000..5d6abcf
--- /dev/null
+++ b/modfile/testdata/replace.golden
@@ -0,0 +1,5 @@
+module abc
+
+replace xyz v1.2.3 => /tmp/z
+
+replace xyz v1.3.4 => my/xyz v1.3.4-me
diff --git a/modfile/testdata/replace.in b/modfile/testdata/replace.in
new file mode 100644
index 0000000..6852499
--- /dev/null
+++ b/modfile/testdata/replace.in
@@ -0,0 +1,5 @@
+module "abc"
+
+replace "xyz" v1.2.3 => "/tmp/z"
+
+replace "xyz" v1.3.4 => "my/xyz" v1.3.4-me
diff --git a/modfile/testdata/replace2.golden b/modfile/testdata/replace2.golden
new file mode 100644
index 0000000..e1d9c72
--- /dev/null
+++ b/modfile/testdata/replace2.golden
@@ -0,0 +1,10 @@
+module abc
+
+replace (
+ xyz v1.2.3 => /tmp/z
+ xyz v1.3.4 => my/xyz v1.3.4-me
+ xyz v1.4.5 => "/tmp/my dir"
+ xyz v1.5.6 => my/xyz v1.5.6
+
+ xyz => my/other/xyz v1.5.4
+)
diff --git a/modfile/testdata/replace2.in b/modfile/testdata/replace2.in
new file mode 100644
index 0000000..7864698
--- /dev/null
+++ b/modfile/testdata/replace2.in
@@ -0,0 +1,10 @@
+module "abc"
+
+replace (
+ "xyz" v1.2.3 => "/tmp/z"
+ "xyz" v1.3.4 => "my/xyz" "v1.3.4-me"
+ xyz "v1.4.5" => "/tmp/my dir"
+ xyz v1.5.6 => my/xyz v1.5.6
+
+ xyz => my/other/xyz v1.5.4
+)
diff --git a/modfile/testdata/rule1.golden b/modfile/testdata/rule1.golden
new file mode 100644
index 0000000..8a5c725
--- /dev/null
+++ b/modfile/testdata/rule1.golden
@@ -0,0 +1,7 @@
+module "x"
+
+module "y"
+
+require "x"
+
+require x