Example #1
File: nodes.go Project: rgmabs19357/gcc
func (p *printer) file(src *ast.File) {
	p.setComment(src.Doc)
	p.print(src.Pos(), token.PACKAGE, blank)
	p.expr(src.Name)

	if len(src.Decls) > 0 {
		tok := token.ILLEGAL
		for _, d := range src.Decls {
			prev := tok
			tok = declToken(d)
			// if the declaration token changed (e.g., from CONST to TYPE)
			// or the next declaration has documentation associated with it,
			// print an empty line between top-level declarations
			// (because p.linebreak is called with the position of d, which
			// is past any documentation, the minimum requirement is satisfied
			// even w/o the extra getDoc(d) nil-check - leave it in case the
			// linebreak logic improves - there's already a TODO).
			min := 1
			if prev != tok || getDoc(d) != nil {
				min = 2
			}
			p.linebreak(p.lineFor(d.Pos()), min, ignore, false)
			p.decl(d)
		}
	}

	p.print(newline)
}
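The declToken helper is defined elsewhere in nodes.go. A sketch consistent with its use above, mapping a declaration to its leading keyword token, might look like this:

func declToken(decl ast.Decl) (tok token.Token) {
	tok = token.ILLEGAL
	switch d := decl.(type) {
	case *ast.GenDecl:
		tok = d.Tok // IMPORT, CONST, TYPE, or VAR
	case *ast.FuncDecl:
		tok = token.FUNC
	}
	return
}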
Example #2
// readFile adds the AST for a source file to the reader.
//
func (r *reader) readFile(src *ast.File) {
	// add package documentation
	if src.Doc != nil {
		r.readDoc(src.Doc)
		src.Doc = nil // doc consumed - remove from AST
	}

	// add all declarations
	for _, decl := range src.Decls {
		switch d := decl.(type) {
		case *ast.GenDecl:
			switch d.Tok {
			case token.IMPORT:
				// imports are handled individually
				for _, spec := range d.Specs {
					if s, ok := spec.(*ast.ImportSpec); ok {
						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
							r.imports[import_] = 1
						}
					}
				}
			case token.CONST, token.VAR:
				// constants and variables are always handled as a group
				r.readValue(d)
			case token.TYPE:
				// types are handled individually
				for _, spec := range d.Specs {
					if s, ok := spec.(*ast.TypeSpec); ok {
						// use an individual (possibly fake) declaration
						// for each type; this also ensures that each type
						// gets to (re-)use the declaration documentation
						// if there's none associated with the spec itself
						fake := &ast.GenDecl{
							d.Doc, d.Pos(), token.TYPE, token.NoPos,
							[]ast.Spec{s}, token.NoPos,
						}
						r.readType(fake, s)
					}
				}
			}
		case *ast.FuncDecl:
			r.readFunc(d)
		}
	}

	// collect BUG(...) comments
	for _, c := range src.Comments {
		text := c.List[0].Text
		if m := bug_markers.FindStringIndex(text); m != nil {
			// found a BUG comment; maybe empty
			if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
				// non-empty BUG comment; collect comment without BUG prefix
				list := append([]*ast.Comment(nil), c.List...) // make a copy
				list[0].Text = text[m[1]:]
				r.bugs = append(r.bugs, (&ast.CommentGroup{list}).Text())
			}
		}
	}
	src.Comments = nil // consumed unassociated comments - remove from AST
}
Example #3
func (p *printer) file(src *ast.File) {
	p.setComment(src.Doc)
	p.print(src.Pos(), token.PACKAGE, blank)
	p.expr(src.Name)
	p.declList(src.Decls)
	p.print(newline)
}
Example #4
func generateGodebugIdentifiers(f *ast.File) {
	// Variables that won't have suffixes.
	idents.ctx = createConflictFreeName("ctx", f, false)
	idents.ok = createConflictFreeName("ok", f, false)
	idents.scope = createConflictFreeName("scope", f, false)
	idents.receiver = createConflictFreeName("receiver", f, false)
	idents.recoverChan = createConflictFreeName("rr", f, false)
	idents.recoverChanChan = createConflictFreeName("r", f, false)
	idents.recovers = createConflictFreeName("recovers", f, false)
	idents.panicVal = createConflictFreeName("v", f, false)
	idents.panicChan = createConflictFreeName("panicChan", f, false)

	idents.godebug = generateGodebugPkgName(f)

	// Variables that will have suffixes.
	idents.result = createConflictFreeName("result", f, true)
	idents.input = createConflictFreeName("input", f, true)

	// Variables with names derived from the filename.
	base := strings.Map(func(r rune) rune {
		if !unicode.In(r, unicode.Digit, unicode.Letter) {
			return '_'
		}
		return r
	}, filepath.Base(fs.Position(f.Pos()).Filename))
	if !unicode.IsLetter(rune(base[0])) {
		// identifiers must start with letters
		base = "a" + base
	}
	idents.fileScope = createConflictFreeName(base+"_scope", f, false)
	idents.fileContents = createConflictFreeName(base+"_contents", f, false)
}
Example #5
// addFile adds the AST for a source file to the docReader.
// Adding the same AST multiple times is a no-op.
//
func (doc *docReader) addFile(src *ast.File) {
	// add package documentation
	if src.Doc != nil {
		// TODO(gri) This won't do the right thing if there is more
		//           than one file with package comments. Consider
		//           using ast.MergePackageFiles which handles these
		//           comments correctly (but currently loses BUG(...)
		//           comments).
		doc.doc = src.Doc
		src.Doc = nil // doc consumed - remove from ast.File node
	}

	// add all declarations
	for _, decl := range src.Decls {
		doc.addDecl(decl)
	}

	// collect BUG(...) comments
	for c := src.Comments; c != nil; c = c.Next {
		text := c.List[0].Text
		cstr := string(text)
		if m := bug_markers.ExecuteString(cstr); len(m) > 0 {
			// found a BUG comment; maybe empty
			if bstr := cstr[m[1]:]; bug_content.MatchString(bstr) {
				// non-empty BUG comment; collect comment without BUG prefix
				list := copyCommentList(c.List)
				list[0].Text = text[m[1]:]
				doc.bugs.Push(&ast.CommentGroup{list, nil})
			}
		}
	}
	src.Comments = nil // consumed unassociated comments - remove from ast.File node
}
Example #6
File: doc.go Project: aubonbeurre/gcc
// addFile adds the AST for a source file to the docReader.
// Adding the same AST multiple times is a no-op.
//
func (doc *docReader) addFile(src *ast.File) {
	// add package documentation
	if src.Doc != nil {
		doc.addDoc(src.Doc)
		src.Doc = nil // doc consumed - remove from ast.File node
	}

	// add all declarations
	for _, decl := range src.Decls {
		doc.addDecl(decl)
	}

	// collect BUG(...) comments
	for _, c := range src.Comments {
		text := c.List[0].Text
		if m := bug_markers.FindStringIndex(text); m != nil {
			// found a BUG comment; maybe empty
			if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
				// non-empty BUG comment; collect comment without BUG prefix
				list := copyCommentList(c.List)
				list[0].Text = text[m[1]:]
				doc.bugs = append(doc.bugs, &ast.CommentGroup{list})
			}
		}
	}
	src.Comments = nil // consumed unassociated comments - remove from ast.File node
}
Example #7
func insertIntVar(file *ast.File, name string, value int) {
	var before, after []ast.Decl

	if len(file.Decls) > 0 {
		hasImport := false
		if genDecl, ok := file.Decls[0].(*ast.GenDecl); ok {
			hasImport = genDecl.Tok == token.IMPORT
		}

		if hasImport {
			before, after = []ast.Decl{file.Decls[0]}, file.Decls[1:]
		} else {
			after = file.Decls
		}
	}

	file.Decls = append(before,
		&ast.GenDecl{
			Tok: token.VAR,
			Specs: []ast.Spec{
				&ast.ValueSpec{
					Names: []*ast.Ident{ast.NewIdent(name)},
					Type:  ast.NewIdent("int"),
					Values: []ast.Expr{
						&ast.BasicLit{
							Kind:  token.INT,
							Value: fmt.Sprintf("%d", value),
						},
					},
				},
			},
		},
	)
	file.Decls = append(file.Decls, after...)
}
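A minimal driver for insertIntVar, sketched here for illustration; it assumes the function above is in the same package, and the file name and source string are placeholders:

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"os"
)

func main() {
	fset := token.NewFileSet()
	src := "package main\n\nimport \"fmt\"\n\nfunc main() { fmt.Println(answer) }\n"
	file, err := parser.ParseFile(fset, "main.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	insertIntVar(file, "answer", 42) // adds `var answer int = 42` after the import decl
	printer.Fprint(os.Stdout, fset, file)
}

go/printer tolerates the zero positions on the inserted declaration, so the modified file prints cleanly.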
Example #8
File: build.go Project: phaikawl/gopherjs
func applyPatches(file *ast.File, fileSet *token.FileSet, importPath string) *ast.File {
	removeImport := func(path string) {
		for _, decl := range file.Decls {
			if d, ok := decl.(*ast.GenDecl); ok && d.Tok == token.IMPORT {
				for j, spec := range d.Specs {
					value := spec.(*ast.ImportSpec).Path.Value
					if value[1:len(value)-1] == path {
						d.Specs[j] = d.Specs[len(d.Specs)-1]
						d.Specs = d.Specs[:len(d.Specs)-1]
						return
					}
				}
			}
		}
	}

	removeFunction := func(name string) {
		for i, decl := range file.Decls {
			if d, ok := decl.(*ast.FuncDecl); ok {
				if d.Name.String() == name {
					file.Decls[i] = file.Decls[len(file.Decls)-1]
					file.Decls = file.Decls[:len(file.Decls)-1]
					return
				}
			}
		}
	}

	basename := filepath.Base(fileSet.Position(file.Pos()).Filename)
	switch {
	case importPath == "bytes_test" && basename == "equal_test.go":
		file, _ = parser.ParseFile(fileSet, basename, "package bytes_test", 0)

	case importPath == "crypto/rc4" && basename == "rc4_ref.go": // see https://codereview.appspot.com/40540049/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !amd64,!arm,!386\n\npackage rc4\n\n// XORKeyStream sets dst to the result of XORing src with the key stream.\n// Dst and src may be the same slice but otherwise should not overlap.\nfunc (c *Cipher) XORKeyStream(dst, src []byte) {\n  i, j := c.i, c.j\n  for k, v := range src {\n    i += 1\n    j += uint8(c.s[i])\n    c.s[i], c.s[j] = c.s[j], c.s[i]\n    dst[k] = v ^ uint8(c.s[uint8(c.s[i]+c.s[j])])\n  }\n  c.i, c.j = i, j\n}", 0)

	case importPath == "encoding/json" && basename == "stream_test.go":
		removeImport("net")
		removeFunction("TestBlocking")

	case importPath == "math/big" && basename == "arith.go": // see https://codereview.appspot.com/55470046/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file provides Go implementations of elementary multi-precision\n// arithmetic operations on word vectors. Needed for platforms without\n// assembly implementations of these routines.\n\npackage big\n\n// A Word represents a single digit of a multi-precision unsigned integer.\ntype Word uintptr\n\nconst (\n	// Compute the size _S of a Word in bytes.\n	_m    = ^Word(0)\n	_logS = _m>>8&1 + _m>>16&1 + _m>>32&1\n	_S    = 1 << _logS\n\n	_W = _S << 3 // word size in bits\n	_B = 1 << _W // digit base\n	_M = _B - 1  // digit mask\n\n	_W2 = _W / 2   // half word size in bits\n	_B2 = 1 << _W2 // half digit base\n	_M2 = _B2 - 1  // half digit mask\n)\n\n// ----------------------------------------------------------------------------\n// Elementary operations on words\n//\n// These operations are used by the vector operations below.\n\n// z1<<_W + z0 = x+y+c, with c == 0 or 1\nfunc addWW_g(x, y, c Word) (z1, z0 Word) {\n	yc := y + c\n	z0 = x + yc\n	if z0 < x || yc < y {\n		z1 = 1\n	}\n	return\n}\n\n// z1<<_W + z0 = x-y-c, with c == 0 or 1\nfunc subWW_g(x, y, c Word) (z1, z0 Word) {\n	yc := y + c\n	z0 = x - yc\n	if z0 > x || yc < y {\n		z1 = 1\n	}\n	return\n}\n\n// z1<<_W + z0 = x*y\n// Adapted from Warren, Hacker's Delight, p. 132.\nfunc mulWW_g(x, y Word) (z1, z0 Word) {\n	x0 := x & _M2\n	x1 := x >> _W2\n	y0 := y & _M2\n	y1 := y >> _W2\n	w0 := x0 * y0\n	t := x1*y0 + w0>>_W2\n	w1 := t & _M2\n	w2 := t >> _W2\n	w1 += x0 * y1\n	z1 = x1*y1 + w2 + w1>>_W2\n	z0 = x * y\n	return\n}\n\n// z1<<_W + z0 = x*y + c\nfunc mulAddWWW_g(x, y, c Word) (z1, z0 Word) {\n	z1, zz0 := mulWW(x, y)\n	if z0 = zz0 + c; z0 < zz0 {\n		z1++\n	}\n	return\n}\n\n// Length of x in bits.\nfunc bitLen_g(x Word) (n int) {\n	for ; x >= 0x8000; x >>= 16 {\n		n += 16\n	}\n	if x >= 0x80 {\n		x >>= 8\n		n += 8\n	}\n	if x >= 0x8 {\n		x >>= 4\n		n += 4\n	}\n	if x >= 0x2 {\n		x >>= 2\n		n += 2\n	}\n	if x >= 0x1 {\n		n++\n	}\n	return\n}\n\n// log2 computes the integer binary logarithm of x.\n// The result is the integer n for which 2^n <= x < 2^(n+1).\n// If x == 0, the result is -1.\nfunc log2(x Word) int {\n	return bitLen(x) - 1\n}\n\n// Number of leading zeros in x.\nfunc leadingZeros(x Word) uint {\n	return uint(_W - bitLen(x))\n}\n\n// q = (u1<<_W + u0 - r)/y\n// Adapted from Warren, Hacker's Delight, p. 
152.\nfunc divWW_g(u1, u0, v Word) (q, r Word) {\n	if u1 >= v {\n		return 1<<_W - 1, 1<<_W - 1\n	}\n\n	s := leadingZeros(v)\n	v <<= s\n\n	vn1 := v >> _W2\n	vn0 := v & _M2\n	un32 := u1<<s | u0>>(_W-s)\n	un10 := u0 << s\n	un1 := un10 >> _W2\n	un0 := un10 & _M2\n	q1 := un32 / vn1\n	rhat := un32 - q1*vn1\n\n	for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {\n		q1--\n		rhat += vn1\n		if rhat >= _B2 {\n			break\n		}\n	}\n\n	un21 := un32*_B2 + un1 - q1*v\n	q0 := un21 / vn1\n	rhat = un21 - q0*vn1\n\n	for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {\n		q0--\n		rhat += vn1\n		if rhat >= _B2 {\n			break\n		}\n	}\n\n	return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s\n}\n\nfunc addVV_g(z, x, y []Word) (c Word) {\n	for i := range z {\n		c, z[i] = addWW_g(x[i], y[i], c)\n	}\n	return\n}\n\nfunc subVV_g(z, x, y []Word) (c Word) {\n	for i := range z {\n		c, z[i] = subWW_g(x[i], y[i], c)\n	}\n	return\n}\n\nfunc addVW_g(z, x []Word, y Word) (c Word) {\n	c = y\n	for i := range z {\n		c, z[i] = addWW_g(x[i], c, 0)\n	}\n	return\n}\n\nfunc subVW_g(z, x []Word, y Word) (c Word) {\n	c = y\n	for i := range z {\n		c, z[i] = subWW_g(x[i], c, 0)\n	}\n	return\n}\n\nfunc shlVU_g(z, x []Word, s uint) (c Word) {\n	if n := len(z); n > 0 {\n		ŝ := _W - s\n		w1 := x[n-1]\n		c = w1 >> ŝ\n		for i := n - 1; i > 0; i-- {\n			w := w1\n			w1 = x[i-1]\n			z[i] = w<<s | w1>>ŝ\n		}\n		z[0] = w1 << s\n	}\n	return\n}\n\nfunc shrVU_g(z, x []Word, s uint) (c Word) {\n	if n := len(z); n > 0 {\n		ŝ := _W - s\n		w1 := x[0]\n		c = w1 << ŝ\n		for i := 0; i < n-1; i++ {\n			w := w1\n			w1 = x[i+1]\n			z[i] = w>>s | w1<<ŝ\n		}\n		z[n-1] = w1 >> s\n	}\n	return\n}\n\nfunc mulAddVWW_g(z, x []Word, y, r Word) (c Word) {\n	c = r\n	for i := range z {\n		c, z[i] = mulAddWWW_g(x[i], y, c)\n	}\n	return\n}\n\nfunc addMulVVW_g(z, x []Word, y Word) (c Word) {\n	for i := range z {\n		z1, z0 := mulAddWWW_g(x[i], y, z[i])\n		c, z[i] = addWW_g(z0, c, 0)\n		c += z1\n	}\n	return\n}\n\nfunc divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {\n	r = xn\n	for i := len(z) - 1; i >= 0; i-- {\n		z[i], r = divWW_g(r, x[i], y)\n	}\n	return\n}", 0)

	case importPath == "reflect_test" && basename == "all_test.go":
		removeImport("unsafe")
		removeFunction("TestAlignment")
		removeFunction("TestSliceOverflow")

	case importPath == "runtime" && strings.HasPrefix(basename, "zgoarch_"):
		file, _ = parser.ParseFile(fileSet, basename, "package runtime\nconst theGoarch = `js`\n", 0)

	case importPath == "sync/atomic_test" && basename == "atomic_test.go":
		removeFunction("TestUnaligned64")

	case importPath == "text/template/parse" && basename == "lex.go": // this patch will be removed as soon as goroutines are supported
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\npackage parse\nimport (\n	\"container/list\"\n	\"fmt\"\n	\"strings\"\n	\"unicode\"\n	\"unicode/utf8\"\n)\n// item represents a token or text string returned from the scanner.\ntype item struct {\n	typ itemType // The type of this item.\n	pos Pos      // The starting position, in bytes, of this item in the input string.\n	val string   // The value of this item.\n}\nfunc (i item) String() string {\n	switch {\n	case i.typ == itemEOF:\n		return \"EOF\"\n	case i.typ == itemError:\n		return i.val\n	case i.typ > itemKeyword:\n		return fmt.Sprintf(\"<%s>\", i.val)\n	case len(i.val) > 10:\n		return fmt.Sprintf(\"%.10q...\", i.val)\n	}\n	return fmt.Sprintf(\"%q\", i.val)\n}\n// itemType identifies the type of lex items.\ntype itemType int\nconst (\n	itemError        itemType = iota // error occurred; value is text of error\n	itemBool                         // boolean constant\n	itemChar                         // printable ASCII character; grab bag for comma etc.\n	itemCharConstant                 // character constant\n	itemComplex                      // complex constant (1+2i); imaginary is just a number\n	itemColonEquals                  // colon-equals (':=') introducing a declaration\n	itemEOF\n	itemField      // alphanumeric identifier starting with '.'\n	itemIdentifier // alphanumeric identifier not starting with '.'\n	itemLeftDelim  // left action delimiter\n	itemLeftParen  // '(' inside action\n	itemNumber     // simple number, including imaginary\n	itemPipe       // pipe symbol\n	itemRawString  // raw quoted string (includes quotes)\n	itemRightDelim // right action delimiter\n	itemRightParen // ')' inside action\n	itemSpace      // run of spaces separating arguments\n	itemString     // quoted string (includes quotes)\n	itemText       // plain text\n	itemVariable   // variable starting with '$', such as '$' or  '$1' or '$hello'\n	// Keywords appear after all the rest.\n	itemKeyword  // used only to delimit the keywords\n	itemDot      // the cursor, spelled '.'\n	itemDefine   // define keyword\n	itemElse     // else keyword\n	itemEnd      // end keyword\n	itemIf       // if keyword\n	itemNil      // the untyped nil constant, easiest to treat as a keyword\n	itemRange    // range keyword\n	itemTemplate // template keyword\n	itemWith     // with keyword\n)\nvar key = map[string]itemType{\n	\".\":        itemDot,\n	\"define\":   itemDefine,\n	\"else\":     itemElse,\n	\"end\":      itemEnd,\n	\"if\":       itemIf,\n	\"range\":    itemRange,\n	\"nil\":      itemNil,\n	\"template\": itemTemplate,\n	\"with\":     itemWith,\n}\nconst eof = -1\n// stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n// lexer holds the state of the scanner.\ntype lexer struct {\n	name       string     // the name of the input; used only for error reports\n	input      string     // the string being scanned\n	leftDelim  string     // start of action\n	rightDelim string     // end of action\n	state      stateFn    // the next lexing function to enter\n	pos        Pos        // current position in the input\n	start      Pos        // start position of this item\n	width      Pos        // width of last rune read from input\n	lastPos    Pos        // position of most recent item returned by nextItem\n	items      *list.List // scanned items\n	
parenDepth int        // nesting depth of ( ) exprs\n}\n// next returns the next rune in the input.\nfunc (l *lexer) next() rune {\n	if int(l.pos) >= len(l.input) {\n		l.width = 0\n		return eof\n	}\n	r, w := utf8.DecodeRuneInString(l.input[l.pos:])\n	l.width = Pos(w)\n	l.pos += l.width\n	return r\n}\n// peek returns but does not consume the next rune in the input.\nfunc (l *lexer) peek() rune {\n	r := l.next()\n	l.backup()\n	return r\n}\n// backup steps back one rune. Can only be called once per call of next.\nfunc (l *lexer) backup() {\n	l.pos -= l.width\n}\n// emit passes an item back to the client.\nfunc (l *lexer) emit(t itemType) {\n	l.items.PushBack(item{t, l.start, l.input[l.start:l.pos]})\n	l.start = l.pos\n}\n// ignore skips over the pending input before this point.\nfunc (l *lexer) ignore() {\n	l.start = l.pos\n}\n// accept consumes the next rune if it's from the valid set.\nfunc (l *lexer) accept(valid string) bool {\n	if strings.IndexRune(valid, l.next()) >= 0 {\n		return true\n	}\n	l.backup()\n	return false\n}\n// acceptRun consumes a run of runes from the valid set.\nfunc (l *lexer) acceptRun(valid string) {\n	for strings.IndexRune(valid, l.next()) >= 0 {\n	}\n	l.backup()\n}\n// lineNumber reports which line we're on, based on the position of\n// the previous item returned by nextItem. Doing it this way\n// means we don't have to worry about peek double counting.\nfunc (l *lexer) lineNumber() int {\n	return 1 + strings.Count(l.input[:l.lastPos], \"\\n\")\n}\n// errorf returns an error token and terminates the scan by passing\n// back a nil pointer that will be the next state, terminating l.nextItem.\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n	l.items.PushBack(item{itemError, l.start, fmt.Sprintf(format, args...)})\n	return nil\n}\n// nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n	element := l.items.Front()\n	for element == nil {\n		l.state = l.state(l)\n		element = l.items.Front()\n	}\n	l.items.Remove(element)\n	item := element.Value.(item)\n	l.lastPos = item.pos\n	return item\n}\n// lex creates a new scanner for the input string.\nfunc lex(name, input, left, right string) *lexer {\n	if left == \"\" {\n		left = leftDelim\n	}\n	if right == \"\" {\n		right = rightDelim\n	}\n	l := &lexer{\n		name:       name,\n		input:      input,\n		leftDelim:  left,\n		rightDelim: right,\n		items:      list.New(),\n	}\n	l.state = lexText\n	return l\n}\n// state functions\nconst (\n	leftDelim    = \"{{\"\n	rightDelim   = \"}}\"\n	leftComment  = \"/*\"\n	rightComment = \"*/\"\n)\n// lexText scans until an opening action delimiter, \"{{\".\nfunc lexText(l *lexer) stateFn {\n	for {\n		if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {\n			if l.pos > l.start {\n				l.emit(itemText)\n			}\n			return lexLeftDelim\n		}\n		if l.next() == eof {\n			break\n		}\n	}\n	// Correctly reached EOF.\n	if l.pos > l.start {\n		l.emit(itemText)\n	}\n	l.emit(itemEOF)\n	return nil\n}\n// lexLeftDelim scans the left delimiter, which is known to be present.\nfunc lexLeftDelim(l *lexer) stateFn {\n	l.pos += Pos(len(l.leftDelim))\n	if strings.HasPrefix(l.input[l.pos:], leftComment) {\n		return lexComment\n	}\n	l.emit(itemLeftDelim)\n	l.parenDepth = 0\n	return lexInsideAction\n}\n// lexComment scans a comment. 
The left comment marker is known to be present.\nfunc lexComment(l *lexer) stateFn {\n	l.pos += Pos(len(leftComment))\n	i := strings.Index(l.input[l.pos:], rightComment)\n	if i < 0 {\n		return l.errorf(\"unclosed comment\")\n	}\n	l.pos += Pos(i + len(rightComment))\n	if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n		return l.errorf(\"comment ends before closing delimiter\")\n	}\n	l.pos += Pos(len(l.rightDelim))\n	l.ignore()\n	return lexText\n}\n// lexRightDelim scans the right delimiter, which is known to be present.\nfunc lexRightDelim(l *lexer) stateFn {\n	l.pos += Pos(len(l.rightDelim))\n	l.emit(itemRightDelim)\n	return lexText\n}\n// lexInsideAction scans the elements inside action delimiters.\nfunc lexInsideAction(l *lexer) stateFn {\n	// Either number, quoted string, or identifier.\n	// Spaces separate arguments; runs of spaces turn into itemSpace.\n	// Pipe symbols separate and are emitted.\n	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n		if l.parenDepth == 0 {\n			return lexRightDelim\n		}\n		return l.errorf(\"unclosed left paren\")\n	}\n	switch r := l.next(); {\n	case r == eof || isEndOfLine(r):\n		return l.errorf(\"unclosed action\")\n	case isSpace(r):\n		return lexSpace\n	case r == ':':\n		if l.next() != '=' {\n			return l.errorf(\"expected :=\")\n		}\n		l.emit(itemColonEquals)\n	case r == '|':\n		l.emit(itemPipe)\n	case r == '\"':\n		return lexQuote\n	case r == '`':\n		return lexRawQuote\n	case r == '$':\n		return lexVariable\n	case r == '\\'':\n		return lexChar\n	case r == '.':\n		// special look-ahead for \".field\" so we don't break l.backup().\n		if l.pos < Pos(len(l.input)) {\n			r := l.input[l.pos]\n			if r < '0' || '9' < r {\n				return lexField\n			}\n		}\n		fallthrough // '.' can start a number.\n	case r == '+' || r == '-' || ('0' <= r && r <= '9'):\n		l.backup()\n		return lexNumber\n	case isAlphaNumeric(r):\n		l.backup()\n		return lexIdentifier\n	case r == '(':\n		l.emit(itemLeftParen)\n		l.parenDepth++\n		return lexInsideAction\n	case r == ')':\n		l.emit(itemRightParen)\n		l.parenDepth--\n		if l.parenDepth < 0 {\n			return l.errorf(\"unexpected right paren %#U\", r)\n		}\n		return lexInsideAction\n	case r <= unicode.MaxASCII && unicode.IsPrint(r):\n		l.emit(itemChar)\n		return lexInsideAction\n	default:\n		return l.errorf(\"unrecognized character in action: %#U\", r)\n	}\n	return lexInsideAction\n}\n// lexSpace scans a run of space characters.\n// One space has already been seen.\nfunc lexSpace(l *lexer) stateFn {\n	for isSpace(l.peek()) {\n		l.next()\n	}\n	l.emit(itemSpace)\n	return lexInsideAction\n}\n// lexIdentifier scans an alphanumeric.\nfunc lexIdentifier(l *lexer) stateFn {\nLoop:\n	for {\n		switch r := l.next(); {\n		case isAlphaNumeric(r):\n			// absorb.\n		default:\n			l.backup()\n			word := l.input[l.start:l.pos]\n			if !l.atTerminator() {\n				return l.errorf(\"bad character %#U\", r)\n			}\n			switch {\n			case key[word] > itemKeyword:\n				l.emit(key[word])\n			case word[0] == '.':\n				l.emit(itemField)\n			case word == \"true\", word == \"false\":\n				l.emit(itemBool)\n			default:\n				l.emit(itemIdentifier)\n			}\n			break Loop\n		}\n	}\n	return lexInsideAction\n}\n// lexField scans a field: .Alphanumeric.\n// The . 
has been scanned.\nfunc lexField(l *lexer) stateFn {\n	return lexFieldOrVariable(l, itemField)\n}\n// lexVariable scans a Variable: $Alphanumeric.\n// The $ has been scanned.\nfunc lexVariable(l *lexer) stateFn {\n	if l.atTerminator() { // Nothing interesting follows -> \"$\".\n		l.emit(itemVariable)\n		return lexInsideAction\n	}\n	return lexFieldOrVariable(l, itemVariable)\n}\n// lexVariable scans a field or variable: [.$]Alphanumeric.\n// The . or $ has been scanned.\nfunc lexFieldOrVariable(l *lexer, typ itemType) stateFn {\n	if l.atTerminator() { // Nothing interesting follows -> \".\" or \"$\".\n		if typ == itemVariable {\n			l.emit(itemVariable)\n		} else {\n			l.emit(itemDot)\n		}\n		return lexInsideAction\n	}\n	var r rune\n	for {\n		r = l.next()\n		if !isAlphaNumeric(r) {\n			l.backup()\n			break\n		}\n	}\n	if !l.atTerminator() {\n		return l.errorf(\"bad character %#U\", r)\n	}\n	l.emit(typ)\n	return lexInsideAction\n}\n// atTerminator reports whether the input is at valid termination character to\n// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases\n// like \"$x+2\" not being acceptable without a space, in case we decide one\n// day to implement arithmetic.\nfunc (l *lexer) atTerminator() bool {\n	r := l.peek()\n	if isSpace(r) || isEndOfLine(r) {\n		return true\n	}\n	switch r {\n	case eof, '.', ',', '|', ':', ')', '(':\n		return true\n	}\n	// Does r start the delimiter? This can be ambiguous (with delim==\"//\", $x/2 will\n	// succeed but should fail) but only in extremely rare cases caused by willfully\n	// bad choice of delimiter.\n	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {\n		return true\n	}\n	return false\n}\n// lexChar scans a character constant. The initial quote is already\n// scanned. Syntax checking is done by the parser.\nfunc lexChar(l *lexer) stateFn {\nLoop:\n	for {\n		switch l.next() {\n		case '\\\\':\n			if r := l.next(); r != eof && r != '\\n' {\n				break\n			}\n			fallthrough\n		case eof, '\\n':\n			return l.errorf(\"unterminated character constant\")\n		case '\\'':\n			break Loop\n		}\n	}\n	l.emit(itemCharConstant)\n	return lexInsideAction\n}\n// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This\n// isn't a perfect number scanner - for instance it accepts \".\" and \"0x0.2\"\n// and \"089\" - but when it's wrong the input is invalid and the parser (via\n// strconv) will notice.\nfunc lexNumber(l *lexer) stateFn {\n	if !l.scanNumber() {\n		return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n	}\n	if sign := l.peek(); sign == '+' || sign == '-' {\n		// Complex: 1+2i. 
No spaces, must end in 'i'.\n		if !l.scanNumber() || l.input[l.pos-1] != 'i' {\n			return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n		}\n		l.emit(itemComplex)\n	} else {\n		l.emit(itemNumber)\n	}\n	return lexInsideAction\n}\nfunc (l *lexer) scanNumber() bool {\n	// Optional leading sign.\n	l.accept(\"+-\")\n	// Is it hex?\n	digits := \"0123456789\"\n	if l.accept(\"0\") && l.accept(\"xX\") {\n		digits = \"0123456789abcdefABCDEF\"\n	}\n	l.acceptRun(digits)\n	if l.accept(\".\") {\n		l.acceptRun(digits)\n	}\n	if l.accept(\"eE\") {\n		l.accept(\"+-\")\n		l.acceptRun(\"0123456789\")\n	}\n	// Is it imaginary?\n	l.accept(\"i\")\n	// Next thing mustn't be alphanumeric.\n	if isAlphaNumeric(l.peek()) {\n		l.next()\n		return false\n	}\n	return true\n}\n// lexQuote scans a quoted string.\nfunc lexQuote(l *lexer) stateFn {\nLoop:\n	for {\n		switch l.next() {\n		case '\\\\':\n			if r := l.next(); r != eof && r != '\\n' {\n				break\n			}\n			fallthrough\n		case eof, '\\n':\n			return l.errorf(\"unterminated quoted string\")\n		case '\"':\n			break Loop\n		}\n	}\n	l.emit(itemString)\n	return lexInsideAction\n}\n// lexRawQuote scans a raw quoted string.\nfunc lexRawQuote(l *lexer) stateFn {\nLoop:\n	for {\n		switch l.next() {\n		case eof, '\\n':\n			return l.errorf(\"unterminated raw quoted string\")\n		case '`':\n			break Loop\n		}\n	}\n	l.emit(itemRawString)\n	return lexInsideAction\n}\n// isSpace reports whether r is a space character.\nfunc isSpace(r rune) bool {\n	return r == ' ' || r == '\\t'\n}\n// isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n	return r == '\\r' || r == '\\n'\n}\n// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.\nfunc isAlphaNumeric(r rune) bool {\n	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}", 0)
	}

	return file
}
Example #9
func offsetLine(fset *token.FileSet, af *ast.File, offset int) (line int) {
	defer func() {
		if err := recover(); err != nil {
			line = 0
		}
	}()
	return fset.File(af.Pos()).Position(token.Pos(offset)).Line
}
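Note that token.Pos(offset) does not account for the file's base offset in the token.FileSet, so the conversion is only right when the file happens to sit at the set's first base; the deferred recover masks the out-of-range panics that otherwise result. A base-safe variant (a sketch, not from the original project) converts through token.File.Pos:

func offsetLineSafe(fset *token.FileSet, af *ast.File, offset int) int {
	tf := fset.File(af.Pos())
	if tf == nil || offset < 0 || offset > tf.Size() {
		return 0
	}
	return tf.Position(tf.Pos(offset)).Line // tf.Pos adds the file's base
}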
Example #10
func parseFileset(wks filesystem.WorkspaceFS, f *ast.File, fs *token.FileSet) []TokenPosition {
	var res []TokenPosition

	if f.Name != nil {
		res = appendTokenPosition(wks, res, fs.Position(f.Pos()), f.Name.Name, "", PACKAGE)
	}
	for _, i := range f.Imports {
		if i.Path != nil {
			res = appendTokenPosition(wks, res, fs.Position(f.Pos()), i.Path.Value, "", IMPORT)
		}
	}
	for _, d := range f.Decls {
		switch x := d.(type) {
		case *ast.FuncDecl:
			pos := fs.Position(x.Pos())
			if x.Recv != nil && len(x.Recv.List) > 0 {
				var recv *string
				n := x.Recv.List[0].Type
				id, ok := n.(*ast.Ident)
				if !ok {
					sid, ok := n.(*ast.StarExpr)
					if ok {
						id, ok = sid.X.(*ast.Ident)
						if ok {
							r := fmt.Sprintf("*%s", id.Name)
							recv = &r
						}
					}
				} else {
					recv = &id.Name
				}
				res = appendTokenPosition(wks, res, pos, x.Name.Name, *recv, METH)
			} else {
				res = appendTokenPosition(wks, res, pos, x.Name.Name, "", FUNC)
			}

		case *ast.GenDecl:
			switch x.Tok {
			case token.CONST:
				for _, s := range x.Specs {
					vs := s.(*ast.ValueSpec)
					res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Names[0].Name, "", CONST)
				}
			case token.VAR:
				for _, s := range x.Specs {
					vs := s.(*ast.ValueSpec)
					res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Names[0].Name, "", VAR)
				}
			case token.TYPE:
				for _, s := range x.Specs {
					vs := s.(*ast.TypeSpec)
					res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Name.Name, "", TYPE)
				}
			}
		}
	}
	return res
}
Example #11
func (scp *schemaParser) packageForFile(gofile *ast.File) (*loader.PackageInfo, error) {
	for pkg, pkgInfo := range scp.program.AllPackages {
		if pkg.Name() == gofile.Name.Name {
			return pkgInfo, nil
		}
	}
	fn := scp.program.Fset.File(gofile.Pos()).Name()
	return nil, fmt.Errorf("unable to determine package for %s", fn)
}
Example #12
File: fix.go Project: ssrl/go
// addImport adds the import path to the file f, if absent.
func addImport(f *ast.File, path string) {
	if imports(f, path) {
		return
	}

	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(path),
		},
	}

	var impdecl *ast.GenDecl

	// Find an import decl to add to.
	for _, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)

		if ok && gen.Tok == token.IMPORT {
			impdecl = gen
			break
		}
	}

	// No import decl found.  Add one.
	if impdecl == nil {
		impdecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[1:], f.Decls)
		f.Decls[0] = impdecl
	}

	// Ensure the import decl has parentheses, if needed.
	if len(impdecl.Specs) > 0 && !impdecl.Lparen.IsValid() {
		impdecl.Lparen = impdecl.Pos()
	}

	// Assume the import paths are alphabetically ordered.
	// If they are not, the result is ugly, but legal.
	insertAt := len(impdecl.Specs) // default to end of specs
	for i, spec := range impdecl.Specs {
		impspec := spec.(*ast.ImportSpec)
		if importPath(impspec) > path {
			insertAt = i
			break
		}
	}

	impdecl.Specs = append(impdecl.Specs, nil)
	copy(impdecl.Specs[insertAt+1:], impdecl.Specs[insertAt:])
	impdecl.Specs[insertAt] = newImport

	f.Imports = append(f.Imports, newImport)
}
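The imports and importPath helpers are defined elsewhere in fix.go. A typical call site, assuming f comes from go/parser, might be:

fset := token.NewFileSet()
f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
if err != nil {
	log.Fatal(err)
}
addImport(f, "strings") // no-op if "strings" is already imported

The same operation is available today as astutil.AddImport from golang.org/x/tools/go/ast/astutil, which Example #14 below uses.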
Example #13
File: xsdgen.go Project: rilinor/go-xml
func mergeASTFile(dst, src *ast.File) *ast.File {
	if dst == nil {
		return src
	}
	if dst.Doc != nil {
		dst.Doc = src.Doc
	}
	dst.Decls = append(dst.Decls, src.Decls...)
	return dst
}
Example #14
File: goinline.go Project: charl/go-inline
// Inline replaces each instance of identifier k with v.Ident in ast.File f,
// for k, v := range m.
// For all inlines that were triggered, it also adds imports from v.Imports to f.
// In addition, it removes top level type declarations of the form
// type k ...
// for all k in m.
//
// Every k in m should be a valid identifier.
// Every v.Ident should be a valid expression.
func Inline(fset *token.FileSet, f *ast.File, m map[string]Target) error {
	// Build the inline map.
	im := map[string]reflect.Value{}
	for k, v := range m {
		expr, err := parser.ParseExpr(k)
		if err != nil {
			return fmt.Errorf("failed to parse `%s`: %s", k, err)
		}
		if _, ok := expr.(*ast.Ident); !ok {
			return fmt.Errorf("expected identifier, got %s which is %T", k, expr)
		}
		expr, err = parser.ParseExpr(v.Ident)
		if err != nil {
			return fmt.Errorf("failed to parse `%s`: %s", v.Ident, err)
		}
		s := v.Ident
		if _, ok := expr.(*ast.StarExpr); ok {
			s = fmt.Sprintf("(%s)", s)
		}
		im[k] = reflect.ValueOf(ast.Ident{Name: s})
	}
	// Filter `type XXX ...` declarations out if we are inlining XXX.
	cmap := ast.NewCommentMap(fset, f, f.Comments)
	to := 0
	for _, d := range f.Decls {
		skip := false
		if t, ok := d.(*ast.GenDecl); ok {
			for _, s := range t.Specs {
				ts, ok := s.(*ast.TypeSpec)
				if !ok {
					continue
				}
				if _, ok = im[ts.Name.String()]; ok {
					skip = true
				}
			}
		}
		if !skip {
			f.Decls[to] = d
			to++
		}
	}
	if to != len(f.Decls) {
		f.Decls = f.Decls[:to]
		// Remove comments for the declarations that were filtered out.
		f.Comments = cmap.Filter(f).Comments()
	}
	// Add imports for the inlines that were triggered.
	for k := range inline(im, f) {
		for _, imp := range m[k].Imports {
			astutil.AddImport(fset, f, imp)
		}
	}
	return nil
}
Example #15
func extractMultipleStatementsAsFunc(
	astFile *ast.File,
	fileSet *token.FileSet,
	stmtsToExtract []ast.Node,
	parentNode ast.Node,
	extractedFuncName string) {
	params := varIdentsUsedIn(stmtsToExtract)
	varsDeclaredWithinStmtsToExtract := varIdentsDeclaredWithin(stmtsToExtract)
	util.MapStringAstIdentRemoveKeys(params, namesOf(varsDeclaredWithinStmtsToExtract))
	util.MapStringAstIdentRemoveKeys(params, namesOf(globalVarIdents(astFile)))

	allStmts := stmtsFromBlockStmt(parentNode)
	indexOfExtractedStmt := indexOf(stmtsToExtract[0].(ast.Stmt), *allStmts)
	varsUsedAfterwards := overlappingVarsIdentsUsedIn((*allStmts)[indexOfExtractedStmt+len(stmtsToExtract):], varsDeclaredWithinStmtsToExtract)

	newStmt := funcCallStmt(varsUsedAfterwards, extractedFuncName, params, (*allStmts)[indexOfExtractedStmt].Pos())
	replaceStmtsWithFuncCallStmt(newStmt,
		allStmts,
		indexOfExtractedStmt, len(stmtsToExtract))

	areaRemoved := areaRemoved(fileSet, (stmtsToExtract)[0].Pos(), (stmtsToExtract)[len(stmtsToExtract)-1].End())
	lineLengths := lineLengthsFrom(fileSet)
	lineNum, numLinesToCut, newLineLength := replacementModifications(fileSet, (stmtsToExtract)[0].Pos(), (stmtsToExtract)[len(stmtsToExtract)-1].End(), newStmt.End(), lineLengths, areaRemoved)

	shiftPosesAfterPos(astFile, newStmt, (stmtsToExtract)[len(stmtsToExtract)-1].End(), newStmt.End()-stmtsToExtract[len(stmtsToExtract)-1].End())

	multipleStmtFuncDecl := CopyNode(multipleStmtFuncDeclWith(
		extractedFuncName,
		fieldsFrom(params),
		stmtsFromNodes(stmtsToExtract),
		exprsFrom(varsUsedAfterwards),
	)).(*ast.FuncDecl)
	var moveOffset token.Pos
	RecalcPoses(multipleStmtFuncDecl, astFile.End()+2, &moveOffset, 0)
	astFile.Decls = append(astFile.Decls, multipleStmtFuncDecl)

	areaToBeAppended := insertionModificationsForStmts(astFile, multipleStmtFuncDecl, areaRemoved, exprsFrom(varsUsedAfterwards))

	lineLengths = append(
		lineLengths[:lineNum+1],
		lineLengths[lineNum+1+numLinesToCut:]...)
	lineLengths[lineNum] = newLineLength
	lineLengths = append(lineLengths, areaToBeAppended...)

	newFileSet := token.NewFileSet()
	newFileSet.AddFile(fileSet.File(1).Name(), 1, int(astFile.End()))
	newFileSet.File(1).SetLines(ConvertLineLengthsToLineOffsets(lineLengths))
	*fileSet = *newFileSet

	moveComments(astFile, moveOffset /*, needs a range to restrict which comments to move*/)
}
Example #16
File: fix.go Project: h8liu/golang
// deleteImport deletes the import path from the file f, if present.
func deleteImport(f *ast.File, path string) (deleted bool) {
	oldImport := importSpec(f, path)

	// Find the import node that imports path, if any.
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j, spec := range gen.Specs {
			impspec := spec.(*ast.ImportSpec)
			if oldImport != impspec {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				// We deleted an entry but now there will be
				// a blank line-sized hole where the import was.
				// Close the hole by making the previous
				// import appear to "end" where this one did.
				gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End()
			}
			break
		}
	}

	// Delete it from f.Imports.
	for i, imp := range f.Imports {
		if imp == oldImport {
			copy(f.Imports[i:], f.Imports[i+1:])
			f.Imports = f.Imports[:len(f.Imports)-1]
			break
		}
	}

	return
}
Example #17
File: imports.go Project: petar/vitamix
func removeImport(file *ast.File, ipath string) {
	var j int
	for j < len(file.Decls) {
		gen, ok := file.Decls[j].(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			j++
			continue
		}
		var i int
		for i < len(gen.Specs) {
			impspec := gen.Specs[i].(*ast.ImportSpec)
			if importPath(impspec) != ipath {
				i++
				continue
			}
			// If we found a match, pop the spec from gen.Specs
			gen.Specs[i] = gen.Specs[len(gen.Specs)-1]
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
		}
		if len(gen.Specs) > 0 {
			j++
			continue
		}
		// Remove entire import decl if no imports left in it
		file.Decls[j] = file.Decls[len(file.Decls)-1]
		file.Decls = file.Decls[:len(file.Decls)-1]
	}
}
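Examples #16 and #17 show the two standard slice-deletion strategies: copy-delete preserves declaration order, while swap-delete is O(1) but reorders. In generic form (an illustrative sketch, not from either project):

// copy-delete: shift the tail left; order is preserved (Example #16).
func deleteAt[T any](s []T, i int) []T {
	copy(s[i:], s[i+1:])
	return s[:len(s)-1]
}

// swap-delete: move the last element into the hole; order is lost (Example #17).
func swapDeleteAt[T any](s []T, i int) []T {
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}

Note that removeImport deliberately does not advance i after a removal, so the spec swapped into position i is checked as well.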
Example #18
func transformRegisterPkgValues(file *ast.File, typeNames []string) {

	// Create init function declaration
	fdecl := &ast.FuncDecl{
		Doc:  nil,
		Recv: nil,
		Name: &ast.Ident{Name: "init"},
		Type: &ast.FuncType{},
		Body: &ast.BlockStmt{},
	}
	file.Decls = append(file.Decls, fdecl)

	// Add type registrations to fdecl.Body.List
	for _, name := range typeNames {
		stmt := &ast.ExprStmt{
			X: &ast.CallExpr{
				Fun: &ast.SelectorExpr{
					X:   &ast.Ident{Name: "circuit"}, // Refers to import circuit/use/circuit
					Sel: &ast.Ident{Name: "RegisterValue"},
				},
				Args: []ast.Expr{
					&ast.CompositeLit{
						Type: &ast.Ident{Name: name},
					},
				},
			},
		}
		fdecl.Body.List = append(fdecl.Body.List, stmt)
	}

}
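For typeNames = []string{"Frame", "Packet"}, the init declaration appended above pretty-prints roughly as:

func init() {
	circuit.RegisterValue(Frame{})
	circuit.RegisterValue(Packet{})
}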
Example #19
File: main.go Project: rexposadas/gx
func sortDecls(merged *ast.File) {
	var sortedDecls []ast.Decl
	for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && x.Tok == token.PACKAGE {
			sortedDecls = append(sortedDecls, decl)
		}
	}
	/*for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && x.Tok == token.IMPORT {
			sortedDecls = append(sortedDecls, decl)
			goon.DumpExpr(decl)
		}
	}*/
	var specs []ast.Spec
	for _, importSpec := range merged.Imports {
		if importSpec.Name != nil && importSpec.Name.Name == "." {
			continue
		}
		importSpec.EndPos = 0
		specs = append(specs, importSpec)
	}
	sortedDecls = append(sortedDecls, &ast.GenDecl{
		Tok:    token.IMPORT,
		Lparen: (token.Pos)(1), // Needs to be non-zero to be considered as a group.
		Specs:  specs,
	})
	//goon.DumpExpr(sortedDecls[len(sortedDecls)-1])
	for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && (x.Tok == token.IMPORT || x.Tok == token.PACKAGE) {
			continue
		}
		sortedDecls = append(sortedDecls, decl)
	}
	merged.Decls = sortedDecls
}
Example #20
func InstrumentFunctionsAndInterfaces(f *ast.File) bool {
	addMock4goImport := false

	ast.Inspect(f, func(n ast.Node) bool {
		switch x := n.(type) {
		case *ast.FuncDecl:
			if instrumentFunction(x) {
				addMock4goImport = true
			}
			if x.Recv != nil {
				// fieldList := x.Recv.List[0]
				// name := fieldList.Names[0]
				// with receiver
			} else {
				// without receiver
			}
		case *ast.GenDecl:
			if x.Tok == token.TYPE {
				typeSpec := x.Specs[0].(*ast.TypeSpec)
				if interfaceType, ok := typeSpec.Type.(*ast.InterfaceType); ok {
					if interfaceType.Incomplete {
						// TODO: what should we do here
						panic("incomplete interface type")
					}
					decls := instrumentInterface(typeSpec.Name.Name, interfaceType)
					addMock4goImport = true
					f.Decls = append(f.Decls, decls...)
				}
			}
		}
		return true
	})

	return addMock4goImport
}
Example #21
File: bolt.go Project: tav/bolt
func addImport(file *ast.File, pkg string) {
	decl := &ast.GenDecl{
		Tok: token.IMPORT,
	}
	file.Decls = append(file.Decls, nil)
	copy(file.Decls[1:], file.Decls[:])
	file.Decls[0] = decl
	spec := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: pkg,
		},
	}
	decl.Specs = append(decl.Specs, spec)
	file.Imports = append(file.Imports, spec)
}
Example #22
File: find.go Project: charlievieth/define
// nodeAtOffset returns the ast.Node for the given offset.
// Node types: *ast.Ident, *ast.SelectorExpr, *ast.ImportSpec
func nodeAtOffset(af *ast.File, fset *token.FileSet, offset int) (ast.Node, error) {
	file := fset.File(af.Pos())
	if file == nil {
		return nil, errors.New("ast.File not in token.FileSet")
	}
	// Prevent file.Pos from panicking.
	if offset < 0 || file.Size() < offset {
		return nil, fmt.Errorf("invalid offset: %d", offset)
	}
	v := &offsetVisitor{pos: file.Pos(offset)}
	ast.Walk(v, af)
	if v.node == nil {
		return nil, fmt.Errorf("no node at offset: %d", offset)
	}
	return v.node, nil
}
Example #23
File: fix.go Project: ssrl/go
// deleteImport deletes the import path from the file f, if present.
func deleteImport(f *ast.File, path string) {
	oldImport := importSpec(f, path)

	// Find the import node that imports path, if any.
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j, spec := range gen.Specs {
			impspec := spec.(*ast.ImportSpec)

			if oldImport != impspec {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}

			break
		}
	}

	// Delete it from f.Imports.
	for i, imp := range f.Imports {
		if imp == oldImport {
			copy(f.Imports[i:], f.Imports[i+1:])
			f.Imports = f.Imports[:len(f.Imports)-1]
			break
		}
	}
}
Example #24
// fileExports removes unexported declarations from src in place.
//
func (r *reader) fileExports(src *ast.File) {
	j := 0
	for _, d := range src.Decls {
		if r.filterDecl(d) {
			src.Decls[j] = d
			j++
		}
	}
	src.Decls = src.Decls[0:j]
}
Example #25
func removeEmptyDeclGroups(f *ast.File) {
	i := 0
	for _, d := range f.Decls {
		if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) {
			f.Decls[i] = d
			i++
		}
	}
	f.Decls = f.Decls[:i]
}
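Examples #24 and #25 are both instances of the zero-allocation in-place filter idiom. In generic form (a sketch for illustration only):

func filterInPlace[T any](s []T, keep func(T) bool) []T {
	j := 0
	for _, v := range s {
		if keep(v) {
			s[j] = v // safe: j never overtakes the read index
			j++
		}
	}
	return s[:j]
}

With it, fileExports reduces to src.Decls = filterInPlace(src.Decls, r.filterDecl).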
Example #26
func (p *printer) file(src *ast.File) {
	p.setComment(src.Doc)
	p.print(src.Pos(), token.PACKAGE, blank)
	p.expr(src.Name, ignoreMultiLine)

	if len(src.Decls) > 0 {
		tok := token.ILLEGAL
		for _, d := range src.Decls {
			prev := tok
			tok = declToken(d)
			// if the declaration token changed (e.g., from CONST to TYPE)
			// print an empty line between top-level declarations
			min := 1
			if prev != tok {
				min = 2
			}
			p.linebreak(d.Pos().Line, min, ignore, false)
			p.decl(d, ignoreMultiLine)
		}
	}

	p.print(newline)
}
Example #27
File: rewrite.go Project: postfix/prewrite
// RewriteImportComments rewrites package import comments (https://golang.org/s/go14customimport)
func RewriteImportComments(f *ast.File, fset *token.FileSet, prefix string, remove bool) (changed bool, err error) {
	pkgpos := fset.Position(f.Package)
	// Print the AST.
	// ast.Print(fset, f)
	newcommentgroups := make([]*ast.CommentGroup, 0)
	for _, c := range f.Comments {
		commentpos := fset.Position(c.Pos())
		// keep the comment if we are not on the "package <X>" line
		// or the comment after the package statement does not look like import comment
		if commentpos.Line != pkgpos.Line ||
			!strings.HasPrefix(c.Text(), `import "`) {
			newcommentgroups = append(newcommentgroups, c)
			continue
		}
		parts := strings.Split(strings.Trim(c.Text(), "\n\r\t "), " ")
		oldimp, err := strconv.Unquote(parts[1])
		if err != nil {
			log.Fatalf("Error unquoting import value [%v] - %s\n", parts[1], err)
		}

		if remove {
			// the prefix is not there = nothing to remove, keep the comment
			if !strings.HasPrefix(oldimp, prefix) {
				newcommentgroups = append(newcommentgroups, c)
				continue
			}
		} else {
			// the prefix is already in the import path, keep the comment
			if strings.HasPrefix(oldimp, prefix) {
				newcommentgroups = append(newcommentgroups, c)
				continue
			}
		}
		newimp := ""
		if remove {
			newimp = oldimp[len(prefix):]
		} else {
			newimp = prefix + oldimp
		}
		changed = true
		c2 := ast.Comment{Slash: c.Pos(), Text: `// import ` + strconv.Quote(newimp)}
		cg := ast.CommentGroup{List: []*ast.Comment{&c2}}
		newcommentgroups = append(newcommentgroups, &cg)
	}
	// change the AST only if there are pending mods
	if changed {
		f.Comments = newcommentgroups
	}
	return changed, nil
}
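A sketch of a call site, with a placeholder prefix; f and fset come from go/parser with parser.ParseComments set:

// rewrite `package foo // import "foo"` to `// import "corp.example/foo"`
changed, err := RewriteImportComments(f, fset, "corp.example/", false)
if err != nil {
	log.Fatal(err)
}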
Example #28
File: builder.go Project: dasnook/godog
func (b *builder) deleteMainFunc(f *ast.File) {
	var decls []ast.Decl
	for _, d := range f.Decls {
		fun, ok := d.(*ast.FuncDecl)
		if !ok {
			decls = append(decls, d)
			continue
		}
		if fun.Name.Name != "main" {
			decls = append(decls, fun)
		}
	}
	f.Decls = decls
}
Example #29
File: json2ast.go Project: hamfist/json2go
func Json2Ast(jsonReader io.Reader) (*ast.File, error) {
	var v map[string]interface{}
	var file *ast.File
	var err error

	dec := json.NewDecoder(jsonReader)

	file = &ast.File{
		Name: ast.NewIdent("main"),
	}

	if err := dec.Decode(&v); err != nil {
		return nil, err
	}

	var decls []ast.Decl
	if decls, err = convertMap("Top", v); err != nil {
		return nil, err
	}

	file.Decls = decls

	return file, nil
}
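convertMap is not shown here; assuming it returns declarations for the decoded JSON object, a minimal use looks like:

f, err := Json2Ast(strings.NewReader(`{"Name": "x", "Count": 1}`))
if err != nil {
	log.Fatal(err)
}
printer.Fprint(os.Stdout, token.NewFileSet(), f) // the position-free AST still prints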
Example #30
func AddMock4goImport(f *ast.File) {
	importSpec := &ast.ImportSpec{
		Name: makeIdent("mock4go"),
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: fmt.Sprintf("%#v", Mock4goImport),
		},
	}

	importDecl := &ast.GenDecl{
		Tok: token.IMPORT, Specs: []ast.Spec{importSpec},
	}

	f.Decls = append([]ast.Decl{importDecl}, f.Decls...)
}
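The Value field relies on fmt.Sprintf("%#v", Mock4goImport) rendering a string constant as a quoted Go literal, which is the form ast.ImportSpec.Path.Value expects; for ordinary import paths it matches strconv.Quote. A self-contained check (the path is a placeholder):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const imp = "github.com/example/mock4go"
	fmt.Println(fmt.Sprintf("%#v", imp) == strconv.Quote(imp)) // true
}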