Commit 2ce57ec1 authored by Robert Griesemer

gofmt-ify src/pkg/go (excluding printer directory due to pending CL,
and parser.go and scanner_test.go which have minor formatting issues)

R=rsc
http://go/go-review/1016042
parent 26c3f6c1
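
The hunks below are almost entirely whitespace and punctuation normalization. As a minimal illustrative sketch of the kind of change involved (modern Go syntax and hypothetical names, not code from this CL): gofmt writes map types without interior spaces and composite-literal keys with no space before the colon, which accounts for most of the churn in the token table hunk.

package main

import "fmt"

// Color stands in for the Token type purely for illustration.
type Color int

const (
	Red Color = iota
	Green
)

// gofmt's canonical form: "map[Color]string{" with keys written as
// "Red:", not "Red :". Pre-gofmt, the same table might have read
// "map [Color] string { Red : "red", ... }".
var names = map[Color]string{
	Red:   "red",
	Green: "green",
}

func main() {
	fmt.Println(names[Red]) // prints "red"
}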
@@ -11,8 +11,8 @@ package ast
// NOTE: WORK IN PROGRESS
//
type Scope struct {
-Outer *Scope;
-Names map[string]*Ident
+Outer *Scope;
+Names map[string]*Ident;
}
@@ -38,7 +38,7 @@ func (s *Scope) Declare(ident *Ident) bool {
// Lookup looks up an identifier in the current scope chain.
// If the identifier is found, it is returned; otherwise the
// result is nil.
-//
+//
func (s *Scope) Lookup(name string) *Ident {
for ; s != nil; s = s.Outer {
if ident, found := s.Names[name]; found {
@@ -91,4 +91,4 @@ var Universe = Scope {
"println": nil,
}
}
-*/
\ No newline at end of file
+*/
@@ -31,8 +31,8 @@ func CommentText(comment *ast.CommentGroup) string {
// Remove comment markers.
// The parser has given us exactly the comment text.
switch n := len(c); {
-case n >= 4 && c[0:2] == "/*" && c[n-2:n] == "*/":
-c = c[2:n-2];
+case n >= 4 && c[0:2] == "/*" && c[n-2 : n] == "*/":
+c = c[2 : n-2];
case n >= 2 && c[0:2] == "//":
c = c[2:n];
// Remove leading space after //, if there is one.
@@ -51,7 +51,7 @@ func CommentText(comment *ast.CommentGroup) string {
for m > 0 && (l[m-1] == ' ' || l[m-1] == '\n' || l[m-1] == '\t' || l[m-1] == '\r') {
m--;
}
-l = l[0 : m];
+l = l[0:m];
// Add to list.
n := len(lines);
@@ -76,7 +76,7 @@ func CommentText(comment *ast.CommentGroup) string {
n++;
}
}
-lines = lines[0 : n];
+lines = lines[0:n];
// Add final "" entry to get trailing newline from Join.
// The original loop always leaves room for one more.
@@ -115,7 +115,7 @@ func split(text []byte) [][]byte {
}
}
if last < len(text) {
-out[n] = text[last : len(text)];
+out[n] = text[last:len(text)];
}
return out;
@@ -123,8 +123,8 @@ func split(text []byte) [][]byte {
var (
-ldquo = strings.Bytes("&ldquo;");
-rdquo = strings.Bytes("&rdquo;");
+ldquo = strings.Bytes("&ldquo;");
+rdquo = strings.Bytes("&rdquo;");
)
// Escape comment text for HTML.
@@ -133,7 +133,7 @@ func commentEscape(w io.Writer, s []byte) {
last := 0;
for i := 0; i < len(s)-1; i++ {
if s[i] == s[i+1] && (s[i] == '`' || s[i] == '\'') {
-template.HtmlEscape(w, s[last : i]);
+template.HtmlEscape(w, s[last:i]);
last = i+2;
switch s[i] {
case '`':
@@ -144,15 +144,15 @@ func commentEscape(w io.Writer, s []byte) {
i++; // loop will add one more
}
}
-template.HtmlEscape(w, s[last : len(s)]);
+template.HtmlEscape(w, s[last:len(s)]);
}
var (
-html_p = strings.Bytes("<p>\n");
-html_endp = strings.Bytes("</p>\n");
-html_pre = strings.Bytes("<pre>");
-html_endpre = strings.Bytes("</pre>\n");
+html_p = strings.Bytes("<p>\n");
+html_endp = strings.Bytes("</p>\n");
+html_pre = strings.Bytes("<pre>");
+html_endpre = strings.Bytes("</pre>\n");
)
@@ -166,7 +166,7 @@ func indentLen(s []byte) int {
func isBlank(s []byte) bool {
-return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
+return len(s) == 0 || (len(s) == 1 && s[0] == '\n');
}
@@ -175,7 +175,7 @@ func commonPrefix(a, b []byte) []byte {
for i < len(a) && i < len(b) && a[i] == b[i] {
i++;
}
-return a[0 : i];
+return a[0:i];
}
@@ -196,7 +196,7 @@ func unindent(block [][]byte) {
// remove
for i, line := range block {
if !isBlank(line) {
-block[i] = line[n : len(line)];
+block[i] = line[n:len(line)];
}
}
}
@@ -233,7 +233,7 @@ func ToHtml(w io.Writer, s []byte) {
lines := split(s);
unindent(lines);
-for i := 0; i < len(lines); {
+for i := 0; i < len(lines); {
line := lines[i];
if isBlank(line) {
// close paragraph
@@ -260,7 +260,7 @@ func ToHtml(w io.Writer, s []byte) {
for j > i && isBlank(lines[j-1]) {
j--;
}
-block := lines[i : j];
+block := lines[i:j];
i = j;
unindent(block);
@@ -288,4 +288,3 @@ func ToHtml(w io.Writer, s []byte) {
inpara = false;
}
}
@@ -245,8 +245,8 @@ func copyCommentList(list []*ast.Comment) []*ast.Comment {
var (
-bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*"); // BUG(uid):
-bug_content = regexp.MustCompile("[^ \n\r\t]+"); // at least one non-whitespace char
+bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*"); // BUG(uid):
+bug_content = regexp.MustCompile("[^ \n\r\t]+"); // at least one non-whitespace char
)
@@ -7,14 +7,14 @@
package parser
import (
"bytes";
"fmt";
"go/ast";
"go/scanner";
"io";
"os";
pathutil "path";
"strings";
"bytes";
"fmt";
"go/ast";
"go/scanner";
"io";
"os";
pathutil "path";
"strings";
)
@@ -250,7 +250,7 @@ func (S *Scanner) scanNumber(seen_decimal_point bool) token.Token {
tok = token.FLOAT;
goto mantissa;
}
-// octal int
+// octal int
}
goto exit;
}
@@ -554,7 +554,7 @@ func Tokenize(filename string, src []byte, err ErrorHandler, mode uint, f func(p
var s Scanner;
s.Init(filename, src, err, mode);
for f(s.Scan()) {
-// action happens in f
+// action happens in f
}
return s.ErrorCount;
}
@@ -20,27 +20,27 @@ type Token int
// The list of tokens.
const (
// Special tokens
-ILLEGAL Token = iota;
+ILLEGAL Token = iota;
EOF;
COMMENT;
literal_beg;
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
-IDENT; // main
-INT; // 12345
-FLOAT; // 123.45
-CHAR; // 'a'
-STRING; // "abc"
+IDENT; // main
+INT; // 12345
+FLOAT; // 123.45
+CHAR; // 'a'
+STRING; // "abc"
literal_end;
operator_beg;
// Operators and delimiters
-ADD; // +
-SUB; // -
-MUL; // *
-QUO; // /
-REM; // %
+ADD; // +
+SUB; // -
+MUL; // *
+QUO; // /
+REM; // %
AND; // &
OR; // |
@@ -62,17 +62,17 @@ const (
SHR_ASSIGN; // >>=
AND_NOT_ASSIGN; // &^=
-LAND; // &&
-LOR; // ||
-ARROW; // <-
-INC; // ++
-DEC; // --
+LAND; // &&
+LOR; // ||
+ARROW; // <-
+INC; // ++
+DEC; // --
-EQL; // ==
-LSS; // <
-GTR; // >
-ASSIGN; // =
-NOT; // !
+EQL; // ==
+LSS; // <
+GTR; // >
+ASSIGN; // =
+NOT; // !
NEQ; // !=
LEQ; // <=
@@ -80,11 +80,11 @@ const (
DEFINE; // :=
ELLIPSIS; // ...
-LPAREN; // (
-LBRACK; // [
-LBRACE; // {
-COMMA; // ,
-PERIOD; // .
+LPAREN; // (
+LBRACK; // [
+LBRACE; // {
+COMMA; // ,
+PERIOD; // .
RPAREN; // )
RBRACK; // ]
@@ -131,103 +131,103 @@ const (
// At the moment we have no array literal syntax that lets us describe
// the index for each element - use a map for now to make sure they are
// in sync.
-var tokens = map [Token] string {
-ILLEGAL : "ILLEGAL",
-EOF : "EOF",
-COMMENT : "COMMENT",
-IDENT : "IDENT",
-INT : "INT",
-FLOAT : "FLOAT",
-CHAR : "CHAR",
-STRING : "STRING",
-ADD : "+",
-SUB : "-",
-MUL : "*",
-QUO : "/",
-REM : "%",
-AND : "&",
-OR : "|",
-XOR : "^",
-SHL : "<<",
-SHR : ">>",
-AND_NOT : "&^",
-ADD_ASSIGN : "+=",
-SUB_ASSIGN : "-=",
-MUL_ASSIGN : "*=",
-QUO_ASSIGN : "/=",
-REM_ASSIGN : "%=",
-AND_ASSIGN : "&=",
-OR_ASSIGN : "|=",
-XOR_ASSIGN : "^=",
-SHL_ASSIGN : "<<=",
-SHR_ASSIGN : ">>=",
-AND_NOT_ASSIGN : "&^=",
-LAND : "&&",
-LOR : "||",
-ARROW : "<-",
-INC : "++",
-DEC : "--",
-EQL : "==",
-LSS : "<",
-GTR : ">",
-ASSIGN : "=",
-NOT : "!",
-NEQ : "!=",
-LEQ : "<=",
-GEQ : ">=",
-DEFINE : ":=",
-ELLIPSIS : "...",
-LPAREN : "(",
-LBRACK : "[",
-LBRACE : "{",
-COMMA : ",",
-PERIOD : ".",
-RPAREN : ")",
-RBRACK : "]",
-RBRACE : "}",
-SEMICOLON : ";",
-COLON : ":",
-BREAK : "break",
-CASE : "case",
-CHAN : "chan",
-CONST : "const",
-CONTINUE : "continue",
-DEFAULT : "default",
-DEFER : "defer",
-ELSE : "else",
-FALLTHROUGH : "fallthrough",
-FOR : "for",
-FUNC : "func",
-GO : "go",
-GOTO : "goto",
-IF : "if",
-IMPORT : "import",
-INTERFACE : "interface",
-MAP : "map",
-PACKAGE : "package",
-RANGE : "range",
-RETURN : "return",
-SELECT : "select",
-STRUCT : "struct",
-SWITCH : "switch",
-TYPE : "type",
-VAR : "var",
+var tokens = map[Token]string{
+ILLEGAL: "ILLEGAL",
+EOF: "EOF",
+COMMENT: "COMMENT",
+IDENT: "IDENT",
+INT: "INT",
+FLOAT: "FLOAT",
+CHAR: "CHAR",
+STRING: "STRING",
+ADD: "+",
+SUB: "-",
+MUL: "*",
+QUO: "/",
+REM: "%",
+AND: "&",
+OR: "|",
+XOR: "^",
+SHL: "<<",
+SHR: ">>",
+AND_NOT: "&^",
+ADD_ASSIGN: "+=",
+SUB_ASSIGN: "-=",
+MUL_ASSIGN: "*=",
+QUO_ASSIGN: "/=",
+REM_ASSIGN: "%=",
+AND_ASSIGN: "&=",
+OR_ASSIGN: "|=",
+XOR_ASSIGN: "^=",
+SHL_ASSIGN: "<<=",
+SHR_ASSIGN: ">>=",
+AND_NOT_ASSIGN: "&^=",
+LAND: "&&",
+LOR: "||",
+ARROW: "<-",
+INC: "++",
+DEC: "--",
+EQL: "==",
+LSS: "<",
+GTR: ">",
+ASSIGN: "=",
+NOT: "!",
+NEQ: "!=",
+LEQ: "<=",
+GEQ: ">=",
+DEFINE: ":=",
+ELLIPSIS: "...",
+LPAREN: "(",
+LBRACK: "[",
+LBRACE: "{",
+COMMA: ",",
+PERIOD: ".",
+RPAREN: ")",
+RBRACK: "]",
+RBRACE: "}",
+SEMICOLON: ";",
+COLON: ":",
+BREAK: "break",
+CASE: "case",
+CHAN: "chan",
+CONST: "const",
+CONTINUE: "continue",
+DEFAULT: "default",
+DEFER: "defer",
+ELSE: "else",
+FALLTHROUGH: "fallthrough",
+FOR: "for",
+FUNC: "func",
+GO: "go",
+GOTO: "goto",
+IF: "if",
+IMPORT: "import",
+INTERFACE: "interface",
+MAP: "map",
+PACKAGE: "package",
+RANGE: "range",
+RETURN: "return",
+SELECT: "select",
+STRUCT: "struct",
+SWITCH: "switch",
+TYPE: "type",
+VAR: "var",
}
@@ -252,9 +252,9 @@ func (tok Token) String() string {
// selector, indexing, and other operator and delimiter tokens.
//
const (
-LowestPrec = 0; // non-operators
-UnaryPrec = 7;
-HighestPrec = 8;
+LowestPrec = 0; // non-operators
+UnaryPrec = 7;
+HighestPrec = 8;
)
@@ -281,10 +281,10 @@ func (op Token) Precedence() int {
}
-var keywords map [string] Token;
+var keywords map[string]Token
func init() {
-keywords = make(map [string] Token);
+keywords = make(map[string]Token);
for i := keyword_beg + 1; i < keyword_end; i++ {
keywords[tokens[i]] = i;
}
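
As a side note on the keywords table populated by the init function above, here is a minimal sketch of how such a table is typically consulted (standalone types and a hypothetical lookup helper for illustration; this is not the go/token API):

package main

import "fmt"

type Token int

const (
	IDENT Token = iota
	BREAK
	FUNC
)

// keywords maps keyword spellings to token values, mirroring the table
// built in the init function above.
var keywords = map[string]Token{
	"break": BREAK,
	"func":  FUNC,
}

// lookup returns the keyword token for ident, or IDENT when ident is an
// ordinary identifier.
func lookup(ident string) Token {
	if tok, isKeyword := keywords[ident]; isKeyword {
		return tok
	}
	return IDENT
}

func main() {
	fmt.Println(lookup("func") == FUNC)  // true
	fmt.Println(lookup("main") == IDENT) // true
}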
@@ -331,10 +331,10 @@ func (tok Token) IsKeyword() bool {
// A Position is valid if the line number is > 0.
//
type Position struct {
-Filename string; // filename, if any
-Offset int; // byte offset, starting at 0
-Line int; // line number, starting at 1
-Column int; // column number, starting at 1 (character count)
+Filename string; // filename, if any
+Offset int; // byte offset, starting at 0
+Line int; // line number, starting at 1
+Column int; // column number, starting at 1 (character count)
}
@@ -348,7 +348,7 @@ func (pos *Position) Pos() Position {
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool {
-return pos.Line > 0
+return pos.Line > 0;
}