Commit 4081aa12 authored by Andrew Gerrand

remove non-go1 commands and packages

parent 3895b505
# Copyright 2012 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
include ../../Make.dist
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Cov is a rudimentary code coverage tool.
Usage:
go tool cov [-lsv] [-g substring] [-m minlines] [6.out args]
Given a command to run, it runs the command while tracking which
sections of code have been executed. When the command finishes,
cov prints the line numbers of sections of code in the binary that
were not executed. With no arguments it assumes the command "6.out".
The options are:
-l
print full path names instead of paths relative to the current directory
-s
show the source code that didn't execute, in addition to the line numbers.
-v
print debugging information during the run.
-g substring
restrict the coverage analysis to functions or files whose names contain substring
-m minlines
only report uncovered sections of code larger than minlines lines
The program is the same for all architectures: 386, amd64, and arm.
*/
package documentation
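// A hypothetical session, assuming a test binary named 6.out built by the gc
// toolchain (the flag values below are illustrative, not taken from the text
// above):
//
//	go tool cov -s -m 3 6.out
//
// runs 6.out under breakpoint tracing and prints, with source, every
// uncovered section larger than three lines.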
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
* code coverage
*/
#include <u.h>
#include <libc.h>
#include <bio.h>
#include "tree.h"
#include <ureg_amd64.h>
#include <mach.h>
typedef struct Ureg Ureg;
void
usage(void)
{
fprint(2, "usage: cov [-lsv] [-g substring] [-m minlines] [6.out args...]\n");
fprint(2, "-g specifies pattern of interesting functions or files\n");
exits("usage");
}
typedef struct Range Range;
struct Range
{
uvlong pc;
uvlong epc;
};
int chatty;
int fd;
int longnames;
int pid;
int doshowsrc;
Map *mem;
Map *text;
Fhdr fhdr;
char *substring;
char cwd[1000];
int ncwd;
int minlines = -1000;
Tree breakpoints; // code ranges not run
/*
* comparison for Range structures
* they are "equal" if they overlap, so
* that a search for [pc, pc+1) finds the
* Range containing pc.
*/
int
rangecmp(void *va, void *vb)
{
Range *a = va, *b = vb;
if(a->epc <= b->pc)
return 1;
if(b->epc <= a->pc)
return -1;
return 0;
}
/*
* remember that we ran the section of code [pc, epc).
*/
void
ran(uvlong pc, uvlong epc)
{
Range key;
Range *r;
uvlong oldepc;
if(chatty)
print("run %#llux-%#llux\n", pc, epc);
key.pc = pc;
key.epc = pc+1;
r = treeget(&breakpoints, &key);
if(r == nil)
sysfatal("unchecked breakpoint at %#llux+%d", pc, (int)(epc-pc));
// Might be that the tail of the sequence
// was run already, so r->epc is before the end.
// Adjust len.
if(epc > r->epc)
epc = r->epc;
if(r->pc == pc) {
r->pc = epc;
} else {
// Chop r to before pc;
// add new entry for after if needed.
// Changing r->epc does not affect r's position in the tree.
oldepc = r->epc;
r->epc = pc;
if(epc < oldepc) {
Range *n;
n = malloc(sizeof *n);
n->pc = epc;
n->epc = oldepc;
treeput(&breakpoints, n, n);
}
}
}
void
showsrc(char *file, int line1, int line2)
{
Biobuf *b;
char *p;
int n, stop;
if((b = Bopen(file, OREAD)) == nil) {
print("\topen %s: %r\n", file);
return;
}
for(n=1; n<line1 && (p = Brdstr(b, '\n', 1)) != nil; n++)
free(p);
// print up to five lines (this one and 4 more).
// if there are more than five lines, print 4 and "..."
stop = n+4;
if(stop > line2)
stop = line2;
if(stop < line2)
stop--;
for(; n<=stop && (p = Brdstr(b, '\n', 1)) != nil; n++) {
print(" %d %s\n", n, p);
free(p);
}
if(n < line2)
print(" ...\n");
Bterm(b);
}
/*
* if s is in the current directory or below,
* return the relative path.
*/
char*
shortname(char *s)
{
if(!longnames && strlen(s) > ncwd && memcmp(s, cwd, ncwd) == 0 && s[ncwd] == '/')
return s+ncwd+1;
return s;
}
/*
* we've decided that [pc, epc) did not run.
* do something about it.
*/
void
missing(uvlong pc, uvlong epc)
{
char file[1000];
int line1, line2;
char buf[100];
Symbol s;
char *p;
uvlong uv;
if(!findsym(pc, CTEXT, &s) || !fileline(file, sizeof file, pc)) {
notfound:
print("%#llux-%#llux\n", pc, epc);
return;
}
p = strrchr(file, ':');
*p++ = 0;
line1 = atoi(p);
for(uv=pc; uv<epc; ) {
if(!fileline(file, sizeof file, epc-2))
goto notfound;
uv += machdata->instsize(text, uv);
}
p = strrchr(file, ':');
*p++ = 0;
line2 = atoi(p);
if(line2+1-line1 < minlines)
return;
if(pc == s.value) {
// never entered function
print("%s:%d %s never called (%#llux-%#llux)\n", shortname(file), line1, s.name, pc, epc);
return;
}
if(pc <= s.value+13) {
// probably stub for stack growth.
// check whether last instruction is call to morestack.
// the -5 below is the length of
// CALL sys.morestack.
buf[0] = 0;
machdata->das(text, epc-5, 0, buf, sizeof buf);
if(strstr(buf, "morestack"))
return;
}
if(epc - pc == 5) {
// check for CALL sys.panicindex
buf[0] = 0;
machdata->das(text, pc, 0, buf, sizeof buf);
if(strstr(buf, "panicindex"))
return;
}
if(epc - pc == 2 || epc - pc == 3) {
// check for XORL inside shift.
// (on x86 have to implement large left or unsigned right shift with explicit zeroing).
// f+90 0x00002c9f CMPL CX,$20
// f+93 0x00002ca2 JCS f+97(SB)
// f+95 0x00002ca4 XORL AX,AX <<<
// f+97 0x00002ca6 SHLL CL,AX
// f+99 0x00002ca8 MOVL $1,CX
//
// f+c8 0x00002cd7 CMPL CX,$40
// f+cb 0x00002cda JCS f+d0(SB)
// f+cd 0x00002cdc XORQ AX,AX <<<
// f+d0 0x00002cdf SHLQ CL,AX
// f+d3 0x00002ce2 MOVQ $1,CX
buf[0] = 0;
machdata->das(text, pc, 0, buf, sizeof buf);
if(strncmp(buf, "XOR", 3) == 0) {
machdata->das(text, epc, 0, buf, sizeof buf);
if(strncmp(buf, "SHL", 3) == 0 || strncmp(buf, "SHR", 3) == 0)
return;
}
}
if(epc - pc == 3) {
// check for SAR inside shift.
// (on x86 have to implement large signed right shift as >>31).
// f+36 0x00016216 CMPL CX,$20
// f+39 0x00016219 JCS f+3e(SB)
// f+3b 0x0001621b SARL $1f,AX <<<
// f+3e 0x0001621e SARL CL,AX
// f+40 0x00016220 XORL CX,CX
// f+42 0x00016222 CMPL CX,AX
buf[0] = 0;
machdata->das(text, pc, 0, buf, sizeof buf);
if(strncmp(buf, "SAR", 3) == 0) {
machdata->das(text, epc, 0, buf, sizeof buf);
if(strncmp(buf, "SAR", 3) == 0)
return;
}
}
// show first instruction to make clear where we were.
machdata->das(text, pc, 0, buf, sizeof buf);
if(line1 != line2)
print("%s:%d,%d %#llux-%#llux %s\n",
shortname(file), line1, line2, pc, epc, buf);
else
print("%s:%d %#llux-%#llux %s\n",
shortname(file), line1, pc, epc, buf);
if(doshowsrc)
showsrc(file, line1, line2);
}
/*
* walk the tree, calling missing for each non-empty
* section of missing code.
*/
void
walktree(TreeNode *t)
{
Range *n;
if(t == nil)
return;
walktree(t->left);
n = t->key;
if(n->pc < n->epc)
missing(n->pc, n->epc);
walktree(t->right);
}
/*
* set a breakpoint all over [pc, epc)
* and remember that we did.
*/
void
breakpoint(uvlong pc, uvlong epc)
{
Range *r;
r = malloc(sizeof *r);
r->pc = pc;
r->epc = epc;
treeput(&breakpoints, r, r);
for(; pc < epc; pc+=machdata->bpsize)
put1(mem, pc, machdata->bpinst, machdata->bpsize);
}
/*
* install breakpoints over all text symbols
* that match the pattern.
*/
void
cover(void)
{
Symbol s;
char *lastfn;
uvlong lastpc;
int i;
char buf[200];
lastfn = nil;
lastpc = 0;
for(i=0; textsym(&s, i); i++) {
switch(s.type) {
case 'T':
case 't':
if(lastpc != 0) {
breakpoint(lastpc, s.value);
lastpc = 0;
}
// Ignore second entry for a given name;
// that's the debugging blob.
if(lastfn && strcmp(s.name, lastfn) == 0)
break;
lastfn = s.name;
buf[0] = 0;
fileline(buf, sizeof buf, s.value);
if(substring == nil || strstr(buf, substring) || strstr(s.name, substring))
lastpc = s.value;
}
}
}
uvlong
rgetzero(Map *map, char *reg)
{
USED(map);
USED(reg);
return 0;
}
/*
* remove the breakpoints at pc and successive instructions,
* up to and including the first jump or other control flow transfer.
*/
void
uncover(uvlong pc)
{
uchar buf[1000];
int n, n1, n2;
uvlong foll[2];
// Double-check that we stopped at a breakpoint.
if(get1(mem, pc, buf, machdata->bpsize) < 0)
sysfatal("read mem inst at %#llux: %r", pc);
if(memcmp(buf, machdata->bpinst, machdata->bpsize) != 0)
sysfatal("stopped at %#llux; not at breakpoint %d", pc, machdata->bpsize);
// Figure out how many bytes of straight-line code
// there are in the text starting at pc.
n = 0;
while(n < sizeof buf) {
n1 = machdata->instsize(text, pc+n);
if(n+n1 > sizeof buf)
break;
n2 = machdata->foll(text, pc+n, rgetzero, foll);
n += n1;
if(n2 != 1 || foll[0] != pc+n)
break;
}
// Record that this section of code ran.
ran(pc, pc+n);
// Put original instructions back.
if(get1(text, pc, buf, n) < 0)
sysfatal("get1: %r");
if(put1(mem, pc, buf, n) < 0)
sysfatal("put1: %r");
}
int
startprocess(char **argv)
{
int pid;
if((pid = fork()) < 0)
sysfatal("fork: %r");
if(pid == 0) {
pid = getpid();
if(ctlproc(pid, "hang") < 0)
sysfatal("ctlproc hang: %r");
exec(argv[0], argv);
sysfatal("exec %s: %r", argv[0]);
}
if(ctlproc(pid, "attached") < 0 || ctlproc(pid, "waitstop") < 0)
sysfatal("attach %d %s: %r", pid, argv[0]);
return pid;
}
int
go(void)
{
uvlong pc;
char buf[100];
int n;
for(n = 0;; n++) {
ctlproc(pid, "startstop");
if(get8(mem, offsetof(Ureg, ip), &pc) < 0) {
rerrstr(buf, sizeof buf);
if(strstr(buf, "exited") || strstr(buf, "No such process"))
return n;
sysfatal("cannot read pc: %r");
}
pc--;
if(put8(mem, offsetof(Ureg, ip), pc) < 0)
sysfatal("cannot write pc: %r");
uncover(pc);
}
}
void
main(int argc, char **argv)
{
int n;
ARGBEGIN{
case 'g':
substring = EARGF(usage());
break;
case 'l':
longnames++;
break;
case 'm':
minlines = atoi(EARGF(usage()));
break;
case 's':
doshowsrc = 1;
break;
case 'v':
chatty++;
break;
default:
usage();
}ARGEND
getwd(cwd, sizeof cwd);
ncwd = strlen(cwd);
if(argc == 0) {
*--argv = "6.out";
}
fd = open(argv[0], OREAD);
if(fd < 0)
sysfatal("open %s: %r", argv[0]);
if(crackhdr(fd, &fhdr) <= 0)
sysfatal("crackhdr: %r");
machbytype(fhdr.type);
if(syminit(fd, &fhdr) <= 0)
sysfatal("syminit: %r");
text = loadmap(nil, fd, &fhdr);
if(text == nil)
sysfatal("loadmap: %r");
pid = startprocess(argv);
mem = attachproc(pid, &fhdr);
if(mem == nil)
sysfatal("attachproc: %r");
breakpoints.cmp = rangecmp;
cover();
n = go();
walktree(breakpoints.root);
if(chatty)
print("%d breakpoints\n", n);
detachproc(mem);
exits(0);
}
// Renamed from Map to Tree to avoid conflict with libmach.
/*
Copyright (c) 2003-2007 Russ Cox, Tom Bergan, Austin Clements,
Massachusetts Institute of Technology
Portions Copyright (c) 2009 The Go Authors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// Mutable map structure, but still based on
// Okasaki, Red Black Trees in a Functional Setting, JFP 1999,
// which is a lot easier than the traditional red-black
// and plenty fast enough for me. (Also I could copy
// and edit fmap.c.)
#include <u.h>
#include <libc.h>
#include "tree.h"
enum
{
Red = 0,
Black = 1
};
// Red-black trees are binary trees with this property:
// 1. No red node has a red parent.
// 2. Every path from the root to a leaf contains the
// same number of black nodes.
static TreeNode*
rwTreeNode(TreeNode *p, int color, TreeNode *left, void *key, void *value, TreeNode *right)
{
if(p == nil)
p = malloc(sizeof *p);
p->color = color;
p->left = left;
p->key = key;
p->value = value;
p->right = right;
return p;
}
static TreeNode*
balance(TreeNode *m0)
{
void *xk, *xv, *yk, *yv, *zk, *zv;
TreeNode *a, *b, *c, *d;
TreeNode *m1, *m2;
int color;
TreeNode *left, *right;
void *key, *value;
color = m0->color;
left = m0->left;
key = m0->key;
value = m0->value;
right = m0->right;
// Okasaki notation: T is rwTreeNode, B is Black, R is Red; x, y, z are key-value pairs.
//
// balance B (T R (T R a x b) y c) z d
// balance B (T R a x (T R b y c)) z d
// balance B a x (T R (T R b y c) z d)
// balance B a x (T R b y (T R c z d))
//
// = T R (T B a x b) y (T B c z d)
if(color == Black){
if(left && left->color == Red){
if(left->left && left->left->color == Red){
a = left->left->left;
xk = left->left->key;
xv = left->left->value;
b = left->left->right;
yk = left->key;
yv = left->value;
c = left->right;
zk = key;
zv = value;
d = right;
m1 = left;
m2 = left->left;
goto hard;
}else if(left->right && left->right->color == Red){
a = left->left;
xk = left->key;
xv = left->value;
b = left->right->left;
yk = left->right->key;
yv = left->right->value;
c = left->right->right;
zk = key;
zv = value;
d = right;
m1 = left;
m2 = left->right;
goto hard;
}
}else if(right && right->color == Red){
if(right->left && right->left->color == Red){
a = left;
xk = key;
xv = value;
b = right->left->left;
yk = right->left->key;
yv = right->left->value;
c = right->left->right;
zk = right->key;
zv = right->value;
d = right->right;
m1 = right;
m2 = right->left;
goto hard;
}else if(right->right && right->right->color == Red){
a = left;
xk = key;
xv = value;
b = right->left;
yk = right->key;
yv = right->value;
c = right->right->left;
zk = right->right->key;
zv = right->right->value;
d = right->right->right;
m1 = right;
m2 = right->right;
goto hard;
}
}
}
return rwTreeNode(m0, color, left, key, value, right);
hard:
return rwTreeNode(m0, Red, rwTreeNode(m1, Black, a, xk, xv, b),
yk, yv, rwTreeNode(m2, Black, c, zk, zv, d));
}
static TreeNode*
ins0(TreeNode *p, void *k, void *v, TreeNode *rw)
{
if(p == nil)
return rwTreeNode(rw, Red, nil, k, v, nil);
if(p->key == k){
if(rw)
return rwTreeNode(rw, p->color, p->left, k, v, p->right);
p->value = v;
return p;
}
if(p->key < k)
p->left = ins0(p->left, k, v, rw);
else
p->right = ins0(p->right, k, v, rw);
return balance(p);
}
static TreeNode*
ins1(Tree *m, TreeNode *p, void *k, void *v, TreeNode *rw)
{
int i;
if(p == nil)
return rwTreeNode(rw, Red, nil, k, v, nil);
i = m->cmp(p->key, k);
if(i == 0){
if(rw)
return rwTreeNode(rw, p->color, p->left, k, v, p->right);
p->value = v;
return p;
}
if(i < 0)
p->left = ins1(m, p->left, k, v, rw);
else
p->right = ins1(m, p->right, k, v, rw);
return balance(p);
}
void
treeputelem(Tree *m, void *key, void *val, TreeNode *rw)
{
if(m->cmp)
m->root = ins1(m, m->root, key, val, rw);
else
m->root = ins0(m->root, key, val, rw);
}
void
treeput(Tree *m, void *key, void *val)
{
treeputelem(m, key, val, nil);
}
void*
treeget(Tree *m, void *key)
{
int i;
TreeNode *p;
p = m->root;
if(m->cmp){
for(;;){
if(p == nil)
return nil;
i = m->cmp(p->key, key);
if(i < 0)
p = p->left;
else if(i > 0)
p = p->right;
else
return p->value;
}
}else{
for(;;){
if(p == nil)
return nil;
if(p->key == key)
return p->value;
if(p->key < key)
p = p->left;
else
p = p->right;
}
}
}
// Renamed from Map to Tree to avoid conflict with libmach.
/*
Copyright (c) 2003-2007 Russ Cox, Tom Bergan, Austin Clements,
Massachusetts Institute of Technology
Portions Copyright (c) 2009 The Go Authors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
typedef struct Tree Tree;
typedef struct TreeNode TreeNode;
struct Tree
{
int (*cmp)(void*, void*);
TreeNode *root;
};
struct TreeNode
{
int color;
TreeNode *left;
void *key;
void *value;
TreeNode *right;
};
void *treeget(Tree*, void*);
void treeput(Tree*, void*, void*);
void treeputelem(Tree*, void*, void*, TreeNode*);
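/*
 * A minimal usage sketch, mirroring how cov's main.c drives this API
 * (rangecmp and the key/value pointers are whatever the caller supplies;
 * a nil cmp falls back to comparing the key pointers directly):
 *
 *	Tree t;
 *
 *	memset(&t, 0, sizeof t);
 *	t.cmp = rangecmp;
 *	treeput(&t, key, val);
 *	val = treeget(&t, key);
 */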
# Copyright 2012 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
include ../../Make.dist
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Prof is a rudimentary real-time profiler.
Given a command to run or the process id (pid) of a command already
running, it samples the program's state at regular intervals and reports
on its behavior. With no options, it prints a histogram of the locations
in the code that were sampled during execution.
Since it is a real-time profiler, unlike a traditional profiler it samples
the program's state even when it is not running, such as when it is
asleep or waiting for I/O. Each thread contributes equally to the
statistics.
Usage:
go tool prof -p pid [-t total_secs] [-d delta_msec] [6.out args ...]
The output modes (default -h) are:
-P file.prof:
Write the profile information to file.prof, in the format used by pprof.
At the moment, this only works on Linux amd64 binaries and requires that the
binary be written using 6l -e to produce ELF debug info.
See http://code.google.com/p/google-perftools for details.
-h: histograms
How many times a sample occurred at each location.
-f: dynamic functions
At each sample period, print the name of the executing function.
-l: dynamic file and line numbers
At each sample period, print the file and line number of the executing instruction.
-r: dynamic registers
At each sample period, print the register contents.
-s: dynamic function stack traces
At each sample period, print the symbolic stack trace.
Flag -t sets the maximum real time to sample, in seconds, and -d
sets the sampling interval in milliseconds. The default is to sample
every 100ms until the program completes.
It is installed as go tool prof and is architecture-independent.
*/
package documentation
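// Two hypothetical invocations, assuming a 6.out binary (the values are
// illustrative, combining only the flags documented above):
//
//	go tool prof -t 10 -d 250 6.out     # sample 6.out every 250ms for 10s
//	go tool prof -p 1234 -P cpu.prof    # attach to pid 1234, write pprof output
//
// The second form requires a Linux amd64 binary linked with 6l -e, as noted
// above.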
This directory tree contains experimental packages and
unfinished code that is subject to even more change than the
rest of the Go tree.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ebnf is a library for EBNF grammars. The input is text ([]byte)
// satisfying the following grammar (represented itself in EBNF):
//
// Production = name "=" [ Expression ] "." .
// Expression = Alternative { "|" Alternative } .
// Alternative = Term { Term } .
// Term = name | token [ "…" token ] | Group | Option | Repetition .
// Group = "(" Expression ")" .
// Option = "[" Expression "]" .
// Repetition = "{" Expression "}" .
//
// A name is a Go identifier, a token is a Go string, and comments
// and white space follow the same rules as for the Go language.
// Production names starting with an uppercase Unicode letter denote
// non-terminal productions (i.e., productions which allow white-space
// and comments between tokens); all other production names denote
// lexical productions.
//
package ebnf
import (
"errors"
"fmt"
"text/scanner"
"unicode"
"unicode/utf8"
)
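// A minimal usage sketch (the grammar text and file name are illustrative;
// Parse is defined in parse.go of this package):
//
//	src := []byte(`Program = "a" | "b" .`)
//	grammar, err := Parse("example.ebnf", bytes.NewBuffer(src))
//	if err == nil {
//		err = Verify(grammar, "Program")
//	}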
// ----------------------------------------------------------------------------
// Error handling
type errorList []error
func (list errorList) Err() error {
if len(list) == 0 {
return nil
}
return list
}
func (list errorList) Error() string {
switch len(list) {
case 0:
return "no errors"
case 1:
return list[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", list[0], len(list)-1)
}
func newError(pos scanner.Position, msg string) error {
return errors.New(fmt.Sprintf("%s: %s", pos, msg))
}
// ----------------------------------------------------------------------------
// Internal representation
type (
// An Expression node represents a production expression.
Expression interface {
// Pos is the position of the first character of the syntactic construct
Pos() scanner.Position
}
// An Alternative node represents a non-empty list of alternative expressions.
Alternative []Expression // x | y | z
// A Sequence node represents a non-empty list of sequential expressions.
Sequence []Expression // x y z
// A Name node represents a production name.
Name struct {
StringPos scanner.Position
String string
}
// A Token node represents a literal.
Token struct {
StringPos scanner.Position
String string
}
// A Range node represents a range of characters.
Range struct {
Begin, End *Token // begin ... end
}
// A Group node represents a grouped expression.
Group struct {
Lparen scanner.Position
Body Expression // (body)
}
// An Option node represents an optional expression.
Option struct {
Lbrack scanner.Position
Body Expression // [body]
}
// A Repetition node represents a repeated expression.
Repetition struct {
Lbrace scanner.Position
Body Expression // {body}
}
// A Production node represents an EBNF production.
Production struct {
Name *Name
Expr Expression
}
// A Bad node stands for pieces of source code that lead to a parse error.
Bad struct {
TokPos scanner.Position
Error string // parser error message
}
// A Grammar is a set of EBNF productions. The map
// is indexed by production name.
//
Grammar map[string]*Production
)
func (x Alternative) Pos() scanner.Position { return x[0].Pos() } // the parser always generates non-empty Alternative
func (x Sequence) Pos() scanner.Position { return x[0].Pos() } // the parser always generates non-empty Sequences
func (x *Name) Pos() scanner.Position { return x.StringPos }
func (x *Token) Pos() scanner.Position { return x.StringPos }
func (x *Range) Pos() scanner.Position { return x.Begin.Pos() }
func (x *Group) Pos() scanner.Position { return x.Lparen }
func (x *Option) Pos() scanner.Position { return x.Lbrack }
func (x *Repetition) Pos() scanner.Position { return x.Lbrace }
func (x *Production) Pos() scanner.Position { return x.Name.Pos() }
func (x *Bad) Pos() scanner.Position { return x.TokPos }
// ----------------------------------------------------------------------------
// Grammar verification
func isLexical(name string) bool {
ch, _ := utf8.DecodeRuneInString(name)
return !unicode.IsUpper(ch)
}
type verifier struct {
errors errorList
worklist []*Production
reached Grammar // set of productions reached from (and including) the root production
grammar Grammar
}
func (v *verifier) error(pos scanner.Position, msg string) {
v.errors = append(v.errors, newError(pos, msg))
}
func (v *verifier) push(prod *Production) {
name := prod.Name.String
if _, found := v.reached[name]; !found {
v.worklist = append(v.worklist, prod)
v.reached[name] = prod
}
}
func (v *verifier) verifyChar(x *Token) rune {
s := x.String
if utf8.RuneCountInString(s) != 1 {
v.error(x.Pos(), "single char expected, found "+s)
return 0
}
ch, _ := utf8.DecodeRuneInString(s)
return ch
}
func (v *verifier) verifyExpr(expr Expression, lexical bool) {
switch x := expr.(type) {
case nil:
// empty expression
case Alternative:
for _, e := range x {
v.verifyExpr(e, lexical)
}
case Sequence:
for _, e := range x {
v.verifyExpr(e, lexical)
}
case *Name:
// a production with this name must exist;
// add it to the worklist if not yet processed
if prod, found := v.grammar[x.String]; found {
v.push(prod)
} else {
v.error(x.Pos(), "missing production "+x.String)
}
// within a lexical production references
// to non-lexical productions are invalid
if lexical && !isLexical(x.String) {
v.error(x.Pos(), "reference to non-lexical production "+x.String)
}
case *Token:
// nothing to do for now
case *Range:
i := v.verifyChar(x.Begin)
j := v.verifyChar(x.End)
if i >= j {
v.error(x.Pos(), "decreasing character range")
}
case *Group:
v.verifyExpr(x.Body, lexical)
case *Option:
v.verifyExpr(x.Body, lexical)
case *Repetition:
v.verifyExpr(x.Body, lexical)
case *Bad:
v.error(x.Pos(), x.Error)
default:
panic(fmt.Sprintf("internal error: unexpected type %T", expr))
}
}
func (v *verifier) verify(grammar Grammar, start string) {
// find root production
root, found := grammar[start]
if !found {
var noPos scanner.Position
v.error(noPos, "no start production "+start)
return
}
// initialize verifier
v.worklist = v.worklist[0:0]
v.reached = make(Grammar)
v.grammar = grammar
// work through the worklist
v.push(root)
for {
n := len(v.worklist) - 1
if n < 0 {
break
}
prod := v.worklist[n]
v.worklist = v.worklist[0:n]
v.verifyExpr(prod.Expr, isLexical(prod.Name.String))
}
// check if all productions were reached
if len(v.reached) < len(v.grammar) {
for name, prod := range v.grammar {
if _, found := v.reached[name]; !found {
v.error(prod.Pos(), name+" is unreachable")
}
}
}
}
// Verify checks that:
// - all productions used are defined
// - all productions defined are used when beginning at start
// - lexical productions refer only to other lexical productions
//
func Verify(grammar Grammar, start string) error {
var v verifier
v.verify(grammar, start)
return v.errors.Err()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ebnf
import (
"bytes"
"testing"
)
var goodGrammars = []string{
`Program = .`,
`Program = foo .
foo = "foo" .`,
`Program = "a" | "b" "c" .`,
`Program = "a" … "z" .`,
`Program = Song .
Song = { Note } .
Note = Do | (Re | Mi | Fa | So | La) | Ti .
Do = "c" .
Re = "d" .
Mi = "e" .
Fa = "f" .
So = "g" .
La = "a" .
Ti = ti .
ti = "b" .`,
}
var badGrammars = []string{
`Program = | .`,
`Program = | b .`,
`Program = a … b .`,
`Program = "a" … .`,
`Program = … "b" .`,
`Program = () .`,
`Program = [] .`,
`Program = {} .`,
}
func checkGood(t *testing.T, src string) {
grammar, err := Parse("", bytes.NewBuffer([]byte(src)))
if err != nil {
t.Errorf("Parse(%s) failed: %v", src, err)
return
}
if err = Verify(grammar, "Program"); err != nil {
t.Errorf("Verify(%s) failed: %v", src, err)
}
}
func checkBad(t *testing.T, src string) {
_, err := Parse("", bytes.NewBuffer([]byte(src)))
if err == nil {
t.Errorf("Parse(%s) should have failed", src)
}
}
func TestGrammars(t *testing.T) {
for _, src := range goodGrammars {
checkGood(t, src)
}
for _, src := range badGrammars {
checkBad(t, src)
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ebnf
import (
"io"
"strconv"
"text/scanner"
)
type parser struct {
errors errorList
scanner scanner.Scanner
pos scanner.Position // token position
tok rune // one token look-ahead
lit string // token literal
}
func (p *parser) next() {
p.tok = p.scanner.Scan()
p.pos = p.scanner.Position
p.lit = p.scanner.TokenText()
}
func (p *parser) error(pos scanner.Position, msg string) {
p.errors = append(p.errors, newError(pos, msg))
}
func (p *parser) errorExpected(pos scanner.Position, msg string) {
msg = `expected "` + msg + `"`
if pos.Offset == p.pos.Offset {
// the error happened at the current position;
// make the error message more specific
msg += ", found " + scanner.TokenString(p.tok)
if p.tok < 0 {
msg += " " + p.lit
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok rune) scanner.Position {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, scanner.TokenString(tok))
}
p.next() // make progress in any case
return pos
}
func (p *parser) parseIdentifier() *Name {
pos := p.pos
name := p.lit
p.expect(scanner.Ident)
return &Name{pos, name}
}
func (p *parser) parseToken() *Token {
pos := p.pos
value := ""
if p.tok == scanner.String {
value, _ = strconv.Unquote(p.lit)
// Unquote may fail with an error, but only if the scanner found
// an illegal string in the first place. In this case the error
// has already been reported.
p.next()
} else {
p.expect(scanner.String)
}
return &Token{pos, value}
}
// parseTerm returns nil if no term was found.
func (p *parser) parseTerm() (x Expression) {
pos := p.pos
switch p.tok {
case scanner.Ident:
x = p.parseIdentifier()
case scanner.String:
tok := p.parseToken()
x = tok
const ellipsis = '…' // U+2026, the horizontal ellipsis character
if p.tok == ellipsis {
p.next()
x = &Range{tok, p.parseToken()}
}
case '(':
p.next()
x = &Group{pos, p.parseExpression()}
p.expect(')')
case '[':
p.next()
x = &Option{pos, p.parseExpression()}
p.expect(']')
case '{':
p.next()
x = &Repetition{pos, p.parseExpression()}
p.expect('}')
}
return x
}
func (p *parser) parseSequence() Expression {
var list Sequence
for x := p.parseTerm(); x != nil; x = p.parseTerm() {
list = append(list, x)
}
// no need for a sequence if list.Len() < 2
switch len(list) {
case 0:
p.errorExpected(p.pos, "term")
return &Bad{p.pos, "term expected"}
case 1:
return list[0]
}
return list
}
func (p *parser) parseExpression() Expression {
var list Alternative
for {
list = append(list, p.parseSequence())
if p.tok != '|' {
break
}
p.next()
}
// len(list) > 0
// no need for an Alternative node if list.Len() < 2
if len(list) == 1 {
return list[0]
}
return list
}
func (p *parser) parseProduction() *Production {
name := p.parseIdentifier()
p.expect('=')
var expr Expression
if p.tok != '.' {
expr = p.parseExpression()
}
p.expect('.')
return &Production{name, expr}
}
func (p *parser) parse(filename string, src io.Reader) Grammar {
p.scanner.Init(src)
p.scanner.Filename = filename
p.next() // initializes pos, tok, lit
grammar := make(Grammar)
for p.tok != scanner.EOF {
prod := p.parseProduction()
name := prod.Name.String
if _, found := grammar[name]; !found {
grammar[name] = prod
} else {
p.error(prod.Pos(), name+" declared already")
}
}
return grammar
}
// Parse parses a set of EBNF productions from source src.
// It returns a set of productions. Errors are reported
// for incorrect syntax and if a production is declared
// more than once; the filename is used only for error
// positions.
//
func Parse(filename string, src io.Reader) (Grammar, error) {
var p parser
grammar := p.parse(filename, src)
return grammar, p.errors.Err()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Ebnflint verifies that EBNF productions are consistent and grammatically correct.
It reads them from an HTML document such as the Go specification.
Grammar productions are grouped in boxes demarcated by the HTML elements
<pre class="ebnf">
</pre>
Usage:
go tool ebnflint [--start production] [file]
The --start flag specifies the name of the start production for
the grammar; it defaults to "Start".
*/
package documentation
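// For example, the test in this directory checks the Go specification
// grammar; the equivalent command-line run would look roughly like this
// (a sketch, assuming $GOROOT is set):
//
//	go tool ebnflint --start SourceFile $GOROOT/doc/go_spec.html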
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"exp/ebnf"
"flag"
"fmt"
"go/scanner"
"go/token"
"io"
"io/ioutil"
"os"
"path/filepath"
)
var fset = token.NewFileSet()
var start = flag.String("start", "Start", "name of start production")
func usage() {
fmt.Fprintf(os.Stderr, "usage: go tool ebnflint [flags] [filename]\n")
flag.PrintDefaults()
os.Exit(1)
}
// Markers around EBNF sections in .html files
var (
open = []byte(`<pre class="ebnf">`)
close = []byte(`</pre>`)
)
func report(err error) {
scanner.PrintError(os.Stderr, err)
os.Exit(1)
}
func extractEBNF(src []byte) []byte {
var buf bytes.Buffer
for {
// i = beginning of EBNF text
i := bytes.Index(src, open)
if i < 0 {
break // no EBNF found - we are done
}
i += len(open)
// write as many newlines as found in the excluded text
// to maintain correct line numbers in error messages
for _, ch := range src[0:i] {
if ch == '\n' {
buf.WriteByte('\n')
}
}
// j = end of EBNF text (or end of source)
j := bytes.Index(src[i:], close) // close marker
if j < 0 {
j = len(src) - i
}
j += i
// copy EBNF text
buf.Write(src[i:j])
// advance
src = src[j:]
}
return buf.Bytes()
}
func main() {
flag.Parse()
var (
name string
r io.Reader
)
switch flag.NArg() {
case 0:
name, r = "<stdin>", os.Stdin
case 1:
name = flag.Arg(0)
default:
usage()
}
if err := verify(name, *start, r); err != nil {
report(err)
}
}
func verify(name, start string, r io.Reader) error {
if r == nil {
f, err := os.Open(name)
if err != nil {
return err
}
defer f.Close()
r = f
}
src, err := ioutil.ReadAll(r)
if err != nil {
return err
}
if filepath.Ext(name) == ".html" || bytes.Index(src, open) >= 0 {
src = extractEBNF(src)
}
grammar, err := ebnf.Parse(name, bytes.NewBuffer(src))
if err != nil {
return err
}
return ebnf.Verify(grammar, start)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"runtime"
"testing"
)
func TestSpec(t *testing.T) {
if err := verify(runtime.GOROOT()+"/doc/go_spec.html", "SourceFile", nil); err != nil {
t.Fatal(err)
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The gotype command does syntactic and semantic analysis of Go files
and packages similar to the analysis performed by the front-end of
a Go compiler. Errors are reported if the analysis fails; otherwise
gotype is quiet (unless -v is set).
Without a list of paths, gotype processes the standard input, which must
be the source of a single package file.
Given a list of file names, each file must be a source file belonging to
the same package unless the package name is explicitly specified with the
-p flag.
Given a directory name, gotype collects all .go files in the directory
and processes them as if they were provided as an explicit list of file
names. Each directory is processed independently. Files starting with .
or not ending in .go are ignored.
Usage:
gotype [flags] [path ...]
The flags are:
-e
Print all (including spurious) errors.
-p pkgName
Process only those files in package pkgName.
-r
Recursively process subdirectories.
-v
Verbose mode.
Debugging flags:
-comments
Parse comments (ignored if -ast not set).
-ast
Print AST (disables concurrent parsing).
-trace
Print parse trace (disables concurrent parsing).
Examples
To check the files file.go, old.saved, and .ignored:
gotype file.go old.saved .ignored
To check all .go files belonging to package main in the current directory
and recursively in all subdirectories:
gotype -p main -r .
To verify the output of a pipe:
echo "package foo" | gotype
*/
package documentation
// BUG(gri): At the moment, only single-file scope analysis is performed.
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"errors"
"exp/types"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
var (
// main operation modes
pkgName = flag.String("p", "", "process only those files in package pkgName")
recursive = flag.Bool("r", false, "recursively process subdirectories")
verbose = flag.Bool("v", false, "verbose mode")
allErrors = flag.Bool("e", false, "print all (including spurious) errors")
// debugging support
parseComments = flag.Bool("comments", false, "parse comments (ignored if -ast not set)")
printTrace = flag.Bool("trace", false, "print parse trace")
printAST = flag.Bool("ast", false, "print AST")
)
var exitCode = 0
func usage() {
fmt.Fprintf(os.Stderr, "usage: gotype [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(2)
}
func report(err error) {
scanner.PrintError(os.Stderr, err)
exitCode = 2
}
// parse returns the AST for the Go source src.
// The filename is for error reporting only.
// The result is nil if there were errors or if
// the file does not belong to the -p package.
func parse(fset *token.FileSet, filename string, src []byte) *ast.File {
if *verbose {
fmt.Println(filename)
}
// ignore files with different package name
if *pkgName != "" {
file, err := parser.ParseFile(fset, filename, src, parser.PackageClauseOnly)
if err != nil {
report(err)
return nil
}
if file.Name.Name != *pkgName {
if *verbose {
fmt.Printf("\tignored (package %s)\n", file.Name.Name)
}
return nil
}
}
// parse entire file
mode := parser.DeclarationErrors
if *allErrors {
mode |= parser.SpuriousErrors
}
if *parseComments && *printAST {
mode |= parser.ParseComments
}
if *printTrace {
mode |= parser.Trace
}
file, err := parser.ParseFile(fset, filename, src, mode)
if err != nil {
report(err)
return nil
}
if *printAST {
ast.Print(fset, file)
}
return file
}
func parseStdin(fset *token.FileSet) (files map[string]*ast.File) {
files = make(map[string]*ast.File)
src, err := ioutil.ReadAll(os.Stdin)
if err != nil {
report(err)
return
}
const filename = "<standard input>"
if file := parse(fset, filename, src); file != nil {
files[filename] = file
}
return
}
func parseFiles(fset *token.FileSet, filenames []string) (files map[string]*ast.File) {
files = make(map[string]*ast.File)
for _, filename := range filenames {
src, err := ioutil.ReadFile(filename)
if err != nil {
report(err)
continue
}
if file := parse(fset, filename, src); file != nil {
if files[filename] != nil {
report(errors.New(fmt.Sprintf("%q: duplicate file", filename)))
continue
}
files[filename] = file
}
}
return
}
func isGoFilename(filename string) bool {
// ignore non-Go files
return !strings.HasPrefix(filename, ".") && strings.HasSuffix(filename, ".go")
}
func processDirectory(dirname string) {
f, err := os.Open(dirname)
if err != nil {
report(err)
return
}
filenames, err := f.Readdirnames(-1)
f.Close()
if err != nil {
report(err)
// continue since filenames may not be empty
}
for i, filename := range filenames {
filenames[i] = filepath.Join(dirname, filename)
}
processFiles(filenames, false)
}
func processFiles(filenames []string, allFiles bool) {
i := 0
for _, filename := range filenames {
switch info, err := os.Stat(filename); {
case err != nil:
report(err)
case info.IsDir():
if allFiles || *recursive {
processDirectory(filename)
}
default:
if allFiles || isGoFilename(info.Name()) {
filenames[i] = filename
i++
}
}
}
fset := token.NewFileSet()
processPackage(fset, parseFiles(fset, filenames[0:i]))
}
func processPackage(fset *token.FileSet, files map[string]*ast.File) {
// make a package (resolve all identifiers)
pkg, err := ast.NewPackage(fset, files, types.GcImport, types.Universe)
if err != nil {
report(err)
return
}
_, err = types.Check(fset, pkg)
if err != nil {
report(err)
}
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() == 0 {
fset := token.NewFileSet()
processPackage(fset, parseStdin(fset))
} else {
processFiles(flag.Args(), true)
}
os.Exit(exitCode)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"path/filepath"
"runtime"
"testing"
)
func runTest(t *testing.T, path, pkg string) {
exitCode = 0
*pkgName = pkg
*recursive = false
if pkg == "" {
processFiles([]string{path}, true)
} else {
processDirectory(path)
}
if exitCode != 0 {
t.Errorf("processing %s failed: exitCode = %d", path, exitCode)
}
}
var tests = []struct {
path string
pkg string
}{
// individual files
{"testdata/test1.go", ""},
// directories
{filepath.Join(runtime.GOROOT(), "src/pkg/go/ast"), "ast"},
{filepath.Join(runtime.GOROOT(), "src/pkg/go/doc"), "doc"},
{filepath.Join(runtime.GOROOT(), "src/pkg/go/token"), "scanner"},
{filepath.Join(runtime.GOROOT(), "src/pkg/go/scanner"), "scanner"},
{filepath.Join(runtime.GOROOT(), "src/pkg/go/parser"), "parser"},
{filepath.Join(runtime.GOROOT(), "src/pkg/exp/types"), "types"},
}
func Test(t *testing.T) {
for _, test := range tests {
runTest(t, test.path, test.pkg)
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
func _() {
// the scope of a local type declaration starts immediately after the type name
type T struct{ _ *T }
}
func _(x interface{}) {
// the variable defined by a TypeSwitchGuard is declared in each TypeCaseClause
switch t := x.(type) {
case int:
_ = t
case float32:
_ = t
default:
_ = t
}
// the variable defined by a TypeSwitchGuard must not conflict with other
// variables declared in the initial simple statement
switch t := 0; t := x.(type) {
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
// Section 12.2.3.2 of the HTML5 specification says "The following elements
// have varying levels of special parsing rules".
// http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#the-stack-of-open-elements
var isSpecialElementMap = map[string]bool{
"address": true,
"applet": true,
"area": true,
"article": true,
"aside": true,
"base": true,
"basefont": true,
"bgsound": true,
"blockquote": true,
"body": true,
"br": true,
"button": true,
"caption": true,
"center": true,
"col": true,
"colgroup": true,
"command": true,
"dd": true,
"details": true,
"dir": true,
"div": true,
"dl": true,
"dt": true,
"embed": true,
"fieldset": true,
"figcaption": true,
"figure": true,
"footer": true,
"form": true,
"frame": true,
"frameset": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"head": true,
"header": true,
"hgroup": true,
"hr": true,
"html": true,
"iframe": true,
"img": true,
"input": true,
"isindex": true,
"li": true,
"link": true,
"listing": true,
"marquee": true,
"menu": true,
"meta": true,
"nav": true,
"noembed": true,
"noframes": true,
"noscript": true,
"object": true,
"ol": true,
"p": true,
"param": true,
"plaintext": true,
"pre": true,
"script": true,
"section": true,
"select": true,
"style": true,
"summary": true,
"table": true,
"tbody": true,
"td": true,
"textarea": true,
"tfoot": true,
"th": true,
"thead": true,
"title": true,
"tr": true,
"ul": true,
"wbr": true,
"xmp": true,
}
func isSpecialElement(element *Node) bool {
switch element.Namespace {
case "", "html":
return isSpecialElementMap[element.Data]
case "svg":
return element.Data == "foreignObject"
}
return false
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package html implements an HTML5-compliant tokenizer and parser.
INCOMPLETE.
Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
caller's responsibility to ensure that r provides UTF-8 encoded HTML.
z := html.NewTokenizer(r)
Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
which parses the next token and returns its type, or an error:
for {
tt := z.Next()
if tt == html.ErrorToken {
// ...
return ...
}
// Process the current token.
}
There are two APIs for retrieving the current token. The high-level API is to
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
allow optionally calling Raw after Next but before Token, Text, TagName, or
TagAttr. In EBNF notation, the valid call sequence per token is:
Next {Raw} [ Token | Text | TagName {TagAttr} ]
Token returns an independent data structure that completely describes a token.
Entities (such as "&lt;") are unescaped, tag names and attribute keys are
lower-cased, and attributes are collected into a []Attribute. For example:
for {
if z.Next() == html.ErrorToken {
// Returning io.EOF indicates success.
return z.Err()
}
emitToken(z.Token())
}
The low-level API performs fewer allocations and copies, but the contents of
the []byte values returned by Text, TagName and TagAttr may change on the next
call to Next. For example, to extract an HTML page's anchor text:
depth := 0
for {
tt := z.Next()
switch tt {
case ErrorToken:
return z.Err()
case TextToken:
if depth > 0 {
// emitBytes should copy the []byte it receives,
// if it doesn't process it immediately.
emitBytes(z.Text())
}
case StartTagToken, EndTagToken:
tn, _ := z.TagName()
if len(tn) == 1 && tn[0] == 'a' {
if tt == StartTagToken {
depth++
} else {
depth--
}
}
}
}
Parsing is done by calling Parse with an io.Reader, which returns the root of
the parse tree (the document element) as a *Node. It is the caller's
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
example, to process each anchor node in depth-first order:
doc, err := html.Parse(r)
if err != nil {
// ...
}
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
// Do something with n...
}
for _, c := range n.Child {
f(c)
}
}
f(doc)
The relevant specifications include:
http://www.whatwg.org/specs/web-apps/current-work/multipage/syntax.html and
http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html
*/
package html
// The tokenization algorithm implemented by this package is not a line-by-line
// transliteration of the relatively verbose state-machine in the WHATWG
// specification. A more direct approach is used instead, where the program
// counter implies the state, such as whether it is tokenizing a tag or a text
// node. Specification compliance is verified by checking expected and actual
// outputs over a test suite rather than aiming for algorithmic fidelity.
// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
// TODO(nigeltao): How does parsing interact with a JavaScript engine?
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"strings"
)
// parseDoctype parses the data from a DoctypeToken into a name,
// public identifier, and system identifier. It returns a Node whose Type
// is DoctypeNode, whose Data is the name, and which has attributes
// named "system" and "public" for the two identifiers if they were present.
// quirks is whether the document should be parsed in "quirks mode".
func parseDoctype(s string) (n *Node, quirks bool) {
n = &Node{Type: DoctypeNode}
// Find the name.
space := strings.IndexAny(s, whitespace)
if space == -1 {
space = len(s)
}
n.Data = s[:space]
// The comparison to "html" is case-sensitive.
if n.Data != "html" {
quirks = true
}
n.Data = strings.ToLower(n.Data)
s = strings.TrimLeft(s[space:], whitespace)
if len(s) < 6 {
// It can't start with "PUBLIC" or "SYSTEM".
// Ignore the rest of the string.
return n, quirks || s != ""
}
key := strings.ToLower(s[:6])
s = s[6:]
for key == "public" || key == "system" {
s = strings.TrimLeft(s, whitespace)
if s == "" {
break
}
quote := s[0]
if quote != '"' && quote != '\'' {
break
}
s = s[1:]
q := strings.IndexRune(s, rune(quote))
var id string
if q == -1 {
id = s
s = ""
} else {
id = s[:q]
s = s[q+1:]
}
n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
if key == "public" {
key = "system"
} else {
key = ""
}
}
if key != "" || s != "" {
quirks = true
} else if len(n.Attr) > 0 {
if n.Attr[0].Key == "public" {
public := strings.ToLower(n.Attr[0].Val)
switch public {
case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
quirks = true
default:
for _, q := range quirkyIDs {
if strings.HasPrefix(public, q) {
quirks = true
break
}
}
}
// The following two public IDs only cause quirks mode if there is no system ID.
if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
quirks = true
}
}
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
quirks = true
}
}
return n, quirks
}
// quirkyIDs is a list of public doctype identifiers that cause a document
// to be interpreted in quirks mode. The identifiers should be in lower case.
var quirkyIDs = []string{
"+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//",
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"testing"
"unicode/utf8"
)
func TestEntityLength(t *testing.T) {
// We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).
// The +1 comes from the leading "&". This property implies that the length of
// unescaped text is <= the length of escaped text.
for k, v := range entity {
if 1+len(k) < utf8.RuneLen(v) {
t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v))
}
if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {
t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon)
}
}
for k, v := range entity2 {
if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {
t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1]))
}
}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bytes"
"strings"
"unicode/utf8"
)
// These replacements permit compatibility with old numeric entities that
// assumed Windows-1252 encoding.
// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference
var replacementTable = [...]rune{
'\u20AC', // First entry is what 0x80 should be replaced with.
'\u0081',
'\u201A',
'\u0192',
'\u201E',
'\u2026',
'\u2020',
'\u2021',
'\u02C6',
'\u2030',
'\u0160',
'\u2039',
'\u0152',
'\u008D',
'\u017D',
'\u008F',
'\u0090',
'\u2018',
'\u2019',
'\u201C',
'\u201D',
'\u2022',
'\u2013',
'\u2014',
'\u02DC',
'\u2122',
'\u0161',
'\u203A',
'\u0153',
'\u009D',
'\u017E',
'\u0178', // Last entry is 0x9F.
// 0x00->'\uFFFD' is handled programmatically.
// 0x0D->'\u000D' is a no-op.
}
// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
// Precondition: b[src] == '&' && dst <= src.
// attribute should be true if parsing an attribute value.
func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference
// i starts at 1 because we already know that s[0] == '&'.
i, s := 1, b[src:]
if len(s) <= 1 {
b[dst] = b[src]
return dst + 1, src + 1
}
if s[i] == '#' {
if len(s) <= 3 { // We need to have at least "&#.".
b[dst] = b[src]
return dst + 1, src + 1
}
i++
c := s[i]
hex := false
if c == 'x' || c == 'X' {
hex = true
i++
}
x := '\x00'
for i < len(s) {
c = s[i]
i++
if hex {
if '0' <= c && c <= '9' {
x = 16*x + rune(c) - '0'
continue
} else if 'a' <= c && c <= 'f' {
x = 16*x + rune(c) - 'a' + 10
continue
} else if 'A' <= c && c <= 'F' {
x = 16*x + rune(c) - 'A' + 10
continue
}
} else if '0' <= c && c <= '9' {
x = 10*x + rune(c) - '0'
continue
}
if c != ';' {
i--
}
break
}
if i <= 3 { // No characters matched.
b[dst] = b[src]
return dst + 1, src + 1
}
if 0x80 <= x && x <= 0x9F {
// Replace characters from Windows-1252 with UTF-8 equivalents.
x = replacementTable[x-0x80]
} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
// Replace invalid characters with the replacement character.
x = '\uFFFD'
}
return dst + utf8.EncodeRune(b[dst:], x), src + i
}
// Consume the maximum number of characters possible, with the
// consumed characters matching one of the named references.
for i < len(s) {
c := s[i]
i++
// Lower-cased characters are more common in entities, so we check for them first.
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
continue
}
if c != ';' {
i--
}
break
}
entityName := string(s[1:i])
if entityName == "" {
// No-op.
} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
// No-op.
} else if x := entity[entityName]; x != 0 {
return dst + utf8.EncodeRune(b[dst:], x), src + i
} else if x := entity2[entityName]; x[0] != 0 {
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
} else if !attribute {
maxLen := len(entityName) - 1
if maxLen > longestEntityWithoutSemicolon {
maxLen = longestEntityWithoutSemicolon
}
for j := maxLen; j > 1; j-- {
if x := entity[entityName[:j]]; x != 0 {
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
}
}
}
dst1, src1 = dst+i, src+i
copy(b[dst:dst1], b[src:src1])
return dst1, src1
}
// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
func unescape(b []byte) []byte {
for i, c := range b {
if c == '&' {
dst, src := unescapeEntity(b, i, i, false)
for src < len(b) {
c := b[src]
if c == '&' {
dst, src = unescapeEntity(b, dst, src, false)
} else {
b[dst] = c
dst, src = dst+1, src+1
}
}
return b[0:dst]
}
}
return b
}
// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
func lower(b []byte) []byte {
for i, c := range b {
if 'A' <= c && c <= 'Z' {
b[i] = c + 'a' - 'A'
}
}
return b
}
const escapedChars = `&'<>"`
func escape(w writer, s string) error {
i := strings.IndexAny(s, escapedChars)
for i != -1 {
if _, err := w.WriteString(s[:i]); err != nil {
return err
}
var esc string
switch s[i] {
case '&':
esc = "&amp;"
case '\'':
esc = "&apos;"
case '<':
esc = "&lt;"
case '>':
esc = "&gt;"
case '"':
esc = "&quot;"
default:
panic("unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {
return err
}
i = strings.IndexAny(s, escapedChars)
}
_, err := w.WriteString(s)
return err
}
// EscapeString escapes special characters like "<" to become "&lt;". It
// escapes only five such characters: amp, apos, lt, gt and quot.
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
// always true.
func EscapeString(s string) string {
if strings.IndexAny(s, escapedChars) == -1 {
return s
}
var buf bytes.Buffer
escape(&buf, s)
return buf.String()
}
// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
// larger range of entities than EscapeString escapes. For example, "&aacute;"
// unescapes to "á", as does "&#225;" and "&#xE1;".
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
// always true.
func UnescapeString(s string) string {
for _, c := range s {
if c == '&' {
return string(unescape([]byte(s)))
}
}
return s
}
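// A minimal usage sketch, not part of the original package, illustrating the
// round-trip property documented above: UnescapeString(EscapeString(s)) == s,
// while EscapeString(UnescapeString(s)) may differ from s (for example,
// "&aacute;" unescapes to "á", which escapes back to "á", not "&aacute;").
func exampleEscapeRoundTrip() {
	s := `if a < b && c > d { say "hi" }`
	escaped := EscapeString(s)
	// escaped is `if a &lt; b &amp;&amp; c &gt; d { say &quot;hi&quot; }`.
	roundTripped := UnescapeString(escaped)
	// roundTripped == s is always true.
	_, _ = escaped, roundTripped
}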
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"strings"
)
func adjustForeignAttributes(aa []Attribute) {
for i, a := range aa {
if a.Key == "" || a.Key[0] != 'x' {
continue
}
switch a.Key {
case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
j := strings.Index(a.Key, ":")
aa[i].Namespace = a.Key[:j]
aa[i].Key = a.Key[j+1:]
}
}
}
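// A minimal sketch, not in the original source, of the effect of
// adjustForeignAttributes: keys in the xlink/xml/xmlns list above are split at
// the colon into a Namespace and a Key; any other attribute is left untouched.
func exampleAdjustForeignAttributes() {
	aa := []Attribute{
		{Key: "xlink:href", Val: "#x"},
		{Key: "width", Val: "10"},
	}
	adjustForeignAttributes(aa)
	// aa[0] is now {Namespace: "xlink", Key: "href", Val: "#x"};
	// aa[1] is unchanged.
}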
func htmlIntegrationPoint(n *Node) bool {
if n.Type != ElementNode {
return false
}
switch n.Namespace {
case "math":
// TODO: annotation-xml elements whose start tags have "text/html" or
// "application/xhtml+xml" encodings.
case "svg":
switch n.Data {
case "desc", "foreignObject", "title":
return true
}
}
return false
}
// Section 12.2.5.5.
var breakout = map[string]bool{
"b": true,
"big": true,
"blockquote": true,
"body": true,
"br": true,
"center": true,
"code": true,
"dd": true,
"div": true,
"dl": true,
"dt": true,
"em": true,
"embed": true,
"font": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"head": true,
"hr": true,
"i": true,
"img": true,
"li": true,
"listing": true,
"menu": true,
"meta": true,
"nobr": true,
"ol": true,
"p": true,
"pre": true,
"ruby": true,
"s": true,
"small": true,
"span": true,
"strong": true,
"strike": true,
"sub": true,
"sup": true,
"table": true,
"tt": true,
"u": true,
"ul": true,
"var": true,
}
// Section 12.2.5.5.
var svgTagNameAdjustments = map[string]string{
"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath",
}
// TODO: add look-up tables for MathML and SVG attribute adjustments.
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
// A NodeType is the type of a Node.
type NodeType int
const (
ErrorNode NodeType = iota
TextNode
DocumentNode
ElementNode
CommentNode
DoctypeNode
scopeMarkerNode
)
// Section 12.2.3.3 says "scope markers are inserted when entering applet
// elements, buttons, object elements, marquees, table cells, and table
// captions, and are used to prevent formatting from 'leaking'".
var scopeMarker = Node{Type: scopeMarkerNode}
// A Node consists of a NodeType and some Data (tag name for element nodes,
// content for text) and is part of a tree of Nodes. Element nodes may also
// have a Namespace and contain a slice of Attributes. Data is unescaped, so
// that it looks like "a<b" rather than "a&lt;b".
//
// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
// "svg" is short for "http://www.w3.org/2000/svg".
type Node struct {
Parent *Node
Child []*Node
Type NodeType
Data string
Namespace string
Attr []Attribute
}
// Add adds a node as a child of n.
// It will panic if the child's parent is not nil.
func (n *Node) Add(child *Node) {
if child.Parent != nil {
panic("html: Node.Add called for a child Node that already has a parent")
}
child.Parent = n
n.Child = append(n.Child, child)
}
// Remove removes a node as a child of n.
// It will panic if the child's parent is not n.
func (n *Node) Remove(child *Node) {
if child.Parent == n {
child.Parent = nil
for i, m := range n.Child {
if m == child {
copy(n.Child[i:], n.Child[i+1:])
j := len(n.Child) - 1
n.Child[j] = nil
n.Child = n.Child[:j]
return
}
}
}
panic("html: Node.Remove called for a non-child Node")
}
// reparentChildren reparents all of src's child nodes to dst.
func reparentChildren(dst, src *Node) {
for _, n := range src.Child {
if n.Parent != src {
panic("html: nodes have an inconsistent parent/child relationship")
}
n.Parent = dst
}
dst.Child = append(dst.Child, src.Child...)
src.Child = nil
}
// clone returns a new node with the same type, data and attributes.
// The clone has no parent and no children.
func (n *Node) clone() *Node {
m := &Node{
Type: n.Type,
Data: n.Data,
Attr: make([]Attribute, len(n.Attr)),
}
copy(m.Attr, n.Attr)
return m
}
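// A minimal sketch, not part of the original package, tying Add, Remove and
// clone together: Add wires up the Parent and Child links, Remove undoes them,
// and clone copies the type, data and attributes but not the tree structure.
func exampleNodeTree() {
	body := &Node{Type: ElementNode, Data: "body"}
	p := &Node{Type: ElementNode, Data: "p"}
	body.Add(p) // p.Parent == body; body.Child == []*Node{p}
	dup := p.clone()
	body.Add(dup)  // legal: the clone starts with a nil Parent
	body.Remove(p) // body.Child == []*Node{dup}; p.Parent == nil
}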
// nodeStack is a stack of nodes.
type nodeStack []*Node
// pop pops the stack. It will panic if s is empty.
func (s *nodeStack) pop() *Node {
i := len(*s)
n := (*s)[i-1]
*s = (*s)[:i-1]
return n
}
// top returns the most recently pushed node, or nil if s is empty.
func (s *nodeStack) top() *Node {
if i := len(*s); i > 0 {
return (*s)[i-1]
}
return nil
}
// index returns the index of the top-most occurrence of n in the stack, or -1
// if n is not present.
func (s *nodeStack) index(n *Node) int {
for i := len(*s) - 1; i >= 0; i-- {
if (*s)[i] == n {
return i
}
}
return -1
}
// insert inserts a node at the given index.
func (s *nodeStack) insert(i int, n *Node) {
(*s) = append(*s, nil)
copy((*s)[i+1:], (*s)[i:])
(*s)[i] = n
}
// remove removes a node from the stack. It is a no-op if n is not present.
func (s *nodeStack) remove(n *Node) {
i := s.index(n)
if i == -1 {
return
}
copy((*s)[i:], (*s)[i+1:])
j := len(*s) - 1
(*s)[j] = nil
*s = (*s)[:j]
}
// TODO(nigeltao): forTag no longer used. Should it be deleted?
// forTag returns the top-most element node with the given tag.
func (s *nodeStack) forTag(tag string) *Node {
for i := len(*s) - 1; i >= 0; i-- {
n := (*s)[i]
if n.Type == ElementNode && n.Data == tag {
return n
}
}
return nil
}
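// A minimal sketch, not in the original source, of how the parser uses
// nodeStack: elements are pushed with append, and pop, top, index and remove
// behave as documented above.
func exampleNodeStack() {
	root := &Node{Type: ElementNode, Data: "html"}
	body := &Node{Type: ElementNode, Data: "body"}
	var s nodeStack
	s = append(s, root, body) // push
	_ = s.top()               // body
	_ = s.index(root)         // 0
	s.remove(body)            // s is now [root]
	_ = s.pop()               // root; s is now empty
}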
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"testing"
)
// readParseTest reads a single test case from r.
func readParseTest(r *bufio.Reader) (text, want, context string, err error) {
line, err := r.ReadSlice('\n')
if err != nil {
return "", "", "", err
}
var b []byte
// Read the HTML.
if string(line) != "#data\n" {
return "", "", "", fmt.Errorf(`got %q want "#data\n"`, line)
}
for {
line, err = r.ReadSlice('\n')
if err != nil {
return "", "", "", err
}
if line[0] == '#' {
break
}
b = append(b, line...)
}
text = strings.TrimRight(string(b), "\n")
b = b[:0]
// Skip the error list.
if string(line) != "#errors\n" {
return "", "", "", fmt.Errorf(`got %q want "#errors\n"`, line)
}
for {
line, err = r.ReadSlice('\n')
if err != nil {
return "", "", "", err
}
if line[0] == '#' {
break
}
}
if string(line) == "#document-fragment\n" {
line, err = r.ReadSlice('\n')
if err != nil {
return "", "", "", err
}
context = strings.TrimSpace(string(line))
line, err = r.ReadSlice('\n')
if err != nil {
return "", "", "", err
}
}
// Read the dump of what the parse tree should be.
if string(line) != "#document\n" {
return "", "", "", fmt.Errorf(`got %q want "#document\n"`, line)
}
for {
line, err = r.ReadSlice('\n')
if err != nil && err != io.EOF {
return "", "", "", err
}
if len(line) == 0 || len(line) == 1 && line[0] == '\n' {
break
}
b = append(b, line...)
}
return text, string(b), context, nil
}
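// A minimal usage sketch, not in the original tests, assuming the webkit .dat
// format shown later in this change: the return values correspond to the
// #data block, the #document dump and an optional #document-fragment context.
func exampleReadParseTest() {
	input := "#data\n<p>hi\n#errors\n#document\n| <html>\n\n"
	r := bufio.NewReader(strings.NewReader(input))
	text, want, context, err := readParseTest(r)
	// text == "<p>hi", want == "| <html>\n", context == "", err == nil.
	_, _, _, _ = text, want, context, err
}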
func dumpIndent(w io.Writer, level int) {
io.WriteString(w, "| ")
for i := 0; i < level; i++ {
io.WriteString(w, " ")
}
}
func dumpLevel(w io.Writer, n *Node, level int) error {
dumpIndent(w, level)
switch n.Type {
case ErrorNode:
return errors.New("unexpected ErrorNode")
case DocumentNode:
return errors.New("unexpected DocumentNode")
case ElementNode:
if n.Namespace != "" {
fmt.Fprintf(w, "<%s %s>", n.Namespace, n.Data)
} else {
fmt.Fprintf(w, "<%s>", n.Data)
}
attr := n.Attr
if len(attr) == 2 && attr[0].Namespace == "xml" && attr[1].Namespace == "xlink" {
// Some of the test cases in tests10.dat change the order of adjusted
// foreign attributes, but that behavior is not in the spec, and could
// simply be an implementation detail of html5lib's Python map ordering.
attr[0], attr[1] = attr[1], attr[0]
}
for _, a := range attr {
io.WriteString(w, "\n")
dumpIndent(w, level+1)
if a.Namespace != "" {
fmt.Fprintf(w, `%s %s="%s"`, a.Namespace, a.Key, a.Val)
} else {
fmt.Fprintf(w, `%s="%s"`, a.Key, a.Val)
}
}
case TextNode:
fmt.Fprintf(w, `"%s"`, n.Data)
case CommentNode:
fmt.Fprintf(w, "<!-- %s -->", n.Data)
case DoctypeNode:
fmt.Fprintf(w, "<!DOCTYPE %s", n.Data)
if n.Attr != nil {
var p, s string
for _, a := range n.Attr {
switch a.Key {
case "public":
p = a.Val
case "system":
s = a.Val
}
}
if p != "" || s != "" {
fmt.Fprintf(w, ` "%s"`, p)
fmt.Fprintf(w, ` "%s"`, s)
}
}
io.WriteString(w, ">")
case scopeMarkerNode:
return errors.New("unexpected scopeMarkerNode")
default:
return errors.New("unknown node type")
}
io.WriteString(w, "\n")
for _, c := range n.Child {
if err := dumpLevel(w, c, level+1); err != nil {
return err
}
}
return nil
}
func dump(n *Node) (string, error) {
if n == nil || len(n.Child) == 0 {
return "", nil
}
var b bytes.Buffer
for _, child := range n.Child {
if err := dumpLevel(&b, child, 0); err != nil {
return "", err
}
}
return b.String(), nil
}
func TestParser(t *testing.T) {
testFiles := []struct {
filename string
// n is the number of test cases to run from that file.
// -1 means all test cases.
n int
}{
// TODO(nigeltao): Process all the test cases from all the .dat files.
{"adoption01.dat", -1},
{"doctype01.dat", -1},
{"tests1.dat", -1},
{"tests2.dat", -1},
{"tests3.dat", -1},
{"tests4.dat", -1},
{"tests5.dat", -1},
{"tests6.dat", -1},
{"tests10.dat", 35},
}
for _, tf := range testFiles {
f, err := os.Open("testdata/webkit/" + tf.filename)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
for i := 0; i != tf.n; i++ {
text, want, context, err := readParseTest(r)
if err == io.EOF && tf.n == -1 {
break
}
if err != nil {
t.Fatal(err)
}
var doc *Node
if context == "" {
doc, err = Parse(strings.NewReader(text))
if err != nil {
t.Fatal(err)
}
} else {
contextNode := &Node{
Type: ElementNode,
Data: context,
}
nodes, err := ParseFragment(strings.NewReader(text), contextNode)
if err != nil {
t.Fatal(err)
}
doc = &Node{
Type: DocumentNode,
}
for _, n := range nodes {
doc.Add(n)
}
}
got, err := dump(doc)
if err != nil {
t.Fatal(err)
}
// Compare the parsed tree to the #document section.
if got != want {
t.Errorf("%s test #%d %q, got vs want:\n----\n%s----\n%s----", tf.filename, i, text, got, want)
continue
}
if renderTestBlacklist[text] || context != "" {
continue
}
// Check that rendering and re-parsing results in an identical tree.
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(Render(pw, doc))
}()
doc1, err := Parse(pr)
if err != nil {
t.Fatal(err)
}
got1, err := dump(doc1)
if err != nil {
t.Fatal(err)
}
if got != got1 {
t.Errorf("%s test #%d %q, got vs got1:\n----\n%s----\n%s----", tf.filename, i, text, got, got1)
continue
}
}
}
}
// Some test inputs result in parse trees that are not 'well-formed' despite
// following the HTML5 recovery algorithms. Rendering and re-parsing such a
// tree will not result in an exact clone of that tree. We blacklist such
// inputs from the render test.
var renderTestBlacklist = map[string]bool{
// The second <a> will be reparented to the first <table>'s parent. This
// results in an <a> whose parent is an <a>, which is not 'well-formed'.
`<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,
// More cases of <a> being reparented:
`<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe`: true,
`<a><table><a></table><p><a><div><a>`: true,
`<a><table><td><a><table></table><a></tr><a></table><a>`: true,
// A <plaintext> element is reparented, putting it before a table.
// A <plaintext> element can't have anything after it in HTML.
`<table><plaintext><td>`: true,
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
)
type writer interface {
io.Writer
WriteByte(byte) error
WriteString(string) (int, error)
}
// Render renders the parse tree n to the given writer.
//
// Rendering is done on a 'best effort' basis: calling Parse on the output of
// Render will always result in something similar to the original tree, but it
// is not necessarily an exact clone unless the original tree was 'well-formed'.
// 'Well-formed' is not easily specified; the HTML5 specification is
// complicated.
//
// Calling Parse on arbitrary input typically results in a 'well-formed' parse
// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
// For example, in a 'well-formed' parse tree, no <a> element is a child of
// another <a> element: parsing "<a><a>" results in two sibling elements.
// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
// children; the <a> is reparented to the <table>'s parent. However, calling
// Parse on "<a><table><a>" does not return an error, but the result has an <a>
// element with an <a> child, and is therefore not 'well-formed'.
//
// Programmatically constructed trees are typically also 'well-formed', but it
// is possible to construct a tree that looks innocuous but, when rendered and
// re-parsed, results in a different tree. A simple example is that a solitary
// text node would become a tree containing <html>, <head> and <body> elements.
// Another example is that the programmatic equivalent of "a<head>b</head>c"
// becomes "<html><head><head/><body>abc</body></html>".
func Render(w io.Writer, n *Node) error {
if x, ok := w.(writer); ok {
return render(x, n)
}
buf := bufio.NewWriter(w)
if err := render(buf, n); err != nil {
return err
}
return buf.Flush()
}
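// A minimal usage sketch, not part of the original package. Render accepts any
// io.Writer; *bytes.Buffer already satisfies the internal writer interface, so
// no extra buffering is allocated. (This file does not import "bytes"; the
// sketch assumes it is available.)
func exampleRender() {
	n := &Node{Type: ElementNode, Data: "p"}
	n.Add(&Node{Type: TextNode, Data: "a < b"})
	var buf bytes.Buffer
	if err := Render(&buf, n); err != nil {
		return
	}
	// buf.String() == "<p>a &lt; b</p>"
}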
// plaintextAbort is returned from render1 when a <plaintext> element
// has been rendered. No more end tags should be rendered after that.
var plaintextAbort = errors.New("html: internal error (plaintext abort)")
func render(w writer, n *Node) error {
err := render1(w, n)
if err == plaintextAbort {
err = nil
}
return err
}
func render1(w writer, n *Node) error {
// Render non-element nodes; these are the easy cases.
switch n.Type {
case ErrorNode:
return errors.New("html: cannot render an ErrorNode node")
case TextNode:
return escape(w, n.Data)
case DocumentNode:
for _, c := range n.Child {
if err := render1(w, c); err != nil {
return err
}
}
return nil
case ElementNode:
// No-op.
case CommentNode:
if _, err := w.WriteString("<!--"); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
if _, err := w.WriteString("-->"); err != nil {
return err
}
return nil
case DoctypeNode:
if _, err := w.WriteString("<!DOCTYPE "); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
if n.Attr != nil {
var p, s string
for _, a := range n.Attr {
switch a.Key {
case "public":
p = a.Val
case "system":
s = a.Val
}
}
if p != "" {
if _, err := w.WriteString(" PUBLIC "); err != nil {
return err
}
if err := writeQuoted(w, p); err != nil {
return err
}
if s != "" {
if err := w.WriteByte(' '); err != nil {
return err
}
if err := writeQuoted(w, s); err != nil {
return err
}
}
} else if s != "" {
if _, err := w.WriteString(" SYSTEM "); err != nil {
return err
}
if err := writeQuoted(w, s); err != nil {
return err
}
}
}
return w.WriteByte('>')
default:
return errors.New("html: unknown node type")
}
// Render the <xxx> opening tag.
if err := w.WriteByte('<'); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
for _, a := range n.Attr {
if err := w.WriteByte(' '); err != nil {
return err
}
if a.Namespace != "" {
if _, err := w.WriteString(a.Namespace); err != nil {
return err
}
if err := w.WriteByte(':'); err != nil {
return err
}
}
if _, err := w.WriteString(a.Key); err != nil {
return err
}
if _, err := w.WriteString(`="`); err != nil {
return err
}
if err := escape(w, a.Val); err != nil {
return err
}
if err := w.WriteByte('"'); err != nil {
return err
}
}
if voidElements[n.Data] {
if len(n.Child) != 0 {
return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
}
_, err := w.WriteString("/>")
return err
}
if err := w.WriteByte('>'); err != nil {
return err
}
// Add initial newline where there is danger of a newline being ignored.
if len(n.Child) > 0 && n.Child[0].Type == TextNode && strings.HasPrefix(n.Child[0].Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":
if err := w.WriteByte('\n'); err != nil {
return err
}
}
}
// Render any child nodes.
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
for _, c := range n.Child {
if c.Type != TextNode {
return fmt.Errorf("html: raw text element <%s> has non-text child node", n.Data)
}
if _, err := w.WriteString(c.Data); err != nil {
return err
}
}
if n.Data == "plaintext" {
// Don't render anything else. <plaintext> must be the
// last element in the file, with no closing tag.
return plaintextAbort
}
case "textarea", "title":
for _, c := range n.Child {
if c.Type != TextNode {
return fmt.Errorf("html: RCDATA element <%s> has non-text child node", n.Data)
}
if err := render1(w, c); err != nil {
return err
}
}
default:
for _, c := range n.Child {
if err := render1(w, c); err != nil {
return err
}
}
}
// Render the </xxx> closing tag.
if _, err := w.WriteString("</"); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
return w.WriteByte('>')
}
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.
// In valid HTML, they can't contain both types of quotes.
func writeQuoted(w writer, s string) error {
var q byte = '"'
if strings.Contains(s, `"`) {
q = '\''
}
if err := w.WriteByte(q); err != nil {
return err
}
if _, err := w.WriteString(s); err != nil {
return err
}
if err := w.WriteByte(q); err != nil {
return err
}
return nil
}
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents.
var voidElements = map[string]bool{
"area": true,
"base": true,
"br": true,
"col": true,
"command": true,
"embed": true,
"hr": true,
"img": true,
"input": true,
"keygen": true,
"link": true,
"meta": true,
"param": true,
"source": true,
"track": true,
"wbr": true,
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bytes"
"testing"
)
func TestRenderer(t *testing.T) {
n := &Node{
Type: ElementNode,
Data: "html",
Child: []*Node{
{
Type: ElementNode,
Data: "head",
},
{
Type: ElementNode,
Data: "body",
Child: []*Node{
{
Type: TextNode,
Data: "0<1",
},
{
Type: ElementNode,
Data: "p",
Attr: []Attribute{
{
Key: "id",
Val: "A",
},
{
Key: "foo",
Val: `abc"def`,
},
},
Child: []*Node{
{
Type: TextNode,
Data: "2",
},
{
Type: ElementNode,
Data: "b",
Attr: []Attribute{
{
Key: "empty",
Val: "",
},
},
Child: []*Node{
{
Type: TextNode,
Data: "3",
},
},
},
{
Type: ElementNode,
Data: "i",
Attr: []Attribute{
{
Key: "backslash",
Val: `\`,
},
},
Child: []*Node{
{
Type: TextNode,
Data: "&4",
},
},
},
},
},
{
Type: TextNode,
Data: "5",
},
{
Type: ElementNode,
Data: "blockquote",
},
{
Type: ElementNode,
Data: "br",
},
{
Type: TextNode,
Data: "6",
},
},
},
},
}
want := `<html><head></head><body>0&lt;1<p id="A" foo="abc&quot;def">` +
`2<b empty="">3</b><i backslash="\">&amp;4</i></p>` +
`5<blockquote></blockquote><br/>6</body></html>`
b := new(bytes.Buffer)
if err := Render(b, n); err != nil {
t.Fatal(err)
}
if got := b.String(); got != want {
t.Errorf("got vs want:\n%s\n%s\n", got, want)
}
}
The *.dat files in this directory are copied from The WebKit Open Source
Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources.
WebKit is licensed under a BSD style license.
http://webkit.org/coding/bsd-license.html says:
Copyright (C) 2009 Apple Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#data
<a><p></a></p>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| <p>
| <a>
#data
<a>1<p>2</a>3</p>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <p>
| <a>
| "2"
| "3"
#data
<a>1<button>2</a>3</button>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <button>
| <a>
| "2"
| "3"
#data
<a>1<b>2</a>3</b>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <b>
| "2"
| <b>
| "3"
#data
<a>1<div>2<div>3</a>4</div>5</div>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <div>
| <a>
| "2"
| <div>
| <a>
| "3"
| "4"
| "5"
#data
<table><a>1<p>2</a>3</p>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <p>
| <a>
| "2"
| "3"
| <table>
#data
<b><b><a><p></a>
#errors
#document
| <html>
| <head>
| <body>
| <b>
| <b>
| <a>
| <p>
| <a>
#data
<b><a><b><p></a>
#errors
#document
| <html>
| <head>
| <body>
| <b>
| <a>
| <b>
| <b>
| <p>
| <a>
#data
<a><b><b><p></a>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| <b>
| <b>
| <b>
| <b>
| <p>
| <a>
#data
<p>1<s id="A">2<b id="B">3</p>4</s>5</b>
#errors
#document
| <html>
| <head>
| <body>
| <p>
| "1"
| <s>
| id="A"
| "2"
| <b>
| id="B"
| "3"
| <s>
| id="A"
| <b>
| id="B"
| "4"
| <b>
| id="B"
| "5"
#data
<table><a>1<td>2</td>3</table>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| "1"
| <a>
| "3"
| <table>
| <tbody>
| <tr>
| <td>
| "2"
#data
<table>A<td>B</td>C</table>
#errors
#document
| <html>
| <head>
| <body>
| "AC"
| <table>
| <tbody>
| <tr>
| <td>
| "B"
#data
<a><svg><tr><input></a>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| <svg svg>
| <svg tr>
| <svg input>
#data
<b>1<i>2<p>3</b>4
#errors
#document
| <html>
| <head>
| <body>
| <b>
| "1"
| <i>
| "2"
| <i>
| <p>
| <b>
| "3"
| "4"
#data
<a><div><style></style><address><a>
#errors
#document
| <html>
| <head>
| <body>
| <a>
| <div>
| <a>
| <style>
| <address>
| <a>
| <a>
#data
FOO<!-- BAR -->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -->
| "BAZ"
#data
FOO<!-- BAR --!>BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -->
| "BAZ"
#data
FOO<!-- BAR -- >BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -- >BAZ -->
#data
FOO<!-- BAR -- <QUX> -- MUX -->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -- <QUX> -- MUX -->
| "BAZ"
#data
FOO<!-- BAR -- <QUX> -- MUX --!>BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -- <QUX> -- MUX -->
| "BAZ"
#data
FOO<!-- BAR -- <QUX> -- MUX -- >BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- BAR -- <QUX> -- MUX -- >BAZ -->
#data
FOO<!---->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- -->
| "BAZ"
#data
FOO<!--->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- -->
| "BAZ"
#data
FOO<!-->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- -->
| "BAZ"
#data
<?xml version="1.0">Hi
#errors
#document
| <!-- ?xml version="1.0" -->
| <html>
| <head>
| <body>
| "Hi"
#data
<?xml version="1.0">
#errors
#document
| <!-- ?xml version="1.0" -->
| <html>
| <head>
| <body>
#data
<?xml version
#errors
#document
| <!-- ?xml version -->
| <html>
| <head>
| <body>
#data
FOO<!----->BAZ
#errors
#document
| <html>
| <head>
| <body>
| "FOO"
| <!-- - -->
| "BAZ"
#data
<button>1</foo>
#errors
#document
| <html>
| <head>
| <body>
| <button>
| "1"
#data
<foo>1<p>2</foo>
#errors
#document
| <html>
| <head>
| <body>
| <foo>
| "1"
| <p>
| "2"
#data
<dd>1</foo>
#errors
#document
| <html>
| <head>
| <body>
| <dd>
| "1"
#data
<foo>1<dd>2</foo>
#errors
#document
| <html>
| <head>
| <body>
| <foo>
| "1"
| <dd>
| "2"
#data
<isindex>
#errors
#document
| <html>
| <head>
| <body>
| <form>
| <hr>
| <label>
| "This is a searchable index. Enter search keywords: "
| <input>
| name="isindex"
| <hr>
#data
<isindex name="A" action="B" prompt="C" foo="D">
#errors
#document
| <html>
| <head>
| <body>
| <form>
| action="B"
| <hr>
| <label>
| "C"
| <input>
| foo="D"
| name="isindex"
| <hr>
#data
<form><isindex>
#errors
#document
| <html>
| <head>
| <body>
| <form>
#data
<input type="hidden"><frameset>
#errors
21: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
31: “frameset” start tag seen.
31: End of file seen and there were open elements.
#document
| <html>
| <head>
| <frameset>
#data
<!DOCTYPE html><table><caption><svg>foo</table>bar
#errors
47: End tag “table” did not match the name of the current open element (“svg”).
47: “table” closed but “caption” was still open.
47: End tag “table” seen, but there were open elements.
36: Unclosed element “svg”.
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <table>
| <caption>
| <svg svg>
| "foo"
| "bar"
#data
FOO&#x000D;ZOO
#errors
#document
| <html>
| <head>
| <body>
| "FOO ZOO"
#data
<p><b id="A"><script>document.getElementById("A").id = "B"</script></p>TEXT</b>
#errors
#document
| <html>
| <head>
| <body>
| <p>
| <b>
| id="B"
| <script>
| "document.getElementById("A").id = "B""
| <b>
| id="A"
| "TEXT"
#data
1<script>document.write("2")</script>3
#errors
#document
| <html>
| <head>
| <body>
| "1"
| <script>
| "document.write("2")"
| "23"
#data
1<script>document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")</script>4
#errors
#document
| <html>
| <head>
| <body>
| "1"
| <script>
| "document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")"
| <script>
| "document.write('2')"
| "2"
| <script>
| "document.write('3')"
| "34"
#data
<!DOCTYPE html><body><p>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| <p>
| "foo"
| <math math>
| <math mtext>
| <i>
| "baz"
| <math annotation-xml>
| <svg svg>
| <svg desc>
| <b>
| "eggs"
| <svg g>
| <svg foreignObject>
| <p>
| "spam"
| <table>
| <tbody>
| <tr>
| <td>
| <img>
| <svg g>
| "quux"
| "bar"
#data
<!DOCTYPE html><body>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
#errors
#document
| <!DOCTYPE html>
| <html>
| <head>
| <body>
| "foo"
| <math math>
| <math mtext>
| <i>
| "baz"
| <math annotation-xml>
| <svg svg>
| <svg desc>
| <b>
| "eggs"
| <svg g>
| <svg foreignObject>
| <p>
| "spam"
| <table>
| <tbody>
| <tr>
| <td>
| <img>
| <svg g>
| "quux"
| "bar"