Commit a0bc379d authored by Russ Cox

undo CL 13084043 / ef4ee02a5853

There is a cleaner, simpler way.

««« original CL description
cmd/5g, cmd/6g, cmd/8g: faster compilation
Replace linked list walk with memset.
This reduces CPU time taken by 'go install -a std' by ~10%.
Before:
real		user		sys
0m23.561s	0m16.625s	0m5.848s
0m23.766s	0m16.624s	0m5.846s
0m23.742s	0m16.621s	0m5.868s
after:
0m22.714s	0m14.858s	0m6.138s
0m22.644s	0m14.875s	0m6.120s
0m22.604s	0m14.854s	0m6.081s

R=golang-dev, r
CC=golang-dev
https://golang.org/cl/13084043
»»»

TBR=dvyukov
CC=golang-dev
https://golang.org/cl/13352049
parent 8cd6341c
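For context only, and not part of the commit itself: below is a minimal standalone C sketch of the two bookkeeping schemes the diff swaps. The first helper is the linked-list reset that this revert restores; the second pair is the numbered-node-plus-memset scheme from CL 13084043 that it removes. The Flow and Graph structs are simplified stand-ins for the compiler's flow-graph types, and resetactive_walk, numbernodes, and resetactive_memset are hypothetical names used only for illustration.

/*
 * Standalone sketch only -- simplified stand-ins for the compiler's
 * flow-graph types; not the actual cmd/5g/6g/8g source.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Flow Flow;
struct Flow {
	Flow *link;   /* next node in the graph's node list */
	int active;   /* restored scheme: visited flag; CL 13084043 scheme: index into active[] */
};

typedef struct {
	Flow *start;  /* head of the node list */
	int num;      /* number of nodes */
} Graph;

/* Scheme restored by this revert: clear every node's flag by walking the list. */
static void
resetactive_walk(Graph *g)
{
	Flow *r;

	for(r=g->start; r!=NULL; r=r->link)
		r->active = 0;
}

/* Scheme from CL 13084043 (removed here): number the nodes once when the
 * graph is built, then clear one shared byte array with memset per pass. */
static unsigned char *active;
static int nactive;

static void
numbernodes(Graph *g)
{
	Flow *r;
	int t;

	for(r=g->start, t=0; r!=NULL; r=r->link, t++)
		r->active = t;
}

static void
resetactive_memset(Graph *g)
{
	if(nactive < g->num) {
		nactive = g->num;
		active = realloc(active, g->num);  /* error check elided, as in the CL */
	}
	memset(active, 0, g->num);
}

int
main(void)
{
	enum { N = 4 };
	Flow nodes[N];
	Graph g;
	int i;

	/* build a tiny 4-node list */
	for(i = 0; i < N; i++)
		nodes[i].link = i+1 < N ? &nodes[i+1] : NULL;
	g.start = &nodes[0];
	g.num = N;

	/* restored scheme: the flag lives in the node itself */
	resetactive_walk(&g);
	nodes[2].active = 1;
	printf("walk scheme: node 2 marked = %d\n", nodes[2].active);

	/* CL 13084043 scheme: the flag lives in active[], indexed by node number */
	numbernodes(&g);
	resetactive_memset(&g);
	active[nodes[2].active] = 1;
	printf("memset scheme: node 2 marked = %d\n", active[nodes[2].active]);

	free(active);
	return 0;
}

The memset variant trades a per-node pointer chase for one bulk clear per copyprop call, which is where the ~10% compile-time saving quoted in the CL description came from; this revert returns to the per-node walk pending the simpler approach mentioned above.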
src/cmd/5g/peep.c
@@ -63,8 +63,6 @@ peep(Prog *firstp)
 	g = flowstart(firstp, sizeof(Flow));
 	if(g == nil)
 		return;
-	for(r=g->start, t=0; r!=nil; r=r->link, t++)
-		r->active = t;
 
 loop1:
 	if(debug['P'] && debug['v'])
@@ -345,9 +343,6 @@ gotit:
 	return 1;
 }
 
-static uchar *active;
-static int nactive;
-
 /*
  * The idea is to remove redundant copies.
  * v1->v2 F=0
@@ -365,17 +360,15 @@ copyprop(Graph *g, Flow *r0)
 {
 	Prog *p;
 	Adr *v1, *v2;
+	Flow *r;
 
 	p = r0->prog;
 	v1 = &p->from;
 	v2 = &p->to;
 	if(copyas(v1, v2))
 		return 1;
-	if(nactive < g->num) {
-		nactive = g->num;
-		active = realloc(active, g->num);
-	}
-	memset(active, 0, g->num);
+	for(r=g->start; r!=nil; r=r->link)
+		r->active = 0;
 	return copy1(v1, v2, r0->s1, 0);
 }
 
@@ -385,12 +378,12 @@ copy1(Adr *v1, Adr *v2, Flow *r, int f)
 	int t;
 	Prog *p;
 
-	if(active[r->active]) {
+	if(r->active) {
 		if(debug['P'])
 			print("act set; return 1\n");
 		return 1;
 	}
-	active[r->active] = 1;
+	r->active = 1;
 	if(debug['P'])
 		print("copy %D->%D f=%d\n", v1, v2, f);
 	for(; r != nil; r = r->s1) {
src/cmd/6g/peep.c
@@ -92,8 +92,6 @@ peep(Prog *firstp)
 	g = flowstart(firstp, sizeof(Flow));
 	if(g == nil)
 		return;
-	for(r=g->start, t=0; r!=nil; r=r->link, t++)
-		r->active = t;
 
 	// byte, word arithmetic elimination.
 	elimshortmov(g);
@@ -288,7 +286,7 @@ loop1:
pushback(r);
}
}
flowend(g);
}
@@ -628,9 +626,6 @@ gotit:
 	return 1;
 }
 
-static uchar *active;
-static int nactive;
-
 /*
  * The idea is to remove redundant copies.
  * v1->v2 F=0
@@ -648,6 +643,7 @@ copyprop(Graph *g, Flow *r0)
 {
 	Prog *p;
 	Adr *v1, *v2;
+	Flow *r;
 
 	if(debug['P'] && debug['v'])
 		print("copyprop %P\n", r0->prog);
@@ -656,11 +652,8 @@ copyprop(Graph *g, Flow *r0)
 	v2 = &p->to;
 	if(copyas(v1, v2))
 		return 1;
-	if(nactive < g->num) {
-		nactive = g->num;
-		active = realloc(active, g->num);
-	}
-	memset(active, 0, g->num);
+	for(r=g->start; r!=nil; r=r->link)
+		r->active = 0;
 	return copy1(v1, v2, r0->s1, 0);
 }
 
@@ -670,12 +663,12 @@ copy1(Adr *v1, Adr *v2, Flow *r, int f)
 	int t;
 	Prog *p;
 
-	if(active[r->active]) {
+	if(r->active) {
 		if(debug['P'])
 			print("act set; return 1\n");
 		return 1;
 	}
-	active[r->active] = 1;
+	r->active = 1;
 	if(debug['P'])
 		print("copy %D->%D f=%d\n", v1, v2, f);
 	for(; r != nil; r = r->s1) {
src/cmd/8g/peep.c
@@ -91,8 +91,6 @@ peep(Prog *firstp)
 	g = flowstart(firstp, sizeof(Flow));
 	if(g == nil)
 		return;
-	for(r=g->start, t=0; r!=nil; r=r->link, t++)
-		r->active = t;
 
 	// byte, word arithmetic elimination.
 	elimshortmov(g);
@@ -426,9 +424,6 @@ gotit:
 	return 1;
 }
 
-static uchar *active;
-static int nactive;
-
 /*
  * The idea is to remove redundant copies.
  * v1->v2 F=0
@@ -446,17 +441,15 @@ copyprop(Graph *g, Flow *r0)
 {
 	Prog *p;
 	Adr *v1, *v2;
+	Flow *r;
 
 	p = r0->prog;
 	v1 = &p->from;
 	v2 = &p->to;
 	if(copyas(v1, v2))
 		return 1;
-	if(nactive < g->num) {
-		nactive = g->num;
-		active = realloc(active, g->num);
-	}
-	memset(active, 0, g->num);
+	for(r=g->start; r!=nil; r=r->link)
+		r->active = 0;
 	return copy1(v1, v2, r0->s1, 0);
 }
 
@@ -466,12 +459,12 @@ copy1(Adr *v1, Adr *v2, Flow *r, int f)
 	int t;
 	Prog *p;
 
-	if(active[r->active]) {
+	if(r->active) {
 		if(debug['P'])
 			print("act set; return 1\n");
 		return 1;
 	}
-	active[r->active] = 1;
+	r->active = 1;
 	if(debug['P'])
 		print("copy %D->%D f=%d\n", v1, v2, f);
 	for(; r != nil; r = r->s1) {