Commit 8afb3967 authored by Russ Cox

cmd/8g, cmd/old8a: stop renaming x86 import to i386

Change-Id: If2872e73da4daa4ff1912883d30c8fc9754ef552
Reviewed-on: https://go-review.googlesource.com/6894
Reviewed-by: Rob Pike <r@golang.org>
parent 818eff03
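
For context, this CL is a mechanical rename: the 8g and old8a sources previously imported cmd/internal/obj/x86 under the local name i386, so every reference read i386.Foo; the CL drops the rename and references now use the package's own name, x86. The sketch below is only an illustration of the two import forms, using standard-library packages as stand-ins, since cmd/internal/obj/x86 cannot be imported outside the Go tree:

package main

import (
	"fmt"

	// Renamed import: the form the old sources used. Every reference must go
	// through the local name ("txt" here stands in for the old "i386" alias).
	txt "strings"

	// Plain import: the form this CL switches to. References use the package's
	// default name, which matches the last element of the import path.
	"strconv"
)

func main() {
	fmt.Println(txt.ToUpper("renamed import"))       // txt.X instead of strings.X
	fmt.Println("plain import " + strconv.Itoa(386)) // strconv.X, as usual
}
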
@@ -7,7 +7,7 @@ package main
import (
"cmd/internal/gc"
"cmd/internal/obj"
i386 "cmd/internal/obj/x86"
"cmd/internal/obj/x86"
"fmt"
)
@@ -272,7 +272,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OMUL:
a = optoas(int(n.Op), nl.Type)
if a == i386.AIMULB {
if a == x86.AIMULB {
cgen_bmul(int(n.Op), nl, nr, res)
break
}
@@ -331,7 +331,7 @@ func cgen(n *gc.Node, res *gc.Node) {
if gc.Isconst(nl, gc.CTSTR) {
var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 := gins(i386.ALEAL, nil, &n1)
p1 := gins(x86.ALEAL, nil, &n1)
gc.Datastring(nl.Val.U.Sval, &p1.From)
gmove(&n1, res)
regfree(&n1)
@@ -529,9 +529,9 @@ func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gins(i386.ACMPL, &hi, &zero)
gins(x86.ACMPL, &hi, &zero)
splitclean()
return gc.Gbranch(i386.AJNE, nil, +1)
return gc.Gbranch(x86.AJNE, nil, +1)
}
/*
@@ -565,7 +565,7 @@ func agen(n *gc.Node, res *gc.Node) {
clearfat(&n1)
var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(i386.ALEAL, &n1, &n2)
gins(x86.ALEAL, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
return
@@ -578,7 +578,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
gins(i386.ALEAL, n, &n1)
gins(x86.ALEAL, n, &n1)
gmove(&n1, res)
regfree(&n1)
return
@@ -746,7 +746,7 @@ func agen(n *gc.Node, res *gc.Node) {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 := gins(i386.ALEAL, nil, &n3)
p1 := gins(x86.ALEAL, nil, &n3)
gc.Datastring(nl.Val.U.Sval, &p1.From)
p1.From.Scale = 1
p1.From.Index = n2.Val.U.Reg
@@ -769,7 +769,7 @@ func agen(n *gc.Node, res *gc.Node) {
} else // nothing to do
if w == 1 || w == 2 || w == 4 || w == 8 {
// LEAL (n3)(n2*w), n3
p1 := gins(i386.ALEAL, &n2, &n3)
p1 := gins(x86.ALEAL, &n2, &n3)
p1.From.Scale = int16(w)
p1.From.Type = obj.TYPE_MEM
@@ -858,7 +858,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
// Increase the refcount of the register so that igen's caller
// has to call regfree.
case gc.OINDREG:
if n.Val.U.Reg != i386.REG_SP {
if n.Val.U.Reg != x86.REG_SP {
reg[n.Val.U.Reg]++
}
*a = *n
@@ -914,7 +914,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = i386.REG_SP
a.Val.U.Reg = x86.REG_SP
a.Addable = 1
a.Xoffset = fp.Width
a.Type = n.Type
@@ -1030,9 +1030,9 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
var n1 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), n, &n1)
a := i386.AJNE
a := x86.AJNE
if !true_ {
a = i386.AJEQ
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
return
@@ -1228,9 +1228,9 @@ def:
var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
a := i386.AJNE
a := x86.AJNE
if !true_ {
a = i386.AJEQ
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
@@ -1355,9 +1355,9 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
}
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], i386.REG_DI)
gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
var src gc.Node
gc.Nodreg(&src, gc.Types[gc.Tptr], i386.REG_SI)
gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
var tsrc gc.Node
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
@@ -1392,40 +1392,40 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
// the src and dst overlap, then reverse direction
if osrc < odst && int64(odst) < int64(osrc)+w {
// reverse direction
gins(i386.ASTD, nil, nil) // set direction flag
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
gconreg(i386.AADDL, w-1, i386.REG_SI)
gconreg(i386.AADDL, w-1, i386.REG_DI)
gconreg(x86.AADDL, w-1, x86.REG_SI)
gconreg(x86.AADDL, w-1, x86.REG_DI)
gconreg(i386.AMOVL, int64(c), i386.REG_CX)
gins(i386.AREP, nil, nil) // repeat
gins(i386.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
gconreg(x86.AMOVL, int64(c), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
}
if q > 0 {
if c > 0 {
gconreg(i386.AADDL, -3, i386.REG_SI)
gconreg(i386.AADDL, -3, i386.REG_DI)
gconreg(x86.AADDL, -3, x86.REG_SI)
gconreg(x86.AADDL, -3, x86.REG_DI)
} else {
gconreg(i386.AADDL, w-4, i386.REG_SI)
gconreg(i386.AADDL, w-4, i386.REG_DI)
gconreg(x86.AADDL, w-4, x86.REG_SI)
gconreg(x86.AADDL, w-4, x86.REG_DI)
}
gconreg(i386.AMOVL, int64(q), i386.REG_CX)
gins(i386.AREP, nil, nil) // repeat
gins(i386.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
}
// we leave with the flag clear
gins(i386.ACLD, nil, nil)
gins(x86.ACLD, nil, nil)
} else {
gins(i386.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
// normal direction
if q > 128 || (q >= 4 && gc.Nacl) {
gconreg(i386.AMOVL, int64(q), i386.REG_CX)
gins(i386.AREP, nil, nil) // repeat
gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
} else if q >= 4 {
p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
@@ -1435,7 +1435,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
p.To.Offset = 10 * (128 - int64(q))
} else if !gc.Nacl && c == 0 {
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
// We don't need the MOVSL side-effect of updating SI and DI,
// and issuing a sequence of MOVLs directly is faster.
@@ -1451,13 +1451,13 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
}
} else {
for q > 0 {
gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
q--
}
}
for c > 0 {
gins(i386.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
c--
}
}
@@ -7,14 +7,14 @@ package main
import (
"cmd/internal/gc"
"cmd/internal/obj"
i386 "cmd/internal/obj/x86"
"cmd/internal/obj/x86"
)
var thechar int = '8'
var thestring string = "386"
var thelinkarch *obj.LinkArch = &i386.Link386
var thelinkarch *obj.LinkArch = &x86.Link386
func linkarchinit() {
}
@@ -43,8 +43,8 @@ func main() {
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = i386.REGSP
gc.Thearch.REGCTXT = i386.REGCTXT
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.Betypeinit = betypeinit
@@ -4,7 +4,7 @@
package main
import i386 "cmd/internal/obj/x86"
import "cmd/internal/obj/x86"
import "cmd/internal/gc"
// TODO(rsc):
@@ -21,7 +21,7 @@ const (
Fpop2 = 1 << 2
)
var reg [i386.MAXREG]uint8
var reg [x86.MAXREG]uint8
var panicdiv *gc.Node
@@ -33,13 +33,13 @@ package main
import (
"cmd/internal/gc"
"cmd/internal/obj"
i386 "cmd/internal/obj/x86"
"cmd/internal/obj/x86"
"fmt"
)
const (
REGEXT = 0
exregoffset = i386.REG_DI
exregoffset = x86.REG_DI
)
var gactive uint32
@@ -100,20 +100,20 @@ func peep(firstp *obj.Prog) {
for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case i386.ALEAL:
case x86.ALEAL:
if regtyp(&p.To) {
if p.From.Sym != nil {
if p.From.Index == i386.REG_NONE {
if p.From.Index == x86.REG_NONE {
conprop(r)
}
}
}
case i386.AMOVB,
i386.AMOVW,
i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
case x86.AMOVB,
x86.AMOVW,
x86.AMOVL,
x86.AMOVSS,
x86.AMOVSD:
if regtyp(&p.To) {
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
conprop(r)
@@ -135,9 +135,9 @@ loop1:
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
case x86.AMOVL,
x86.AMOVSS,
x86.AMOVSD:
if regtyp(&p.To) {
if regtyp(&p.From) {
if copyprop(g, r) {
@@ -150,66 +150,66 @@ loop1:
}
}
case i386.AMOVBLZX,
i386.AMOVWLZX,
i386.AMOVBLSX,
i386.AMOVWLSX:
case x86.AMOVBLZX,
x86.AMOVWLZX,
x86.AMOVBLSX,
x86.AMOVWLSX:
if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
p1.As = i386.AMOVL
p1.As = x86.AMOVL
t++
}
}
}
case i386.AADDL,
i386.AADDW:
case x86.AADDL,
x86.AADDW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
if p.As == i386.AADDL {
p.As = i386.ADECL
if p.As == x86.AADDL {
p.As = x86.ADECL
} else {
p.As = i386.ADECW
p.As = x86.ADECW
}
p.From = obj.Addr{}
break
}
if p.From.Offset == 1 {
if p.As == i386.AADDL {
p.As = i386.AINCL
if p.As == x86.AADDL {
p.As = x86.AINCL
} else {
p.As = i386.AINCW
p.As = x86.AINCW
}
p.From = obj.Addr{}
break
}
case i386.ASUBL,
i386.ASUBW:
case x86.ASUBL,
x86.ASUBW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
if p.As == i386.ASUBL {
p.As = i386.AINCL
if p.As == x86.ASUBL {
p.As = x86.AINCL
} else {
p.As = i386.AINCW
p.As = x86.AINCW
}
p.From = obj.Addr{}
break
}
if p.From.Offset == 1 {
if p.As == i386.ASUBL {
p.As = i386.ADECL
if p.As == x86.ASUBL {
p.As = x86.ADECL
} else {
p.As = i386.ADECW
p.As = x86.ADECW
}
p.From = obj.Addr{}
break
@@ -228,10 +228,10 @@ loop1:
// the processor can do better if we do moves using both.
for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == i386.AMOVSD {
if p.As == x86.AMOVSD {
if regtyp(&p.From) {
if regtyp(&p.To) {
p.As = i386.AMOVAPD
p.As = x86.AMOVAPD
}
}
}
@@ -252,7 +252,7 @@ func excise(r *gc.Flow) {
}
func regtyp(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7)
return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_DI || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X7)
}
// movb elimination.
@@ -269,21 +269,21 @@ func elimshortmov(g *gc.Graph) {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
case i386.AINCB,
i386.AINCW:
p.As = i386.AINCL
case x86.AINCB,
x86.AINCW:
p.As = x86.AINCL
case i386.ADECB,
i386.ADECW:
p.As = i386.ADECL
case x86.ADECB,
x86.ADECW:
p.As = x86.ADECL
case i386.ANEGB,
i386.ANEGW:
p.As = i386.ANEGL
case x86.ANEGB,
x86.ANEGW:
p.As = x86.ANEGL
case i386.ANOTB,
i386.ANOTW:
p.As = i386.ANOTL
case x86.ANOTB,
x86.ANOTW:
p.As = x86.ANOTL
}
if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
@@ -292,54 +292,54 @@ func elimshortmov(g *gc.Graph) {
// we don't switch to 32-bit arithmetic if it can
// change how the carry bit is set (and the carry bit is needed).
switch p.As {
case i386.AMOVB,
i386.AMOVW:
p.As = i386.AMOVL
case x86.AMOVB,
x86.AMOVW:
p.As = x86.AMOVL
case i386.AADDB,
i386.AADDW:
case x86.AADDB,
x86.AADDW:
if !needc(p.Link) {
p.As = i386.AADDL
p.As = x86.AADDL
}
case i386.ASUBB,
i386.ASUBW:
case x86.ASUBB,
x86.ASUBW:
if !needc(p.Link) {
p.As = i386.ASUBL
p.As = x86.ASUBL
}
case i386.AMULB,
i386.AMULW:
p.As = i386.AMULL
case x86.AMULB,
x86.AMULW:
p.As = x86.AMULL
case i386.AIMULB,
i386.AIMULW:
p.As = i386.AIMULL
case x86.AIMULB,
x86.AIMULW:
p.As = x86.AIMULL
case i386.AANDB,
i386.AANDW:
p.As = i386.AANDL
case x86.AANDB,
x86.AANDW:
p.As = x86.AANDL
case i386.AORB,
i386.AORW:
p.As = i386.AORL
case x86.AORB,
x86.AORW:
p.As = x86.AORL
case i386.AXORB,
i386.AXORW:
p.As = i386.AXORL
case x86.AXORB,
x86.AXORW:
p.As = x86.AXORL
case i386.ASHLB,
i386.ASHLW:
p.As = i386.ASHLL
case x86.ASHLB,
x86.ASHLW:
p.As = x86.ASHLL
}
} else {
// explicit zero extension
switch p.As {
case i386.AMOVB:
p.As = i386.AMOVBLZX
case x86.AMOVB:
p.As = x86.AMOVBLZX
case i386.AMOVW:
p.As = i386.AMOVWLZX
case x86.AMOVW:
p.As = x86.AMOVWLZX
}
}
}
@@ -581,7 +581,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if REGEXT != 0 /*TypeKind(100016)*/ && v.Type == obj.TYPE_REG && v.Reg <= REGEXT && v.Reg > exregoffset {
return 2
}
if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
return 2
}
if v.Type == p.From.Type && v.Reg == p.From.Reg {
@@ -601,7 +601,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 3
case obj.ATEXT:
if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
return 3
}
return 0
@@ -666,10 +666,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
* semantics
*/
func copyas(a *obj.Addr, v *obj.Addr) bool {
if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_BL {
gc.Fatal("use of byte register")
}
if i386.REG_AL <= v.Reg && v.Reg <= i386.REG_BL {
if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_BL {
gc.Fatal("use of byte register")
}
@@ -728,7 +728,7 @@ func copyau(a *obj.Addr, v *obj.Addr) bool {
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if copyas(a, v) {
reg := int(s.Reg)
if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
if reg >= x86.REG_AX && reg <= x86.REG_DI || reg >= x86.REG_X0 && reg <= x86.REG_X7 {
if f != 0 {
a.Reg = int16(reg)
}
@@ -740,7 +740,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if regtyp(v) {
reg := int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
if (s.Reg == x86.REG_BP) && a.Index != obj.TYPE_NONE {
return 1 /* can't use BP-base with index */
}
if f != 0 {
@@ -813,9 +813,9 @@ loop:
}
func smallindir(a *obj.Addr, reg *obj.Addr) bool {
return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096
return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
}
func stackaddr(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP
return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
}
@@ -30,7 +30,7 @@
package main
import i386 "cmd/internal/obj/x86"
import "cmd/internal/obj/x86"
import "cmd/internal/gc"
const (
@@ -62,28 +62,28 @@ func regnames(n *int) []string {
}
func excludedregs() uint64 {
return RtoB(i386.REG_SP)
return RtoB(x86.REG_SP)
}
func doregbits(r int) uint64 {
b := uint64(0)
if r >= i386.REG_AX && r <= i386.REG_DI {
if r >= x86.REG_AX && r <= x86.REG_DI {
b |= RtoB(r)
} else if r >= i386.REG_AL && r <= i386.REG_BL {
b |= RtoB(r - i386.REG_AL + i386.REG_AX)
} else if r >= i386.REG_AH && r <= i386.REG_BH {
b |= RtoB(r - i386.REG_AH + i386.REG_AX)
} else if r >= i386.REG_X0 && r <= i386.REG_X0+7 {
} else if r >= x86.REG_AL && r <= x86.REG_BL {
b |= RtoB(r - x86.REG_AL + x86.REG_AX)
} else if r >= x86.REG_AH && r <= x86.REG_BH {
b |= RtoB(r - x86.REG_AH + x86.REG_AX)
} else if r >= x86.REG_X0 && r <= x86.REG_X0+7 {
b |= FtoB(r)
}
return b
}
func RtoB(r int) uint64 {
if r < i386.REG_AX || r > i386.REG_DI {
if r < x86.REG_AX || r > x86.REG_DI {
return 0
}
return 1 << uint(r-i386.REG_AX)
return 1 << uint(r-x86.REG_AX)
}
func BtoR(b uint64) int {
@@ -91,14 +91,14 @@ func BtoR(b uint64) int {
if b == 0 {
return 0
}
return gc.Bitno(b) + i386.REG_AX
return gc.Bitno(b) + x86.REG_AX
}
func FtoB(f int) uint64 {
if f < i386.REG_X0 || f > i386.REG_X7 {
if f < x86.REG_X0 || f > x86.REG_X7 {
return 0
}
return 1 << uint(f-i386.REG_X0+8)
return 1 << uint(f-x86.REG_X0+8)
}
func BtoF(b uint64) int {
@@ -106,5 +106,5 @@ func BtoF(b uint64) int {
if b == 0 {
return 0
}
return gc.Bitno(b) - 8 + i386.REG_X0
return gc.Bitno(b) - 8 + x86.REG_X0
}
@@ -131,8 +131,7 @@ func bootstrapFixImports(text, srcFile string) string {
continue
}
if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) ||
inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")) ||
strings.Contains(line, `i386 "cmd/internal/obj/x86"`) {
inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")) {
lines[i] = strings.Replace(line, `"cmd/`, `"bootstrap/`, -1)
}
}
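
The final hunk, in the bootstrap import rewriter, drops a special case that existed only because of the rename: an aliased line like `i386 "cmd/internal/obj/x86"` starts with the alias rather than a quote, so the generic tab-plus-quote prefix test did not match it and strings.Contains had to catch it explicitly. With the alias gone, the plain import line matches the existing prefixes. A simplified, hypothetical sketch of that prefix test (not the actual cmd/dist code):

package main

import (
	"fmt"
	"strings"
)

// matchesImportLine mimics the prefix checks the rewriter applies to lines
// inside an import block: only lines beginning with a tab and a quote (or a
// dot import) are rewritten. This is a stand-in, not the real bootstrap code.
func matchesImportLine(line string) bool {
	return strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")
}

func main() {
	fmt.Println(matchesImportLine("\t\"cmd/internal/obj/x86\""))      // true: the plain import needs no special case
	fmt.Println(matchesImportLine("\ti386 \"cmd/internal/obj/x86\"")) // false: the aliased form needed the extra Contains check
}
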