Commit 3ef07c41 authored by Michael Munday

cmd, runtime: remove s390x 3 operand immediate logical ops

These are emulated by the assembler and we don't need them.

Change-Id: I2b07c5315a5b642fdb5e50b468453260ae121164
Reviewed-on: https://go-review.googlesource.com/31758
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 517a44d5
...@@ -809,12 +809,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -809,12 +809,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R3: // defer returns in R3:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(s390x.AAND) p := gc.Prog(s390x.ACMPW)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_REG
p.From.Offset = 0xFFFFFFFF p.From.Reg = s390x.REG_R3
p.Reg = s390x.REG_R3 p.To.Type = obj.TYPE_CONST
p.To.Type = obj.TYPE_REG p.To.Offset = 0
p.To.Reg = s390x.REG_R3
p = gc.Prog(s390x.ABNE) p = gc.Prog(s390x.ABNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
......
...@@ -161,7 +161,6 @@ var optab = []Optab{ ...@@ -161,7 +161,6 @@ var optab = []Optab{
Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 0}, Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 0},
Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 0}, Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
Optab{AAND, C_LCON, C_NONE, C_NONE, C_REG, 23, 0}, Optab{AAND, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
Optab{AAND, C_LCON, C_REG, C_NONE, C_REG, 23, 0},
Optab{AAND, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0}, Optab{AAND, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0},
Optab{AAND, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP}, Optab{AAND, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP},
Optab{AANDW, C_REG, C_REG, C_NONE, C_REG, 6, 0}, Optab{AANDW, C_REG, C_REG, C_NONE, C_REG, 6, 0},
...@@ -3063,57 +3062,37 @@ func asmout(ctxt *obj.Link, asm *[]byte) { ...@@ -3063,57 +3062,37 @@ func asmout(ctxt *obj.Link, asm *[]byte) {
zRIE(_d, oprie, uint32(p.To.Reg), uint32(r), uint32(v), 0, 0, 0, 0, asm) zRIE(_d, oprie, uint32(p.To.Reg), uint32(r), uint32(v), 0, 0, 0, 0, asm)
} }
case 23: // 64-bit logical op $constant [reg] reg case 23: // 64-bit logical op $constant reg
// TODO(mundaym): remove the optional register and merge with case 24. // TODO(mundaym): merge with case 24.
v := vregoff(ctxt, &p.From) v := vregoff(ctxt, &p.From)
var opcode uint32 switch p.As {
r := p.Reg default:
if r == 0 { ctxt.Diag("%v is not supported", p)
r = p.To.Reg case AAND:
} if v >= 0 { // needs zero extend
if r == p.To.Reg { zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
switch p.As { zRRE(op_NGR, uint32(p.To.Reg), REGTMP, asm)
default: } else if int64(int16(v)) == v {
ctxt.Diag("%v is not supported", p) zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
case AAND: } else { // r.To.Reg & 0xffffffff00000000 & uint32(v)
if v >= 0 { // needs zero extend zRIL(_a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
zRRE(op_NGR, uint32(p.To.Reg), REGTMP, asm)
} else if int64(int16(v)) == v {
zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
} else { // r.To.Reg & 0xffffffff00000000 & uint32(v)
zRIL(_a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
}
case AOR:
if int64(uint32(v)) != v { // needs sign extend
zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
zRRE(op_OGR, uint32(p.To.Reg), REGTMP, asm)
} else if int64(uint16(v)) == v {
zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
} else {
zRIL(_a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
}
case AXOR:
if int64(uint32(v)) != v { // needs sign extend
zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
zRRE(op_XGR, uint32(p.To.Reg), REGTMP, asm)
} else {
zRIL(_a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
}
} }
} else { case AOR:
switch p.As { if int64(uint32(v)) != v { // needs sign extend
default: zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
ctxt.Diag("%v is not supported", p) zRRE(op_OGR, uint32(p.To.Reg), REGTMP, asm)
case AAND: } else if int64(uint16(v)) == v {
opcode = op_NGRK zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
case AOR: } else {
opcode = op_OGRK zRIL(_a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
case AXOR: }
opcode = op_XGRK case AXOR:
if int64(uint32(v)) != v { // needs sign extend
zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
zRRE(op_XGR, uint32(p.To.Reg), REGTMP, asm)
} else {
zRIL(_a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
} }
zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
zRRF(opcode, uint32(r), 0, uint32(p.To.Reg), REGTMP, asm)
} }
case 24: // 32-bit logical op $constant reg case 24: // 32-bit logical op $constant reg
......
...@@ -918,12 +918,14 @@ notfoundr0: ...@@ -918,12 +918,14 @@ notfoundr0:
vectorimpl: vectorimpl:
//if the address is not 16byte aligned, use loop for the header //if the address is not 16byte aligned, use loop for the header
AND $15, R3, R8 MOVD R3, R8
AND $15, R8
CMPBGT R8, $0, notaligned CMPBGT R8, $0, notaligned
aligned: aligned:
ADD R6, R4, R8 ADD R6, R4, R8
AND $-16, R8, R7 MOVD R8, R7
AND $-16, R7
// replicate c across V17 // replicate c across V17
VLVGB $0, R5, V19 VLVGB $0, R5, V19
VREPB $0, V19, V17 VREPB $0, V19, V17
...@@ -944,7 +946,8 @@ vectorloop: ...@@ -944,7 +946,8 @@ vectorloop:
RET RET
notaligned: notaligned:
AND $-16, R3, R8 MOVD R3, R8
AND $-16, R8
ADD $16, R8 ADD $16, R8
notalignedloop: notalignedloop:
CMPBEQ R3, R8, aligned CMPBEQ R3, R8, aligned
......
...@@ -141,7 +141,8 @@ TEXT ·Or8(SB), NOSPLIT, $0-9 ...@@ -141,7 +141,8 @@ TEXT ·Or8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3 MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4 MOVBZ val+8(FP), R4
// Calculate shift. // Calculate shift.
AND $3, R3, R5 MOVD R3, R5
AND $3, R5
XOR $3, R5 // big endian - flip direction XOR $3, R5 // big endian - flip direction
SLD $3, R5 // MUL $8, R5 SLD $3, R5 // MUL $8, R5
SLD R5, R4 SLD R5, R4
...@@ -159,7 +160,8 @@ TEXT ·And8(SB), NOSPLIT, $0-9 ...@@ -159,7 +160,8 @@ TEXT ·And8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3 MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4 MOVBZ val+8(FP), R4
// Calculate shift. // Calculate shift.
AND $3, R3, R5 MOVD R3, R5
AND $3, R5
XOR $3, R5 // big endian - flip direction XOR $3, R5 // big endian - flip direction
SLD $3, R5 // MUL $8, R5 SLD $3, R5 // MUL $8, R5
OR $-256, R4 // create 0xffffffffffffffxx OR $-256, R4 // create 0xffffffffffffffxx
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment