Author: Maciej Fijalkowski <[email protected]>
Branch: arm64
Changeset: r96446:802c95aa855a
Date: 2019-04-11 11:37 +0000
http://bitbucket.org/pypy/pypy/changeset/802c95aa855a/
Log: more int operations
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -118,10 +118,26 @@
base = 0b10011011000
self.write32((base << 21) | (rm << 16) | (0b11111 << 10) | (rn << 5) |
rd)
+ def UMULH_rr(self, rd, rn, rm):
+ base = 0b10011011110
+ self.write32((base << 21) | (rm << 16) | (0b11111 << 10) | (rn << 5) |
+ rd)
+
def AND_rr(self, rd, rn, rm):
base = 0b10001010000
self.write32((base << 21) | (rm << 16) | (rn << 5) | rd)
+ def LSL_rr(self, rd, rn, rm):
+ base = 0b10011010110
+ self.write32((base << 21) | (rm << 16) | (0b001000 << 10) | (rn << 5)
+ | rd)
+
+ def ASR_rr(self, rd, rn, rm):
+ base = 0b10011010110
+ self.write32((base << 21) | (rm << 16) | (0b001010 << 10) | (rn << 5)
+ | rd)
+
+ def LSR_rr(self, rd, rn, rm):
+ base = 0b10011010110
+ self.write32((base << 21) | (rm << 16) | (0b001001 << 10) | (rn << 5)
+ | rd)
+
def EOR_rr(self, rd, rn, rm):
base = 0b11001010000
self.write32((base << 21) | (rm << 16) | (rn << 5) | rd)
@@ -135,6 +151,11 @@
assert 0 <= imm <= 4095
self.write32((base << 22) | (imm << 10) | (rn << 5) | 0b11111)
+ def CSET_r_flag(self, rd, cond):
+ base = 0b10011010100
+ self.write32((base << 21) | (0b11111 << 16) | (cond << 12) | (1 << 10) |
+ (0b11111 << 5) | rd)
+
def NOP(self):
self.write32(0b11010101000000110010000000011111)
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -8,6 +8,17 @@
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.metainterp.history import TargetToken
+def gen_comp_op(name, flag):
+ def emit_op(self, op, arglocs):
+ l0, l1, res = arglocs
+
+ if l1.is_imm():
+ self.mc.CMP_ri(l0.value, l1.getint())
+ else:
+ self.mc.CMP_rr(l0.value, l1.value)
+ self.mc.CSET_r_flag(res.value, c.get_opposite_of(flag))
+ emit_op.__name__ = name
+ return emit_op
class ResOpAssembler(BaseAssembler):
def emit_op_int_add(self, op, arglocs):
@@ -61,12 +72,27 @@
l0, l1, res = arglocs
self.mc.EOR_rr(res.value, l0.value, l1.value)
+ def emit_op_int_lshift(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.LSL_rr(res.value, l0.value, l1.value)
+
+ def emit_op_int_rshift(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.ASR_rr(res.value, l0.value, l1.value)
+
+ def emit_op_uint_rshift(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.LSR_rr(res.value, l0.value, l1.value)
+
+ def emit_op_uint_mul_high(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.UMULH_rr(res.value, l0.value, l1.value)
+
def emit_int_comp_op(self, op, arglocs):
l0, l1 = arglocs
if l1.is_imm():
- xxx
- self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond)
+ self.mc.CMP_ri(l0.value, l1.getint())
else:
self.mc.CMP_rr(l0.value, l1.value)
@@ -82,6 +108,13 @@
self.emit_int_comp_op(op, arglocs)
return c.EQ
+ emit_op_int_lt = gen_comp_op('emit_op_int_lt', c.LT)
+ emit_op_int_le = gen_comp_op('emit_op_int_le', c.LE)
+ emit_op_int_gt = gen_comp_op('emit_op_int_gt', c.GT)
+ emit_op_int_ge = gen_comp_op('emit_op_int_ge', c.GE)
+ emit_op_int_eq = gen_comp_op('emit_op_int_eq', c.EQ)
+ emit_op_int_ne = gen_comp_op('emit_op_int_ne', c.NE)
+
def emit_op_increment_debug_counter(self, op, arglocs):
return # XXXX
base_loc, value_loc = arglocs
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -345,14 +345,20 @@
self.possibly_free_var(op)
return [reg1, reg2, res]
+ # some of those have forms of imm that they accept, but they're rather
+ # obscure. Can be future optimization
prepare_op_int_and = prepare_op_int_mul
prepare_op_int_or = prepare_op_int_mul
prepare_op_int_xor = prepare_op_int_mul
+ prepare_op_int_lshift = prepare_op_int_mul
+ prepare_op_int_rshift = prepare_op_int_mul
+ prepare_op_uint_rshift = prepare_op_int_mul
+ prepare_op_uint_mul_high = prepare_op_int_mul
def prepare_int_cmp(self, op, res_in_cc):
boxes = op.getarglist()
arg0, arg1 = boxes
- imm_a1 = False # XXX check_imm_box(arg1)
+ imm_a1 = check_imm_box(arg1)
l0 = self.make_sure_var_in_reg(arg0, forbidden_vars=boxes)
if imm_a1:
@@ -363,7 +369,7 @@
self.possibly_free_vars_for_op(op)
self.free_temp_vars()
if not res_in_cc:
- res = self.force_allocate_reg_or_cc(op)
+ res = self.force_allocate_reg(op)
return [l0, l1, res]
return [l0, l1]
@@ -374,6 +380,12 @@
def prepare_op_int_le(self, op):
return self.prepare_int_cmp(op, False)
+ prepare_op_int_lt = prepare_op_int_le
+ prepare_op_int_gt = prepare_op_int_le
+ prepare_op_int_ge = prepare_op_int_le
+ prepare_op_int_eq = prepare_op_int_le
+ prepare_op_int_ne = prepare_op_int_le
+
def prepare_op_label(self, op):
descr = op.getdescr()
assert isinstance(descr, TargetToken)
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit