Author: Maciej Fijalkowski <[email protected]>
Branch: arm64
Changeset: r96437:33f62f3022aa
Date: 2019-04-10 14:27 +0000
http://bitbucket.org/pypy/pypy/changeset/33f62f3022aa/
Log: implement a few more int operations
diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
--- a/rpython/jit/backend/aarch64/assembler.py
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -776,22 +776,22 @@
def not_implemented(msg):
- msg = '[ARM/asm] %s\n' % msg
+ msg = '[ARM64/asm] %s\n' % msg
if we_are_translated():
llop.debug_print(lltype.Void, msg)
raise NotImplementedError(msg)
def notimplemented_op(self, op, arglocs):
- print "[ARM/asm] %s not implemented" % op.getopname()
+ print "[ARM64/asm] %s not implemented" % op.getopname()
raise NotImplementedError(op)
def notimplemented_comp_op(self, op, arglocs):
- print "[ARM/asm] %s not implemented" % op.getopname()
+ print "[ARM64/asm] %s not implemented" % op.getopname()
raise NotImplementedError(op)
def notimplemented_guard_op(self, op, fcond, arglocs):
- print "[ARM/asm] %s not implemented" % op.getopname()
+ print "[ARM64/asm] %s not implemented" % op.getopname()
raise NotImplementedError(op)
asm_operations = [notimplemented_op] * (rop._LAST + 1)
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -110,6 +110,22 @@
base = 0b10001011000
self.write32((base << 21) | (rm << 16) | (rn << 5) | (rd))
+ def SUB_rr(self, rd, rn, rm):
+ base = 0b11001011001
+ self.write32((base << 21) | (rm << 16) | (0b11 << 13) | (rn << 5) | (rd))
+
+ def MUL_rr(self, rd, rn, rm):
+ base = 0b10011011000
+ self.write32((base << 21) | (rm << 16) | (0b11111 << 10) | (rn << 5) | rd)
+
+ def AND_rr(self, rd, rn, rm):
+ base = 0b10001010000
+ self.write32((base << 21) | (rm << 16) | (rn << 5) | rd)
+
+ def EOR_rr(self, rd, rn, rm):
+ base = 0b11001010000
+ self.write32((base << 21) | (rm << 16) | (rn << 5) | rd)
+
def CMP_rr(self, rn, rm):
base = 0b11101011000
self.write32((base << 21) | (rm << 16) | (rn << 5) | 0b11111)
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -20,13 +20,7 @@
s = 1
else:
s = 0
- if l0.is_imm():
- value = l0.getint()
- assert value >= 0
- # reverse substract ftw
- XX
- self.mc.RSB_ri(res.value, l1.value, value)
- elif l1.is_imm():
+ if l1.is_imm():
value = l1.getint()
assert value >= 0
self.mc.SUB_ri(res.value, l0.value, value)
@@ -40,18 +34,33 @@
def int_add_impl(self, op, arglocs, ovfcheck=False):
l0, l1, res = arglocs
+ assert not l0.is_imm()
if ovfcheck:
XXX
s = 1
else:
s = 0
- if l0.is_imm():
- self.mc.ADD_ri(res.value, l1.value, l0.value)
- elif l1.is_imm():
+ if l1.is_imm():
self.mc.ADD_ri(res.value, l0.value, l1.value)
else:
self.mc.ADD_rr(res.value, l0.value, l1.value)
+ def emit_op_int_mul(self, op, arglocs):
+ reg1, reg2, res = arglocs
+ self.mc.MUL_rr(res.value, reg1.value, reg2.value)
+
+ def emit_op_int_and(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.AND_rr(res.value, l0.value, l1.value)
+
+ def emit_op_int_or(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.ORR_rr(res.value, l0.value, l1.value)
+
+ def emit_op_int_xor(self, op, arglocs):
+ l0, l1, res = arglocs
+ self.mc.EOR_rr(res.value, l0.value, l1.value)
+
def emit_int_comp_op(self, op, arglocs):
l0, l1 = arglocs
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -298,7 +298,7 @@
self.free_temp_vars()
return [base_loc, value_loc]
- def prepare_op_int_add(self, op):
+ def prepare_int_ri(self, op):
boxes = op.getarglist()
a0, a1 = boxes
imm_a0 = check_imm_box(a0)
@@ -307,8 +307,8 @@
l0 = self.make_sure_var_in_reg(a0, boxes)
l1 = self.convert_to_imm(a1)
elif imm_a0 and not imm_a1:
- l0 = self.convert_to_imm(a0)
- l1 = self.make_sure_var_in_reg(a1, boxes)
+ l1 = self.convert_to_imm(a0)
+ l0 = self.make_sure_var_in_reg(a1, boxes)
else:
l0 = self.make_sure_var_in_reg(a0, boxes)
l1 = self.make_sure_var_in_reg(a1, boxes)
@@ -316,7 +316,38 @@
res = self.force_allocate_reg(op)
return [l0, l1, res]
- prepare_op_int_sub = prepare_op_int_add
+ prepare_op_int_add = prepare_int_ri
+
+ def prepare_op_int_sub(self, op):
+ boxes = op.getarglist()
+ a0, a1 = boxes
+ imm_a1 = check_imm_box(a1)
+ if imm_a1:
+ l0 = self.make_sure_var_in_reg(a0, boxes)
+ l1 = self.convert_to_imm(a1)
+ else:
+ l0 = self.make_sure_var_in_reg(a0, boxes)
+ l1 = self.make_sure_var_in_reg(a1, boxes)
+ self.possibly_free_vars_for_op(op)
+ res = self.force_allocate_reg(op)
+ return [l0, l1, res]
+
+ def prepare_op_int_mul(self, op):
+ boxes = op.getarglist()
+ a0, a1 = boxes
+
+ reg1 = self.make_sure_var_in_reg(a0, forbidden_vars=boxes)
+ reg2 = self.make_sure_var_in_reg(a1, forbidden_vars=boxes)
+
+ self.possibly_free_vars(boxes)
+ self.possibly_free_vars_for_op(op)
+ res = self.force_allocate_reg(op)
+ self.possibly_free_var(op)
+ return [reg1, reg2, res]
+
+ prepare_op_int_and = prepare_op_int_mul
+ prepare_op_int_or = prepare_op_int_mul
+ prepare_op_int_xor = prepare_op_int_mul
def prepare_int_cmp(self, op, res_in_cc):
boxes = op.getarglist()
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit