Author: Maciej Fijalkowski <[email protected]>
Branch: arm64
Changeset: r96029:5cb0444d17a2
Date: 2019-02-16 14:41 +0000
http://bitbucket.org/pypy/pypy/changeset/5cb0444d17a2/
Log: add CMP_rr and push forward to next test
diff --git a/rpython/jit/backend/aarch64/TODO b/rpython/jit/backend/aarch64/TODO
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/aarch64/TODO
@@ -0,0 +1,2 @@
+* int_add - IMM
+* int_cmp - IMM
diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
--- a/rpython/jit/backend/aarch64/assembler.py
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -5,8 +5,8 @@
#from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size
from rpython.jit.backend.aarch64.opassembler import ResOpAssembler
from rpython.jit.backend.aarch64.regalloc import (Regalloc,
+ operations as regalloc_operations, guard_operations, comp_operations)
# CoreRegisterManager, check_imm_arg, VFPRegisterManager,
- operations as regalloc_operations)
#from rpython.jit.backend.arm import callbuilder
from rpython.jit.backend.aarch64 import registers as r
from rpython.jit.backend.llsupport import jitframe
@@ -276,6 +276,12 @@
regalloc.possibly_free_vars_for_op(op)
elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
regalloc.prepare_force_spill(op)
+ elif i < len(operations) - 1 and regalloc.next_op_can_accept_cc(operations, i):
+ arglocs = guard_operations[operations[i + 1].getopnum()](regalloc, operations[i + 1], op)
+ if arglocs is not None:
+ xxx
+ regalloc.next_instruction() # advance one more
else:
arglocs = regalloc_operations[opnum](regalloc, op)
if arglocs is not None:
@@ -292,6 +298,13 @@
self.mc.mark_op(None) # end of the loop
regalloc.operations = None
+ def dispatch_comparison(self, op):
+ opnum = op.getopnum()
+ arglocs = comp_operations[opnum](self._regalloc, op, True)
+ assert arglocs is not None
+ asm_comp_operations[opnum](self, op, arglocs)
+ return arglocs
+
# regalloc support
def load(self, loc, value):
"""load an immediate value into a register"""
@@ -415,12 +428,16 @@
raise NotImplementedError(msg)
-def notimplemented_op(self, op, arglocs, regalloc):
+def notimplemented_op(self, op, arglocs):
print "[ARM/asm] %s not implemented" % op.getopname()
raise NotImplementedError(op)
+def notimplemented_comp_op(self, op, arglocs):
+ print "[ARM/asm] %s not implemented" % op.getopname()
+ raise NotImplementedError(op)
asm_operations = [notimplemented_op] * (rop._LAST + 1)
+asm_comp_operations = [notimplemented_comp_op] * (rop._LAST + 1)
asm_extra_operations = {}
for name, value in ResOpAssembler.__dict__.iteritems():
@@ -432,3 +449,7 @@
opname = name[len('emit_op_'):]
num = getattr(rop, opname.upper())
asm_operations[num] = value
+ elif name.startswith('emit_comp_op_'):
+ opname = name[len('emit_comp_op_'):]
+ num = getattr(rop, opname.upper())
+ asm_comp_operations[num] = value
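
The point of the new branch in the dispatch loop above: when a comparison is immediately
followed by a guard that can consume the condition codes, the two are compiled as a pair,
so the comparison only has to set the flags and the guard can branch on them instead of
first materializing a boolean in a register. A much simplified standalone model of that
control flow (the op tuples, the COMPARISONS/GUARDS sets and the reduced
next_op_can_accept_cc below are made up for illustration; they are not the RPython code,
and the guard side of the real patch still ends in the xxx placeholder at this point):

    COMPARISONS = {'int_lt', 'int_le', 'int_eq'}
    GUARDS = {'guard_true', 'guard_false'}

    def next_op_can_accept_cc(operations, i):
        # simplified: the comparison result is consumed only by the guard right after it
        op, nxt = operations[i], operations[i + 1]
        return op[0] in COMPARISONS and nxt[0] in GUARDS and nxt[1] is op

    def compile_ops(operations):
        trace, i = [], 0
        while i < len(operations):
            op = operations[i]
            if i < len(operations) - 1 and next_op_can_accept_cc(operations, i):
                guard = operations[i + 1]
                trace.append('CMP for %s' % op[0])        # comparison just sets the flags
                trace.append('B.cond for %s' % guard[0])  # guard branches on the flags
                i += 2                                    # the "advance one more" step
            else:
                trace.append('emit %s' % op[0])
                i += 1
        return trace

    cmp_op = ('int_le', None)
    ops = [('int_add', None), cmp_op, ('guard_true', cmp_op)]
    assert compile_ops(ops) == ['emit int_add', 'CMP for int_le', 'B.cond for guard_true']
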
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -104,6 +104,10 @@
base = 0b10001011000
self.write32((base << 21) | (rm << 16) | (rn << 5) | (rd))
+ def CMP_rr(self, rn, rm):
+ base = 0b11101011000
+ self.write32((base << 21) | (rm << 16) | (rn << 5) | 0b11111)
+
def BRK(self):
self.write32(0b11010100001 << 21)
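
For context on the CMP_rr encoding: CMP Xn, Xm is an alias of SUBS XZR, Xn, Xm in the
shifted-register form, which is why the destination field is hard-wired to 0b11111 (XZR)
and the base is the SUBS opcode 0b11101011000. A quick standalone sanity check of that
bit layout, independent of the patch's CodeBuilder (the expected word 0xEB02003F for
"cmp x1, x2" is quoted from the architecture encoding, not taken from the patch):

    import struct

    def encode_cmp_rr(rn, rm):
        # CMP Xn, Xm == SUBS XZR, Xn, Xm (shifted-register form, shift amount 0)
        base = 0b11101011000                       # sf=1, op=1, S=1 + SUBS fixed bits
        return (base << 21) | (rm << 16) | (rn << 5) | 0b11111   # Rd = XZR (31)

    word = encode_cmp_rr(1, 2)                     # cmp x1, x2
    assert word == 0xEB02003F
    raw = struct.pack('<I', word)                  # A64 instructions are little-endian
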
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -2,6 +2,7 @@
from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt,
INT, FLOAT, REF)
from rpython.jit.backend.aarch64 import registers as r
+from rpython.jit.backend.arm import conditions as c # yes, arm, not aarch64
from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler
class ResOpAssembler(BaseAssembler):
@@ -24,6 +25,17 @@
else:
self.mc.ADD_rr(res.value, l0.value, l1.value)
+ def emit_int_comp_op(self, op, arglocs):
+ l0, l1 = arglocs
+
+ if l1.is_imm():
+ xxx
+ self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond)
+ else:
+ self.mc.CMP_rr(l0.value, l1.value)
+
+ emit_comp_op_int_le = emit_int_comp_op
+
def emit_op_increment_debug_counter(self, op, arglocs):
return # XXXX
base_loc, value_loc = arglocs
@@ -31,6 +43,9 @@
self.mc.ADD_ri(value_loc.value, value_loc.value, 1)
self.mc.STR_ri(value_loc.value, base_loc.value, 0)
+ def emit_op_label(self, op, arglocs):
+ pass
+
def emit_op_finish(self, op, arglocs):
base_ofs = self.cpu.get_baseofs_of_frame_field()
if len(arglocs) > 0:
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -177,6 +177,12 @@
self.possibly_free_vars(list(inputargs))
return operations
+ def loc(self, var):
+ if var.type == FLOAT:
+ return self.vfprm.loc(var)
+ else:
+ return self.rm.loc(var)
+
def possibly_free_var(self, var):
if var.type == FLOAT:
self.vfprm.possibly_free_var(var)
@@ -245,7 +251,7 @@
# we would like the boxes to be after the jump.
def _compute_hint_frame_locations_from_descr(self, descr):
- arglocs = self.assembler.target_arglocs(descr)
+ arglocs = descr._arm_arglocs
jump_op = self.final_jump_op
assert len(arglocs) == jump_op.numargs()
for i in range(jump_op.numargs()):
@@ -271,23 +277,6 @@
self.free_temp_vars()
return [base_loc, value_loc]
- def _prepare_op_int_add(self, op, fcond):
- XXX
- boxes = op.getarglist()
- a0, a1 = boxes
- imm_a0 = check_imm_box(a0)
- imm_a1 = check_imm_box(a1)
- if not imm_a0 and imm_a1:
- l0 = self.make_sure_var_in_reg(a0, boxes)
- l1 = self.convert_to_imm(a1)
- elif imm_a0 and not imm_a1:
- l0 = self.convert_to_imm(a0)
- l1 = self.make_sure_var_in_reg(a1, boxes)
- else:
- l0 = self.make_sure_var_in_reg(a0, boxes)
- l1 = self.make_sure_var_in_reg(a1, boxes)
- return [l0, l1]
-
def prepare_op_int_add(self, op):
arg0 = op.getarg(0)
arg1 = op.getarg(1)
@@ -298,6 +287,71 @@
res = self.force_allocate_reg(op)
return [l0, l1, res]
+
+ def prepare_int_cmp(self, op, res_in_cc):
+ boxes = op.getarglist()
+ arg0, arg1 = boxes
+ imm_a1 = False # XXX check_imm_box(arg1)
+
+ l0 = self.make_sure_var_in_reg(arg0, forbidden_vars=boxes)
+ if imm_a1:
+ l1 = self.convert_to_imm(arg1)
+ else:
+ l1 = self.make_sure_var_in_reg(arg1, forbidden_vars=boxes)
+
+ self.possibly_free_vars_for_op(op)
+ self.free_temp_vars()
+ if not res_in_cc:
+ res = self.force_allocate_reg_or_cc(op)
+ return [l0, l1, res]
+ return [l0, l1]
+
+ prepare_comp_op_int_lt = prepare_int_cmp
+ prepare_comp_op_int_le = prepare_int_cmp
+
+ def prepare_op_int_le(self, op):
+ return self.prepare_int_cmp(op, False)
+
+ def prepare_op_label(self, op):
+ descr = op.getdescr()
+ assert isinstance(descr, TargetToken)
+ inputargs = op.getarglist()
+ arglocs = [None] * len(inputargs)
+ #
+ # we use force_spill() on the boxes that are not going to be really
+ # used any more in the loop, but that are kept alive anyway
+ # by being in a next LABEL's or a JUMP's argument or fail_args
+ # of some guard
+ position = self.rm.position
+ for arg in inputargs:
+ assert not isinstance(arg, Const)
+ if self.last_real_usage.get(arg, -1) <= position:
+ self.force_spill_var(arg)
+
+ #
+ for i in range(len(inputargs)):
+ arg = inputargs[i]
+ assert not isinstance(arg, Const)
+ loc = self.loc(arg)
+ arglocs[i] = loc
+ if loc.is_core_reg() or loc.is_vfp_reg():
+ self.frame_manager.mark_as_free(arg)
+ #
+ descr._arm_arglocs = arglocs
+ descr._ll_loop_code = self.assembler.mc.currpos()
+ descr._arm_clt = self.assembler.current_clt
+ self.assembler.target_tokens_currently_compiling[descr] = None
+ self.possibly_free_vars_for_op(op)
+ #
+ # if the LABEL's descr is precisely the target of the JUMP at the
+ # end of the same loop, i.e. if what we are compiling is a single
+ # loop that ends up jumping to this LABEL, then we can now provide
+ # the hints about the expected position of the spilled variables.
+ jump_op = self.final_jump_op
+ if jump_op is not None and jump_op.getdescr() is descr:
+ self._compute_hint_frame_locations_from_descr(descr)
+ return []
+
def prepare_op_finish(self, op):
# the frame is in fp, but we have to point where in the frame is
# the potential argument to FINISH
@@ -308,6 +362,10 @@
locs = []
return locs
+ def prepare_guard_op_guard_true(self, op, prevop):
+ arglocs = self.assembler.dispatch_comparison(prevop)
+ xxx
+
prepare_op_nursery_ptr_increment = prepare_op_int_add
def force_allocate_reg(self, var, forbidden_vars=[], selected_reg=None):
@@ -330,8 +388,17 @@
print "[ARM64/regalloc] %s not implemented" % op.getopname()
raise NotImplementedError(op)
+def notimplemented_guard_op(self, op, prevop):
+ print "[ARM64/regalloc] %s not implemented" % op.getopname()
+ raise NotImplementedError(op)
+
+def notimplemented_comp_op(self, op, res_in_cc):
+ print "[ARM64/regalloc] %s not implemented" % op.getopname()
+ raise NotImplementedError(op)
operations = [notimplemented] * (rop._LAST + 1)
+guard_operations = [notimplemented_guard_op] * (rop._LAST + 1)
+comp_operations = [notimplemented_comp_op] * (rop._LAST + 1)
for key, value in rop.__dict__.items():
@@ -342,3 +409,12 @@
if hasattr(Regalloc, methname):
func = getattr(Regalloc, methname).im_func
operations[value] = func
+ methname = 'prepare_guard_op_%s' % key
+ if hasattr(Regalloc, methname):
+ func = getattr(Regalloc, methname).im_func
+ guard_operations[value] = func
+ methname = 'prepare_comp_op_%s' % key
+ if hasattr(Regalloc, methname):
+ func = getattr(Regalloc, methname).im_func
+ comp_operations[value] = func
+
\ No newline at end of file
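
As in the assembler, the new tables are filled purely by naming convention: any
prepare_op_*, prepare_guard_op_* or prepare_comp_op_* method found on Regalloc is stored
at the corresponding opnum, so prepare_comp_op_int_le lands in comp_operations[rop.INT_LE].
A minimal sketch of the same registration pattern with made-up stand-ins (Ops and Backend
below are illustrative only, not PyPy classes):

    class Ops(object):
        # stand-in for rpython.jit.metainterp.resoperation.rop
        INT_LE = 0
        INT_LT = 1
        _LAST = 1

    class Backend(object):
        def prepare_comp_op_int_le(self, op, res_in_cc):
            return ('int_le', op, res_in_cc)

    def notimplemented_comp_op(self, op, res_in_cc):
        raise NotImplementedError(op)

    comp_operations = [notimplemented_comp_op] * (Ops._LAST + 1)
    for key, value in Ops.__dict__.items():
        if key.startswith('_'):
            continue
        methname = 'prepare_comp_op_%s' % key.lower()
        if hasattr(Backend, methname):
            comp_operations[value] = getattr(Backend, methname)

    # any opnum without a matching method keeps the notimplemented placeholder
    assert comp_operations[Ops.INT_LE](Backend(), 'op', True) == ('int_le', 'op', True)
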
diff --git a/rpython/jit/backend/aarch64/test/test_instr_builder.py b/rpython/jit/backend/aarch64/test/test_instr_builder.py
--- a/rpython/jit/backend/aarch64/test/test_instr_builder.py
+++ b/rpython/jit/backend/aarch64/test/test_instr_builder.py
@@ -124,6 +124,14 @@
cb.ADD_rr(rd.value, rn.value, rm.value)
assert cb.hexdump() == assemble("ADD %r, %r, %r" % (rd, rn, rm))
+ @settings(max_examples=20)
+ @given(rn=st.sampled_from(r.registers),
+ rm=st.sampled_from(r.registers))
+ def test_CMP_rr(self, rn, rm):
+ cb = CodeBuilder()
+ cb.CMP_rr(rn.value, rm.value)
+ assert cb.hexdump() == assemble("CMP %r, %r" % (rn, rm))
+
def test_BRK(self):
cb = CodeBuilder()
cb.BRK()