Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r82063:621a42ebea23
Date: 2016-02-03 20:19 +0100
http://bitbucket.org/pypy/pypy/changeset/621a42ebea23/
Log:	cond_call_gc_wb_array can no longer trash a volatile register: the
	register allocation now picks a non-volatile register for the index.
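
The gist of the fix, reassembled from the regalloc.py hunk further down
into one plain sketch (an illustration, not the exact backend source; the
r2..r6 clobbering is the one described in that hunk's comment):

    def prepare_cond_call_gc_wb_array(self, op):
        # the slow path emitted for this op calls a write-barrier
        # subroutine that may trash the volatile registers r2..r6,
        # so pin the index operand to r7, which survives the call
        arglocs = [self.ensure_reg(op.getarg(0), force_in_reg=True),
                   self.ensure_reg_or_16bit_imm(op.getarg(1),
                                                selected_reg=r.r7),
                   None]
        if arglocs[1].is_reg():
            arglocs[2] = self.get_scratch_reg(INT)
        return arglocs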
diff --git a/rpython/jit/backend/zarch/assembler.py
b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -241,7 +241,7 @@
self._restore_exception(mc, RCS2, RCS3)
if withcards:
- # A final andix before the blr, for the caller. Careful to
+ # A final NILL before the return to the caller. Careful to
# not follow this instruction with another one that changes
# the status of the condition code
card_marking_mask = descr.jit_wb_cards_set_singlebyte
diff --git a/rpython/jit/backend/zarch/opassembler.py
b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -495,7 +495,7 @@
mc.NILL(r.SCRATCH, l.imm(mask & 0xFF))
jz_location = mc.get_relative_pos()
- mc.reserve_cond_jump() # patched later with 'EQ'
+ mc.reserve_cond_jump(short=True) # patched later with 'EQ'
# for cond_call_gc_wb_array, also add another fast path:
# if GCFLAG_CARDS_SET, then we can just set one bit and be done
@@ -535,7 +535,7 @@
# So here, we can simply write again a beq, which will be
# taken if GCFLAG_CARDS_SET is still not set.
jns_location = mc.get_relative_pos()
- mc.reserve_cond_jump()
+ mc.reserve_cond_jump(short=True)
#
# patch the 'NE' above
currpos = mc.currpos()
@@ -547,6 +547,8 @@
# directly the card flag setting
loc_index = arglocs[1]
if loc_index.is_reg():
+ # must be a register that is preserved across function calls
+ assert loc_index.value >= 6
tmp_loc = arglocs[2]
n = descr.jit_wb_card_page_shift
@@ -562,15 +564,16 @@
# 0x80 sets the zero flag. will store 0 into all non-selected bits
mc.RISBGN(r.SCRATCH, loc_index, l.imm(61), l.imm(0x80 | 63),
l.imm(64-n))
+ # set SCRATCH2 to 1 << r1
# invert the bits of tmp_loc
- mc.LCGR(tmp_loc, tmp_loc)
#mc.XIHF(tmp_loc, l.imm(0xffffFFFF))
#mc.XILF(tmp_loc, l.imm(0xffffFFFF))
-
- # set SCRATCH2 to 1 << r1
+ mc.LG(r.SCRATCH2, l.pool(self.pool.constant_64_ones))
+ mc.XGR(tmp_loc, r.SCRATCH2)
mc.LGHI(r.SCRATCH2, l.imm(1))
mc.SLAG(r.SCRATCH2, r.SCRATCH2, l.addr(0,r.SCRATCH))
+
# set this bit inside the byte of interest
addr = l.addr(0, loc_base, tmp_loc)
mc.LLGC(r.SCRATCH, addr)
@@ -591,13 +594,13 @@
# patch the beq just above
currpos = mc.currpos()
pmc = OverwritingBuilder(mc, jns_location, 1)
- pmc.BRCL(c.EQ, l.imm(currpos - jns_location))
+ pmc.BRC(c.EQ, l.imm(currpos - jns_location))
pmc.overwrite()
# patch the JZ above
currpos = mc.currpos()
pmc = OverwritingBuilder(mc, jz_location, 1)
- pmc.BRCL(c.EQ, l.imm(currpos - jz_location))
+ pmc.BRC(c.EQ, l.imm(currpos - jz_location))
pmc.overwrite()
def emit_cond_call_gc_wb(self, op, arglocs, regalloc):
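
For reference, a small Python sketch of what the card-marking sequence
above computes, assuming the usual PyPy card layout where the card bytes
sit at negative offsets in front of the object, with n standing for
descr.jit_wb_card_page_shift. The XGR against the all-ones pool constant
is the bitwise complement; the commented-out XIHF/XILF pair and the new
test_complement further down check the same identity:

    # illustration only; 'mem' stands for the byte-addressable heap
    def mark_card(mem, loc_base, loc_index, n):
        bit  = (loc_index >> n) & 7        # RISBGN leaves this in SCRATCH
        byte = ~(loc_index >> (n + 3))     # complement via XGR with all ones
        mask = 1 << bit                    # LGHI 1, then SLAG by SCRATCH
        mem[loc_base + byte] |= mask       # LLGC, OR in the mask, store back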
diff --git a/rpython/jit/backend/zarch/regalloc.py
b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -99,9 +99,9 @@
forbidden_vars=self.temp_boxes)
return loc
- def get_scratch_reg(self,):
+ def get_scratch_reg(self, selected_reg=None):
box = TempFloat()
- reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes)
+ reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes,
selected_reg=selected_reg)
self.temp_boxes.append(box)
return reg
@@ -151,9 +151,9 @@
selected_reg=selected_reg)
return loc
- def get_scratch_reg(self):
+ def get_scratch_reg(self, selected_reg=None):
box = TempInt()
- reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes)
+ reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes,
selected_reg=selected_reg)
self.temp_boxes.append(box)
return reg
@@ -583,13 +583,13 @@
else:
return self.rm.ensure_reg(box, force_in_reg)
- def ensure_reg_or_16bit_imm(self, box):
+ def ensure_reg_or_16bit_imm(self, box, selected_reg=None):
if box.type == FLOAT:
return self.fprm.ensure_reg(box, True)
else:
if helper.check_imm(box):
return imm(box.getint())
- return self.rm.ensure_reg(box, force_in_reg=True)
+ return self.rm.ensure_reg(box, force_in_reg=True,
selected_reg=selected_reg)
def ensure_reg_or_any_imm(self, box):
if box.type == FLOAT:
@@ -599,11 +599,11 @@
return imm(box.getint())
return self.rm.ensure_reg(box, force_in_reg=True)
- def get_scratch_reg(self, type):
+ def get_scratch_reg(self, type, selected_reg=None):
if type == FLOAT:
return self.fprm.get_scratch_reg()
else:
- return self.rm.get_scratch_reg()
+ return self.rm.get_scratch_reg(selected_reg=selected_reg)
def free_op_vars(self):
# free the boxes in the 'temp_boxes' lists, which contain both
@@ -984,8 +984,11 @@
return arglocs
def prepare_cond_call_gc_wb_array(self, op):
+ # just calling ensure_reg may return one of the registers r2..r6,
+ # but the assembly below calls a subroutine that trashes r2..r6,
+ # so select two registers that are preserved across that call
arglocs = [self.ensure_reg(op.getarg(0), force_in_reg=True),
- self.ensure_reg_or_16bit_imm(op.getarg(1)),
+ self.ensure_reg_or_16bit_imm(op.getarg(1),
selected_reg=r.r7),
None]
if arglocs[1].is_reg():
arglocs[2] = self.get_scratch_reg(INT)
diff --git a/rpython/jit/backend/zarch/test/test_assembler.py
b/rpython/jit/backend/zarch/test/test_assembler.py
--- a/rpython/jit/backend/zarch/test/test_assembler.py
+++ b/rpython/jit/backend/zarch/test/test_assembler.py
@@ -202,6 +202,13 @@
self.a.mc.BCR(con.ANY, r.r14)
assert run_asm(self.a) == 0
+ def test_complement(self):
+ self.a.mc.load_imm(r.r2, 0)
+ #self.a.mc.LCGR(r.r2, r.r2)
+ self.a.mc.XIHF(r.r2, loc.imm(0xffffFFFF))
+ self.a.mc.XILF(r.r2, loc.imm(0xffffFFFF))
+ self.a.mc.BCR(con.ANY, r.r14)
+ assert run_asm(self.a) == -1
def test_load_small_int_to_reg(self):
self.a.mc.LGHI(r.r2, loc.imm(123))
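
The added test pins down the complement idiom used above: XOR'ing both
32-bit halves of a zeroed 64-bit register with 0xffffFFFF must give the
bitwise complement, i.e. -1. In Python terms (a sketch of the check, not
backend code):

    def complement64(x):
        x ^= 0xFFFFFFFF << 32        # XIHF: flip the upper 32 bits
        x ^= 0xFFFFFFFF              # XILF: flip the lower 32 bits
        if x >= 1 << 63:             # reinterpret as signed, like run_asm
            x -= 1 << 64
        return x

    assert complement64(0) == -1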