Author: hager <[email protected]>
Branch: ppc-jit-backend
Changeset: r52638:88498311be7e
Date: 2012-02-19 07:55 -0800
http://bitbucket.org/pypy/pypy/changeset/88498311be7e/
Log: replace all occurrences of alloc_scratch_reg and free_scratch_reg
with "with scratch_reg(mc):"
diff --git a/pypy/jit/backend/ppc/codebuilder.py
b/pypy/jit/backend/ppc/codebuilder.py
--- a/pypy/jit/backend/ppc/codebuilder.py
+++ b/pypy/jit/backend/ppc/codebuilder.py
@@ -999,13 +999,12 @@
self.ldx(rD.value, 0, rD.value)
def store_reg(self, source_reg, addr):
- self.alloc_scratch_reg()
- self.load_imm(r.SCRATCH, addr)
- if IS_PPC_32:
- self.stwx(source_reg.value, 0, r.SCRATCH.value)
- else:
- self.stdx(source_reg.value, 0, r.SCRATCH.value)
- self.free_scratch_reg()
+ with scratch_reg(self):
+ self.load_imm(r.SCRATCH, addr)
+ if IS_PPC_32:
+ self.stwx(source_reg.value, 0, r.SCRATCH.value)
+ else:
+ self.stdx(source_reg.value, 0, r.SCRATCH.value)
def b_offset(self, target):
curpos = self.currpos()
@@ -1025,17 +1024,15 @@
BI = condition[0]
BO = condition[1]
- self.alloc_scratch_reg()
- self.load_imm(r.SCRATCH, addr)
- self.mtctr(r.SCRATCH.value)
- self.free_scratch_reg()
+ with scratch_reg(self):
+ self.load_imm(r.SCRATCH, addr)
+ self.mtctr(r.SCRATCH.value)
self.bcctr(BO, BI)
def b_abs(self, address, trap=False):
- self.alloc_scratch_reg()
- self.load_imm(r.SCRATCH, address)
- self.mtctr(r.SCRATCH.value)
- self.free_scratch_reg()
+ with scratch_reg(self):
+ self.load_imm(r.SCRATCH, address)
+ self.mtctr(r.SCRATCH.value)
if trap:
self.trap()
self.bctr()
@@ -1043,17 +1040,16 @@
def call(self, address):
""" do a call to an absolute address
"""
- self.alloc_scratch_reg()
- if IS_PPC_32:
- self.load_imm(r.SCRATCH, address)
- else:
- self.store(r.TOC.value, r.SP.value, 5 * WORD)
- self.load_imm(r.r11, address)
- self.load(r.SCRATCH.value, r.r11.value, 0)
- self.load(r.r2.value, r.r11.value, WORD)
- self.load(r.r11.value, r.r11.value, 2 * WORD)
- self.mtctr(r.SCRATCH.value)
- self.free_scratch_reg()
+ with scratch_reg(self):
+ if IS_PPC_32:
+ self.load_imm(r.SCRATCH, address)
+ else:
+ self.store(r.TOC.value, r.SP.value, 5 * WORD)
+ self.load_imm(r.r11, address)
+ self.load(r.SCRATCH.value, r.r11.value, 0)
+ self.load(r.r2.value, r.r11.value, WORD)
+ self.load(r.r11.value, r.r11.value, 2 * WORD)
+ self.mtctr(r.SCRATCH.value)
self.bctrl()
if IS_PPC_64:
diff --git a/pypy/jit/backend/ppc/opassembler.py
b/pypy/jit/backend/ppc/opassembler.py
--- a/pypy/jit/backend/ppc/opassembler.py
+++ b/pypy/jit/backend/ppc/opassembler.py
@@ -210,12 +210,11 @@
# instead of XER could be more efficient
def _emit_ovf_guard(self, op, arglocs, cond):
# move content of XER to GPR
- self.mc.alloc_scratch_reg()
- self.mc.mfspr(r.SCRATCH.value, 1)
- # shift and mask to get comparison result
- self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 1, 0, 0)
- self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.mfspr(r.SCRATCH.value, 1)
+ # shift and mask to get comparison result
+ self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value, 1, 0, 0)
+ self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
self._emit_guard(op, arglocs, cond)
def emit_guard_no_overflow(self, op, arglocs, regalloc):
@@ -244,14 +243,13 @@
def _cmp_guard_class(self, op, locs, regalloc):
offset = locs[2]
if offset is not None:
- self.mc.alloc_scratch_reg()
- if offset.is_imm():
- self.mc.load(r.SCRATCH.value, locs[0].value, offset.value)
- else:
- assert offset.is_reg()
- self.mc.loadx(r.SCRATCH.value, locs[0].value, offset.value)
- self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ if offset.is_imm():
+ self.mc.load(r.SCRATCH.value, locs[0].value, offset.value)
+ else:
+ assert offset.is_reg()
+ self.mc.loadx(r.SCRATCH.value, locs[0].value, offset.value)
+ self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value)
else:
assert 0, "not implemented yet"
self._emit_guard(op, locs[3:], c.NE)
@@ -360,10 +358,9 @@
failargs = arglocs[5:]
self.mc.load_imm(loc1, pos_exception.value)
- self.mc.alloc_scratch_reg()
- self.mc.load(r.SCRATCH.value, loc1.value, 0)
- self.mc.cmp_op(0, r.SCRATCH.value, loc.value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load(r.SCRATCH.value, loc1.value, 0)
+ self.mc.cmp_op(0, r.SCRATCH.value, loc.value)
self._emit_guard(op, failargs, c.NE, save_exc=True)
self.mc.load_imm(loc, pos_exc_value.value)
@@ -371,11 +368,10 @@
if resloc:
self.mc.load(resloc.value, loc.value, 0)
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, 0)
- self.mc.store(r.SCRATCH.value, loc.value, 0)
- self.mc.store(r.SCRATCH.value, loc1.value, 0)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, 0)
+ self.mc.store(r.SCRATCH.value, loc.value, 0)
+ self.mc.store(r.SCRATCH.value, loc1.value, 0)
def emit_call(self, op, args, regalloc, force_index=-1):
adr = args[0].value
@@ -424,13 +420,12 @@
param_offset = ((BACKCHAIN_SIZE + MAX_REG_PARAMS)
* WORD) # space for first 8 parameters
- self.mc.alloc_scratch_reg()
- for i, arg in enumerate(stack_args):
- offset = param_offset + i * WORD
- if arg is not None:
- self.regalloc_mov(regalloc.loc(arg), r.SCRATCH)
- self.mc.store(r.SCRATCH.value, r.SP.value, offset)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ for i, arg in enumerate(stack_args):
+ offset = param_offset + i * WORD
+ if arg is not None:
+ self.regalloc_mov(regalloc.loc(arg), r.SCRATCH)
+ self.mc.store(r.SCRATCH.value, r.SP.value, offset)
# collect variables that need to go in registers
# and the registers they will be stored in
@@ -540,26 +535,25 @@
def emit_getinteriorfield_gc(self, op, arglocs, regalloc):
(base_loc, index_loc, res_loc,
ofs_loc, ofs, itemsize, fieldsize) = arglocs
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, itemsize.value)
- self.mc.mullw(r.SCRATCH.value, index_loc.value, r.SCRATCH.value)
- if ofs.value > 0:
- if ofs_loc.is_imm():
- self.mc.addic(r.SCRATCH.value, r.SCRATCH.value, ofs_loc.value)
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, itemsize.value)
+ self.mc.mullw(r.SCRATCH.value, index_loc.value, r.SCRATCH.value)
+ if ofs.value > 0:
+ if ofs_loc.is_imm():
+ self.mc.addic(r.SCRATCH.value, r.SCRATCH.value, ofs_loc.value)
+ else:
+ self.mc.add(r.SCRATCH.value, r.SCRATCH.value, ofs_loc.value)
+
+ if fieldsize.value == 8:
+ self.mc.ldx(res_loc.value, base_loc.value, r.SCRATCH.value)
+ elif fieldsize.value == 4:
+ self.mc.lwzx(res_loc.value, base_loc.value, r.SCRATCH.value)
+ elif fieldsize.value == 2:
+ self.mc.lhzx(res_loc.value, base_loc.value, r.SCRATCH.value)
+ elif fieldsize.value == 1:
+ self.mc.lbzx(res_loc.value, base_loc.value, r.SCRATCH.value)
else:
- self.mc.add(r.SCRATCH.value, r.SCRATCH.value, ofs_loc.value)
-
- if fieldsize.value == 8:
- self.mc.ldx(res_loc.value, base_loc.value, r.SCRATCH.value)
- elif fieldsize.value == 4:
- self.mc.lwzx(res_loc.value, base_loc.value, r.SCRATCH.value)
- elif fieldsize.value == 2:
- self.mc.lhzx(res_loc.value, base_loc.value, r.SCRATCH.value)
- elif fieldsize.value == 1:
- self.mc.lbzx(res_loc.value, base_loc.value, r.SCRATCH.value)
- else:
- assert 0
- self.mc.free_scratch_reg()
+ assert 0
#XXX Hack, Hack, Hack
if not we_are_translated():
@@ -750,13 +744,12 @@
bytes_loc = regalloc.force_allocate_reg(bytes_box, forbidden_vars)
scale = self._get_unicode_item_scale()
assert length_loc.is_reg()
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, 1 << scale)
- if IS_PPC_32:
- self.mc.mullw(bytes_loc.value, r.SCRATCH.value, length_loc.value)
- else:
- self.mc.mulld(bytes_loc.value, r.SCRATCH.value, length_loc.value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, 1 << scale)
+ if IS_PPC_32:
+ self.mc.mullw(bytes_loc.value, r.SCRATCH.value, length_loc.value)
+ else:
+ self.mc.mulld(bytes_loc.value, r.SCRATCH.value, length_loc.value)
length_box = bytes_box
length_loc = bytes_loc
# call memcpy()
@@ -871,10 +864,9 @@
def set_vtable(self, box, vtable):
if self.cpu.vtable_offset is not None:
adr = rffi.cast(lltype.Signed, vtable)
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, adr)
- self.mc.store(r.SCRATCH.value, r.RES.value, self.cpu.vtable_offset)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, adr)
+ self.mc.store(r.SCRATCH.value, r.RES.value, self.cpu.vtable_offset)
def emit_debug_merge_point(self, op, arglocs, regalloc):
pass
@@ -905,26 +897,25 @@
raise AssertionError(opnum)
loc_base = arglocs[0]
- self.mc.alloc_scratch_reg()
- self.mc.load(r.SCRATCH.value, loc_base.value, 0)
+ with scratch_reg(self.mc):
+ self.mc.load(r.SCRATCH.value, loc_base.value, 0)
- # get the position of the bit we want to test
- bitpos = descr.jit_wb_if_flag_bitpos
+ # get the position of the bit we want to test
+ bitpos = descr.jit_wb_if_flag_bitpos
- if IS_PPC_32:
- # put this bit to the rightmost bitposition of r0
- if bitpos > 0:
- self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value,
- 32 - bitpos, 31, 31)
- # test whether this bit is set
- self.mc.cmpwi(0, r.SCRATCH.value, 1)
- else:
- if bitpos > 0:
- self.mc.rldicl(r.SCRATCH.value, r.SCRATCH.value,
- 64 - bitpos, 63)
- # test whether this bit is set
- self.mc.cmpdi(0, r.SCRATCH.value, 1)
- self.mc.free_scratch_reg()
+ if IS_PPC_32:
+ # put this bit to the rightmost bitposition of r0
+ if bitpos > 0:
+ self.mc.rlwinm(r.SCRATCH.value, r.SCRATCH.value,
+ 32 - bitpos, 31, 31)
+ # test whether this bit is set
+ self.mc.cmpwi(0, r.SCRATCH.value, 1)
+ else:
+ if bitpos > 0:
+ self.mc.rldicl(r.SCRATCH.value, r.SCRATCH.value,
+ 64 - bitpos, 63)
+ # test whether this bit is set
+ self.mc.cmpdi(0, r.SCRATCH.value, 1)
jz_location = self.mc.currpos()
self.mc.nop()
@@ -988,10 +979,9 @@
# check value
resloc = regalloc.try_allocate_reg(resbox)
assert resloc is r.RES
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, value)
- self.mc.cmp_op(0, resloc.value, r.SCRATCH.value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, value)
+ self.mc.cmp_op(0, resloc.value, r.SCRATCH.value)
regalloc.possibly_free_var(resbox)
fast_jmp_pos = self.mc.currpos()
@@ -1034,11 +1024,10 @@
assert isinstance(fielddescr, FieldDescr)
ofs = fielddescr.offset
resloc = regalloc.force_allocate_reg(resbox)
- self.mc.alloc_scratch_reg()
- self.mov_loc_loc(arglocs[1], r.SCRATCH)
- self.mc.li(resloc.value, 0)
- self.mc.storex(resloc.value, 0, r.SCRATCH.value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mov_loc_loc(arglocs[1], r.SCRATCH)
+ self.mc.li(resloc.value, 0)
+ self.mc.storex(resloc.value, 0, r.SCRATCH.value)
regalloc.possibly_free_var(resbox)
if op.result is not None:
@@ -1054,13 +1043,12 @@
raise AssertionError(kind)
resloc = regalloc.force_allocate_reg(op.result)
regalloc.possibly_free_var(resbox)
- self.mc.alloc_scratch_reg()
- self.mc.load_imm(r.SCRATCH, adr)
- if op.result.type == FLOAT:
- assert 0, "not implemented yet"
- else:
- self.mc.loadx(resloc.value, 0, r.SCRATCH.value)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, adr)
+ if op.result.type == FLOAT:
+ assert 0, "not implemented yet"
+ else:
+ self.mc.loadx(resloc.value, 0, r.SCRATCH.value)
# merge point
offset = self.mc.currpos() - jmp_pos
@@ -1069,10 +1057,9 @@
pmc.b(offset)
pmc.overwrite()
- self.mc.alloc_scratch_reg()
- self.mc.load(r.SCRATCH.value, r.SPP.value, 0)
- self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load(r.SCRATCH.value, r.SPP.value, 0)
+ self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
self._emit_guard(guard_op, regalloc._prepare_guard(guard_op), c.LT)
diff --git a/pypy/jit/backend/ppc/ppc_assembler.py
b/pypy/jit/backend/ppc/ppc_assembler.py
--- a/pypy/jit/backend/ppc/ppc_assembler.py
+++ b/pypy/jit/backend/ppc/ppc_assembler.py
@@ -859,11 +859,10 @@
return
# move immediate value to memory
elif loc.is_stack():
- self.mc.alloc_scratch_reg()
- offset = loc.value
- self.mc.load_imm(r.SCRATCH, value)
- self.mc.store(r.SCRATCH.value, r.SPP.value, offset)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ offset = loc.value
+ self.mc.load_imm(r.SCRATCH, value)
+ self.mc.store(r.SCRATCH.value, r.SPP.value, offset)
return
assert 0, "not supported location"
elif prev_loc.is_stack():
@@ -876,10 +875,9 @@
# move in memory
elif loc.is_stack():
target_offset = loc.value
- self.mc.alloc_scratch_reg()
- self.mc.load(r.SCRATCH.value, r.SPP.value, offset)
- self.mc.store(r.SCRATCH.value, r.SPP.value, target_offset)
- self.mc.free_scratch_reg()
+ with scratch_reg(self.mc):
+ self.mc.load(r.SCRATCH.value, r.SPP.value, offset)
+ self.mc.store(r.SCRATCH.value, r.SPP.value, target_offset)
return
assert 0, "not supported location"
elif prev_loc.is_reg():
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit