Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r81455:5d9f0ada3241
Date: 2015-12-28 14:04 +0100
http://bitbucket.org/pypy/pypy/changeset/5d9f0ada3241/
Log: copied stub for zero_array
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -892,6 +892,81 @@
self.mc.raw_call()
self.mc.restore_std_frame()
+    def emit_zero_array(self, op, arglocs, regalloc):
+        base_loc, startindex_loc, length_loc, ofs_loc, itemsize_loc = arglocs
+
+        # assume that for an array whose item size is N:
+        # * if N is even, then all items are aligned to a multiple of 2
+        # * if N % 4 == 0, then all items are aligned to a multiple of 4
+        # * if N % 8 == 0, then all items are aligned to a multiple of 8
+        itemsize = itemsize_loc.getint()
+        if itemsize & 1: stepsize = 1
+        elif itemsize & 2: stepsize = 2
+        elif itemsize & 4: stepsize = 4
+        else: stepsize = WORD
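+        # stepsize is the widest store (1, 2, 4 or WORD bytes) whose size
+        # divides itemsize, e.g. itemsize 6 is zeroed with 2-byte stores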
+
+        repeat_factor = itemsize // stepsize
+        if repeat_factor != 1:
+            # This is only for itemsize not in (1, 2, 4, WORD).
+            # Include the repeat_factor inside length_loc if it is a constant
+            if length_loc.is_imm():
+                length_loc = imm(length_loc.value * repeat_factor)
+                repeat_factor = 1 # included
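+                # e.g. itemsize 6, constant length 10: 30 stores of 2 bytes
+                # each clear the same 60 bytes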
+
+        unroll = -1
+        if length_loc.is_imm():
+            if length_loc.value <= 8:
+                unroll = length_loc.value
+                if unroll <= 0:
+                    return # nothing to do
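+        # unroll stays -1 for a non-constant or large length; those cases
+        # go through the counted loop further down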
+
+        ofs_loc = self._apply_scale(ofs_loc, startindex_loc, itemsize_loc)
+        ofs_loc = self._copy_in_scratch2(ofs_loc)
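+        # ofs_loc should now hold the byte offset of the first item to zero,
+        # copied into a scratch register so the stores below can update it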
+
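+        # constant length of at most 8: emit one store per step, no loop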
+        if unroll > 0:
+            assert repeat_factor == 1
+            self.mc.li(r.SCRATCH.value, 0)
+            self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value,
+                           itemsize)
+            for i in range(1, unroll):
+                self.eza_stX(r.SCRATCH.value, ofs_loc.value, i * stepsize,
+                             itemsize)
+
+        else:
+            if length_loc.is_imm():
+                self.mc.load_imm(r.SCRATCH, length_loc.value)
+                length_loc = r.SCRATCH
+                jz_location = -1
+                assert repeat_factor == 1
+            else:
+                self.mc.cmp_op(0, length_loc.value, 0, imm=True)
+                jz_location = self.mc.currpos()
+                self.mc.trap()
+                length_loc = self._multiply_by_constant(length_loc,
+                                                        repeat_factor,
+                                                        r.SCRATCH)
+            self.mc.mtctr(length_loc.value)
+            self.mc.li(r.SCRATCH.value, 0)
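+            # note: mtctr and the bdz/bdnz count-register branches below are
+            # PowerPC instructions kept from the copied stub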
+
+            self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value,
+                           itemsize)
+            bdz_location = self.mc.currpos()
+            self.mc.trap()
+
+            loop_location = self.mc.currpos()
+            self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize,
+                          itemsize)
+            self.mc.bdnz(loop_location - self.mc.currpos())
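+            # back-patching: bdz skips the loop once the count register runs
+            # out, ble skips everything when the length is not > 0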
+
+            pmc = OverwritingBuilder(self.mc, bdz_location, 1)
+            pmc.bdz(self.mc.currpos() - bdz_location)
+            pmc.overwrite()
+
+            if jz_location != -1:
+                pmc = OverwritingBuilder(self.mc, jz_location, 1)
+                pmc.ble(self.mc.currpos() - jz_location) # !GT
+                pmc.overwrite()
+
class ForceOpAssembler(object):
_mixin_ = True
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -901,6 +901,14 @@
else:
return self._prepare_call_default(op)
+    def prepare_zero_array(self, op):
+        itemsize, ofs, _ = unpack_arraydescr(op.getdescr())
+        base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True)
+        startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1))
+        length_loc = self.ensure_reg_or_16bit_imm(op.getarg(2))
+        ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
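+        # consumed in this exact order by emit_zero_array in opassembler.py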
+        return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize)]
+
def prepare_cond_call(self, op):
self.load_condition_into_cc(op.getarg(0))
locs = []
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit