Re: [Qemu-devel] [PATCH 4/8] target/ppc: Optimize emulation of vgbbd instruction

2019-06-26 Thread Richard Henderson
On 6/19/19 1:03 PM, Stefan Brankovic wrote:
> Optimize the Altivec instruction vgbbd (Vector Gather Bits by Bytes by
> Doubleword). The ith bits (i in range 1 to 8) of each byte of a doubleword
> element in the source register are concatenated and placed into the ith byte
> of the corresponding doubleword element in the destination register.
> 
> The solution below processes both doubleword elements of the source register
> in parallel, to reduce the number of instructions needed (which is why arrays
> are used):
> First, both doubleword elements of source register vB are placed in the
> corresponding elements of the array avr. Bits are gathered in 2x8 iterations
> (two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
> ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
> can be ANDed with tcg_mask. In every following iteration, both avr[i] and
> tcg_mask have to be shifted right by 7 and 8 places, respectively, to bring
> bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 into their final
> spots, so the shifted avr values (saved in tmp) can be ANDed with the new
> value of tcg_mask... After the first 8 iterations (the first loop), all the
> first bits are in their final places, all the second bits except the second
> bit of the eighth byte are in their places, ... and only one eighth bit, the
> one from the eighth byte, is in its place. In the second loop we do all
> operations symmetrically, to bring the other half of the bits into their
> final spots. Results for the first and second doubleword elements are saved
> in result[0] and result[1] respectively, and finally stored in the
> corresponding doubleword elements of destination register vD.
> 
> Signed-off-by: Stefan Brankovic 
> ---
>  target/ppc/helper.h                 |   1 -
>  target/ppc/int_helper.c             | 276 --------------------------------
>  target/ppc/translate/vmx-impl.inc.c |  77 +-
>  3 files changed, 76 insertions(+), 278 deletions(-)

Reviewed-by: Richard Henderson 


r~
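
The operation vgbbd performs is, within each doubleword, an 8x8 bit-matrix
transpose: bit i of byte j of the source ends up as bit j of byte i of the
result, which is what the shift-and-mask loops in the patch implement. A
minimal scalar sketch of those semantics, with a hypothetical helper name
that is not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* Transpose the 8x8 bit matrix held in one doubleword: row j is byte j,
     * so bit i of byte j ends up as bit j of byte i. */
    static uint64_t vgbbd_dword_ref(uint64_t src)
    {
        uint64_t dst = 0;

        for (int i = 0; i < 8; i++) {          /* destination byte */
            for (int j = 0; j < 8; j++) {      /* destination bit  */
                uint64_t bit = (src >> (8 * j + i)) & 1;
                dst |= bit << (8 * i + j);
            }
        }
        return dst;
    }

    int main(void)
    {
        /* The "diagonal" identity pattern is its own transpose. */
        assert(vgbbd_dword_ref(0x8040201008040201ull) == 0x8040201008040201ull);
        return 0;
    }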




[Qemu-devel] [PATCH 4/8] target/ppc: Optimize emulation of vgbbd instruction

2019-06-19 Thread Stefan Brankovic
Optimize the Altivec instruction vgbbd (Vector Gather Bits by Bytes by
Doubleword). The ith bits (i in range 1 to 8) of each byte of a doubleword
element in the source register are concatenated and placed into the ith byte
of the corresponding doubleword element in the destination register.

The solution below processes both doubleword elements of the source register
in parallel, to reduce the number of instructions needed (which is why arrays
are used):
First, both doubleword elements of source register vB are placed in the
corresponding elements of the array avr. Bits are gathered in 2x8 iterations
(two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
can be ANDed with tcg_mask. In every following iteration, both avr[i] and
tcg_mask have to be shifted right by 7 and 8 places, respectively, to bring
bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 into their final
spots, so the shifted avr values (saved in tmp) can be ANDed with the new
value of tcg_mask... After the first 8 iterations (the first loop), all the
first bits are in their final places, all the second bits except the second
bit of the eighth byte are in their places, ... and only one eighth bit, the
one from the eighth byte, is in its place. In the second loop we do all
operations symmetrically, to bring the other half of the bits into their
final spots. Results for the first and second doubleword elements are saved
in result[0] and result[1] respectively, and finally stored in the
corresponding doubleword elements of destination register vD.

Signed-off-by: Stefan Brankovic 
---
 target/ppc/helper.h                 |   1 -
 target/ppc/int_helper.c             | 276 --------------------------------
 target/ppc/translate/vmx-impl.inc.c |  77 +-
 3 files changed, 76 insertions(+), 278 deletions(-)

diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 7a3d68d..0aa1e05 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -319,7 +319,6 @@ DEF_HELPER_1(vclzlsbb, tl, avr)
 DEF_HELPER_1(vctzlsbb, tl, avr)
 DEF_HELPER_3(vbpermd, void, avr, avr, avr)
 DEF_HELPER_3(vbpermq, void, avr, avr, avr)
-DEF_HELPER_2(vgbbd, void, avr, avr)
 DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
 DEF_HELPER_3(vpmsumh, void, avr, avr, avr)
 DEF_HELPER_3(vpmsumw, void, avr, avr, avr)
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index f397380..1e32549 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1185,282 +1185,6 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 #undef VBPERMQ_INDEX
 #undef VBPERMQ_DW
 
-static const uint64_t VGBBD_MASKS[256] = {
-    0x0000000000000000ull, /* 00 */
-    0x0000000000000080ull, /* 01 */
-    0x0000000000008000ull, /* 02 */
-    0x0000000000008080ull, /* 03 */
-    0x0000000000800000ull, /* 04 */
-    0x0000000000800080ull, /* 05 */
-    0x0000000000808000ull, /* 06 */
-    0x0000000000808080ull, /* 07 */
-    0x0000000080000000ull, /* 08 */
-    0x0000000080000080ull, /* 09 */
-    0x0000000080008000ull, /* 0A */
-    0x0000000080008080ull, /* 0B */
-    0x0000000080800000ull, /* 0C */
-    0x0000000080800080ull, /* 0D */
-    0x0000000080808000ull, /* 0E */
-    0x0000000080808080ull, /* 0F */
-    0x0000008000000000ull, /* 10 */
-    0x0000008000000080ull, /* 11 */
-    0x0000008000008000ull, /* 12 */
-    0x0000008000008080ull, /* 13 */
-    0x0000008000800000ull, /* 14 */
-    0x0000008000800080ull, /* 15 */
-    0x0000008000808000ull, /* 16 */
-    0x0000008000808080ull, /* 17 */
-    0x0000008080000000ull, /* 18 */
-    0x0000008080000080ull, /* 19 */
-    0x0000008080008000ull, /* 1A */
-    0x0000008080008080ull, /* 1B */
-    0x0000008080800000ull, /* 1C */
-    0x0000008080800080ull, /* 1D */
-    0x0000008080808000ull, /* 1E */
-    0x0000008080808080ull, /* 1F */
-    0x0000800000000000ull, /* 20 */
-    0x0000800000000080ull, /* 21 */
-    0x0000800000008000ull, /* 22 */
-    0x0000800000008080ull, /* 23 */
-    0x0000800000800000ull, /* 24 */
-    0x0000800000800080ull, /* 25 */
-    0x0000800000808000ull, /* 26 */
-    0x0000800000808080ull, /* 27 */
-    0x0000800080000000ull, /* 28 */
-    0x0000800080000080ull, /* 29 */
-    0x0000800080008000ull, /* 2A */
-    0x0000800080008080ull, /* 2B */
-    0x0000800080800000ull, /* 2C */
-    0x0000800080800080ull, /* 2D */
-    0x0000800080808000ull, /* 2E */
-    0x0000800080808080ull, /* 2F */
-    0x0000808000000000ull, /* 30 */
-    0x0000808000000080ull, /* 31 */
-    0x0000808000008000ull, /* 32 */
-    0x0000808000008080ull, /* 33 */
-    0x0000808000800000ull, /* 34 */
-    0x0000808000800080ull, /* 35 */
-    0x0000808000808000ull, /* 36 */
-    0x0000808000808080ull, /* 37 */
-    0x0000808080000000ull, /* 38 */
-    0x0000808080000080ull, /* 39 */
-    0x0000808080008000ull, /* 3A */
-    0x0000808080008080ull, /* 3B */
-    0x0000808080800000ull, /* 3C */
-    0x0000808080800080ull, /* 3D */
-    0x0000808080808000ull, /* 3E */
-    0x0000808080808080ull, /* 3F */
-0x00800
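
The deleted table follows a regular pattern: entry n carries 0x80 in byte j
for every set bit j of the index n. A small generator sketch, assuming that
reading of the table (the function name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Build the table entry for index n: 0x80 in byte j for each set bit j. */
    static uint64_t vgbbd_mask_entry(unsigned n)
    {
        uint64_t m = 0;

        for (int j = 0; j < 8; j++) {
            if (n & (1u << j)) {
                m |= 0x80ull << (8 * j);
            }
        }
        return m;
    }

    int main(void)
    {
        assert(vgbbd_mask_entry(0x15) == 0x0000008000800080ull);
        assert(vgbbd_mask_entry(0x3f) == 0x0000808080808080ull);
        return 0;
    }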

Re: [Qemu-devel] [PATCH 4/8] target/ppc: Optimize emulation of vgbbd instruction

2019-06-17 Thread Stefan Brankovic



On 6.6.19. 20:19, Richard Henderson wrote:

On 6/6/19 5:15 AM, Stefan Brankovic wrote:

Optimize the Altivec instruction vgbbd (Vector Gather Bits by Bytes by
Doubleword). The ith bits (i in range 1 to 8) of each byte of a doubleword
element in the source register are concatenated and placed into the ith byte
of the corresponding doubleword element in the destination register.

The solution below is applied to every doubleword element of the source
register (held in the variable shifted):
We gather bits in 2x8 iterations.
In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of byte 8
are already in their final spots, so we just AND avr with mask. For every
following iteration, we shift right both shifted (by 7 places) and mask (by 8
places), which brings bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8
into the right places, so we AND shifted with the new value of mask... After
the first 8 iterations (the first for loop), all the first bits are in their
final places, all the second bits except the second bit of the eighth byte
are in their places, ... and only one eighth bit, the one from the eighth
byte, is in its place. We then AND result1 with mask1 to keep exactly the
bits that are already in the right place. In the second loop we do all
operations symmetrically, which brings the other half of the bits into their
final spots, and save the result in result2. The OR of result1 and result2 is
placed in the corresponding doubleword element of vD. We repeat this for both
doublewords.

Signed-off-by: Stefan Brankovic 
---
 target/ppc/translate/vmx-impl.inc.c | 99 ++++++++++++++++++++++++++++++++-
  1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 87f69dc..010f337 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -780,6 +780,103 @@ static void trans_vsr(DisasContext *ctx)
  tcg_temp_free_i64(tmp);
  }
  
+/*
+ * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
+ *
+ * All ith bits (i in range 1 to 8) of each byte of a doubleword element in
+ * the source register are concatenated and placed into the ith byte of the
+ * corresponding doubleword element in the destination register.
+ *
+ * The solution below is applied to every doubleword element of the source
+ * register (held in the variable shifted):
+ * We gather bits in 2x8 iterations.
+ * In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of
+ * byte 8 are already in their final spots, so we just AND avr with mask.
+ * For every following iteration, we shift right both shifted (by 7 places)
+ * and mask (by 8 places), which brings bit 1 of byte 2, bit 2 of byte 3,
+ * ... bit 7 of byte 8 into the right places, so we AND shifted with the
+ * new value of mask... After the first 8 iterations (the first for loop),
+ * all the first bits are in their final places, all the second bits except
+ * the second bit of the eighth byte are in their places, ... and only one
+ * eighth bit, the one from the eighth byte, is in its place. We then AND
+ * result1 with mask1 to keep exactly the bits that are already in the
+ * right place. In the second loop we do all operations symmetrically,
+ * which brings the other half of the bits into their final spots, and
+ * save the result in result2. The OR of result1 and result2 is placed in
+ * the corresponding doubleword element of vD. We repeat this for both
+ * doublewords.
+ */
+static void trans_vgbbd(DisasContext *ctx)
+{
+    int VT = rD(ctx->opcode);
+    int VB = rB(ctx->opcode);
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    TCGv_i64 avr = tcg_temp_new_i64();
+    TCGv_i64 shifted = tcg_temp_new_i64();
+    TCGv_i64 result1 = tcg_temp_new_i64();
+    TCGv_i64 result2 = tcg_temp_new_i64();
+    uint64_t mask = 0x8040201008040201ULL;
+    uint64_t mask1 = 0x80c0e0f0f8fcfeffULL;
+    uint64_t mask2 = 0x7f3f1f0f07030100ULL;
+    int i;
+
+    get_avr64(avr, VB, true);
+    tcg_gen_movi_i64(result1, 0x0ULL);
+    tcg_gen_mov_i64(shifted, avr);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, shifted, mask);
+        tcg_gen_or_i64(result1, result1, tmp);
+
+        tcg_gen_shri_i64(shifted, shifted, 7);
+        mask = mask >> 8;
+    }
+    tcg_gen_andi_i64(result1, result1, mask1);

This masking appears to be redundant with the masking within the loop.


+
+    mask = 0x8040201008040201ULL;
+    tcg_gen_movi_i64(result2, 0x0ULL);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, avr, mask);
+        tcg_gen_or_i64(result2, result2, tmp);
+
+        tcg_gen_shli_i64(avr, avr, 7);
+        mask = mask << 8;
+    }
+    tcg_gen_andi_i64(result2, result2, mask2);

Similarly.

Also, the first iteration of the second loop is redundant with the first
iteration of the first loop.
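
The first redundancy can be checked numerically: the union of the eight
per-iteration masks already equals mask1, so the trailing AND with mask1
cannot clear any bit the loop kept. A self-contained sketch of that check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t m = 0x8040201008040201ULL;   /* identity mask      */
        uint64_t u = 0;                       /* union over 8 steps */

        for (int i = 0; i < 8; i++) {
            u |= m;
            m >>= 8;
        }
        assert(u == 0x80c0e0f0f8fcfeffULL);   /* equals mask1 */
        return 0;
    }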

I will also note that these are large constants, not easily constructable.
Therefore it would be best to avoid needing to construct them twice.  You can
do this by processing the two doublewords simultaneously.  e.g.

TCGv_i64 avr[2], out[2], tmp, tcg_mask;

identity_mask = 0x8040201008040201ull;
tcg_gen_movi_i64(tcg_mask, identity_mask);
for (j = 0; j < 2; j++) {
get_avr(avr[j], VB, j);
tcg_gen_and_i64(out
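
A minimal sketch of the combined approach suggested here, following the
wording of the later v2 commit message; the names and the abbreviated second
half are illustrative, not the final patch, and VB plus the get_avr64() and
set_avr64() helpers are assumed from vmx-impl.inc.c:

    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();
    TCGv_i64 avr[2], result[2];
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    for (j = 0; j < 2; j++) {
        avr[j] = tcg_temp_new_i64();
        result[j] = tcg_temp_new_i64();
        get_avr64(avr[j], VB, j == 0);
    }
    tcg_gen_movi_i64(tcg_mask, mask);
    /* Iteration 0: the diagonal bits are already in place. */
    for (j = 0; j < 2; j++) {
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    /* Iterations 1..7: data moves right by 7, mask by 8, so bit j of
     * byte j+i lands in bit j+i of byte j. */
    for (i = 1; i < 8; i++) {
        tcg_gen_shri_i64(tcg_mask, tcg_mask, 8);
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(avr[j], avr[j], 7);
            tcg_gen_and_i64(tmp, avr[j], tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    /* The symmetric half (reload avr, shift data left by 7 and the mask
     * left by 8) gathers the remaining bits; the results are then stored
     * with set_avr64(). */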

Re: [Qemu-devel] [PATCH 4/8] target/ppc: Optimize emulation of vgbbd instruction

2019-06-06 Thread Richard Henderson
On 6/6/19 5:15 AM, Stefan Brankovic wrote:
> Optimize the Altivec instruction vgbbd (Vector Gather Bits by Bytes by
> Doubleword). The ith bits (i in range 1 to 8) of each byte of a doubleword
> element in the source register are concatenated and placed into the ith byte
> of the corresponding doubleword element in the destination register.
> 
> The solution below is applied to every doubleword element of the source
> register (held in the variable shifted):
> We gather bits in 2x8 iterations.
> In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of byte
> 8 are already in their final spots, so we just AND avr with mask. For every
> following iteration, we shift right both shifted (by 7 places) and mask (by
> 8 places), which brings bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte
> 8 into the right places, so we AND shifted with the new value of mask...
> After the first 8 iterations (the first for loop), all the first bits are in
> their final places, all the second bits except the second bit of the eighth
> byte are in their places, ... and only one eighth bit, the one from the
> eighth byte, is in its place. We then AND result1 with mask1 to keep exactly
> the bits that are already in the right place. In the second loop we do all
> operations symmetrically, which brings the other half of the bits into their
> final spots, and save the result in result2. The OR of result1 and result2
> is placed in the corresponding doubleword element of vD. We repeat this for
> both doublewords.
> 
> Signed-off-by: Stefan Brankovic 
> ---
>  target/ppc/translate/vmx-impl.inc.c | 99 ++++++++++++++++++++++++++++++++-
>  1 file changed, 98 insertions(+), 1 deletion(-)
> 
> diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
> index 87f69dc..010f337 100644
> --- a/target/ppc/translate/vmx-impl.inc.c
> +++ b/target/ppc/translate/vmx-impl.inc.c
> @@ -780,6 +780,103 @@ static void trans_vsr(DisasContext *ctx)
>  tcg_temp_free_i64(tmp);
>  }
>  
> +/*
> + * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
> + *
> + * All ith bits (i in range 1 to 8) of each byte of a doubleword element in
> + * the source register are concatenated and placed into the ith byte of the
> + * corresponding doubleword element in the destination register.
> + *
> + * The solution below is applied to every doubleword element of the source
> + * register (held in the variable shifted):
> + * We gather bits in 2x8 iterations.
> + * In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of
> + * byte 8 are already in their final spots, so we just AND avr with mask.
> + * For every following iteration, we shift right both shifted (by 7 places)
> + * and mask (by 8 places), which brings bit 1 of byte 2, bit 2 of byte 3,
> + * ... bit 7 of byte 8 into the right places, so we AND shifted with the
> + * new value of mask... After the first 8 iterations (the first for loop),
> + * all the first bits are in their final places, all the second bits except
> + * the second bit of the eighth byte are in their places, ... and only one
> + * eighth bit, the one from the eighth byte, is in its place. We then AND
> + * result1 with mask1 to keep exactly the bits that are already in the
> + * right place. In the second loop we do all operations symmetrically,
> + * which brings the other half of the bits into their final spots, and
> + * save the result in result2. The OR of result1 and result2 is placed in
> + * the corresponding doubleword element of vD. We repeat this for both
> + * doublewords.
> + */
> +static void trans_vgbbd(DisasContext *ctx)
> +{
> +    int VT = rD(ctx->opcode);
> +    int VB = rB(ctx->opcode);
> +    TCGv_i64 tmp = tcg_temp_new_i64();
> +    TCGv_i64 avr = tcg_temp_new_i64();
> +    TCGv_i64 shifted = tcg_temp_new_i64();
> +    TCGv_i64 result1 = tcg_temp_new_i64();
> +    TCGv_i64 result2 = tcg_temp_new_i64();
> +    uint64_t mask = 0x8040201008040201ULL;
> +    uint64_t mask1 = 0x80c0e0f0f8fcfeffULL;
> +    uint64_t mask2 = 0x7f3f1f0f07030100ULL;
> +    int i;
> +
> +    get_avr64(avr, VB, true);
> +    tcg_gen_movi_i64(result1, 0x0ULL);
> +    tcg_gen_mov_i64(shifted, avr);
> +    for (i = 0; i < 8; i++) {
> +        tcg_gen_andi_i64(tmp, shifted, mask);
> +        tcg_gen_or_i64(result1, result1, tmp);
> +
> +        tcg_gen_shri_i64(shifted, shifted, 7);
> +        mask = mask >> 8;
> +    }
> +    tcg_gen_andi_i64(result1, result1, mask1);

This masking appears to be redundant with the masking within the loop.

> +
> +    mask = 0x8040201008040201ULL;
> +    tcg_gen_movi_i64(result2, 0x0ULL);
> +    for (i = 0; i < 8; i++) {
> +        tcg_gen_andi_i64(tmp, avr, mask);
> +        tcg_gen_or_i64(result2, result2, tmp);
> +
> +        tcg_gen_shli_i64(avr, avr, 7);
> +        mask = mask << 8;
> +    }
> +    tcg_gen_andi_i64(result2, result2, mask2);

Similarly.

Also, the first iteration of the second loop is redundant with the first
iteration of the first loop.
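
The mask constants make these observations concrete: mask1 and mask2 are
disjoint, together they cover all 64 bits, and the diagonal selected by the
identity mask lies entirely outside mask2, so the bits the second loop
computes in its first iteration are discarded by the AND with mask2. A quick
self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t id    = 0x8040201008040201ULL;
        uint64_t mask1 = 0x80c0e0f0f8fcfeffULL;
        uint64_t mask2 = 0x7f3f1f0f07030100ULL;

        assert((mask1 & mask2) == 0);        /* disjoint halves      */
        assert((mask1 | mask2) == ~0ULL);    /* full 64-bit coverage */
        assert((id & mask2) == 0);           /* diagonal not in mask2 */
        return 0;
    }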

I will also note that these are large constants, not easily constructable.
Therefore it would be best to avoid needing to construct them twice.  You can
do this by processing the two doublewords simultaneously.  e.g.

TCGv_i64 avr[2], out[2], tmp, tcg_mask;

identity_mask = 0x80402010080402

[Qemu-devel] [PATCH 4/8] target/ppc: Optimize emulation of vgbbd instruction

2019-06-06 Thread Stefan Brankovic
Optimize the Altivec instruction vgbbd (Vector Gather Bits by Bytes by
Doubleword). The ith bits (i in range 1 to 8) of each byte of a doubleword
element in the source register are concatenated and placed into the ith byte
of the corresponding doubleword element in the destination register.

The solution below is applied to every doubleword element of the source
register (held in the variable shifted):
We gather bits in 2x8 iterations.
In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of byte 8
are already in their final spots, so we just AND avr with mask. For every
following iteration, we shift right both shifted (by 7 places) and mask (by 8
places), which brings bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8
into the right places, so we AND shifted with the new value of mask... After
the first 8 iterations (the first for loop), all the first bits are in their
final places, all the second bits except the second bit of the eighth byte
are in their places, ... and only one eighth bit, the one from the eighth
byte, is in its place. We then AND result1 with mask1 to keep exactly the
bits that are already in the right place. In the second loop we do all
operations symmetrically, which brings the other half of the bits into their
final spots, and save the result in result2. The OR of result1 and result2 is
placed in the corresponding doubleword element of vD. We repeat this for both
doublewords.

Signed-off-by: Stefan Brankovic 
---
 target/ppc/translate/vmx-impl.inc.c | 99 ++++++++++++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 87f69dc..010f337 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -780,6 +780,103 @@ static void trans_vsr(DisasContext *ctx)
 tcg_temp_free_i64(tmp);
 }
 
+/*
+ * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
+ *
+ * All ith bits (i in range 1 to 8) of each byte of a doubleword element in
+ * the source register are concatenated and placed into the ith byte of the
+ * corresponding doubleword element in the destination register.
+ *
+ * The solution below is applied to every doubleword element of the source
+ * register (held in the variable shifted):
+ * We gather bits in 2x8 iterations.
+ * In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of
+ * byte 8 are already in their final spots, so we just AND avr with mask.
+ * For every following iteration, we shift right both shifted (by 7 places)
+ * and mask (by 8 places), which brings bit 1 of byte 2, bit 2 of byte 3,
+ * ... bit 7 of byte 8 into the right places, so we AND shifted with the
+ * new value of mask... After the first 8 iterations (the first for loop),
+ * all the first bits are in their final places, all the second bits except
+ * the second bit of the eighth byte are in their places, ... and only one
+ * eighth bit, the one from the eighth byte, is in its place. We then AND
+ * result1 with mask1 to keep exactly the bits that are already in the
+ * right place. In the second loop we do all operations symmetrically,
+ * which brings the other half of the bits into their final spots, and
+ * save the result in result2. The OR of result1 and result2 is placed in
+ * the corresponding doubleword element of vD. We repeat this for both
+ * doublewords.
+ */
+static void trans_vgbbd(DisasContext *ctx)
+{
+    int VT = rD(ctx->opcode);
+    int VB = rB(ctx->opcode);
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    TCGv_i64 avr = tcg_temp_new_i64();
+    TCGv_i64 shifted = tcg_temp_new_i64();
+    TCGv_i64 result1 = tcg_temp_new_i64();
+    TCGv_i64 result2 = tcg_temp_new_i64();
+    uint64_t mask = 0x8040201008040201ULL;
+    uint64_t mask1 = 0x80c0e0f0f8fcfeffULL;
+    uint64_t mask2 = 0x7f3f1f0f07030100ULL;
+    int i;
+
+    get_avr64(avr, VB, true);
+    tcg_gen_movi_i64(result1, 0x0ULL);
+    tcg_gen_mov_i64(shifted, avr);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, shifted, mask);
+        tcg_gen_or_i64(result1, result1, tmp);
+
+        tcg_gen_shri_i64(shifted, shifted, 7);
+        mask = mask >> 8;
+    }
+    tcg_gen_andi_i64(result1, result1, mask1);
+
+    mask = 0x8040201008040201ULL;
+    tcg_gen_movi_i64(result2, 0x0ULL);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, avr, mask);
+        tcg_gen_or_i64(result2, result2, tmp);
+
+        tcg_gen_shli_i64(avr, avr, 7);
+        mask = mask << 8;
+    }
+    tcg_gen_andi_i64(result2, result2, mask2);
+
+    tcg_gen_or_i64(result2, result2, result1);
+    set_avr64(VT, result2, true);
+
+    mask = 0x8040201008040201ULL;
+    get_avr64(avr, VB, false);
+    tcg_gen_movi_i64(result1, 0x0ULL);
+    tcg_gen_mov_i64(shifted, avr);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, shifted, mask);
+        tcg_gen_or_i64(result1, result1, tmp);
+
+        tcg_gen_shri_i64(shifted, shifted, 7);
+        mask = mask >> 8;
+    }
+    tcg_gen_andi_i64(result1, result1, mask1);
+
+    mask = 0x8040201008040201ULL;
+    tcg_gen_movi_i64(result2, 0x0ULL);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_andi_i64(tmp, avr, mask);
+        tcg_gen_or_i64(result2, result2, tmp);
+
+        tcg_gen_shli_i64(avr, avr, 7);
+        mask = mask <<