Let's increment the length once, up front, instead of adding 1 at every use. While at it, clean up the comments. The memset() example is given as a programming note in the PoP, so drop that description.
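For reference, here is a minimal, illustrative sketch of the byte-at-a-time semantics the change relies on. It is not part of the patch, and the helper name mvc_reference is made up; it only shows why a destructive overlap with dest == src + 1 degenerates into a memset() of the first source byte.

#include <stdint.h>

/* Plain reference model of MVC: copy L + 1 bytes strictly left to right. */
static void mvc_reference(uint8_t *dest, const uint8_t *src, uint32_t l)
{
    l++;    /* MVC always copies one more byte than the L field specifies */

    /*
     * Each byte is stored before the next one is fetched, so with
     * dest == src + 1 every iteration re-reads the byte just written
     * and src[0] is propagated across the whole destination.
     */
    for (uint32_t i = 0; i < l; i++) {
        dest[i] = src[i];
    }
}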
Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 target/s390x/mem_helper.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 2e22c183bd..2bc2cd09c1 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -320,16 +320,20 @@ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
                __func__, l, dest, src);
 
-    /* mvc and memmove do not behave the same when areas overlap! */
-    /* mvc with source pointing to the byte after the destination is the
-       same as memset with the first source byte */
+    /* MVC always copies one more byte than specified - maximum is 256 */
+    l++;
+
+    /*
+     * "When the operands overlap, the result is obtained as if the operands
+     * were processed one byte at a time". Only non-destructive overlaps
+     * behave like memmove().
+     */
     if (dest == src + 1) {
-        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
-    } else if (dest < src || src + l < dest) {
-        fast_memmove(env, dest, src, l + 1, ra);
+        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l, ra);
+    } else if (dest < src || src + l <= dest) {
+        fast_memmove(env, dest, src, l, ra);
     } else {
-        /* slow version with byte accesses which always work */
-        for (i = 0; i <= l; i++) {
+        for (i = 0; i < l; i++) {
             uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
             cpu_stb_data_ra(env, dest + i, x, ra);
         }
-- 
2.21.0