From: Joel Sing <j...@sing.id.au>

This prevents a load reservation from being placed in one
context/process, then being used in another, resulting in an SC
succeeding incorrectly and breaking atomics.
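As an illustration of the failure mode (a hypothetical sketch, not part
of the patch): guest code like the compare-and-swap below is lowered by
RISC-V compilers to an LR/SC retry loop, and its correctness depends on
the SC failing whenever the reservation taken by the LR is no longer
valid - for example because another process ran in between.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical illustration only.  GCC/Clang expand this builtin on
     * RISC-V to an LR/SC retry loop (lr.w; bne; sc.w; bnez).  If a stale
     * reservation from another context let the SC succeed, the swap could
     * be applied even though *addr changed while this task was descheduled.
     */
    static bool cas32(uint32_t *addr, uint32_t expected, uint32_t desired)
    {
        return __atomic_compare_exchange_n(addr, &expected, desired, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }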
Signed-off-by: Joel Sing <j...@sing.id.au>
Reviewed-by: Palmer Dabbelt <pal...@sifive.com>
Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
Signed-off-by: Palmer Dabbelt <pal...@sifive.com>
---
 target/riscv/cpu.c                      |  1 +
 target/riscv/cpu_helper.c               | 10 ++++++++++
 target/riscv/insn_trans/trans_rva.inc.c |  8 +++++++-
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 915b9e77df33..f8d07bd20ad7 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -297,6 +297,7 @@ static void riscv_cpu_reset(CPUState *cs)
     env->pc = env->resetvec;
 #endif
     cs->exception_index = EXCP_NONE;
+    env->load_res = -1;
     set_default_nan_mode(1, &env->fp_status);
 }
 
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index e1b079e69c60..e32b6126af05 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -132,6 +132,16 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
     }
     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
     env->priv = newpriv;
+
+    /*
+     * Clear the load reservation - otherwise a reservation placed in one
+     * context/process can be used by another, resulting in an SC succeeding
+     * incorrectly. Version 2.2 of the ISA specification explicitly requires
+     * this behaviour, while later revisions say that the kernel "should" use
+     * an SC instruction to force the yielding of a load reservation on a
+     * preemptive context switch. As a result, do both.
+     */
+    env->load_res = -1;
 }
 
 /* get_physical_address - get the physical address for this virtual address
diff --git a/target/riscv/insn_trans/trans_rva.inc.c b/target/riscv/insn_trans/trans_rva.inc.c
index f6dbbc065e15..fadd88849e2b 100644
--- a/target/riscv/insn_trans/trans_rva.inc.c
+++ b/target/riscv/insn_trans/trans_rva.inc.c
@@ -61,7 +61,7 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
 
     gen_set_label(l1);
     /*
-     * Address comparion failure. However, we still need to
+     * Address comparison failure. However, we still need to
      * provide the memory barrier implied by AQ/RL.
      */
     tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
@@ -69,6 +69,12 @@
     gen_set_gpr(a->rd, dat);
 
     gen_set_label(l2);
+    /*
+     * Clear the load reservation, since an SC must fail if there is
+     * an SC to any address, in between an LR and SC pair.
+     */
+    tcg_gen_movi_tl(load_res, -1);
+
     tcg_temp_free(dat);
     tcg_temp_free(src1);
     tcg_temp_free(src2);
-- 
2.21.0
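A note on the sentinel value (a hypothetical sketch, not from the
patch): -1 is a safe "no reservation" marker because an LR reservation
address is always a naturally aligned guest address and so can never
equal -1; once load_res is cleared, the address comparison at the top
of gen_sc reliably takes the failure path. A minimal model of that
check, using a hypothetical sc_may_succeed() helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for QEMU's target_ulong on a 64-bit target. */
    typedef uint64_t target_ulong;

    /*
     * Hypothetical model of the fast-fail check in gen_sc, not QEMU code:
     * load_res holds the reserved address, or (target_ulong)-1 when no
     * reservation is valid.  sc_addr is an aligned guest address, so it
     * can never compare equal to -1.
     */
    static bool sc_may_succeed(target_ulong load_res, target_ulong sc_addr)
    {
        return load_res == sc_addr;
    }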