From: Pierrick Bouvier <pierrick.bouv...@linaro.org>

Count the number of tb and insn executed using a conditional callback. We
ensure the callback has been called the expected number of times (per vcpu).
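For clarity, the accounting the test relies on can be sketched as follows (an
illustrative sketch only, not part of the patch): the conditional callback
fires whenever the per-vcpu track counter reaches cond_trigger_limit and
resets it, so the per-vcpu event count can always be reconstructed from the
number of triggers plus whatever is left in the track counter:

    /* Illustrative sketch, not part of the patch: how the per-vcpu event
     * count is reconstructed from the conditional-callback bookkeeping. */
    #include <assert.h>
    #include <stdint.h>

    static const uint64_t cond_trigger_limit = 100;

    static uint64_t reconstruct(uint64_t num_trigger, uint64_t track_left)
    {
        return num_trigger * cond_trigger_limit + track_left;
    }

    int main(void)
    {
        /* e.g. 3 callback triggers plus 42 leftover increments => 342 events */
        assert(reconstruct(3, 42) == 342);
        return 0;
    }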
Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
Message-Id: <20240502211522.346467-7-pierrick.bouv...@linaro.org>
Reviewed-by: Michael S. Tsirkin <m...@redhat.com>
Signed-off-by: Alex Bennée <alex.ben...@linaro.org>
Message-Id: <20240514174253.694591-9-alex.ben...@linaro.org>

diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
index 103c3a22f6..cd63827b7d 100644
--- a/tests/plugin/inline.c
+++ b/tests/plugin/inline.c
@@ -20,8 +20,14 @@ typedef struct {
     uint64_t count_insn_inline;
     uint64_t count_mem;
     uint64_t count_mem_inline;
+    uint64_t tb_cond_num_trigger;
+    uint64_t tb_cond_track_count;
+    uint64_t insn_cond_num_trigger;
+    uint64_t insn_cond_track_count;
 } CPUCount;
 
+static const uint64_t cond_trigger_limit = 100;
+
 typedef struct {
     uint64_t data_insn;
     uint64_t data_tb;
@@ -35,6 +41,10 @@ static qemu_plugin_u64 count_insn;
 static qemu_plugin_u64 count_insn_inline;
 static qemu_plugin_u64 count_mem;
 static qemu_plugin_u64 count_mem_inline;
+static qemu_plugin_u64 tb_cond_num_trigger;
+static qemu_plugin_u64 tb_cond_track_count;
+static qemu_plugin_u64 insn_cond_num_trigger;
+static qemu_plugin_u64 insn_cond_track_count;
 static struct qemu_plugin_scoreboard *data;
 static qemu_plugin_u64 data_insn;
 static qemu_plugin_u64 data_tb;
@@ -56,12 +66,19 @@ static void stats_insn(void)
     const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
     const uint64_t inl_per_vcpu =
         qemu_plugin_u64_sum(count_insn_inline);
+    const uint64_t cond_num_trigger =
+        qemu_plugin_u64_sum(insn_cond_num_trigger);
+    const uint64_t cond_track_left = qemu_plugin_u64_sum(insn_cond_track_count);
+    const uint64_t conditional =
+        cond_num_trigger * cond_trigger_limit + cond_track_left;
     printf("insn: %" PRIu64 "\n", expected);
     printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
     printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+    printf("insn: %" PRIu64 " (cond cb)\n", conditional);
     g_assert(expected > 0);
     g_assert(per_vcpu == expected);
     g_assert(inl_per_vcpu == expected);
+    g_assert(conditional == expected);
 }
 
 static void stats_tb(void)
@@ -70,12 +87,18 @@ static void stats_tb(void)
     const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
     const uint64_t inl_per_vcpu =
         qemu_plugin_u64_sum(count_tb_inline);
+    const uint64_t cond_num_trigger = qemu_plugin_u64_sum(tb_cond_num_trigger);
+    const uint64_t cond_track_left = qemu_plugin_u64_sum(tb_cond_track_count);
+    const uint64_t conditional =
+        cond_num_trigger * cond_trigger_limit + cond_track_left;
     printf("tb: %" PRIu64 "\n", expected);
     printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
     printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+    printf("tb: %" PRIu64 " (conditional cb)\n", conditional);
     g_assert(expected > 0);
     g_assert(per_vcpu == expected);
     g_assert(inl_per_vcpu == expected);
+    g_assert(conditional == expected);
 }
 
 static void stats_mem(void)
@@ -104,14 +127,35 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata)
         const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i);
         const uint64_t mem = qemu_plugin_u64_get(count_mem, i);
         const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i);
-        printf("cpu %d: tb (%" PRIu64 ", %" PRIu64 ") | "
-               "insn (%" PRIu64 ", %" PRIu64 ") | "
+        const uint64_t tb_cond_trigger =
+            qemu_plugin_u64_get(tb_cond_num_trigger, i);
+        const uint64_t tb_cond_left =
+            qemu_plugin_u64_get(tb_cond_track_count, i);
+        const uint64_t insn_cond_trigger =
+            qemu_plugin_u64_get(insn_cond_num_trigger, i);
+        const uint64_t insn_cond_left =
+            qemu_plugin_u64_get(insn_cond_track_count, i);
+        printf("cpu %d: tb (%" PRIu64 ", %" PRIu64
+               ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+               ") | "
+               "insn (%" PRIu64 ", %" PRIu64
+               ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+               ") | "
                "mem (%" PRIu64 ", %" PRIu64 ")"
                "\n",
-               i, tb, tb_inline, insn, insn_inline, mem, mem_inline);
+               i,
+               tb, tb_inline,
+               tb_cond_trigger, cond_trigger_limit, tb_cond_left,
+               insn, insn_inline,
+               insn_cond_trigger, cond_trigger_limit, insn_cond_left,
+               mem, mem_inline);
         g_assert(tb == tb_inline);
         g_assert(insn == insn_inline);
         g_assert(mem == mem_inline);
+        g_assert(tb_cond_trigger == tb / cond_trigger_limit);
+        g_assert(tb_cond_left == tb % cond_trigger_limit);
+        g_assert(insn_cond_trigger == insn / cond_trigger_limit);
+        g_assert(insn_cond_left == insn % cond_trigger_limit);
     }
 
     stats_tb();
@@ -132,6 +176,24 @@ static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
     g_mutex_unlock(&tb_lock);
 }
 
+static void vcpu_tb_cond_exec(unsigned int cpu_index, void *udata)
+{
+    g_assert(qemu_plugin_u64_get(tb_cond_track_count, cpu_index) ==
+             cond_trigger_limit);
+    g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
+    qemu_plugin_u64_set(tb_cond_track_count, cpu_index, 0);
+    qemu_plugin_u64_add(tb_cond_num_trigger, cpu_index, 1);
+}
+
+static void vcpu_insn_cond_exec(unsigned int cpu_index, void *udata)
+{
+    g_assert(qemu_plugin_u64_get(insn_cond_track_count, cpu_index) ==
+             cond_trigger_limit);
+    g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
+    qemu_plugin_u64_set(insn_cond_track_count, cpu_index, 0);
+    qemu_plugin_u64_add(insn_cond_num_trigger, cpu_index, 1);
+}
+
 static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
 {
     qemu_plugin_u64_add(count_insn, cpu_index, 1);
@@ -163,6 +225,12 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
     qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
         tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);
 
+    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+        tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_cond_track_count, 1);
+    qemu_plugin_register_vcpu_tb_exec_cond_cb(
+        tb, vcpu_tb_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+        QEMU_PLUGIN_COND_EQ, tb_cond_track_count, cond_trigger_limit, tb_store);
+
     for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
         struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
         void *insn_store = insn;
@@ -176,6 +244,13 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
         qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
             insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);
 
+        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+            insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_cond_track_count, 1);
+        qemu_plugin_register_vcpu_insn_exec_cond_cb(
+            insn, vcpu_insn_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+            QEMU_PLUGIN_COND_EQ, insn_cond_track_count, cond_trigger_limit,
+            insn_store);
+
         qemu_plugin_register_vcpu_mem_inline_per_vcpu(
             insn, QEMU_PLUGIN_MEM_RW,
             QEMU_PLUGIN_INLINE_STORE_U64,
@@ -207,6 +282,14 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
         counts, CPUCount, count_insn_inline);
     count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
         counts, CPUCount, count_mem_inline);
+    tb_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+        counts, CPUCount, tb_cond_num_trigger);
+    tb_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+        counts, CPUCount, tb_cond_track_count);
+    insn_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+        counts, CPUCount, insn_cond_num_trigger);
+    insn_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+        counts, CPUCount, insn_cond_track_count);
     data = qemu_plugin_scoreboard_new(sizeof(CPUData));
     data_insn = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_insn);
     data_tb = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_tb);
--
2.39.2