Hi, Jiaxun,

On Wed, Dec 30, 2020 at 11:41 AM Jiaxun Yang <jiaxun.y...@flygoat.com> wrote:
>
> Victim Cache is defined by Loongson as a per-core unified
> private cache.
> Add it to cacheinfo and make cache levels self-increment
> instead of hardcoding the levels.
>
> Signed-off-by: Jiaxun Yang <jiaxun.y...@flygoat.com>
> Reviewed-by: Tiezhu Yang <yangtie...@loongson.cn>
> Tested-by: Tiezhu Yang <yangtie...@loongson.cn>
> ---
>  arch/mips/kernel/cacheinfo.c | 34 ++++++++++++++++++++++++++--------
>  1 file changed, 26 insertions(+), 8 deletions(-)
>
> diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
> index 47312c529410..83548331ee94 100644
> --- a/arch/mips/kernel/cacheinfo.c
> +++ b/arch/mips/kernel/cacheinfo.c
> @@ -35,6 +35,11 @@ static int __init_cache_level(unsigned int cpu)
>
>         leaves += (c->icache.waysize) ? 2 : 1;
>
> +       if (c->vcache.waysize) {
> +               levels++;
> +               leaves++;
> +       }
> +
>         if (c->scache.waysize) {
>                 levels++;
>                 leaves++;
> @@ -74,25 +79,38 @@ static int __populate_cache_leaves(unsigned int cpu)
>         struct cpuinfo_mips *c = &current_cpu_data;
>         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>         struct cacheinfo *this_leaf = this_cpu_ci->info_list;
> +       int level = 1;
>
>         if (c->icache.waysize) {
> -               /* L1 caches are per core */
> +               /* D/I caches are per core */
It seems "I/D caches" reads better than "D/I caches"; see
arch/mips/include/asm/cpu-info.h and search for cache_desc.
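
For reference, the relevant field comments in that header read
roughly like this (quoted from memory, so double-check the tree):

	struct cache_desc	icache;	/* Primary I-cache */
	struct cache_desc	dcache;	/* Primary D or combined I/D cache */
	struct cache_desc	vcache;	/* Victim cache, between pcache and scache */
	struct cache_desc	scache;	/* Secondary cache */
	struct cache_desc	tcache;	/* Tertiary/split secondary cache */

The "combined I/D" wording is why the I/D ordering looks more
consistent here.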

Huacai
>                 fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
> -               populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
> +               populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
>                 fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
> -               populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
> +               populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
> +               level++;
>         } else {
> -               populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
> +               populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
> +               level++;
> +       }
> +
> +       if (c->vcache.waysize) {
> +               /* Vcache is per core as well */
> +               fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
> +               populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
> +               level++;
>         }
>
>         if (c->scache.waysize) {
> -               /* L2 cache is per cluster */
> +               /* Scache is per cluster */
>                 fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
> -               populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
> +               populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
> +               level++;
>         }
>
> -       if (c->tcache.waysize)
> -               populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
> +       if (c->tcache.waysize) {
> +               populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
> +               level++;
> +       }
>
>         this_cpu_ci->cpu_map_populated = true;
>
> --
> 2.30.0
>
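
To spell out the numbering this change produces, here is a minimal
user-space sketch (not kernel code; the topology and waysize values
below are made up, only zero vs. non-zero matters):

	#include <stdio.h>

	struct cache { unsigned int waysize; };

	int main(void)
	{
		/* Hypothetical Loongson-like topology: split L1 I/D,
		 * per-core victim cache, shared scache, no tcache. */
		struct cache icache = { 16 }, vcache = { 16 },
			     scache = { 64 }, tcache = { 0 };
		int level = 1;

		if (icache.waysize) {
			printf("L%d: split I/D caches (per core)\n", level);
			level++;
		}
		if (vcache.waysize)
			printf("L%d: vcache (unified, per core)\n", level++);
		if (scache.waysize)
			printf("L%d: scache (unified, per cluster)\n", level++);
		if (tcache.waysize)
			printf("L%d: tcache (unified)\n", level++);

		return 0;
	}

This prints L1/L2/L3; drop the vcache and the scache falls back to
L2, matching the old hardcoded numbering, which is the point of the
self-incrementing counter.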
