The vCPU count can be had more directly.
Signed-off-by: Jan Beulich
---
In the sh_make_shadow() case the question is whether it really was
intended to count all vCPU-s, rather than e.g. only all initialized
ones. I guess the problem would be the phase before the guest
actually starts secondary processors, but that could perhaps be
covered by using ->max_vcpus if otherwise 1 would result.
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1214,13 +1214,7 @@ const u8 sh_type_to_size[] = {
  * worth to make sure we never return zero. */
 static unsigned int shadow_min_acceptable_pages(struct domain *d)
 {
-    u32 vcpu_count = 1;
-    struct vcpu *v;
-
-    for_each_vcpu(d, v)
-        vcpu_count++;
-
-    return (vcpu_count * 128);
+    return (d->max_vcpus + 1) * 128;
 }
 
 /* Dispatcher function: call the per-mode function that will unhook the
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1476,16 +1476,14 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
          * pinning l3es.  This is not very quick but it doesn't happen
          * very often. */
         struct page_info *sp, *t;
-        struct vcpu *v2;
-        int l4count = 0, vcpus = 0;
+        unsigned int l4count = 0;
+
         page_list_for_each(sp, &d->arch.paging.shadow.pinned_shadows)
         {
             if ( sp->u.sh.type == SH_type_l4_64_shadow )
                 l4count++;
         }
-        for_each_vcpu ( d, v2 )
-            vcpus++;
-        if ( l4count > 2 * vcpus )
+        if ( l4count > 2 * d->max_vcpus )
         {
             /* Unpin all the pinned l3 tables, and don't pin any more. */
             page_list_for_each_safe(sp, t,
                                     &d->arch.paging.shadow.pinned_shadows)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel