Re: [kvm-devel] [PATCH] Clean up MMU functions to take struct kvm when appropriate (v2)

2007-10-11 Thread Avi Kivity
Anthony Liguori wrote:
 Sorry, I didn't guilt refresh before sending.  I'll have to modify my 
 patchbomb
 script to check for that to avoid this in the future.

 Some of the MMU functions take a struct kvm_vcpu even though they affect all
 VCPUs.  This patch cleans up some of them to instead take a struct kvm.  This
 makes things a bit more clear.

 The main thing that was confusing me was whether certain functions need to be
 called on all VCPUs.

   

Applied, thanks.

-- 
error compiling committee.c: too many arguments to function


-
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now  http://get.splunk.com/
___
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel


[kvm-devel] [PATCH] Clean up MMU functions to take struct kvm when appropriate

2007-10-10 Thread Anthony Liguori
Some of the MMU functions take a struct kvm_vcpu even though they affect all
VCPUs.  This patch cleans up some of them to instead take a struct kvm.  This
makes things a bit more clear.

The main thing that was confusing me was whether certain functions need to be
called on all VCPUs.

Signed-off-by: Anthony Liguori [EMAIL PROTECTED]

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ece0aa4..c260642 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -606,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page 
*page,
BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
gfn_t gfn)
 {
unsigned index;
@@ -616,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct 
kvm_vcpu *vcpu,
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -782,7 +782,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int 
kvm_nr_mmu_pages)
kvm-n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
unsigned index;
struct hlist_head *bucket;
@@ -793,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, 
gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, page);
+			kvm_mmu_zap_page(kvm, page);
 			r = 1;
 		}
 	return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
struct kvm_mmu_page *page;
 
-	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(kvm, page);
}
 }
 
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 447d2c3..4f6edf8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -268,11 +268,11 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 
spte |= PT_WRITABLE_MASK;
if (user_fault) {
-		mmu_unshadow(vcpu, gfn);
+		mmu_unshadow(vcpu->kvm, gfn);
 		goto unshadowed;
 	}
 
-	shadow = kvm_mmu_lookup_page(vcpu, gfn);
+	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
 	if (shadow) {
 		pgprintk("%s: found shadow page for %lx, marking ro\n",
 			 __FUNCTION__, gfn);

-
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now  http://get.splunk.com/
___
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel


[kvm-devel] [PATCH] Clean up MMU functions to take struct kvm when appropriate (v2)

2007-10-10 Thread Anthony Liguori
Sorry, I didn't guilt refresh before sending.  I'll have to modify my patchbomb
script to check for that to avoid this in the future.

Some of the MMU functions take a struct kvm_vcpu even though they affect all
VCPUs.  This patch cleans up some of them to instead take a struct kvm.  This
makes things a bit more clear.

The main thing that was confusing me was whether certain functions need to be
called on all VCPUs.

Signed-off-by: Anthony Liguori [EMAIL PROTECTED]

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ece0aa4..a5ca945 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -606,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page 
*page,
BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
gfn_t gfn)
 {
unsigned index;
@@ -616,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct 
kvm_vcpu *vcpu,
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -782,7 +782,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int 
kvm_nr_mmu_pages)
kvm-n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
unsigned index;
struct hlist_head *bucket;
@@ -793,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, 
gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, page);
+			kvm_mmu_zap_page(kvm, page);
 			r = 1;
 		}
 	return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
struct kvm_mmu_page *page;
 
-	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(kvm, page);
}
 }
 
@@ -1299,7 +1299,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, 
gva_t gva)
 {
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 
-	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 447d2c3..4f6edf8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -268,11 +268,11 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 
spte |= PT_WRITABLE_MASK;
if (user_fault) {
-		mmu_unshadow(vcpu, gfn);
+		mmu_unshadow(vcpu->kvm, gfn);
 		goto unshadowed;
 	}
 
-	shadow = kvm_mmu_lookup_page(vcpu, gfn);
+	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
 	if (shadow) {
 		pgprintk("%s: found shadow page for %lx, marking ro\n",
 			 __FUNCTION__, gfn);

-
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now  http://get.splunk.com/
___
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel


Re: [kvm-devel] [PATCH] Clean up MMU functions to take struct kvm when appropriate

2007-10-10 Thread Anthony Liguori
Anthony Liguori wrote:
 Some of the MMU functions take a struct kvm_vcpu even though they affect all
 VCPUs.  This patch cleans up some of them to instead take a struct kvm.  This
 makes things a bit more clear.

 The main thing that was confusing me was whether certain functions need to be
 called on all VCPUs.

 Signed-off-by: Anthony Liguori [EMAIL PROTECTED]
   

Please ignore this version of the patch.  It breaks the build b/c I 
forgot to guilt refresh before sending.  v2 is the right version of the 
patch to apply which should already be on the list.  Sorry for the noise.

Regards,

Anthony Liguori

-
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now  http://get.splunk.com/
___
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel