KVM: direct mmio pfn check
author     Ben-Ami Yassour <benami@il.ibm.com>
           Mon, 28 Jul 2008 16:26:24 +0000 (19:26 +0300)
committer  Avi Kivity <avi@qumranet.com>
           Wed, 15 Oct 2008 08:15:17 +0000 (10:15 +0200)
Userspace may specify memory slots that are backed by mmio pages rather than
normal RAM.  In some cases pfn_valid() alone is not enough to identify these
mmio pages.  This patch adds a check of PageReserved() as well.

Signed-off-by: Ben-Ami Yassour <benami@il.ibm.com>
Signed-off-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
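
For context, such a slot can arise when userspace mmap()s a device BAR and
registers the mapping with KVM_SET_USER_MEMORY_REGION; the backing pfns are
then reserved mmio pages rather than ordinary struct-page RAM.  A minimal
userspace sketch, not part of this patch (the PCI resource path and slot
number are illustrative, and error handling is omitted):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int map_bar_into_guest(int vm_fd, __u64 gpa, __u64 size)
{
	/* mmap() a PCI BAR; the resulting pfns point at reserved mmio pages */
	int bar_fd = open("/sys/bus/pci/devices/0000:00:1f.0/resource0", O_RDWR);
	void *bar = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 bar_fd, 0);

	struct kvm_userspace_memory_region region = {
		.slot            = 1,	/* illustrative slot number */
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (unsigned long)bar,
	};

	/*
	 * gfn_to_pfn() on this slot must treat the backing pfns as mmio,
	 * which is what the PageReserved() check below makes reliable.
	 */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}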
virt/kvm/kvm_main.c

index 7dd9b0b85e4eea1aafaff116e993e6392c9cc173..5eb96c7c8d7a608f9968a086e6b6ab71bc16655a 100644 (file)
@@ -76,6 +76,14 @@ static inline int valid_vcpu(int n)
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
+static inline int is_mmio_pfn(pfn_t pfn)
+{
+       if (pfn_valid(pfn))
+               return PageReserved(pfn_to_page(pfn));
+
+       return true;
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
@@ -740,7 +748,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
                }
 
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               BUG_ON(pfn_valid(pfn));
+               BUG_ON(!is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);
 
@@ -754,10 +762,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
        pfn_t pfn;
 
        pfn = gfn_to_pfn(kvm, gfn);
-       if (pfn_valid(pfn))
+       if (!is_mmio_pfn(pfn))
                return pfn_to_page(pfn);
 
-       WARN_ON(!pfn_valid(pfn));
+       WARN_ON(is_mmio_pfn(pfn));
 
        get_page(bad_page);
        return bad_page;
@@ -773,7 +781,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (pfn_valid(pfn))
+       if (!is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -799,7 +807,7 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       if (pfn_valid(pfn)) {
+       if (!is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
@@ -809,14 +817,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       if (pfn_valid(pfn))
+       if (!is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       if (pfn_valid(pfn))
+       if (!is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);