[PATCH] KVM: MMU: Make kvm_mmu_alloc_page() return a kvm_mmu_page pointer
author    Avi Kivity <avi@qumranet.com>
          Sat, 6 Jan 2007 00:36:42 +0000 (16:36 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
          Sat, 6 Jan 2007 07:55:24 +0000 (23:55 -0800)
This allows further manipulation of the shadow page table.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/kvm/mmu.c
drivers/kvm/paging_tmpl.h
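
The calling-convention change can be condensed into a small, self-contained sketch. This is plain user-space C, not kernel code: mmu_page, alloc_shadow_page() and the mask constants below are simplified stand-ins (assumptions for illustration only) for kvm_mmu_page, kvm_mmu_alloc_page() and the PT_*_MASK bits, and the allocator uses calloc() rather than the vcpu free list. It demonstrates the point the commit message states: with a struct pointer in hand, the caller can both derive the PTE from the page's hpa and keep manipulating the page's other fields, which a bare hpa_t return value did not allow.

/*
 * Hedged sketch only -- names and the placeholder address are made up for
 * this example and do not come from the kernel sources.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PRESENT_MASK  (1ULL << 0)
#define WRITABLE_MASK (1ULL << 1)
#define USER_MASK     (1ULL << 2)

struct mmu_page {
	uint64_t hpa;            /* host physical address of the shadow page */
	uint64_t slot_bitmap;    /* extra metadata the caller may want to touch */
	int      global;
	uint64_t *parent_pte;
};

/* New-style allocator: hand back the whole descriptor, not just the hpa. */
static struct mmu_page *alloc_shadow_page(uint64_t *parent_pte)
{
	struct mmu_page *page = calloc(1, sizeof(*page));

	if (!page)
		return NULL;         /* was: return INVALID_PAGE */
	page->hpa = 0x1000;          /* placeholder address for the sketch */
	page->global = 1;
	page->parent_pte = parent_pte;
	return page;
}

int main(void)
{
	uint64_t table[512] = { 0 };
	unsigned index = 3;

	struct mmu_page *new_table = alloc_shadow_page(&table[index]);
	if (!new_table)
		return -1;           /* corresponds to the -ENOMEM paths in the patch */

	/* Build the PTE from the hpa, as nonpaging_map() does after the change. */
	table[index] = new_table->hpa | PRESENT_MASK | WRITABLE_MASK | USER_MASK;

	/*
	 * The point of the patch: the caller still holds the descriptor and can
	 * go on manipulating the shadow page's metadata afterwards.
	 */
	new_table->slot_bitmap |= 1ULL << 0;

	printf("pte=%#llx global=%d\n",
	       (unsigned long long)table[index], new_table->global);
	free(new_table);
	return 0;
}

In the patch itself this is what lets nonpaging_map(), mmu_alloc_roots() and FNAME(fetch) keep obtaining the hpa via new_table->page_hpa / shadow_page->page_hpa while leaving room for further operations on the same struct kvm_mmu_page.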

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1dcbbd5116609c4da672f2acfd04ac63567f2abb..da4d7ddb9bdcc9d8450bd17e6bbe3cd8e250b0e3 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -292,12 +292,13 @@ static int is_empty_shadow_page(hpa_t page_hpa)
        return 1;
 }
 
-static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+                                              u64 *parent_pte)
 {
        struct kvm_mmu_page *page;
 
        if (list_empty(&vcpu->free_pages))
-               return INVALID_PAGE;
+               return NULL;
 
        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
        list_del(&page->link);
@@ -306,7 +307,7 @@ static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
        page->slot_bitmap = 0;
        page->global = 1;
        page->parent_pte = parent_pte;
-       return page->page_hpa;
+       return page;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -402,19 +403,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                }
 
                if (table[index] == 0) {
-                       hpa_t new_table = kvm_mmu_alloc_page(vcpu,
-                                                            &table[index]);
+                       struct kvm_mmu_page *new_table;
 
-                       if (!VALID_PAGE(new_table)) {
+                       new_table = kvm_mmu_alloc_page(vcpu, &table[index]);
+                       if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }
 
-                       if (level == PT32E_ROOT_LEVEL)
-                               table[index] = new_table | PT_PRESENT_MASK;
-                       else
-                               table[index] = new_table | PT_PRESENT_MASK |
-                                               PT_WRITABLE_MASK | PT_USER_MASK;
+                       table[index] = new_table->page_hpa | PT_PRESENT_MASK
+                               | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
@@ -454,7 +452,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
-               root = kvm_mmu_alloc_page(vcpu, NULL);
+               root = kvm_mmu_alloc_page(vcpu, NULL)->page_hpa;
                vcpu->mmu.root_hpa = root;
                return;
        }
@@ -463,7 +461,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               root = kvm_mmu_alloc_page(vcpu, NULL);
+               root = kvm_mmu_alloc_page(vcpu, NULL)->page_hpa;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 7af49ae80e5af0d533ead94be03017b892a34c5f..11cac9ddf26a5ba4a07ba75b75f3f2ee8d502a39 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -179,6 +179,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
+               struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
 
                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
@@ -204,9 +205,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        return shadow_ent;
                }
 
-               shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
-               if (!VALID_PAGE(shadow_addr))
+               shadow_page = kvm_mmu_alloc_page(vcpu, shadow_ent);
+               if (!shadow_page)
                        return ERR_PTR(-ENOMEM);
+               shadow_addr = shadow_page->page_hpa;
                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;