xen: lazy-mmu operations
author     Jeremy Fitzhardinge <jeremy@xensource.com>
           Wed, 18 Jul 2007 01:37:06 +0000 (18:37 -0700)
committer  Jeremy Fitzhardinge <jeremy@goop.org>
           Wed, 18 Jul 2007 15:47:44 +0000 (08:47 -0700)
This patch uses the lazy-mmu hooks to batch mmu operations where
possible.  This is primarily useful for batching operations applied to
active pagetables, which happens during mprotect, munmap, mremap and
the like (mmap does not do bulk pagetable operations, so it isn't
helped).

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
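
Background for the hunks below (not part of the patch): the batching works by
reserving a slot in a per-CPU multicall buffer with xen_mc_entry() and then
asking for it to be issued with xen_mc_issue().  Outside a lazy-MMU section
the buffer is flushed immediately; inside one, it is only flushed when the
buffer fills up or the lazy section ends.  The following is a minimal
userspace toy model of that batch-and-flush pattern, not the kernel code;
mc_entry(), mc_issue(), mc_flush() and the lazy_mmu flag are illustrative
stand-ins for the real helpers in arch/i386/xen/multicalls.c.

/*
 * Toy model (plain C, runs in userspace) of the batching pattern the
 * patch applies.  Callers queue operations into a fixed-size batch; the
 * batch is only flushed immediately when no lazy-MMU section is active.
 */
#include <stdio.h>
#include <stdbool.h>

#define MC_BATCH 32

struct op { int cmd; unsigned long arg; };

static struct op batch[MC_BATCH];
static int batch_len;
static bool lazy_mmu;			/* true while in a lazy-MMU section */

static void mc_flush(void)
{
	if (batch_len == 0)
		return;
	/* In the real code this is one multicall hypercall for all entries. */
	printf("flush: issuing %d ops in one hypercall\n", batch_len);
	batch_len = 0;
}

static struct op *mc_entry(void)
{
	if (batch_len == MC_BATCH)	/* buffer full: flush before reusing it */
		mc_flush();
	return &batch[batch_len++];
}

static void mc_issue(void)
{
	if (!lazy_mmu)			/* outside a lazy section: flush now */
		mc_flush();
	/* inside a lazy section: keep batching until the section ends */
}

static void set_pte_like_op(unsigned long val)
{
	struct op *op = mc_entry();
	op->cmd = 1;
	op->arg = val;
	mc_issue();
}

int main(void)
{
	set_pte_like_op(0x1000);	/* not lazy: flushed immediately */

	lazy_mmu = true;		/* e.g. during mprotect/munmap/mremap */
	for (unsigned long i = 0; i < 5; i++)
		set_pte_like_op(i);	/* queued, no hypercall yet */
	lazy_mmu = false;
	mc_flush();			/* leaving lazy mode flushes the batch */
	return 0;
}
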
arch/i386/xen/enlighten.c
arch/i386/xen/mmu.c
arch/i386/xen/multicalls.c

diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index a1124b7f1d14e02e74567f4c12003b017db33244..031dc1dcf8195f62274cc752c20e20d828aa202d 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -472,28 +472,38 @@ static void xen_apic_write(unsigned long reg, unsigned long val)
 
 static void xen_flush_tlb(void)
 {
-       struct mmuext_op op;
+       struct mmuext_op *op;
+       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 
-       op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
+       op = mcs.args;
+       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static void xen_flush_tlb_single(unsigned long addr)
 {
-       struct mmuext_op op;
+       struct mmuext_op *op;
+       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 
-       op.cmd = MMUEXT_INVLPG_LOCAL;
-       op.arg1.linear_addr = addr & PAGE_MASK;
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
+       op = mcs.args;
+       op->cmd = MMUEXT_INVLPG_LOCAL;
+       op->arg1.linear_addr = addr & PAGE_MASK;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va)
 {
-       struct mmuext_op op;
+       struct {
+               struct mmuext_op op;
+               cpumask_t mask;
+       } *args;
        cpumask_t cpumask = *cpus;
+       struct multicall_space mcs;
 
        /*
         * A couple of (to be removed) sanity checks:
@@ -510,17 +520,21 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
        if (cpus_empty(cpumask))
                return;
 
+       mcs = xen_mc_entry(sizeof(*args));
+       args = mcs.args;
+       args->mask = cpumask;
+       args->op.arg2.vcpumask = &args->mask;
+
        if (va == TLB_FLUSH_ALL) {
-               op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-               op.arg2.vcpumask = (void *)cpus;
+               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        } else {
-               op.cmd = MMUEXT_INVLPG_MULTI;
-               op.arg1.linear_addr = va;
-               op.arg2.vcpumask = (void *)cpus;
+               args->op.cmd = MMUEXT_INVLPG_MULTI;
+               args->op.arg1.linear_addr = va;
        }
 
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
+       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static unsigned long xen_read_cr2(void)
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
index f431cf14e6442e0b66baa8faf45259896a621b24..4ae038aa6c244b91ac0d244f84c62ade47fee424 100644
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -98,12 +98,20 @@ void make_lowmem_page_readwrite(void *vaddr)
 
 void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
-       struct mmu_update u;
+       struct multicall_space mcs;
+       struct mmu_update *u;
 
-       u.ptr = virt_to_machine(ptr).maddr;
-       u.val = pmd_val_ma(val);
-       if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
-               BUG();
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*u));
+       u = mcs.args;
+       u->ptr = virt_to_machine(ptr).maddr;
+       u->val = pmd_val_ma(val);
+       MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
 
 /*
@@ -146,20 +154,38 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
-       if ((mm != current->mm && mm != &init_mm) ||
-           HYPERVISOR_update_va_mapping(addr, pteval, 0) != 0)
-               xen_set_pte(ptep, pteval);
+       if (mm == current->mm || mm == &init_mm) {
+               if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+                       struct multicall_space mcs;
+                       mcs = xen_mc_entry(0);
+
+                       MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+                       xen_mc_issue(PARAVIRT_LAZY_MMU);
+                       return;
+               } else
+                       if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+                               return;
+       }
+       xen_set_pte(ptep, pteval);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
-       struct mmu_update u;
+       struct multicall_space mcs;
+       struct mmu_update *u;
 
-       u.ptr = virt_to_machine(ptr).maddr;
-       u.val = pud_val_ma(val);
-       if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
-               BUG();
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*u));
+       u = mcs.args;
+       u->ptr = virt_to_machine(ptr).maddr;
+       u->val = pud_val_ma(val);
+       MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
diff --git a/arch/i386/xen/multicalls.c b/arch/i386/xen/multicalls.c
index d4015a9ed46c04bf7f85b224491aca85be0d4cdc..c837e8e463db3567c0336357e0f0e0590af1bdaf 100644
--- a/arch/i386/xen/multicalls.c
+++ b/arch/i386/xen/multicalls.c
@@ -26,8 +26,8 @@
 
 #include "multicalls.h"
 
-#define MC_BATCH       8
-#define MC_ARGS                (MC_BATCH * 32 / sizeof(u64))
+#define MC_BATCH       32
+#define MC_ARGS                (MC_BATCH * 16 / sizeof(u64))
 
 struct mc_buffer {
        struct multicall_entry entries[MC_BATCH];