AMD IOMMU: add branch hints to completion wait checks
author Joerg Roedel <joerg.roedel@amd.com>
Thu, 4 Sep 2008 17:01:02 +0000 (19:01 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 19 Sep 2008 10:59:08 +0000 (12:59 +0200)
This patch adds branch hints to the checks for whether a completion_wait is
necessary. The completion_waits in the mapping paths are unlikely because
they will only happen on software implementations of AMD IOMMU, which
don't exist today, or with lazy IO/TLB flushing when the allocator wraps
around the address space. With lazy IO/TLB flushing the completion_wait
in the unmapping path is unlikely too.
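
The hint relies on GCC's __builtin_expect(), which the kernel wraps as
likely()/unlikely(). Below is a minimal, standalone sketch of that wrapper
and of how it marks a rarely-true need_sync check; the fake_iommu struct
and the fake_* helpers are illustrative names only and are not part of
this patch.

/*
 * Minimal sketch (not from the patch): how likely()/unlikely() are
 * typically defined on top of GCC's __builtin_expect(), and how the
 * hint is applied around a rarely-true need_sync check.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

struct fake_iommu {
	int need_sync;	/* rarely set on the mapping fast path */
};

static void fake_completion_wait(struct fake_iommu *iommu)
{
	/* expensive hardware synchronization would go here */
}

static void fake_map_single(struct fake_iommu *iommu)
{
	/*
	 * The hint lets the compiler lay out the code so that the
	 * common (need_sync == 0) case falls straight through and the
	 * wait is placed out of line.
	 */
	if (unlikely(iommu->need_sync))
		fake_completion_wait(iommu);
}

int main(void)
{
	struct fake_iommu iommu = { .need_sync = 0 };

	fake_map_single(&iommu);
	return 0;
}

The functional behaviour is unchanged; only the expected branch direction,
and therefore the generated code layout and static branch prediction,
differ.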

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/amd_iommu.c

index 679f2a8e22ee562833fbf8ce3715ec0331fcc3b4..d743aa0adccc734fdee56d3dc459caa5153b4947 100644 (file)
@@ -876,7 +876,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
        if (addr == bad_dma_address)
                goto out;
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
 out:
@@ -905,7 +905,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
        __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);
@@ -968,7 +968,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                        goto unmap;
        }
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
 out:
@@ -1014,7 +1014,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                s->dma_address = s->dma_length = 0;
        }
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);
@@ -1061,7 +1061,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
                goto out;
        }
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
 out:
@@ -1093,7 +1093,7 @@ static void free_coherent(struct device *dev, size_t size,
 
        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-       if (iommu->need_sync)
+       if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);