iommu sg merging: IA64: make sba_iommu respect the segment size limits
author FUJITA Tomonori <tomof@acm.org>
Tue, 5 Feb 2008 06:27:58 +0000 (22:27 -0800)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 5 Feb 2008 17:44:10 +0000 (09:44 -0800)
This patch makes sba_iommu respect the per-device segment size limit when
merging sg lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
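
The limit consulted by this patch comes from the generic DMA layer: a driver
advertises the largest DMA segment its device can handle with
dma_set_max_seg_size(), and sba_coalesce_chunks() reads it back through
dma_get_max_seg_size(). A minimal sketch of the driver side, assuming a
hypothetical example_probe() and a 64 KB limit (neither is part of this
patch):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* The device core does not allocate dev->dma_parms, so the driver
	 * must point it at storage it owns before setting parameters. */
	static struct device_dma_parameters example_dma_parms;

	static int example_probe(struct device *dev)
	{
		dev->dma_parms = &example_dma_parms;

		/* Never merge sg entries into a DMA segment larger than
		 * 64 KB; the new check in sba_coalesce_chunks() enforces
		 * this while building the coalesced list. */
		return dma_set_max_seg_size(dev, 65536);
	}

Devices that set no limit still get a conservative default from
dma_get_max_seg_size() (64 KB in kernels of this era), so the new check is
always well defined.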
arch/ia64/hp/common/sba_iommu.c

index 45bf04eb7d705c2371d8907e8ec13cc7ab5d6be5..c412fe63f8ecbed0bc6549bebe71bcd852b4ad1f 100644
@@ -1265,7 +1265,7 @@ sba_fill_pdir(
  * the sglist do both.
  */
 static SBA_INLINE int
-sba_coalesce_chunks( struct ioc *ioc,
+sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
        struct scatterlist *startsg,
        int nents)
 {
@@ -1275,6 +1275,7 @@ sba_coalesce_chunks( struct ioc *ioc,
        struct scatterlist *dma_sg;        /* next DMA stream head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;
+       unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
        while (nents > 0) {
                unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1314,6 +1315,9 @@ sba_coalesce_chunks( struct ioc *ioc,
                            > DMA_CHUNK_SIZE)
                                break;
 
+                       if (dma_len + startsg->length > max_seg_size)
+                               break;
+
                        /*
                        ** Then look for virtually contiguous blocks.
                        **
@@ -1441,7 +1445,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
        ** w/o this association, we wouldn't have coherent DMA!
        ** Access to the virtual address is what forces a two pass algorithm.
        */
-       coalesced = sba_coalesce_chunks(ioc, sglist, nents);
+       coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
 
        /*
        ** Program the I/O Pdir
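
The two-pass structure referred to in the comment above is: pass one
coalesces the scatterlist into the smallest number of DMA segments the
device allows, pass two programs the I/O pdir for each segment. The effect
of the new max_seg_size check on pass one can be modeled in isolation; the
userspace sketch below (count_mappings() and its inputs are illustrative,
and the real loop also breaks on DMA_CHUNK_SIZE and virtual discontiguity)
counts how many mappings the coalescer produces:

	#include <stdio.h>

	/* Simplified model of the merge loop in sba_coalesce_chunks():
	 * fold consecutive sg entries into one DMA segment until adding
	 * the next entry would exceed max_seg_size, then start a new
	 * segment -- exactly the "break" added by this patch. */
	static int count_mappings(const unsigned long *lengths, int nents,
	                          unsigned long max_seg_size)
	{
		int n_mappings = 0;
		int i = 0;

		while (i < nents) {
			unsigned long dma_len = lengths[i++];

			while (i < nents &&
			       dma_len + lengths[i] <= max_seg_size)
				dma_len += lengths[i++];

			n_mappings++;  /* one coalesced segment finished */
		}
		return n_mappings;
	}

	int main(void)
	{
		/* Hypothetical sg list: four 24 KB entries, 64 KB limit.
		 * Only two entries fit per segment, so two mappings. */
		unsigned long lengths[] = { 24576, 24576, 24576, 24576 };

		printf("%d mappings\n", count_mappings(lengths, 4, 65536));
		return 0;
	}

Without the new check, all four entries could have merged into a single
96 KB segment, overrunning a device whose limit is 64 KB.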