powerpc: Merge 32 and 64-bit dma code
author Becky Bruce <becky.bruce@freescale.com>
Fri, 12 Sep 2008 10:34:46 +0000 (10:34 +0000)
committer Kumar Gala <galak@kernel.crashing.org>
Wed, 24 Sep 2008 21:26:45 +0000 (16:26 -0500)
We essentially adopt the 64-bit dma code, with some changes to support
32-bit systems, including HIGHMEM.  DMA functions on 32-bit are now
invoked via accessor functions that call the correct op for a device
based on archdata dma_ops.  If there is no archdata dma_ops, this
defaults to dma_direct_ops.

In addition, the dma_map/unmap_page functions are added to dma_ops
because we can't just fall back on map/unmap_single when HIGHMEM is
enabled. In the case of dma_direct_*, we stop using map/unmap_single
and just use the page version - this saves a lot of ugly
ifdeffing.  We leave map/unmap_single in the dma_ops definition,
though, because they are needed by the iommu code, which does not
implement map/unmap_page.  Ideally, going forward, we will completely
eliminate map/unmap_single and just have map/unmap_page, if it's
workable for 64-bit.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
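
The dispatch scheme described above -- per-device ops hung off archdata, a
direct-ops default on 32-bit, and map_single falling back on map_page -- is
easy to model outside the kernel.  The following is a minimal standalone C
sketch of that pattern; the names mirror the kernel's, but this is an
illustrative userspace model, not the patched kernel code.

    #include <stdio.h>
    #include <stddef.h>

    struct dma_ops {
            unsigned long (*map_single)(void *cpu_addr, size_t size);
            unsigned long (*map_page)(void *page, unsigned long offset,
                                      size_t size);
    };

    struct device {
            struct dma_ops *dma_ops;        /* models dev->archdata.dma_ops */
    };

    static unsigned long direct_map_page(void *page, unsigned long offset,
                                         size_t size)
    {
            (void)size;                     /* no sync work in this model */
            return (unsigned long)page + offset;  /* identity "bus" address */
    }

    /* like dma_direct_ops after this patch: only the page variant exists */
    static struct dma_ops dma_direct_ops = { .map_page = direct_map_page };

    static struct dma_ops *get_dma_ops(struct device *dev)
    {
            /* the 32-bit default added by the patch; 64-bit returns NULL */
            if (dev == NULL || dev->dma_ops == NULL)
                    return &dma_direct_ops;
            return dev->dma_ops;
    }

    static unsigned long dma_map_single(struct device *dev, void *cpu_addr,
                                        size_t size)
    {
            struct dma_ops *ops = get_dma_ops(dev);

            if (ops->map_single)
                    return ops->map_single(cpu_addr, size);

            /* only map_page exists: fall back on it, as the new
             * dma_map_single_attrs() wrapper does */
            return ops->map_page(cpu_addr, 0, size);
    }

    int main(void)
    {
            struct device dev = { NULL };   /* no archdata ops installed */
            char buf[64];

            printf("bus address: %#lx\n",
                   dma_map_single(&dev, buf, sizeof(buf)));
            return 0;
    }

Installing a different table in dev->dma_ops (the iommu case on 64-bit)
changes the behaviour of every wrapper without touching any caller, which
is the point of routing everything through get_dma_ops().
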
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/pci.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c

index c7ca45f97dd210e9ef611ba8d4877b6f64067021..fddb229bd74f270231c570726d7e15bef0a201f5 100644
@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
        if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
                                struct dma_attrs *attrs);
        int             (*dma_supported)(struct device *dev, u64 mask);
        int             (*set_dma_mask)(struct device *dev, u64 dma_mask);
+       dma_addr_t      (*map_page)(struct device *dev, struct page *page,
+                               unsigned long offset, size_t size,
+                               enum dma_data_direction direction,
+                               struct dma_attrs *attrs);
+       void            (*unmap_page)(struct device *dev,
+                               dma_addr_t dma_address, size_t size,
+                               enum dma_data_direction direction,
+                               struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
        /* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
         * only ISA DMA device we support is the floppy and we have a hack
         * in the floppy driver directly to get a device for us.
         */
-       if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+
+       if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
                return NULL;
+#else
+               /* Use default on 32-bit if dma_ops is not set up */
+               /* TODO: Long term, we should fix drivers so that dev and
+                * archdata dma_ops are set up for all buses.
+                */
+               return &dma_direct_ops;
+#endif
+       }
+
        return dev->archdata.dma_ops;
 }
 
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
        return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page.   Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
                                              void *cpu_addr,
                                              size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
        BUG_ON(!dma_ops);
-       return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+       if (dma_ops->map_single)
+               return dma_ops->map_single(dev, cpu_addr, size, direction,
+                                          attrs);
+
+       return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+                                (unsigned long)cpu_addr % PAGE_SIZE, size,
+                                direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
        BUG_ON(!dma_ops);
-       dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+       if (dma_ops->unmap_single) {
+               dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+               return;
+       }
+
+       dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
        BUG_ON(!dma_ops);
+
+       if (dma_ops->map_page)
+               return dma_ops->map_page(dev, page, offset, size, direction,
+                                        attrs);
+
        return dma_ops->map_single(dev, page_address(page) + offset, size,
-                       direction, attrs);
+                                  direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
        BUG_ON(!dma_ops);
+
+       if (dma_ops->unmap_page) {
+               dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+               return;
+       }
+
        dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
 
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
        dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)       (1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-
-       return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t * dma_handle,
-                                      gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-       return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-       void *ret;
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-       if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-               gfp |= GFP_DMA;
-
-       ret = (void *)__get_free_pages(gfp, get_order(size));
-
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_bus(ret);
-       }
-
-       return ret;
-#endif
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                 dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-       __dma_free_coherent(size, vaddr);
-#else
-       free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       __dma_sync(ptr, size, direction);
-
-       return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                   size_t size,
-                                   enum dma_data_direction direction)
-{
-       /* We do nothing. */
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size,
-            enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       __dma_sync_page(page, offset, size, direction);
-
-       return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-                                 size_t size,
-                                 enum dma_data_direction direction)
-{
-       /* We do nothing. */
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-          enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-               sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-       }
-
-       return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                               int nhwentries,
-                               enum dma_data_direction direction)
-{
-       /* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
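
The map_single-to-map_page fallback in the hunk above relies on a lowmem
kernel virtual address decomposing into a page plus an in-page offset
(virt_to_page() and cpu_addr % PAGE_SIZE).  A minimal standalone
illustration of that split, assuming a 4 KiB page size:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SZ 4096UL                  /* stands in for PAGE_SIZE */

    int main(void)
    {
            uintptr_t addr = 0x12345abcUL;  /* arbitrary lowmem address */
            uintptr_t base = addr & ~(PAGE_SZ - 1); /* page virt_to_page() finds */
            uintptr_t off  = addr % PAGE_SZ;        /* offset given to map_page() */

            printf("page base %#lx + offset %#lx = %#lx\n",
                   (unsigned long)base, (unsigned long)off,
                   (unsigned long)(base + off));
            return 0;
    }

Under HIGHMEM the decomposition cannot be run in reverse: a struct page may
have no kernel virtual address at all, which is why map_page/unmap_page
become first-class ops here instead of being emulated via map_single.
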
index 893aafd87fde2b20ce7eda9dee61f30b79c9ec91..2740c44ff71766e5e9dad9c01b33e7377d0d4bbd 100644
@@ -88,8 +88,6 @@ struct machdep_calls {
        unsigned long   (*tce_get)(struct iommu_table *tbl,
                                    long index);
        void            (*tce_flush)(struct iommu_table *tbl);
-       void            (*pci_dma_dev_setup)(struct pci_dev *dev);
-       void            (*pci_dma_bus_setup)(struct pci_bus *bus);
 
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
                                   unsigned long flags);
@@ -101,6 +99,9 @@ struct machdep_calls {
 #endif
 #endif /* CONFIG_PPC64 */
 
+       void            (*pci_dma_dev_setup)(struct pci_dev *dev);
+       void            (*pci_dma_bus_setup)(struct pci_bus *bus);
+
        int             (*probe)(void);
        void            (*setup_arch)(void); /* Optional, may be NULL */
        void            (*init_early)(void);
index a05a942b1c2573d4dae0ba35e2bbf24d6b3d2f14..0e52c7828ea498d48c34a37155dba0f57b39a349 100644
@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+#else  /* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()      NULL
+#endif
+
 #ifdef CONFIG_PPC64
 
 /*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 #define PCI_DISABLE_MWI
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
-
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
                                        enum pci_dma_burst_strategy *strat,
                                        unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
        *strat = PCI_DMA_BURST_MULTIPLE;
        *strategy_parameter = cacheline_size;
 }
-#else  /* CONFIG_PCI */
-#define set_pci_dma_ops(d)
-#define get_pci_dma_ops()      NULL
 #endif
 
 #else /* 32-bit */
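
Moving set_pci_dma_ops()/get_pci_dma_ops() out of the CONFIG_PPC64 block
while keeping the empty-macro stubs for !CONFIG_PCI is an instance of a
standard kernel idiom: a configured-out feature provides do-nothing macros
with the same call shape so call sites compile without #ifdefs.  A tiny
standalone model of the idiom, with HAVE_PCI standing in for CONFIG_PCI:

    #include <stdio.h>

    /* flip this on to model CONFIG_PCI=y */
    /* #define HAVE_PCI 1 */

    #ifdef HAVE_PCI
    static int *pci_ops;
    static void set_pci_ops(int *ops) { pci_ops = ops; }
    static int *get_pci_ops(void)     { return pci_ops; }
    #else
    #define set_pci_ops(d)                  /* expands to nothing */
    #define get_pci_ops()   NULL
    #endif

    int main(void)
    {
            set_pci_ops(NULL);              /* compiles either way */
            printf("pci ops: %p\n", (void *)get_pci_ops());
            return 0;
    }
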
index 09b3cabf2f9157695704fe9c928f242a705fa443..fdb58253fa5b15727580cdc2119eef4512596411 100644
@@ -70,10 +70,10 @@ extra-$(CONFIG_8xx)         := head_8xx.o
 extra-y                                += vmlinux.lds
 
 obj-y                          += time.o prom.o traps.o setup-common.o \
-                                  udbg.o misc.o io.o \
+                                  udbg.o misc.o io.o dma.o \
                                   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)            += entry_32.o setup_32.o
-obj-$(CONFIG_PPC64)            += dma.o dma-iommu.o iommu.o
+obj-$(CONFIG_PPC64)            += dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)        += prom_init.o
 obj-$(CONFIG_MODULES)          += ppc_ksyms.o
index 124f86743bdef1baa22c61f2a33a81534424d418..41fdd48bf43393186bb897bc0ca041320f5a3041 100644
  * This implementation supports a per-device offset that can be applied if
  * the address at which memory is visible to devices is not 0. Platform code
  * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
+ * default the offset is PCI_DRAM_OFFSET.
  */
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-       return (unsigned long)dev->archdata.dma_data;
+       if (dev)
+               return (unsigned long)dev->archdata.dma_data;
+
+       return PCI_DRAM_OFFSET;
 }
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag)
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+                               dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       return __dma_alloc_coherent(size, dma_handle, flag);
+#else
        struct page *page;
        void *ret;
        int node = dev_to_node(dev);
 
+       /* ignore region specifiers */
+       flag  &= ~(__GFP_HIGHMEM);
+
        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
@@ -39,27 +48,17 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
        *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
 
        return ret;
+#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
-                                    void *vaddr, dma_addr_t dma_handle)
+void dma_direct_free_coherent(struct device *dev, size_t size,
+                             void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       __dma_free_coherent(size, vaddr);
+#else
        free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-                                       size_t size,
-                                       enum dma_data_direction direction,
-                                       struct dma_attrs *attrs)
-{
-       return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                   size_t size,
-                                   enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
-{
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -85,20 +84,44 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
+#ifdef CONFIG_PPC64
        /* Could be improved to check for memory though it better be
         * done via some global so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= DMA_32BIT_MASK;
+#else
+       return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+                                            struct page *page,
+                                            unsigned long offset,
+                                            size_t size,
+                                            enum dma_data_direction dir,
+                                            struct dma_attrs *attrs)
+{
+       BUG_ON(dir == DMA_NONE);
+       __dma_sync_page(page, offset, size, dir);
+       return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+                                        dma_addr_t dma_address,
+                                        size_t size,
+                                        enum dma_data_direction direction,
+                                        struct dma_attrs *attrs)
+{
 }
 
 struct dma_mapping_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent  = dma_direct_free_coherent,
-       .map_single     = dma_direct_map_single,
-       .unmap_single   = dma_direct_unmap_single,
        .map_sg         = dma_direct_map_sg,
        .unmap_sg       = dma_direct_unmap_sg,
        .dma_supported  = dma_direct_dma_supported,
+       .map_page       = dma_direct_map_page,
+       .unmap_page     = dma_direct_unmap_page,
 };
 EXPORT_SYMBOL(dma_direct_ops);
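
get_dma_direct_offset() makes dma_direct_ops a per-device family of linear
mappings: bus address = CPU physical address + an offset stored in
archdata, with PCI_DRAM_OFFSET as the default when no device is supplied.
A standalone model of that arithmetic follows; the offset values,
including the PCI_DRAM_OFFSET stand-in, are made up for illustration.

    #include <stdio.h>

    #define PCI_DRAM_OFFSET 0x80000000UL    /* assumed example value */

    struct device {
            unsigned long dma_data;         /* models dev->archdata.dma_data */
    };

    static unsigned long get_dma_direct_offset(const struct device *dev)
    {
            if (dev)
                    return dev->dma_data;

            return PCI_DRAM_OFFSET;         /* NULL-device default */
    }

    int main(void)
    {
            struct device dev = { 0x40000000UL };
            unsigned long phys = 0x1000UL;  /* CPU physical address */

            printf("bus = %#lx (per-device offset)\n",
                   phys + get_dma_direct_offset(&dev));
            printf("bus = %#lx (no device, PCI_DRAM_OFFSET)\n",
                   phys + get_dma_direct_offset(NULL));
            return 0;
    }
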
index ea0c61e09b7640553cf70c92b184339ff3bfd0bc..52ccfed416ade534745adebdd27680fbda16b96f 100644
@@ -56,6 +56,34 @@ resource_size_t isa_mem_base;
 /* Default PCI flags is 0 */
 unsigned int ppc_pci_flags;
 
+static struct dma_mapping_ops *pci_dma_ops;
+
+void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
+{
+       pci_dma_ops = dma_ops;
+}
+
+struct dma_mapping_ops *get_pci_dma_ops(void)
+{
+       return pci_dma_ops;
+}
+EXPORT_SYMBOL(get_pci_dma_ops);
+
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+       return dma_set_mask(&dev->dev, mask);
+}
+
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+       int rc;
+
+       rc = dma_set_mask(&dev->dev, mask);
+       dev->dev.coherent_dma_mask = dev->dma_mask;
+
+       return rc;
+}
+
 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 {
        struct pci_controller *phb;
@@ -180,6 +208,26 @@ char __devinit *pcibios_setup(char *str)
        return str;
 }
 
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+       struct dev_archdata *sd = &dev->dev.archdata;
+
+       sd->of_node = pci_device_to_OF_node(dev);
+
+       DBG("PCI: device %s OF node: %s\n", pci_name(dev),
+           sd->of_node ? sd->of_node->full_name : "<none>");
+
+       sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_PPC32
+       sd->dma_data = (void *)PCI_DRAM_OFFSET;
+#endif
+       set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+       if (ppc_md.pci_dma_dev_setup)
+               ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
+
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
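
pcibios_setup_new_device() above applies a two-stage policy: a new device
first inherits the bus-wide pci_dma_ops default, then an optional platform
hook (ppc_md.pci_dma_dev_setup) may override it per device.  A standalone
model of that flow, with shortened, hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    struct ops { const char *name; };

    static struct ops direct_ops = { "direct" };
    static struct ops iommu_ops  = { "iommu" };

    /* bus-wide default, as installed by set_pci_dma_ops() */
    static struct ops *pci_dma_ops = &direct_ops;

    struct pdev { struct ops *dma_ops; };

    /* stands in for ppc_md.pci_dma_dev_setup; opts this device in */
    static void platform_dev_setup(struct pdev *dev)
    {
            dev->dma_ops = &iommu_ops;
    }

    static void (*pci_dma_dev_setup)(struct pdev *) = platform_dev_setup;

    static void setup_new_device(struct pdev *dev)
    {
            dev->dma_ops = pci_dma_ops;     /* stage 1: bus-wide default */
            if (pci_dma_dev_setup)
                    pci_dma_dev_setup(dev); /* stage 2: optional override */
    }

    int main(void)
    {
            struct pdev dev = { NULL };

            setup_new_device(&dev);
            printf("device uses %s ops\n", dev.dma_ops->name);
            return 0;
    }
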
index 88db4ffaf11c772e5f73ccbbca6cd96f07eb2c88..174b77ee18ff58644447a20e7b9c148536f9ecbd 100644
@@ -424,6 +424,7 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
        unsigned long io_offset;
        struct resource *res;
        int i;
+       struct pci_dev *dev;
 
        /* Hookup PHB resources */
        io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -457,6 +458,12 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
                        bus->resource[i+1] = res;
                }
        }
+
+       if (ppc_md.pci_dma_bus_setup)
+               ppc_md.pci_dma_bus_setup(bus);
+
+       list_for_each_entry(dev, &bus->devices, bus_list)
+               pcibios_setup_new_device(dev);
 }
 
 /* the next one is stolen from the alpha port... */
index 1f75bf03446f953f0ffa8fcca065b3dab5193ae4..8247cff1cb3e0dc313861d041bcf7afae3c559e4 100644
@@ -52,35 +52,6 @@ EXPORT_SYMBOL(pci_io_base);
 
 LIST_HEAD(hose_list);
 
-static struct dma_mapping_ops *pci_dma_ops;
-
-void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
-{
-       pci_dma_ops = dma_ops;
-}
-
-struct dma_mapping_ops *get_pci_dma_ops(void)
-{
-       return pci_dma_ops;
-}
-EXPORT_SYMBOL(get_pci_dma_ops);
-
-
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       return dma_set_mask(&dev->dev, mask);
-}
-
-int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       int rc;
-
-       rc = dma_set_mask(&dev->dev, mask);
-       dev->dev.coherent_dma_mask = dev->dma_mask;
-
-       return rc;
-}
-
 static void fixup_broken_pcnet32(struct pci_dev* dev)
 {
        if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -548,23 +519,6 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
 
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
-       struct dev_archdata *sd = &dev->dev.archdata;
-
-       sd->of_node = pci_device_to_OF_node(dev);
-
-       DBG("PCI: device %s OF node: %s\n", pci_name(dev),
-           sd->of_node ? sd->of_node->full_name : "<none>");
-
-       sd->dma_ops = pci_dma_ops;
-       set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
-       if (ppc_md.pci_dma_dev_setup)
-               ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
 void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 {
        struct pci_dev *dev;