- Taken from maemo.org N800 kernel package.
- Implementation specific to OMAP1 and OMAP2.
Signed-off-by: Trilok Soni <soni.trilok@gmail.com>
Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
obj-$(CONFIG_PM) += pm.o sleep.o
# DSP
-obj-$(CONFIG_OMAP_DSP) += mailbox_mach.o
+obj-$(CONFIG_OMAP_DSP) += mailbox_mach.o mmu.o
mailbox_mach-objs := mailbox.o
led-y := leds.o
--- /dev/null
+/*
+ * linux/arch/arm/mach-omap1/mmu.c
+ *
+ * Support for non-MPU OMAP1 MMUs.
+ *
+ * Copyright (C) 2002-2005 Nokia Corporation
+ *
+ * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
+ * and Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/rwsem.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include "mmu.h"
+#include <asm/tlbflush.h>
+
+static void *dspvect_page;
+#define DSP_INIT_PAGE 0xfff000
+
+/*
+ * Return the CAM_L bits that carry virtual-address tag for the given
+ * page size, or 0 for an unrecognized size.  All sizes share the L1
+ * tag bits; only the L2 portion varies.
+ */
+static unsigned int get_cam_l_va_mask(u16 pgsz)
+{
+	unsigned int l2_mask;
+
+	switch (pgsz) {
+	case OMAP_MMU_CAM_PAGESIZE_1MB:
+		l2_mask = OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
+		break;
+	case OMAP_MMU_CAM_PAGESIZE_64KB:
+		l2_mask = OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
+		break;
+	case OMAP_MMU_CAM_PAGESIZE_4KB:
+		l2_mask = OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
+		break;
+	case OMAP_MMU_CAM_PAGESIZE_1KB:
+		l2_mask = OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
+		break;
+	default:
+		return 0;
+	}
+
+	return OMAP_MMU_CAM_L_VA_TAG_L1_MASK | l2_mask;
+}
+
+/*
+ * Full 32-bit VA mask for a page size: CAM_H supplies VA bits above 22,
+ * CAM_L supplies bits 21..6 (see omap1_mmu_cam_ram_alloc()).
+ */
+#define get_cam_va_mask(pgsz) \
+ ((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
+ (u32)get_cam_l_va_mask(pgsz) << 6)
+
+/* Nesting count of enable calls on the DSP internal memory. */
+static int intmem_usecount;
+
+/*
+ * Safety net: if enable/disable calls were unbalanced, warn, force the
+ * use count back to zero and release the internal memory.
+ */
+void dsp_mem_usecount_clear(void)
+{
+ if (intmem_usecount != 0) {
+ printk(KERN_WARNING
+ "MMU: unbalanced memory request/release detected.\n"
+ " intmem_usecount is not zero at where "
+ "it should be! ... fixed to be zero.\n");
+ intmem_usecount = 0;
+ omap_dsp_release_mem();
+ }
+}
+
+/*
+ * Enable access to DSP internal memory at @addr, requesting the memory
+ * from the DSP side when the first user arrives.  Returns 0 on success,
+ * -EIO if @addr is not internal memory, or the omap_dsp_request_mem()
+ * result for the first user.
+ */
+static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
+{
+	if (!omap_mmu_internal_memory(mmu, addr))
+		return -EIO;
+
+	/* Only the first user actually requests the memory. */
+	if (intmem_usecount++ == 0)
+		return omap_dsp_request_mem();
+
+	return 0;
+}
+
+/*
+ * Counterpart of omap1_mmu_mem_enable(): drop one use of the internal
+ * memory and release it when the last user goes away.  Returns 0 on
+ * success or -EIO if @addr is not internal memory.
+ */
+static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
+{
+	if (!omap_mmu_internal_memory(mmu, addr))
+		return -EIO;
+
+	/* Release once the last user is gone. */
+	if (--intmem_usecount == 0)
+		omap_dsp_release_mem();
+
+	return 0;
+}
+
+/*
+ * Latch the selected TLB entry into the READ_CAM/READ_RAM shadow
+ * registers and copy the four 16-bit halves into @cr.
+ * NOTE(review): entry selection (lock base/victim) is assumed to be
+ * done by the generic caller beforehand -- confirm in omap_mmu_read_tlb().
+ */
+static inline void
+omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
+{
+ /* read a TLB entry */
+ omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);
+
+ cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
+ cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
+ cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
+ cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
+}
+
+/*
+ * Write @cr into the CAM/RAM staging registers.
+ * NOTE(review): no LD_TLB strobe is issued here -- presumably the
+ * generic layer triggers the actual TLB load; confirm.
+ */
+static inline void
+omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
+{
+ /* Set the CAM and RAM entries */
+ omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
+ omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
+ omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
+ omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
+}
+
+/*
+ * Format a human-readable dump of every TLB entry into @buf (sysfs
+ * style, written sequentially with sprintf), decoding page size,
+ * preserved/valid flags, VA/PA tags and access permission.  Lock base
+ * and victim positions are marked inline.  Returns bytes written.
+ */
+static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
+ struct omap_mmu_tlb_lock *tlb_lock)
+{
+ int i, len;
+
+ len = sprintf(buf, "P: preserved, V: valid\n"
+ "ety P V size cam_va ram_pa ap\n");
+ /* 00: P V 4KB 0x300000 0x10171800 FA */
+
+ for (i = 0; i < mmu->nr_tlb_entries; i++) {
+ struct omap_mmu_tlb_entry ent;
+ struct cam_ram_regset cr;
+ struct omap_mmu_tlb_lock entry_lock;
+ char *pgsz_str, *ap_str;
+
+ /* read a TLB entry */
+ entry_lock.base = tlb_lock->base;
+ entry_lock.victim = i;
+ omap_mmu_read_tlb(mmu, &entry_lock, &cr);
+
+ /* Decode the raw CAM/RAM halves into a tlb_entry view. */
+ ent.pgsz = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
+ ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
+ ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
+ ent.ap = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
+ ent.va = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
+ (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
+ ent.pa = (unsigned long)cr.ram_h << 16 |
+ (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);
+
+ pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
+ (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
+ (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
+ (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB) ? " 1KB":
+ " ???";
+ ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO":
+ (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA":
+ (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA":
+ "??";
+
+ if (i == tlb_lock->base)
+ len += sprintf(buf + len, "lock base = %d\n",
+ tlb_lock->base);
+ if (i == tlb_lock->victim)
+ len += sprintf(buf + len, "victim = %d\n",
+ tlb_lock->victim);
+ len += sprintf(buf + len,
+ /* 00: P V 4KB 0x300000 0x10171800 FA */
+ "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
+ i,
+ ent.prsvd ? 'P' : ' ',
+ ent.valid ? 'V' : ' ',
+ pgsz_str, ent.va, ent.pa, ap_str);
+ }
+
+ return len;
+}
+
+/*
+ * Install the mappings that must stay resident while the DSP MMU runs.
+ * On OMAP1 this is only the DSP vector page at DSP_INIT_PAGE.
+ * Returns the number of preserved entries installed.
+ */
+static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
+{
+ int n = 0;
+
+ exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
+
+ return n;
+}
+
+/* Undo exmap_setup_preserved_entries(). */
+static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
+{
+ exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
+}
+
+/*
+ * Allocate the one-page DSP vector table from the DMA zone and install
+ * the preserved TLB entries.  Returns 0 on success or -ENOMEM.
+ */
+static int omap1_mmu_startup(struct omap_mmu *mmu)
+{
+ dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
+ if (dspvect_page == NULL) {
+ printk(KERN_ERR "MMU: failed to allocate memory "
+ "for dsp vector table\n");
+ return -ENOMEM;
+ }
+
+ mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
+
+ return 0;
+}
+
+/*
+ * Tear down the preserved mappings, flush the kernel-side TLB range for
+ * the vector page and free it.
+ * NOTE(review): exmap_clear_preserved_entries() runs even when
+ * dspvect_page is NULL (startup failed) -- confirm that is safe.
+ */
+static void omap1_mmu_shutdown(struct omap_mmu *mmu)
+{
+ exmap_clear_preserved_entries(mmu);
+
+ if (dspvect_page != NULL) {
+ unsigned long virt;
+
+ down_read(&mmu->exmap_sem);
+
+ virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
+ flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
+ free_page((unsigned long)dspvect_page);
+ dspvect_page = NULL;
+
+ up_read(&mmu->exmap_sem);
+ }
+}
+
+/*
+ * Reconstruct the virtual address encoded in a CAM register pair:
+ * CAM_H carries the high tag bits (shifted to 22), CAM_L the low tag
+ * bits (shifted to 6), masked according to the entry's page size.
+ */
+static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
+{
+	unsigned int pgsz = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
+	u32 va_hi = (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22;
+	u32 va_lo = (u32)(cr->cam_l & get_cam_l_va_mask(pgsz)) << 6;
+
+	return va_hi | va_lo;
+}
+
+/*
+ * Allocate and fill a CAM/RAM register set describing @entry.
+ * Returns an ERR_PTR on failure: -EINVAL if the VA is not aligned to
+ * the requested page size, -ENOMEM if allocation fails.  The caller
+ * owns (and must kfree) the returned regset.
+ */
+static struct cam_ram_regset *
+omap1_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
+{
+	struct cam_ram_regset *cr;
+
+	/* The VA must be aligned to the page size being mapped. */
+	if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
+		printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
+		       "aligned boundary\n", entry->va);
+		return ERR_PTR(-EINVAL);
+	}
+
+	cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
+	if (cr == NULL)
+		return ERR_PTR(-ENOMEM);	/* was dereferenced unchecked */
+
+	/* VA tag is split across CAM_H (bits above 22) and CAM_L (21..6). */
+	cr->cam_h = entry->va >> 22;
+	cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
+		entry->prsvd | entry->pgsz;
+	cr->ram_h = entry->pa >> 16;
+	cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;
+
+	return cr;
+}
+
+/* Nonzero iff the CAM_L half of @cr has its valid bit set. */
+static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
+{
+ return cr->cam_l & OMAP_MMU_CAM_V;
+}
+
+/* OMAP1 implementation of the generic omap_mmu operations. */
+struct omap_mmu_ops omap1_mmu_ops = {
+ .startup = omap1_mmu_startup,
+ .shutdown = omap1_mmu_shutdown,
+ .mem_enable = omap1_mmu_mem_enable,
+ .mem_disable = omap1_mmu_mem_disable,
+ .read_tlb = omap1_mmu_read_tlb,
+ .load_tlb = omap1_mmu_load_tlb,
+ .show = omap1_mmu_show,
+ .cam_va = omap1_mmu_cam_va,
+ .cam_ram_alloc = omap1_mmu_cam_ram_alloc,
+ .cam_ram_valid = omap1_mmu_cam_ram_valid,
+};
+
--- /dev/null
+#ifndef __MACH_OMAP1_MMU_H
+#define __MACH_OMAP1_MMU_H
+
+#include <asm/arch/mmu.h>
+#include <asm/io.h>
+
+#define MMU_LOCK_BASE_MASK (0x3f << 10)
+#define MMU_LOCK_VICTIM_MASK (0x3f << 4)
+
+#define OMAP_MMU_BASE (0xfffed200)
+#define OMAP_MMU_PREFETCH (OMAP_MMU_BASE + 0x00)
+#define OMAP_MMU_WALKING_ST (OMAP_MMU_BASE + 0x04)
+#define OMAP_MMU_CNTL (OMAP_MMU_BASE + 0x08)
+#define OMAP_MMU_FAULT_AD_H (OMAP_MMU_BASE + 0x0c)
+#define OMAP_MMU_FAULT_AD_L (OMAP_MMU_BASE + 0x10)
+#define OMAP_MMU_FAULT_ST (OMAP_MMU_BASE + 0x14)
+#define OMAP_MMU_IT_ACK (OMAP_MMU_BASE + 0x18)
+#define OMAP_MMU_TTB_H (OMAP_MMU_BASE + 0x1c)
+#define OMAP_MMU_TTB_L (OMAP_MMU_BASE + 0x20)
+#define OMAP_MMU_LOCK (OMAP_MMU_BASE + 0x24)
+#define OMAP_MMU_LD_TLB (OMAP_MMU_BASE + 0x28)
+#define OMAP_MMU_CAM_H (OMAP_MMU_BASE + 0x2c)
+#define OMAP_MMU_CAM_L (OMAP_MMU_BASE + 0x30)
+#define OMAP_MMU_RAM_H (OMAP_MMU_BASE + 0x34)
+#define OMAP_MMU_RAM_L (OMAP_MMU_BASE + 0x38)
+#define OMAP_MMU_GFLUSH (OMAP_MMU_BASE + 0x3c)
+#define OMAP_MMU_FLUSH_ENTRY (OMAP_MMU_BASE + 0x40)
+#define OMAP_MMU_READ_CAM_H (OMAP_MMU_BASE + 0x44)
+#define OMAP_MMU_READ_CAM_L (OMAP_MMU_BASE + 0x48)
+#define OMAP_MMU_READ_RAM_H (OMAP_MMU_BASE + 0x4c)
+#define OMAP_MMU_READ_RAM_L (OMAP_MMU_BASE + 0x50)
+
+#define OMAP_MMU_CNTL_BURST_16MNGT_EN 0x0020
+#define OMAP_MMU_CNTL_WTL_EN 0x0004
+#define OMAP_MMU_CNTL_MMU_EN 0x0002
+#define OMAP_MMU_CNTL_RESET_SW 0x0001
+
+#define OMAP_MMU_FAULT_AD_H_DP 0x0100
+#define OMAP_MMU_FAULT_AD_H_ADR_MASK 0x00ff
+
+#define OMAP_MMU_FAULT_ST_PREF 0x0008
+#define OMAP_MMU_FAULT_ST_PERM 0x0004
+#define OMAP_MMU_FAULT_ST_TLB_MISS 0x0002
+#define OMAP_MMU_FAULT_ST_TRANS 0x0001
+
+#define OMAP_MMU_IT_ACK_IT_ACK 0x0001
+
+#define OMAP_MMU_CAM_H_VA_TAG_H_MASK 0x0003
+
+#define OMAP_MMU_CAM_L_VA_TAG_L1_MASK 0xc000
+#define OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB 0x0000
+#define OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB 0x3c00
+#define OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB 0x3fc0
+#define OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB 0x3ff0
+#define OMAP_MMU_CAM_L_P 0x0008
+#define OMAP_MMU_CAM_L_V 0x0004
+#define OMAP_MMU_CAM_L_PAGESIZE_MASK 0x0003
+#define OMAP_MMU_CAM_L_PAGESIZE_1MB 0x0000
+#define OMAP_MMU_CAM_L_PAGESIZE_64KB 0x0001
+#define OMAP_MMU_CAM_L_PAGESIZE_4KB 0x0002
+#define OMAP_MMU_CAM_L_PAGESIZE_1KB 0x0003
+
+#define OMAP_MMU_CAM_P OMAP_MMU_CAM_L_P
+#define OMAP_MMU_CAM_V OMAP_MMU_CAM_L_V
+#define OMAP_MMU_CAM_PAGESIZE_MASK OMAP_MMU_CAM_L_PAGESIZE_MASK
+#define OMAP_MMU_CAM_PAGESIZE_1MB OMAP_MMU_CAM_L_PAGESIZE_1MB
+#define OMAP_MMU_CAM_PAGESIZE_64KB OMAP_MMU_CAM_L_PAGESIZE_64KB
+#define OMAP_MMU_CAM_PAGESIZE_4KB OMAP_MMU_CAM_L_PAGESIZE_4KB
+#define OMAP_MMU_CAM_PAGESIZE_1KB OMAP_MMU_CAM_L_PAGESIZE_1KB
+
+#define OMAP_MMU_RAM_L_RAM_LSB_MASK 0xfc00
+#define OMAP_MMU_RAM_L_AP_MASK 0x0300
+#define OMAP_MMU_RAM_L_AP_NA 0x0000
+#define OMAP_MMU_RAM_L_AP_RO 0x0200
+#define OMAP_MMU_RAM_L_AP_FA 0x0300
+
+#define OMAP_MMU_LD_TLB_RD 0x0002
+
+/* Initialize a full-access, non-preserved TLB entry of the given size. */
+#define INIT_TLB_ENTRY(ent,v,p,ps) \
+do { \
+ (ent)->va = (v); \
+ (ent)->pa = (p); \
+ (ent)->pgsz = (ps); \
+ (ent)->prsvd = 0; \
+ (ent)->ap = OMAP_MMU_RAM_L_AP_FA; \
+} while (0)
+
+/* Initialize a preserved (locked-in) full-access 4KB TLB entry. */
+#define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
+do { \
+ (ent)->va = (v); \
+ (ent)->pa = (p); \
+ (ent)->pgsz = OMAP_MMU_CAM_PAGESIZE_4KB; \
+ (ent)->prsvd = OMAP_MMU_CAM_P; \
+ (ent)->ap = OMAP_MMU_RAM_L_AP_FA; \
+} while (0)
+
+extern struct omap_mmu_ops omap1_mmu_ops;
+
+struct omap_mmu_tlb_entry {
+ unsigned long va;
+ unsigned long pa;
+ unsigned int pgsz, prsvd, valid;
+
+ u16 ap;
+};
+
+/* Read a 16-bit MMU register at byte offset @reg from the MMU base. */
+static inline unsigned short
+omap_mmu_read_reg(struct omap_mmu *mmu, unsigned long reg)
+{
+ return omap_readw(mmu->base + reg);
+}
+
+/*
+ * Write a 16-bit MMU register at byte offset @reg from the MMU base.
+ * Must be "static inline" like its read_reg companion: a plain static
+ * function defined in a header triggers defined-but-not-used warnings
+ * and duplicate copies in every including translation unit.
+ */
+static inline void omap_mmu_write_reg(struct omap_mmu *mmu,
+				      unsigned short val, unsigned long reg)
+{
+	omap_writew(val, mmu->base + reg);
+}
+
+int omap_dsp_request_mem(void);
+void omap_dsp_release_mem(void);
+
+/* Acknowledge a pending DSP MMU interrupt via the IT_ACK register. */
+static inline void __dsp_mmu_itack(struct omap_mmu *mmu)
+{
+ omap_mmu_write_reg(mmu, OMAP_MMU_IT_ACK_IT_ACK, OMAP_MMU_IT_ACK);
+}
+
+#endif /* __MACH_OMAP1_MMU_H */
obj-$(CONFIG_PM) += pm.o sleep.o
# DSP
-obj-$(CONFIG_OMAP_DSP) += mailbox_mach.o
+obj-$(CONFIG_OMAP_DSP) += mailbox_mach.o mmu.o
mailbox_mach-objs := mailbox.o
# Specific board support
--- /dev/null
+/*
+ * linux/arch/arm/mach-omap2/mmu.c
+ *
+ * Support for non-MPU OMAP2 MMUs.
+ *
+ * Copyright (C) 2002-2005 Nokia Corporation
+ *
+ * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
+ * and Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/rwsem.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include "mmu.h"
+#include <asm/arch/mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+static void *dspvect_page;
+#define DSP_INIT_PAGE 0xfff000
+
+/*
+ * Copy the currently selected TLB entry's CAM/RAM words into @cr.
+ * NOTE(review): entry selection is assumed to be done by the generic
+ * caller via the lock register -- confirm in omap_mmu_read_tlb().
+ */
+static inline void
+omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
+{
+ cr->cam = omap_mmu_read_reg(mmu, MMU_READ_CAM);
+ cr->ram = omap_mmu_read_reg(mmu, MMU_READ_RAM);
+}
+
+/*
+ * Stage @cr into the CAM/RAM registers, forcing the valid bit on.
+ * NOTE(review): the actual TLB load strobe is presumably issued by the
+ * generic layer -- confirm.
+ */
+static inline void
+omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
+{
+ /* Set the CAM and RAM entries */
+ omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, MMU_CAM);
+ omap_mmu_write_reg(mmu, cr->ram, MMU_RAM);
+}
+
+/*
+ * Map one 4KB I/O page for the DSP: form the DSP-side address from
+ * IOMAP_VAL and dsp_io_adr (shifted left by one -- presumably a 16-bit
+ * word address; confirm), set up the ARM-side mapping, record it in
+ * exmap_tbl and load a preserved 32-bit-element TLB entry.
+ */
+static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
+ unsigned long dsp_io_adr, int index)
+{
+ unsigned long dspadr;
+ void *virt;
+ struct omap_mmu_tlb_entry tlb_ent;
+
+ dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
+ virt = omap_mmu_to_virt(mmu, dspadr);
+ exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
+ INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
+ INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
+ omap_mmu_load_tlb_entry(mmu, &tlb_ent);
+}
+
+/*
+ * Unmap the ARM-side view of an I/O page set up by
+ * exmap_setup_iomap_page().  The DSP-side TLB entry is intentionally
+ * left alone (see the comment in the body).
+ */
+static void exmap_clear_iomap_page(struct omap_mmu *mmu,
+ unsigned long dsp_io_adr)
+{
+ unsigned long dspadr;
+ void *virt;
+
+ dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
+ virt = omap_mmu_to_virt(mmu, dspadr);
+ exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
+ /* DSP MMU is shutting down. not handled here. */
+}
+
+#define OMAP24XX_MAILBOX_BASE (L4_24XX_BASE + 0x94000)
+#define OMAP2420_GPT5_BASE (L4_24XX_BASE + 0x7c000)
+#define OMAP2420_GPT6_BASE (L4_24XX_BASE + 0x7e000)
+#define OMAP2420_GPT7_BASE (L4_24XX_BASE + 0x80000)
+#define OMAP2420_GPT8_BASE (L4_24XX_BASE + 0x82000)
+#define OMAP24XX_EAC_BASE (L4_24XX_BASE + 0x90000)
+#define OMAP24XX_STI_BASE (L4_24XX_BASE + 0x68000)
+#define OMAP24XX_STI_CH_BASE (L4_24XX_BASE + 0x0c000000)
+
+/*
+ * Install the mappings that must stay resident while the DSP MMU runs:
+ * the DSP vector page, PRCM and mailbox register pages, and on 2420
+ * additionally GPT5-8, EAC, STI registers plus five STI channel pages.
+ * Returns the number of preserved entries installed.
+ */
+static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
+{
+ int i, n = 0;
+
+ exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
+
+ exmap_setup_iomap_page(mmu, OMAP24XX_PRCM_BASE, 0x7000, n++);
+ exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);
+
+ if (cpu_is_omap2420()) {
+ exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
+ exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
+ exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
+ exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
+ exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE, 0x10000, n++);
+ exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
+ for (i = 0; i < 5; i++)
+ exmap_setup_preserved_mem_page(mmu,
+ __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
+ 0xfb0000 + i*SZ_4K, n++);
+ }
+
+ return n;
+}
+
+/* Undo exmap_setup_preserved_entries(), mirroring its layout. */
+static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
+{
+ int i;
+
+ exmap_clear_iomap_page(mmu, 0x7000); /* PRCM registers */
+ exmap_clear_iomap_page(mmu, 0x11000); /* MAILBOX registers */
+
+ if (cpu_is_omap2420()) {
+ exmap_clear_iomap_page(mmu, 0xe000); /* GPT5 */
+ exmap_clear_iomap_page(mmu, 0xe800); /* GPT6 */
+ exmap_clear_iomap_page(mmu, 0xf000); /* GPT7 */
+ exmap_clear_iomap_page(mmu, 0xf800); /* GPT8 */
+ exmap_clear_iomap_page(mmu, 0x10000); /* EAC */
+ exmap_clear_iomap_page(mmu, 0xc800); /* STI */
+ for (i = 0; i < 5; i++) /* STI CH */
+ exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
+ }
+
+ exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
+}
+
+#define MMU_IRQ_MASK \
+ (OMAP_MMU_IRQ_MULTIHITFAULT | \
+ OMAP_MMU_IRQ_TABLEWALKFAULT | \
+ OMAP_MMU_IRQ_EMUMISS | \
+ OMAP_MMU_IRQ_TRANSLATIONFAULT | \
+ OMAP_MMU_IRQ_TLBMISS)
+
+/*
+ * Allocate the one-page DSP vector table, install the preserved TLB
+ * entries and unmask the MMU fault interrupts.  Returns 0 or -ENOMEM.
+ */
+static int omap2_mmu_startup(struct omap_mmu *mmu)
+{
+ dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
+ if (dspvect_page == NULL) {
+ printk(KERN_ERR "MMU: failed to allocate memory "
+ "for dsp vector table\n");
+ return -ENOMEM;
+ }
+
+ mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
+
+ omap_mmu_write_reg(mmu, MMU_IRQ_MASK, MMU_IRQENABLE);
+
+ return 0;
+}
+
+/*
+ * Tear down the preserved mappings, flush the kernel-side TLB range for
+ * the vector page and free it.
+ * NOTE(review): exmap_clear_preserved_entries() runs even when
+ * dspvect_page is NULL (startup failed) -- confirm that is safe.
+ */
+static void omap2_mmu_shutdown(struct omap_mmu *mmu)
+{
+ exmap_clear_preserved_entries(mmu);
+
+ if (dspvect_page != NULL) {
+ unsigned long virt;
+
+ down_read(&mmu->exmap_sem);
+
+ virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
+ flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
+ free_page((unsigned long)dspvect_page);
+ dspvect_page = NULL;
+
+ up_read(&mmu->exmap_sem);
+ }
+}
+
+/*
+ * Format a human-readable dump of every TLB entry into @buf (sysfs
+ * style, written sequentially with sprintf), decoding page size,
+ * preserved/valid flags, VA/PA, endianness, element size and the mixed
+ * attribute.  Lock base and victim positions are marked inline.
+ * Returns the number of bytes written.
+ *
+ * Fix: OMAP_MMU_CAM_PAGESIZE_16MB was mislabelled "64MB" in the dump.
+ */
+static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
+			      struct omap_mmu_tlb_lock *tlb_lock)
+{
+	int i, len;
+
+	len = sprintf(buf, "P: preserved, V: valid\n"
+		      "B: big endian, L:little endian, "
+		      "M: mixed page attribute\n"
+		      "ety P V size cam_va ram_pa E ES M\n");
+	/* 00: P V 4KB 0x300000 0x10171800 B 16 M */
+
+	for (i = 0; i < mmu->nr_tlb_entries; i++) {
+		struct omap_mmu_tlb_entry ent;
+		struct cam_ram_regset cr;
+		struct omap_mmu_tlb_lock entry_lock;
+		char *pgsz_str, *elsz_str;
+
+		/* read a TLB entry */
+		entry_lock.base = tlb_lock->base;
+		entry_lock.victim = i;
+		omap_mmu_read_tlb(mmu, &entry_lock, &cr);
+
+		/* Decode the raw CAM/RAM words into a tlb_entry view. */
+		ent.pgsz = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
+		ent.prsvd = cr.cam & OMAP_MMU_CAM_P;
+		ent.valid = cr.cam & OMAP_MMU_CAM_V;
+		ent.va = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
+		ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
+		ent.elsz = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
+		ent.pa = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
+		ent.mixed = cr.ram & OMAP_MMU_RAM_MIXED;
+
+		pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
+			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
+			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
+			   (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
+								      " ???";
+		elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
+			   (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
+			   (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
+								       "??";
+
+		if (i == tlb_lock->base)
+			len += sprintf(buf + len, "lock base = %d\n",
+				       tlb_lock->base);
+		if (i == tlb_lock->victim)
+			len += sprintf(buf + len, "victim = %d\n",
+				       tlb_lock->victim);
+
+		len += sprintf(buf + len,
+			       /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
+			       "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
+			       i,
+			       ent.prsvd ? 'P' : ' ',
+			       ent.valid ? 'V' : ' ',
+			       pgsz_str, ent.va, ent.pa,
+			       ent.endian ? 'B' : 'L',
+			       elsz_str,
+			       ent.mixed ? 'M' : ' ');
+	}
+
+	return len;
+}
+
+/* Significant virtual-address bits for each OMAP2 page size. */
+#define get_cam_va_mask(pgsz) \
+ (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
+ ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB) ? 0xfff00000 : \
+ ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
+ ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB) ? 0xfffff000 : 0)
+
+/*
+ * Return the virtual address encoded in @cr's CAM word, masked to the
+ * bits significant for the entry's page size.
+ *
+ * Cleanup: the mask argument was previously computed as
+ * "cr->cam & page_size", a redundant re-masking (page_size is already
+ * extracted from cr->cam, so the AND is an identity) that obscured the
+ * intent; pass page_size directly.
+ */
+static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
+{
+	unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
+
+	return cr->cam & get_cam_va_mask(page_size);
+}
+
+/*
+ * Allocate and fill a CAM/RAM register set describing @entry.
+ * Returns an ERR_PTR on failure: -EINVAL if the VA is not aligned to
+ * the requested page size, -ENOMEM if allocation fails.  The caller
+ * owns (and must kfree) the returned regset.
+ */
+static struct cam_ram_regset *
+omap2_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
+{
+	struct cam_ram_regset *cr;
+
+	/* The VA must be aligned to the page size being mapped. */
+	if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
+		printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
+		       "aligned boundary\n", entry->va);
+		return ERR_PTR(-EINVAL);
+	}
+
+	cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
+	if (cr == NULL)
+		return ERR_PTR(-ENOMEM);	/* was dereferenced unchecked */
+
+	cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
+		entry->prsvd | entry->pgsz;
+	cr->ram = entry->pa | entry->endian | entry->elsz;
+
+	return cr;
+}
+
+/* Nonzero iff @cr's CAM word has its valid bit set. */
+static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
+{
+ return cr->cam & OMAP_MMU_CAM_V;
+}
+
+/*
+ * OMAP2 implementation of the generic omap_mmu operations.  No
+ * mem_enable/mem_disable here: on OMAP2 omap_dsp_request_mem()/
+ * omap_dsp_release_mem() are no-op macros (see mmu.h).
+ */
+struct omap_mmu_ops omap2_mmu_ops = {
+ .startup = omap2_mmu_startup,
+ .shutdown = omap2_mmu_shutdown,
+ .read_tlb = omap2_mmu_read_tlb,
+ .load_tlb = omap2_mmu_load_tlb,
+ .show = omap2_mmu_show,
+ .cam_va = omap2_mmu_cam_va,
+ .cam_ram_alloc = omap2_mmu_cam_ram_alloc,
+ .cam_ram_valid = omap2_mmu_cam_ram_valid,
+};
--- /dev/null
+#ifndef __MACH_OMAP2_MMU_H
+#define __MACH_OMAP2_MMU_H
+
+#include "prcm-regs.h"
+#include <asm/arch/mmu.h>
+#include <asm/io.h>
+
+#define MMU_LOCK_BASE_MASK (0x1f << 10)
+#define MMU_LOCK_VICTIM_MASK (0x1f << 4)
+
+#define OMAP_MMU_IRQ_MULTIHITFAULT 0x00000010
+#define OMAP_MMU_IRQ_TABLEWALKFAULT 0x00000008
+#define OMAP_MMU_IRQ_EMUMISS 0x00000004
+#define OMAP_MMU_IRQ_TRANSLATIONFAULT 0x00000002
+#define OMAP_MMU_IRQ_TLBMISS 0x00000001
+
+#define OMAP_MMU_CAM_VATAG_MASK 0xfffff000
+#define OMAP_MMU_CAM_P 0x00000008
+#define OMAP_MMU_CAM_V 0x00000004
+#define OMAP_MMU_CAM_PAGESIZE_MASK 0x00000003
+#define OMAP_MMU_CAM_PAGESIZE_1MB 0x00000000
+#define OMAP_MMU_CAM_PAGESIZE_64KB 0x00000001
+#define OMAP_MMU_CAM_PAGESIZE_4KB 0x00000002
+#define OMAP_MMU_CAM_PAGESIZE_16MB 0x00000003
+
+#define OMAP_MMU_RAM_PADDR_MASK 0xfffff000
+#define OMAP_MMU_RAM_ENDIANNESS 0x00000200
+#define OMAP_MMU_RAM_ENDIANNESS_BIG 0x00000200
+#define OMAP_MMU_RAM_ENDIANNESS_LITTLE 0x00000000
+#define OMAP_MMU_RAM_ELEMENTSIZE_MASK 0x00000180
+#define OMAP_MMU_RAM_ELEMENTSIZE_8 0x00000000
+#define OMAP_MMU_RAM_ELEMENTSIZE_16 0x00000080
+#define OMAP_MMU_RAM_ELEMENTSIZE_32 0x00000100
+#define OMAP_MMU_RAM_ELEMENTSIZE_NONE 0x00000180
+#define OMAP_MMU_RAM_MIXED 0x00000040
+
+#define IOMAP_VAL 0x3f
+
+#define omap_dsp_request_mem() do { } while (0)
+#define omap_dsp_release_mem() do { } while (0)
+
+/* Initialize a non-preserved little-endian 16-bit-element TLB entry. */
+#define INIT_TLB_ENTRY(ent,v,p,ps) \
+do { \
+ (ent)->va = (v); \
+ (ent)->pa = (p); \
+ (ent)->pgsz = (ps); \
+ (ent)->prsvd = 0; \
+ (ent)->endian = OMAP_MMU_RAM_ENDIANNESS_LITTLE; \
+ (ent)->elsz = OMAP_MMU_RAM_ELEMENTSIZE_16; \
+ (ent)->mixed = 0; \
+} while (0)
+
+/* Initialize a preserved 4KB little-endian 16-bit-element TLB entry. */
+#define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
+do { \
+ (ent)->va = (v); \
+ (ent)->pa = (p); \
+ (ent)->pgsz = OMAP_MMU_CAM_PAGESIZE_4KB; \
+ (ent)->prsvd = OMAP_MMU_CAM_P; \
+ (ent)->endian = OMAP_MMU_RAM_ENDIANNESS_LITTLE; \
+ (ent)->elsz = OMAP_MMU_RAM_ELEMENTSIZE_16; \
+ (ent)->mixed = 0; \
+} while (0)
+
+/* As above but with 32-bit element size (used for I/O register pages). */
+#define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
+do { \
+ (ent)->va = (v); \
+ (ent)->pa = (p); \
+ (ent)->pgsz = OMAP_MMU_CAM_PAGESIZE_4KB; \
+ (ent)->prsvd = OMAP_MMU_CAM_P; \
+ (ent)->endian = OMAP_MMU_RAM_ENDIANNESS_LITTLE; \
+ (ent)->elsz = OMAP_MMU_RAM_ELEMENTSIZE_32; \
+ (ent)->mixed = 0; \
+} while (0)
+
+extern struct omap_mmu_ops omap2_mmu_ops;
+
+struct omap_mmu_tlb_entry {
+ unsigned long va;
+ unsigned long pa;
+ unsigned int pgsz, prsvd, valid;
+
+ u32 endian, elsz, mixed;
+};
+
+/* Read a 32-bit MMU register at byte offset @reg from the MMU base. */
+static inline unsigned long
+omap_mmu_read_reg(struct omap_mmu *mmu, unsigned long reg)
+{
+ return __raw_readl(mmu->base + reg);
+}
+
+/*
+ * Write a 32-bit MMU register at byte offset @reg from the MMU base.
+ * Must be "static inline" like its read_reg companion: a plain static
+ * function defined in a header triggers defined-but-not-used warnings
+ * and duplicate copies in every including translation unit.
+ */
+static inline void omap_mmu_write_reg(struct omap_mmu *mmu,
+				      unsigned long val, unsigned long reg)
+{
+	__raw_writel(val, mmu->base + reg);
+}
+
+#endif /* __MACH_OMAP2_MMU_H */