return cr->cam_l & OMAP_MMU_CAM_V;
}
+/*
+ * omap1_mmu_interrupt - OMAP1 DSP MMU fault handler body.
+ *
+ * Reads the fault status and the faulting virtual address, logs the
+ * fault cause, and defers the real handling to mmu->irq_work.  Masked
+ * (spurious) interrupts are logged at debug level only and the IRQ is
+ * re-enabled.
+ */
+static void omap1_mmu_interrupt(struct omap_mmu *mmu)
+{
+ unsigned long status;
+ unsigned long adh, adl;
+ unsigned long dp;
+ unsigned long va;
+
+ status = omap_mmu_read_reg(mmu, MMU_FAULT_ST);
+ adh = omap_mmu_read_reg(mmu, MMU_FAULT_AD_H);
+ adl = omap_mmu_read_reg(mmu, MMU_FAULT_AD_L);
+ /* NOTE(review): dp is extracted but never used below — confirm intent */
+ dp = adh & MMU_FAULT_AD_H_DP;
+ va = MK32(adh & MMU_FAULT_AD_H_ADR_MASK, adl);
+
+ /* if the fault is masked, nothing to do */
+ if ((status & MMUFAULT_MASK) == 0) {
+ pr_debug("MMU interrupt, but ignoring.\n");
+ /*
+ * note: in OMAP1710,
+ * when CACHE + DMA domain gets out of idle in DSP,
+ * MMU interrupt occurs but MMU_FAULT_ST is not set.
+ * in this case, we just ignore the interrupt.
+ */
+ if (status) {
+ pr_debug("%s%s%s%s\n",
+ (status & MMU_FAULT_ST_PREF) ?
+ " (prefetch err)" : "",
+ (status & MMU_FAULT_ST_PERM) ?
+ " (permission fault)" : "",
+ (status & MMU_FAULT_ST_TLB_MISS) ?
+ " (TLB miss)" : "",
+ (status & MMU_FAULT_ST_TRANS) ?
+ " (translation fault)" : "");
+ /* va is unsigned long: %lx, not %x (format mismatch is UB) */
+ pr_debug("fault address = %#08lx\n", va);
+ }
+ enable_irq(mmu->irq);
+ return;
+ }
+
+ /* bits inside MMUFAULT_MASK are reported without parentheses */
+ pr_info("%s%s%s%s\n",
+ (status & MMU_FAULT_ST_PREF) ?
+ (MMUFAULT_MASK & MMU_FAULT_ST_PREF) ?
+ " prefetch err" :
+ " (prefetch err)" :
+ "",
+ (status & MMU_FAULT_ST_PERM) ?
+ (MMUFAULT_MASK & MMU_FAULT_ST_PERM) ?
+ " permission fault" :
+ " (permission fault)" :
+ "",
+ (status & MMU_FAULT_ST_TLB_MISS) ?
+ (MMUFAULT_MASK & MMU_FAULT_ST_TLB_MISS) ?
+ " TLB miss" :
+ " (TLB miss)" :
+ "",
+ (status & MMU_FAULT_ST_TRANS) ?
+ (MMUFAULT_MASK & MMU_FAULT_ST_TRANS) ?
+ " translation fault" :
+ " (translation fault)" :
+ "");
+ /* va is unsigned long: %lx, not %x */
+ pr_info("fault address = %#08lx\n", va);
+
+ mmu->fault_address = va;
+ schedule_work(&mmu->irq_work);
+}
+
struct omap_mmu_ops omap1_mmu_ops = {
.startup = omap1_mmu_startup,
.shutdown = omap1_mmu_shutdown,
.cam_va = omap1_mmu_cam_va,
.cam_ram_alloc = omap1_mmu_cam_ram_alloc,
.cam_ram_valid = omap1_mmu_cam_ram_valid,
+ .interrupt = omap1_mmu_interrupt,
};
EXPORT_SYMBOL_GPL(omap1_mmu_ops);
return __raw_readw(mmu->base + reg);
}
-static void omap_mmu_write_reg(struct omap_mmu *mmu,
+static inline void omap_mmu_write_reg(struct omap_mmu *mmu,
unsigned short val, unsigned long reg)
{
__raw_writew(val, mmu->base + reg);
int omap_dsp_request_mem(void);
void omap_dsp_release_mem(void);
-static inline void __dsp_mmu_itack(struct omap_mmu *mmu)
+static inline void omap_mmu_itack(struct omap_mmu *mmu)
{
omap_mmu_write_reg(mmu, OMAP_MMU_IT_ACK_IT_ACK, OMAP_MMU_IT_ACK);
}
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/interrupt.h>
#include "mmu.h"
#include <asm/arch/mmu.h>
#include <asm/tlbflush.h>
return cr->cam & OMAP_MMU_CAM_V;
}
+/*
+ * omap2_mmu_interrupt - OMAP2 MMU fault handler body.
+ *
+ * Reads the masked IRQ status and faulting address, logs the set fault
+ * bits, disables the MMU, acknowledges the status bits, and defers the
+ * real handling to mmu->irq_work.
+ */
+static void omap2_mmu_interrupt(struct omap_mmu *mmu)
+{
+ unsigned long status, va;
+
+ status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, MMU_IRQSTATUS);
+ va = omap_mmu_read_reg(mmu, MMU_FAULT_AD);
+
+ /*
+ * Log only the bits that are actually set; unconditional
+ * pr_info("%s\n", ...) calls would emit a blank line for every
+ * clear bit.
+ */
+ if (status & OMAP_MMU_IRQ_MULTIHITFAULT)
+ pr_info("multi hit\n");
+ if (status & OMAP_MMU_IRQ_TABLEWALKFAULT)
+ pr_info("table walk fault\n");
+ if (status & OMAP_MMU_IRQ_EMUMISS)
+ pr_info("EMU miss\n");
+ if (status & OMAP_MMU_IRQ_TRANSLATIONFAULT)
+ pr_info("translation fault\n");
+ if (status & OMAP_MMU_IRQ_TLBMISS)
+ pr_info("TLB miss\n");
+ pr_info("fault address = %#08lx\n", va);
+
+ /* stop the MMU and acknowledge the pending status bits */
+ omap_mmu_disable(mmu);
+ omap_mmu_write_reg(mmu, status, MMU_IRQSTATUS);
+
+ mmu->fault_address = va;
+ schedule_work(&mmu->irq_work);
+}
struct omap_mmu_ops omap2_mmu_ops = {
.startup = omap2_mmu_startup,
.shutdown = omap2_mmu_shutdown,
.cam_va = omap2_mmu_cam_va,
.cam_ram_alloc = omap2_mmu_cam_ram_alloc,
.cam_ram_valid = omap2_mmu_cam_ram_valid,
+ .interrupt = omap2_mmu_interrupt,
};
EXPORT_SYMBOL_GPL(omap2_mmu_ops);
return __raw_readl(mmu->base + reg);
}
-static void omap_mmu_write_reg(struct omap_mmu *mmu,
+/* MMIO write of a 32-bit MMU register at offset 'reg' from mmu->base. */
+static inline void omap_mmu_write_reg(struct omap_mmu *mmu,
unsigned long val, unsigned long reg)
{
__raw_writel(val, mmu->base + reg);
}
-
+/*
+ * Empty interrupt-ack stub so shared code can call omap_mmu_itack()
+ * unconditionally.  NOTE(review): the OMAP1 counterpart writes an
+ * IT_ACK register — confirm OMAP2 needs no ack beyond the handler's
+ * write-back to MMU_IRQSTATUS.
+ */
+static inline void omap_mmu_itack(struct omap_mmu *mmu)
+{
+}
#endif /* __MACH_OMAP2_MMU_H */
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);
+/*
+ * Shared top-half IRQ handler: delegate to the SoC-specific
+ * ops->interrupt() hook, which logs the fault and schedules
+ * mmu->irq_work for deferred processing.
+ */
+static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
+{
+ struct omap_mmu *mmu = dev_id;
+
+ if (likely(mmu->ops->interrupt))
+ mmu->ops->interrupt(mmu);
+
+ return IRQ_HANDLED;
+}
+
static int omap_mmu_init(struct omap_mmu *mmu)
{
struct omap_mmu_tlb_lock tlb_lock;
omap_dsp_request_mem();
down_write(&mmu->exmap_sem);
+ ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
+ mmu->name, mmu);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "failed to register MMU interrupt: %d\n", ret);
+ goto fail;
+ }
+
omap_mmu_disable(mmu); /* clear all */
udelay(100);
omap_mmu_enable(mmu, 1);
if (unlikely(mmu->ops->startup))
ret = mmu->ops->startup(mmu);
-
+ fail:
up_write(&mmu->exmap_sem);
omap_dsp_release_mem();
clk_disable(mmu->clk);
static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
+ free_irq(mmu->irq, mmu);
+
if (unlikely(mmu->ops->shutdown))
mmu->ops->shutdown(mmu);
#define __ARCH_OMAP_MMU_H
#include <linux/device.h>
+#include <linux/workqueue.h>
#define MMU_REVISION 0x00
#define MMU_SYSCONFIG 0x10
/* Memory operations */
int (*mem_enable)(struct omap_mmu *, void *);
int (*mem_disable)(struct omap_mmu *, void *);
+
+ void (*interrupt)(struct omap_mmu *);
};
struct omap_mmu {
/* Size of virtual address space, in bits */
unsigned int addrspace;
+ /* Interrupt */
+ unsigned int irq;
+ unsigned long fault_address;
+ struct work_struct irq_work;
+
struct omap_mmu_ops *ops;
};