[ARM] 3370/2: ep93xx: add crunch support
author Lennert Buytenhek <buytenh@wantstofly.org>
Tue, 27 Jun 2006 22:03:03 +0000 (23:03 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Wed, 28 Jun 2006 16:55:01 +0000 (17:55 +0100)
Patch from Lennert Buytenhek

Add the necessary kernel bits for lazy task switching of the MaverickCrunch (crunch) coprocessor context.

Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/kernel/Makefile
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/crunch-bits.S [new file with mode: 0644]
arch/arm/kernel/crunch.c [new file with mode: 0644]
arch/arm/kernel/entry-armv.S
arch/arm/mach-ep93xx/Kconfig
include/asm-arm/fpstate.h
include/asm-arm/thread_info.h
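
The change implements lazy switching of the MaverickCrunch state: coprocessor access is left disabled across context switches, and the first crunch instruction a task executes traps into the handler added in crunch-bits.S, which saves the previous owner's registers and loads the faulting task's. A rough C sketch of the idea, not code from this patch (the hw_* helpers are hypothetical stand-ins for the syscon enable sequence and the cfstr64/cfldr64 blocks below):

struct crunch_ctx { unsigned int words[46]; };  /* 184 bytes, like struct crunch_state */

extern void hw_enable_access(void);             /* hypothetical: unlock syscon, set enable bit */
extern void hw_save_regs(struct crunch_ctx *);  /* hypothetical: the cfstr64/cfstr32 save sequence */
extern void hw_load_regs(struct crunch_ctx *);  /* hypothetical: the cfldr64/cfldr32 load sequence */

static struct crunch_ctx *owner;                /* task whose state is live in the coprocessor */

static void crunch_trap(struct crunch_ctx *next)
{
        hw_enable_access();                     /* faulting task wants the unit: turn it back on */
        if (owner)
                hw_save_regs(owner);            /* spill the previous owner's MVDX/MVAX/DSPSC */
        hw_load_regs(next);                     /* pull in the faulting task's saved state */
        owner = next;                           /* this task now owns the coprocessor */
}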

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a601b8b55f3594ba1a0989956c5db5ae9da389fa..7cffbaef064ba60afcec20ca02638ebf70b89a9a 100644
@@ -22,6 +22,9 @@ obj-$(CONFIG_PCI)             += bios32.o
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_OABI_COMPAT)      += sys_oabi-compat.o
 
+obj-$(CONFIG_CRUNCH)           += crunch.o crunch-bits.o
+AFLAGS_crunch-bits.o           := -Wa,-mcpu=ep9312
+
 obj-$(CONFIG_IWMMXT)           += iwmmxt.o
 AFLAGS_iwmmxt.o                        := -Wa,-mcpu=iwmmxt
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 396efba9bacd2d6fb6351e18149576cc1d779665..447ede5143a8f7bf16f3b0ea48d0ebd500d5a3ca 100644
@@ -59,6 +59,9 @@ int main(void)
   DEFINE(TI_VFPSTATE,          offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_IWMMXT
   DEFINE(TI_IWMMXT_STATE,      offsetof(struct thread_info, fpstate.iwmmxt));
+#endif
+#ifdef CONFIG_CRUNCH
+  DEFINE(TI_CRUNCH_STATE,      offsetof(struct thread_info, crunchstate));
 #endif
   BLANK();
   DEFINE(S_R0,                 offsetof(struct pt_regs, ARM_r0));
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
new file mode 100644
index 0000000..a268867
--- /dev/null
+++ b/arch/arm/kernel/crunch-bits.S
@@ -0,0 +1,305 @@
+/*
+ * arch/arm/kernel/crunch-bits.S
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
+ * Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch/ep93xx-regs.h>
+
+/*
+ * We can't use hex constants here due to a bug in gas.
+ */
+#define CRUNCH_MVDX0           0
+#define CRUNCH_MVDX1           8
+#define CRUNCH_MVDX2           16
+#define CRUNCH_MVDX3           24
+#define CRUNCH_MVDX4           32
+#define CRUNCH_MVDX5           40
+#define CRUNCH_MVDX6           48
+#define CRUNCH_MVDX7           56
+#define CRUNCH_MVDX8           64
+#define CRUNCH_MVDX9           72
+#define CRUNCH_MVDX10          80
+#define CRUNCH_MVDX11          88
+#define CRUNCH_MVDX12          96
+#define CRUNCH_MVDX13          104
+#define CRUNCH_MVDX14          112
+#define CRUNCH_MVDX15          120
+#define CRUNCH_MVAX0L          128
+#define CRUNCH_MVAX0M          132
+#define CRUNCH_MVAX0H          136
+#define CRUNCH_MVAX1L          140
+#define CRUNCH_MVAX1M          144
+#define CRUNCH_MVAX1H          148
+#define CRUNCH_MVAX2L          152
+#define CRUNCH_MVAX2M          156
+#define CRUNCH_MVAX2H          160
+#define CRUNCH_MVAX3L          164
+#define CRUNCH_MVAX3M          168
+#define CRUNCH_MVAX3H          172
+#define CRUNCH_DSPSC           176
+
+#define CRUNCH_SIZE            184
+
+       .text
+
+/*
+ * Lazy switching of crunch coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9  = ret_from_exception
+ * lr  = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+ENTRY(crunch_task_enable)
+       ldr     r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)        @ syscon addr
+
+       ldr     r1, [r8, #0x80]
+       tst     r1, #0x00800000                 @ access to crunch enabled?
+       movne   pc, lr                          @ if so no business here
+       mov     r3, #0xaa                       @ unlock syscon swlock
+       str     r3, [r8, #0xc0]
+       orr     r1, r1, #0x00800000             @ enable access to crunch
+       str     r1, [r8, #0x80]
+
+       ldr     r3, =crunch_owner
+       add     r0, r10, #TI_CRUNCH_STATE       @ get task crunch save area
+       ldr     r2, [sp, #60]                   @ current task pc value
+       ldr     r1, [r3]                        @ get current crunch owner
+       str     r0, [r3]                        @ this task now owns crunch
+       sub     r2, r2, #4                      @ adjust pc back
+       str     r2, [sp, #60]
+
+       ldr     r2, [r8, #0x80]
+       mov     r2, r2                          @ flush out enable (@@@)
+
+       teq     r1, #0                          @ test for last ownership
+       mov     lr, r9                          @ normal exit from exception
+       beq     crunch_load                     @ no owner, skip save
+
+crunch_save:
+       cfstr64         mvdx0, [r1, #CRUNCH_MVDX0]      @ save 64b registers
+       cfstr64         mvdx1, [r1, #CRUNCH_MVDX1]
+       cfstr64         mvdx2, [r1, #CRUNCH_MVDX2]
+       cfstr64         mvdx3, [r1, #CRUNCH_MVDX3]
+       cfstr64         mvdx4, [r1, #CRUNCH_MVDX4]
+       cfstr64         mvdx5, [r1, #CRUNCH_MVDX5]
+       cfstr64         mvdx6, [r1, #CRUNCH_MVDX6]
+       cfstr64         mvdx7, [r1, #CRUNCH_MVDX7]
+       cfstr64         mvdx8, [r1, #CRUNCH_MVDX8]
+       cfstr64         mvdx9, [r1, #CRUNCH_MVDX9]
+       cfstr64         mvdx10, [r1, #CRUNCH_MVDX10]
+       cfstr64         mvdx11, [r1, #CRUNCH_MVDX11]
+       cfstr64         mvdx12, [r1, #CRUNCH_MVDX12]
+       cfstr64         mvdx13, [r1, #CRUNCH_MVDX13]
+       cfstr64         mvdx14, [r1, #CRUNCH_MVDX14]
+       cfstr64         mvdx15, [r1, #CRUNCH_MVDX15]
+
+#ifdef __ARMEB__
+#error fix me for ARMEB
+#endif
+
+       cfmv32al        mvfx0, mvax0                    @ save 72b accumulators
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX0L]
+       cfmv32am        mvfx0, mvax0
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX0M]
+       cfmv32ah        mvfx0, mvax0
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX0H]
+       cfmv32al        mvfx0, mvax1
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX1L]
+       cfmv32am        mvfx0, mvax1
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX1M]
+       cfmv32ah        mvfx0, mvax1
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX1H]
+       cfmv32al        mvfx0, mvax2
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX2L]
+       cfmv32am        mvfx0, mvax2
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX2M]
+       cfmv32ah        mvfx0, mvax2
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX2H]
+       cfmv32al        mvfx0, mvax3
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX3L]
+       cfmv32am        mvfx0, mvax3
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX3M]
+       cfmv32ah        mvfx0, mvax3
+       cfstr32         mvfx0, [r1, #CRUNCH_MVAX3H]
+
+       cfmv32sc        mvdx0, dspsc                    @ save status word
+       cfstr64         mvdx0, [r1, #CRUNCH_DSPSC]
+
+       teq             r0, #0                          @ anything to load?
+       cfldr64eq       mvdx0, [r1, #CRUNCH_MVDX0]      @ mvdx0 was clobbered
+       moveq           pc, lr
+
+crunch_load:
+       cfldr64         mvdx0, [r0, #CRUNCH_DSPSC]      @ load status word
+       cfmvsc32        dspsc, mvdx0
+
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX0L]     @ load 72b accumulators
+       cfmval32        mvax0, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX0M]
+       cfmvam32        mvax0, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX0H]
+       cfmvah32        mvax0, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX1L]
+       cfmval32        mvax1, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX1M]
+       cfmvam32        mvax1, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX1H]
+       cfmvah32        mvax1, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX2L]
+       cfmval32        mvax2, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX2M]
+       cfmvam32        mvax2, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX2H]
+       cfmvah32        mvax2, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX3L]
+       cfmval32        mvax3, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX3M]
+       cfmvam32        mvax3, mvfx0
+       cfldr32         mvfx0, [r0, #CRUNCH_MVAX3H]
+       cfmvah32        mvax3, mvfx0
+
+       cfldr64         mvdx0, [r0, #CRUNCH_MVDX0]      @ load 64b registers
+       cfldr64         mvdx1, [r0, #CRUNCH_MVDX1]
+       cfldr64         mvdx2, [r0, #CRUNCH_MVDX2]
+       cfldr64         mvdx3, [r0, #CRUNCH_MVDX3]
+       cfldr64         mvdx4, [r0, #CRUNCH_MVDX4]
+       cfldr64         mvdx5, [r0, #CRUNCH_MVDX5]
+       cfldr64         mvdx6, [r0, #CRUNCH_MVDX6]
+       cfldr64         mvdx7, [r0, #CRUNCH_MVDX7]
+       cfldr64         mvdx8, [r0, #CRUNCH_MVDX8]
+       cfldr64         mvdx9, [r0, #CRUNCH_MVDX9]
+       cfldr64         mvdx10, [r0, #CRUNCH_MVDX10]
+       cfldr64         mvdx11, [r0, #CRUNCH_MVDX11]
+       cfldr64         mvdx12, [r0, #CRUNCH_MVDX12]
+       cfldr64         mvdx13, [r0, #CRUNCH_MVDX13]
+       cfldr64         mvdx14, [r0, #CRUNCH_MVDX14]
+       cfldr64         mvdx15, [r0, #CRUNCH_MVDX15]
+
+       mov     pc, lr
+
+/*
+ * Back up crunch regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+ENTRY(crunch_task_disable)
+       stmfd   sp!, {r4, r5, lr}
+
+       mrs     ip, cpsr
+       orr     r2, ip, #PSR_I_BIT              @ disable interrupts
+       msr     cpsr_c, r2
+
+       ldr     r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)        @ syscon addr
+
+       ldr     r3, =crunch_owner
+       add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
+       ldr     r1, [r3]                        @ get current crunch owner
+       teq     r1, #0                          @ any current owner?
+       beq     1f                              @ no: quit
+       teq     r0, #0                          @ any owner?
+       teqne   r1, r2                          @ or specified one?
+       bne     1f                              @ no: quit
+
+       ldr     r5, [r4, #0x80]                 @ enable access to crunch
+       mov     r2, #0xaa
+       str     r2, [r4, #0xc0]
+       orr     r5, r5, #0x00800000
+       str     r5, [r4, #0x80]
+
+       mov     r0, #0                          @ nothing to load
+       str     r0, [r3]                        @ no more current owner
+       ldr     r2, [r4, #0x80]                 @ flush out enable (@@@)
+       mov     r2, r2
+       bl      crunch_save
+
+       mov     r2, #0xaa                       @ disable access to crunch
+       str     r2, [r4, #0xc0]
+       bic     r5, r5, #0x00800000
+       str     r5, [r4, #0x80]
+       ldr     r5, [r4, #0x80]                 @ flush out enable (@@@)
+       mov     r5, r5
+
+1:     msr     cpsr_c, ip                      @ restore interrupt mode
+       ldmfd   sp!, {r4, r5, pc}
+
+/*
+ * Copy crunch state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store crunch state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+ENTRY(crunch_task_copy)
+       mrs     ip, cpsr
+       orr     r2, ip, #PSR_I_BIT              @ disable interrupts
+       msr     cpsr_c, r2
+
+       ldr     r3, =crunch_owner
+       add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
+       ldr     r3, [r3]                        @ get current crunch owner
+       teq     r2, r3                          @ does this task own it...
+       beq     1f
+
+       @ current crunch values are in the task save area
+       msr     cpsr_c, ip                      @ restore interrupt mode
+       mov     r0, r1
+       mov     r1, r2
+       mov     r2, #CRUNCH_SIZE
+       b       memcpy
+
+1:     @ this task owns crunch regs -- grab a copy from there
+       mov     r0, #0                          @ nothing to load
+       mov     r3, lr                          @ preserve return address
+       bl      crunch_save
+       msr     cpsr_c, ip                      @ restore interrupt mode
+       mov     pc, r3
+
+/*
+ * Restore crunch state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get crunch state from
+ *
+ * this is used to restore crunch state when unwinding a signal stack frame
+ */
+ENTRY(crunch_task_restore)
+       mrs     ip, cpsr
+       orr     r2, ip, #PSR_I_BIT              @ disable interrupts
+       msr     cpsr_c, r2
+
+       ldr     r3, =crunch_owner
+       add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
+       ldr     r3, [r3]                        @ get current crunch owner
+       teq     r2, r3                          @ does this task own it...
+       beq     1f
+
+       @ this task doesn't own crunch regs -- use its save area
+       msr     cpsr_c, ip                      @ restore interrupt mode
+       mov     r0, r2
+       mov     r2, #CRUNCH_SIZE
+       b       memcpy
+
+1:     @ this task owns crunch regs -- load them directly
+       mov     r0, r1
+       mov     r1, #0                          @ nothing to save
+       mov     r3, lr                          @ preserve return address
+       bl      crunch_load
+       msr     cpsr_c, ip                      @ restore interrupt mode
+       mov     pc, r3
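
The CRUNCH_* offsets and CRUNCH_SIZE above are defined by hand and must stay in step with struct crunch_state added to include/asm-arm/fpstate.h further down in this patch. Spelled out as a reminder (not new code in the patch), the layout they assume is:

struct crunch_state {                   /* as defined in include/asm-arm/fpstate.h below */
        unsigned int mvdx[16][2];       /* 16 x 64-bit MVDX registers,   offsets 0..120  */
        unsigned int mvax[4][3];        /* 4 x 72-bit accumulators,      offsets 128..172 */
        unsigned int dspsc[2];          /* DSPSC saved via a 64-bit store, offset 176    */
};
/* sizeof == 16*8 + 4*12 + 8 = 184 bytes, matching the CRUNCH_SIZE constant above. */
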
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
new file mode 100644
index 0000000..7481759
--- /dev/null
+++ b/arch/arm/kernel/crunch.c
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/kernel/crunch.c
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/arch/ep93xx-regs.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+struct crunch_state *crunch_owner;
+
+void crunch_task_release(struct thread_info *thread)
+{
+       local_irq_disable();
+       if (crunch_owner == &thread->crunchstate)
+               crunch_owner = NULL;
+       local_irq_enable();
+}
+
+static int crunch_enabled(u32 devcfg)
+{
+       return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+}
+
+static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+       struct thread_info *thread = (struct thread_info *)t;
+       struct crunch_state *crunch_state;
+       u32 devcfg;
+
+       crunch_state = &thread->crunchstate;
+
+       switch (cmd) {
+       case THREAD_NOTIFY_FLUSH:
+               memset(crunch_state, 0, sizeof(*crunch_state));
+
+               /*
+                * FALLTHROUGH: Ensure we don't try to overwrite our newly
+                * initialised state information on the first fault.
+                */
+
+       case THREAD_NOTIFY_RELEASE:
+               crunch_task_release(thread);
+               break;
+
+       case THREAD_NOTIFY_SWITCH:
+               devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+               if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
+                       devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+                       __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
+                       __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+               }
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block crunch_notifier_block = {
+       .notifier_call  = crunch_do,
+};
+
+static int __init crunch_init(void)
+{
+       thread_register_notifier(&crunch_notifier_block);
+
+       return 0;
+}
+
+late_initcall(crunch_init);
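
One EP93xx-specific detail worth noting: the device configuration register sits behind the syscon software lock, so both crunch_do() above and the assembler in crunch-bits.S write 0xaa to the SWLOCK register immediately before each update of DEVICE_CONFIG. A helper expressing that pattern might look like the sketch below (illustrative only, assuming the same headers crunch.c already includes; the patch open-codes the two writes instead):

static void ep93xx_syscon_locked_write(u32 val)
{
        /* Re-arm the software lock, then perform the protected write. */
        __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
        __raw_writel(val, EP93XX_SYSCON_DEVICE_CONFIG);
}
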
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 86c92523a346704e7d94c7c1bc9660e5bc383aa6..6423a38839b8b4e86f6105399e209e00ce8f607c 100644
@@ -492,9 +492,15 @@ call_fpe:
        b       do_fpe                          @ CP#1 (FPE)
        b       do_fpe                          @ CP#2 (FPE)
        mov     pc, lr                          @ CP#3
+#ifdef CONFIG_CRUNCH
+       b       crunch_task_enable              @ CP#4 (MaverickCrunch)
+       b       crunch_task_enable              @ CP#5 (MaverickCrunch)
+       b       crunch_task_enable              @ CP#6 (MaverickCrunch)
+#else
        mov     pc, lr                          @ CP#4
        mov     pc, lr                          @ CP#5
        mov     pc, lr                          @ CP#6
+#endif
        mov     pc, lr                          @ CP#7
        mov     pc, lr                          @ CP#8
        mov     pc, lr                          @ CP#9
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index cec5a21ca4e341593e5a6b4470a7cc903c111f70..293c1f5c61802ec170afd4d8fe01168ce24cedd9 100644
@@ -2,6 +2,11 @@ if ARCH_EP93XX
 
 menu "Cirrus EP93xx Implementation Options"
 
+config CRUNCH
+       bool "Support for MaverickCrunch"
+       help
+         Enable kernel support for MaverickCrunch.
+
 comment "EP93xx Platforms"
 
 config MACH_GESBC9312
diff --git a/include/asm-arm/fpstate.h b/include/asm-arm/fpstate.h
index 132c3c5628b2a4b4d5be60bee7f1a41f04e627d0..6af4e6bd1290c261dcba28096949fe6262145c8a 100644
@@ -72,6 +72,14 @@ union fp_state {
 
 #define FP_SIZE (sizeof(union fp_state) / sizeof(int))
 
+struct crunch_state {
+       unsigned int    mvdx[16][2];
+       unsigned int    mvax[4][3];
+       unsigned int    dspsc[2];
+};
+
+#define CRUNCH_SIZE    sizeof(struct crunch_state)
+
 #endif
 
 #endif
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index cfbccb63c67b4df54523fa3942b2ba6b096bd09b..c46b5c84275f3f2f5b2ac91d8a60002963fdc9f5 100644
@@ -59,6 +59,7 @@ struct thread_info {
        struct cpu_context_save cpu_context;    /* cpu context */
        __u8                    used_cp[16];    /* thread used copro */
        unsigned long           tp_value;
+       struct crunch_state     crunchstate;
        union fp_state          fpstate __attribute__((aligned(8)));
        union vfp_state         vfpstate;
        struct restart_block    restart_block;
@@ -101,6 +102,11 @@ extern void free_thread_info(struct thread_info *);
 #define thread_saved_fp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
+extern void crunch_task_disable(struct thread_info *);
+extern void crunch_task_copy(struct thread_info *, void *);
+extern void crunch_task_restore(struct thread_info *, void *);
+extern void crunch_task_release(struct thread_info *);
+
 extern void iwmmxt_task_disable(struct thread_info *);
 extern void iwmmxt_task_copy(struct thread_info *, void *);
 extern void iwmmxt_task_restore(struct thread_info *, void *);
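
The crunch_task_copy()/crunch_task_restore() prototypes added above mirror their iWMMXt counterparts; as the comments in crunch-bits.S note, they are intended for signal-frame handling, which is not wired up by this patch. A hypothetical caller (sketch only; struct crunch_sigframe and its use in the signal code are assumptions, not part of this change) could look like:

struct crunch_sigframe {                        /* hypothetical frame layout */
        unsigned long           magic;
        unsigned long           size;
        struct crunch_state     storage;
};

static void preserve_crunch_context(struct crunch_sigframe *frame)
{
        /* Copy the task's crunch state into the frame (CRUNCH_SIZE bytes). */
        crunch_task_copy(current_thread_info(), &frame->storage);
}

static void restore_crunch_context(struct crunch_sigframe *frame)
{
        /* Load the crunch state back from the frame on signal return. */
        crunch_task_restore(current_thread_info(), &frame->storage);
}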