Blackfin arch: Update adeos blackfin arch patch to 1.9-00
Author:     Philippe Gerum <rpm@xenomai.org>
AuthorDate: Wed, 4 Mar 2009 08:52:38 +0000 (16:52 +0800)
Commit:     Bryan Wu <cooloney@kernel.org>
CommitDate: Wed, 4 Mar 2009 08:52:38 +0000 (16:52 +0800)
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
arch/blackfin/include/asm/ipipe.h
arch/blackfin/include/asm/ipipe_base.h
arch/blackfin/include/asm/irq.h
arch/blackfin/include/asm/thread_info.h
arch/blackfin/kernel/ipipe.c
arch/blackfin/kernel/irqchip.c
arch/blackfin/kernel/time.c
arch/blackfin/mach-common/entry.S
arch/blackfin/mach-common/interrupt.S
arch/blackfin/mach-common/ints-priority.c

diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 76f53d8b9a0d2e25650d34bc48c9b46cef536721..343b56361ec98db86252f75a3e0f01f07bbea7c1 100644
@@ -35,9 +35,9 @@
 #include <asm/atomic.h>
 #include <asm/traps.h>
 
-#define IPIPE_ARCH_STRING     "1.8-00"
+#define IPIPE_ARCH_STRING     "1.9-00"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    8
+#define IPIPE_MINOR_NUMBER    9
 #define IPIPE_PATCH_NUMBER    0
 
 #ifdef CONFIG_SMP
@@ -83,9 +83,9 @@ struct ipipe_sysinfo {
                                "%2 = CYCLES2\n"                \
                                "CC = %2 == %0\n"               \
                                "if ! CC jump 1b\n"             \
-                               : "=r" (((unsigned long *)&t)[1]),      \
-                                 "=r" (((unsigned long *)&t)[0]),      \
-                                 "=r" (__cy2)                          \
+                               : "=d,a" (((unsigned long *)&t)[1]),    \
+                                 "=d,a" (((unsigned long *)&t)[0]),    \
+                                 "=d,a" (__cy2)                                \
                                : /*no input*/ : "CC");                 \
        t;                                                              \
        })
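
The hunk above only replaces the catch-all "=r" output constraints with explicit "=d,a" alternatives (data or address registers); the algorithm is unchanged. For readers who do not speak Blackfin asm, the loop implements the usual split-counter read: sample the high word, then the low word, then re-read the high word and retry if it changed in between. Below is a self-contained C model of that retry discipline; the read_cycles_* stubs are invented for illustration and merely stand in for the CYCLES2/CYCLES reads.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake split counter; the two reads stand in for CYCLES2 (high word)
     * and CYCLES (low word). These stubs are for illustration only. */
    static uint64_t fake_cycles;

    static uint32_t read_cycles_hi(void) { return (uint32_t)(fake_cycles >> 32); }
    static uint32_t read_cycles_lo(void) { return (uint32_t)(fake_cycles += 7); }

    /* Same retry discipline as the inline asm: high, low, high again,
     * and retry if the high word changed under us. */
    static uint64_t read_cycles64(void)
    {
        uint32_t hi, lo, hi2;

        do {
            hi  = read_cycles_hi();
            lo  = read_cycles_lo();
            hi2 = read_cycles_hi();
        } while (hi2 != hi);

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)read_cycles64());
        return 0;
    }
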
@@ -118,35 +118,40 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 
 #define __ipipe_disable_irq(irq)       (irq_desc[irq].chip->mask(irq))
 
-#define __ipipe_lock_root()                                    \
-       set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline int __ipipe_check_tickdev(const char *devname)
+{
+       return 1;
+}
 
-#define __ipipe_unlock_root()                                  \
-       clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline void __ipipe_lock_root(void)
+{
+       set_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+static inline void __ipipe_unlock_root(void)
+{
+       clear_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
 
 void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline(syncmask)                                        \
-       do {                                                            \
-               struct ipipe_domain *ipd = ipipe_current_domain;        \
-               if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
-                       __ipipe_sync_stage(syncmask);                   \
-       } while (0)
+#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
+void ___ipipe_sync_pipeline(unsigned long syncmask);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
 int __ipipe_get_irq_priority(unsigned irq);
 
-int __ipipe_get_irqthread_priority(unsigned irq);
-
 void __ipipe_stall_root_raw(void);
 
 void __ipipe_unstall_root_raw(void);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
+asmlinkage void __ipipe_call_irqtail(unsigned long addr);
+
 DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
 extern unsigned long __ipipe_core_clock;
@@ -162,42 +167,25 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 
 #define __ipipe_run_irqtail()  /* Must be a macro */                   \
        do {                                                            \
-               asmlinkage void __ipipe_call_irqtail(void);             \
                unsigned long __pending;                                \
-               CSYNC();                                        \
+               CSYNC();                                                \
                __pending = bfin_read_IPEND();                          \
                if (__pending & 0x8000) {                               \
                        __pending &= ~0x8010;                           \
                        if (__pending && (__pending & (__pending - 1)) == 0) \
-                               __ipipe_call_irqtail();                 \
+                               __ipipe_call_irqtail(__ipipe_irq_tail_hook); \
                }                                                       \
        } while (0)
 
 #define __ipipe_run_isr(ipd, irq)                                      \
        do {                                                            \
                if (ipd == ipipe_root_domain) {                         \
-                       /*                                              \
-                        * Note: the I-pipe implements a threaded interrupt model on \
-                        * this arch for Linux external IRQs. The interrupt handler we \
-                        * call here only wakes up the associated IRQ thread. \
-                        */                                             \
-                       if (ipipe_virtual_irq_p(irq)) {                 \
-                               /* No irqtail here; virtual interrupts have no effect \
-                                  on IPEND so there is no need for processing \
-                                  deferral. */                         \
-                               local_irq_enable_nohead(ipd);           \
+                       local_irq_enable_hw();                          \
+                       if (ipipe_virtual_irq_p(irq))                   \
                                ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-                               local_irq_disable_nohead(ipd);          \
-                       } else                                          \
-                               /*                                      \
-                                * No need to run the irqtail here either; \
-                                * we can't be preempted by hw IRQs, so \
-                                * non-Linux IRQs cannot stack over the short \
-                                * thread wakeup code. Which in turn means \
-                                * that no irqtail condition could be pending \
-                                * for domains above Linux in the pipeline. \
-                                */                                     \
+                       else                                            \
                                ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+                       local_irq_disable_hw();                         \
                } else {                                                \
                        __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
                        local_irq_enable_nohead(ipd);                   \
@@ -217,42 +205,24 @@ void ipipe_init_irq_threads(void);
 
 int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
-#define IS_SYSIRQ(irq)         ((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
-#define IS_GPIOIRQ(irq)                ((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
-
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#define IRQ_SYSTMR             IRQ_CORETMR
+#define IRQ_PRIOTMR            IRQ_CORETMR
+#else
 #define IRQ_SYSTMR             IRQ_TIMER0
 #define IRQ_PRIOTMR            CONFIG_IRQ_TIMER0
+#endif
 
-#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
-#define PRIO_GPIODEMUX(irq)    CONFIG_PFA
-#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
-#define PRIO_GPIODEMUX(irq)    CONFIG_IRQ_PROG_INTA
-#elif defined(CONFIG_BF52x)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
-                                (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
-                                (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
-                                -1)
-#elif defined(CONFIG_BF561)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
-                                (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
-                                (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
-                                -1)
+#ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)  bfin_write_TMRS8_DISABLE(val)
 #define bfin_write_TIMER_ENABLE(val)   bfin_write_TMRS8_ENABLE(val)
 #define bfin_write_TIMER_STATUS(val)   bfin_write_TMRS8_STATUS(val)
 #define bfin_read_TIMER_STATUS()       bfin_read_TMRS8_STATUS()
 #elif defined(CONFIG_BF54x)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
-                                (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
-                                (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
-                                (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
-                                -1)
 #define bfin_write_TIMER_DISABLE(val)  bfin_write_TIMER_DISABLE0(val)
 #define bfin_write_TIMER_ENABLE(val)   bfin_write_TIMER_ENABLE0(val)
 #define bfin_write_TIMER_STATUS(val)   bfin_write_TIMER_STATUS0(val)
 #define bfin_read_TIMER_STATUS(val)    bfin_read_TIMER_STATUS0(val)
-#else
-# error "no PRIO_GPIODEMUX() for this part"
 #endif
 
 #define __ipipe_root_tick_p(regs)      ((regs->ipend & 0x10) != 0)
@@ -275,4 +245,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #endif /* !CONFIG_IPIPE */
 
+#define ipipe_update_tick_evtdev(evtdev)       do { } while (0)
+
 #endif /* !__ASM_BLACKFIN_IPIPE_H */
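
Two related changes in this header are worth spelling out: the root-stage "lock" is no longer a global domain flag (IPIPE_ROOTLOCK_FLAG in ipipe_root_domain->flags) but a per-CPU status bit (IPIPE_SYNCDEFER_FLAG), and __ipipe_sync_pipeline() becomes the out-of-line ___ipipe_sync_pipeline(), which performs the deferral check itself (see kernel/ipipe.c further down). The self-contained model below only illustrates the intended semantics; root_status, pending_mask and the helper names are stand-ins, not kernel API.

    #include <stdio.h>

    #define IPIPE_SYNCDEFER_FLAG  15     /* bit number as defined in ipipe_base.h */

    static unsigned long root_status;    /* stands in for ipipe_root_cpudom_var(status) */
    static unsigned long pending_mask;   /* stands in for the pending-IRQ log */

    static void lock_root(void)   { root_status |=  (1UL << IPIPE_SYNCDEFER_FLAG); }
    static void unlock_root(void) { root_status &= ~(1UL << IPIPE_SYNCDEFER_FLAG); }

    /* Mirrors ___ipipe_sync_pipeline(): while the root stage is "locked",
     * synchronization is silently deferred instead of playing the log. */
    static void sync_pipeline(void)
    {
        if (root_status & (1UL << IPIPE_SYNCDEFER_FLAG))
            return;
        printf("replaying pending IRQ mask 0x%lx\n", pending_mask);
        pending_mask = 0;
    }

    int main(void)
    {
        pending_mask = 0x5;
        lock_root();
        sync_pipeline();     /* deferred: nothing is replayed */
        unlock_root();
        sync_pipeline();     /* now the pending log is played */
        return 0;
    }
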
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index cb1025aeabcfc1c6ff89f74bf1ae2fa3cc37ba63..3e8acbd1a3bee6843e348a942e7cc476bfd51918 100644
@@ -1,5 +1,5 @@
 /*   -*- linux-c -*-
- *   include/asm-blackfin/_baseipipe.h
+ *   include/asm-blackfin/ipipe_base.h
  *
  *   Copyright (C) 2007 Philippe Gerum.
  *
@@ -27,8 +27,9 @@
 #define IPIPE_NR_XIRQS         NR_IRQS
 #define IPIPE_IRQ_ISHIFT       5       /* 2^5 for 32bits arch. */
 
-/* Blackfin-specific, global domain flags */
-#define IPIPE_ROOTLOCK_FLAG    1       /* Lock pipeline for root */
+/* Blackfin-specific, per-cpu pipeline status */
+#define IPIPE_SYNCDEFER_FLAG   15
+#define IPIPE_SYNCDEFER_MASK   (1L << IPIPE_SYNCDEFER_MASK)
 
  /* Blackfin traps -- i.e. exception vector numbers */
 #define IPIPE_NR_FAULTS                52 /* We leave a gap after VEC_ILL_RES. */
 
 #ifndef __ASSEMBLY__
 
-#include <linux/bitops.h>
-
-extern int test_bit(int nr, const void *addr);
-
-
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
 
 static inline void __ipipe_stall_root(void)
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 3d977909ce7da0a41179af244608d85eb0ab2c1b..7645e85a5f6f70319c2a20158c0f5d39fbe84ce7 100644
@@ -61,20 +61,38 @@ void __ipipe_restore_root(unsigned long flags);
 #define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
 #define local_test_iflag_hw(x)         irqs_enabled_from_flags_hw(x)
 
-#define local_save_flags(x)                                            \
-       do {                                                            \
-               (x) = __ipipe_test_root() ? \
+#define local_save_flags(x)                                     \
+       do {                                                     \
+               (x) = __ipipe_test_root() ?                      \
                        __all_masked_irq_flags : bfin_irq_flags; \
+               barrier();                                       \
        } while (0)
 
-#define local_irq_save(x)                              \
-       do {                                            \
-               (x) = __ipipe_test_and_stall_root();    \
+#define local_irq_save(x)                                       \
+       do {                                                     \
+               (x) = __ipipe_test_and_stall_root() ?            \
+                       __all_masked_irq_flags : bfin_irq_flags; \
+               barrier();                                       \
+       } while (0)
+
+static inline void local_irq_restore(unsigned long x)
+{
+       barrier();
+       __ipipe_restore_root(x == __all_masked_irq_flags);
+}
+
+#define local_irq_disable()                    \
+       do {                                    \
+               __ipipe_stall_root();           \
+               barrier();                      \
        } while (0)
 
-#define local_irq_restore(x)   __ipipe_restore_root(x)
-#define local_irq_disable()    __ipipe_stall_root()
-#define local_irq_enable()     __ipipe_unstall_root()
+static inline void local_irq_enable(void)
+{
+       barrier();
+       __ipipe_unstall_root();
+}
+
 #define irqs_disabled()                __ipipe_test_root()
 
 #define local_save_flags_hw(x) \
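
With the pipeline enabled, the root domain's local_irq_save()/local_irq_restore() pair manipulates a virtual stall bit rather than the hardware mask, and the new definitions return and accept real flag words (__all_masked_irq_flags vs. bfin_irq_flags) with compiler barriers around the state change. The user-space model below captures that contract; the flag values and helper names are invented for the example and only mirror the structure of the macros above.

    #include <stdio.h>
    #include <stdbool.h>

    #define ALL_MASKED_FLAGS  0x0000UL   /* stands in for __all_masked_irq_flags */
    #define IRQS_ON_FLAGS     0x801fUL   /* stands in for bfin_irq_flags (value invented) */

    static bool root_stalled;            /* the virtual "interrupts off" bit */

    static unsigned long soft_irq_save(void)
    {
        bool was_stalled = root_stalled; /* __ipipe_test_and_stall_root() */
        root_stalled = true;
        return was_stalled ? ALL_MASKED_FLAGS : IRQS_ON_FLAGS;
    }

    static void soft_irq_restore(unsigned long flags)
    {
        /* __ipipe_restore_root(flags == __all_masked_irq_flags) */
        root_stalled = (flags == ALL_MASKED_FLAGS);
    }

    int main(void)
    {
        unsigned long outer = soft_irq_save();   /* outermost disable */
        unsigned long inner = soft_irq_save();   /* nested disable */

        soft_irq_restore(inner);                 /* still disabled */
        printf("after inner restore: stalled=%d\n", root_stalled);
        soft_irq_restore(outer);                 /* enabled again */
        printf("after outer restore: stalled=%d\n", root_stalled);
        return 0;
    }
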
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index e721ce55956c6a96782e3093c76aa88b11914eb6..2920087516f2a08175b54ed3086844434db937c9 100644
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE              4
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
 #define TIF_FREEZE              6       /* is freezing for suspend */
+#define TIF_IRQ_SYNC            7       /* sync pipeline stage */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE             (1<<TIF_FREEZE)
+#define _TIF_IRQ_SYNC           (1<<TIF_IRQ_SYNC)
 
 #define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
 
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 339be5a3ae6a64f3f6bcc7bf9f9514a86b167ed8..a5de8d45424cda8b29f21d338e6224c94892cb1b 100644
 #include <asm/atomic.h>
 #include <asm/io.h>
 
-static int create_irq_threads;
-
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
-static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
-
-static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
-
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
  */
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 {
+       struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
        int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         * interrupt.
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-
        this_domain = ipipe_current_domain;
 
        if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
                next_domain = list_entry(head, struct ipipe_domain, p_link);
                if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-                               next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
-                       if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-                               s = __test_and_set_bit(IPIPE_STALL_FLAG,
-                                                      &ipipe_root_cpudom_var(status));
+                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+                       if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+                               s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
                        __ipipe_dispatch_wired(next_domain, irq);
-                               goto finalize;
-                       return;
+                       goto out;
                }
        }
 
        /* Ack the interrupt. */
 
        pos = head;
-
        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
-               /*
-                * For each domain handling the incoming IRQ, mark it
-                * as pending in its log.
-                */
                if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
-                       /*
-                        * Domains that handle this IRQ are polled for
-                        * acknowledging it by decreasing priority
-                        * order. The interrupt must be made pending
-                        * _first_ in the domain's status flags before
-                        * the PIC is unlocked.
-                        */
                        __ipipe_set_irq_pending(next_domain, irq);
-
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-                               next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
                                m_ack = 1;
                        }
                }
-
-               /*
-                * If the domain does not want the IRQ to be passed
-                * down the interrupt pipe, exit the loop now.
-                */
                if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
                        break;
-
                pos = next_domain->p_link.next;
        }
 
@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         * immediately to the current domain if the interrupt has been
         * marked as 'sticky'. This search does not go beyond the
         * current domain in the pipeline. We also enforce the
-        * additional root stage lock (blackfin-specific). */
+        * additional root stage lock (blackfin-specific).
+        */
+       if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+               s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 
-       if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-               s = __test_and_set_bit(IPIPE_STALL_FLAG,
-                                      &ipipe_root_cpudom_var(status));
-finalize:
+       /*
+        * If the interrupt preempted the head domain, then do not
+        * even try to walk the pipeline, unless an interrupt is
+        * pending for it.
+        */
+       if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+           ipipe_head_cpudom_var(irqpend_himask) == 0)
+               goto out;
 
        __ipipe_walk_pipeline(head);
-
+out:
        if (!s)
-               __clear_bit(IPIPE_STALL_FLAG,
-                           &ipipe_root_cpudom_var(status));
+               __clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
 int __ipipe_check_root(void)
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)
 
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-       struct irq_desc *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;
 
        desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-       struct irq_desc *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;
 
        if (ipd != &ipipe_root &&
@@ -236,15 +215,18 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 {
        unsigned long flags;
 
-       /* We need to run the IRQ tail hook whenever we don't
+       /*
+        * We need to run the IRQ tail hook whenever we don't
         * propagate a syscall to higher domains, because we know that
         * important operations might be pending there (e.g. Xenomai
-        * deferred rescheduling). */
+        * deferred rescheduling).
+        */
 
-       if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+       if (regs->orig_p0 < NR_syscalls) {
                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
                hook();
-               return 0;
+               if ((current->flags & PF_EVNOTIFY) == 0)
+                       return 0;
        }
 
        /*
@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
 {
        unsigned long flags;
 
+#ifdef CONFIG_IPIPE_DEBUG
        if (irq >= IPIPE_NR_IRQS ||
            (ipipe_virtual_irq_p(irq)
             && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
                return -EINVAL;
+#endif
 
        local_irq_save_hw(flags);
-
        __ipipe_handle_irq(irq, NULL);
-
        local_irq_restore_hw(flags);
 
        return 1;
 }
 
-/* Move Linux IRQ to threads. */
-
-static int do_irqd(void *__desc)
+asmlinkage void __ipipe_sync_root(void)
 {
-       struct irq_desc *desc = __desc;
-       unsigned irq = desc - irq_desc;
-       int thrprio = desc->thr_prio;
-       int thrmask = 1 << thrprio;
-       int cpu = smp_processor_id();
-       cpumask_t cpumask;
-
-       sigfillset(&current->blocked);
-       current->flags |= PF_NOFREEZE;
-       cpumask = cpumask_of_cpu(cpu);
-       set_cpus_allowed(current, cpumask);
-       ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
-
-       while (!kthread_should_stop()) {
-               local_irq_disable();
-               if (!(desc->status & IRQ_SCHEDULED)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-resched:
-                       local_irq_enable();
-                       schedule();
-                       local_irq_disable();
-               }
-               __set_current_state(TASK_RUNNING);
-               /*
-                * If higher priority interrupt servers are ready to
-                * run, reschedule immediately. We need this for the
-                * GPIO demux IRQ handler to unmask the interrupt line
-                * _last_, after all GPIO IRQs have run.
-                */
-               if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
-                       goto resched;
-               if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
-                       per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
-               desc->status &= ~IRQ_SCHEDULED;
-               desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
-               local_irq_enable();
-       }
-       __set_current_state(TASK_RUNNING);
-       return 0;
-}
+       unsigned long flags;
 
-static void kick_irqd(unsigned irq, void *cookie)
-{
-       struct irq_desc *desc = irq_desc + irq;
-       int thrprio = desc->thr_prio;
-       int thrmask = 1 << thrprio;
-       int cpu = smp_processor_id();
-
-       if (!(desc->status & IRQ_SCHEDULED)) {
-               desc->status |= IRQ_SCHEDULED;
-               per_cpu(pending_irqthread_mask, cpu) |= thrmask;
-               ++per_cpu(pending_irq_count[thrprio], cpu);
-               wake_up_process(desc->thread);
-       }
-}
+       BUG_ON(irqs_disabled());
 
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
-{
-       if (desc->thread || !create_irq_threads)
-               return 0;
-
-       desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
-       if (desc->thread == NULL) {
-               printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
-               return -ENOMEM;
-       }
+       local_irq_save_hw(flags);
 
-       wake_up_process(desc->thread);
+       clear_thread_flag(TIF_IRQ_SYNC);
 
-       desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
-       ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+       if (ipipe_root_cpudom_var(irqpend_himask) != 0)
+               __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
 
-       return 0;
+       local_irq_restore_hw(flags);
 }
 
-void __init ipipe_init_irq_threads(void)
+void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-       unsigned irq;
-       struct irq_desc *desc;
-
-       create_irq_threads = 1;
+       struct ipipe_domain *ipd = ipipe_current_domain;
 
-       for (irq = 0; irq < NR_IRQS; irq++) {
-               desc = irq_desc + irq;
-               if (desc->action != NULL ||
-                       (desc->status & IRQ_NOREQUEST) != 0)
-                       ipipe_start_irq_thread(irq, desc);
+       if (ipd == ipipe_root_domain) {
+               if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+                       return;
        }
+
+       __ipipe_sync_stage(syncmask);
 }
 
 EXPORT_SYMBOL(show_stack);
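
A pattern that recurs throughout the reworked __ipipe_handle_irq() is "remember and conditionally restore": s = __test_and_set_bit(IPIPE_STALL_FLAG, ...) records whether the stall bit was already set, and only the path that actually set it clears it again on exit (if (!s) __clear_bit(...)), so nested entries leave the outer state alone. Below is a minimal stand-alone model of that idiom, with a plain word in place of the per-CPU status.

    #include <stdio.h>

    #define STALL_BIT 0   /* bit number chosen for the model only */

    static unsigned long status;

    /* Like __test_and_set_bit(): set the bit, return its previous value. */
    static int test_and_set(unsigned long *word, int bit)
    {
        int old = (int)((*word >> bit) & 1UL);
        *word |= 1UL << bit;
        return old;
    }

    static void handle_one_irq(int depth)
    {
        int s = test_and_set(&status, STALL_BIT);  /* stall, remember prior state */

        printf("depth %d: stall was already set: %d\n", depth, s);
        if (depth < 2)
            handle_one_irq(depth + 1);             /* nested entry sees s == 1 */

        if (!s)                                    /* only the outermost setter clears */
            status &= ~(1UL << STALL_BIT);
    }

    int main(void)
    {
        handle_one_irq(0);
        printf("final status = 0x%lx\n", status);
        return 0;
    }
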
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 75724eee6494c65c87545e14682f8cfe213877dd..7fd12656484666ee4c29784a1d2d1e0e4e5fee04 100644
@@ -144,11 +144,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 #endif
        generic_handle_irq(irq);
 
-#ifndef CONFIG_IPIPE   /* Useless and bugous over the I-pipe: IRQs are threaded. */
-       /* If we're the only interrupt running (ignoring IRQ15 which is for
-          syscalls), lower our priority to IRQ14 so that softirqs run at
-          that level.  If there's another, lower-level interrupt, irq_exit
-          will defer softirqs to that.  */
+#ifndef CONFIG_IPIPE
+       /*
+        * If we're the only interrupt running (ignoring IRQ15 which
+        * is for syscalls), lower our priority to IRQ14 so that
+        * softirqs run at that level.  If there's another,
+        * lower-level interrupt, irq_exit will defer softirqs to
+        * that. If the interrupt pipeline is enabled, we are already
+        * running at IRQ14 priority, so we don't need this code.
+        */
        CSYNC();
        pending = bfin_read_IPEND() & ~0x8000;
        other_ints = pending & (pending - 1);
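
The retained code path relies on the classic x & (x - 1) trick: the expression clears the lowest set bit, so it is zero exactly when at most one bit is set. The same test, written as __pending && (__pending & (__pending - 1)) == 0, is used by __ipipe_run_irqtail() earlier in this commit to detect "exactly one IPEND bit pending". A short demonstration:

    #include <stdio.h>

    /* x & (x - 1) clears the lowest set bit: zero iff at most one bit is set. */
    static int exactly_one_bit(unsigned long x)
    {
        return x && (x & (x - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               exactly_one_bit(0x0),    /* 0: no bits set */
               exactly_one_bit(0x40),   /* 1: exactly one bit */
               exactly_one_bit(0x44));  /* 0: two bits set */
        return 0;
    }
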
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 172b4c588467ff2a51fbcc5993a4507d39e83d91..1bbacfbd4c5d7c6198496fa939a7920dc36319d4 100644
@@ -134,7 +134,10 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 
        write_seqlock(&xtime_lock);
 #if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
-/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */
+       /*
+        * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
+        * enabled.
+        */
        if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
 #endif
                do_timer(1);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 88de053bbe8ed58dcdb6e7b4bad80657a3e011db..21e65a339a22aacb857d1a76cdbe93863743298b 100644
@@ -600,6 +600,19 @@ ENTRY(_system_call)
        p2 = [p2];
 
        [p2+(TASK_THREAD+THREAD_KSP)] = sp;
+#ifdef CONFIG_IPIPE
+       r0 = sp;
+       SP += -12;
+       call ___ipipe_syscall_root;
+       SP += 12;
+       cc = r0 == 1;
+       if cc jump .Lsyscall_really_exit;
+       cc = r0 == -1;
+       if cc jump .Lresume_userspace;
+       r3 = [sp + PT_R3];
+       r4 = [sp + PT_R4];
+       p0 = [sp + PT_ORIG_P0];
+#endif /* CONFIG_IPIPE */
 
        /* Check the System Call */
        r7 = __NR_syscall;
@@ -654,6 +667,17 @@ ENTRY(_system_call)
        r7 =  r7 & r4;
 
 .Lsyscall_resched:
+#ifdef CONFIG_IPIPE
+       cc = BITTST(r7, TIF_IRQ_SYNC);
+       if !cc jump .Lsyscall_no_irqsync;
+       [--sp] = reti;
+       r0 = [sp++];
+       SP += -12;
+       call ___ipipe_sync_root;
+       SP += 12;
+       jump .Lresume_userspace_1;
+.Lsyscall_no_irqsync:
+#endif
        cc = BITTST(r7, TIF_NEED_RESCHED);
        if !cc jump .Lsyscall_sigpending;
 
@@ -685,6 +709,10 @@ ENTRY(_system_call)
 .Lsyscall_really_exit:
        r5 = [sp + PT_RESERVED];
        rets = r5;
+#ifdef CONFIG_IPIPE
+       [--sp] = reti;
+       r5 = [sp++];
+#endif /* CONFIG_IPIPE */
        rts;
 ENDPROC(_system_call)
 
@@ -771,6 +799,15 @@ _new_old_task:
 ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+       [--sp] = rets;
+       SP += -12;
+       call ___ipipe_check_root
+       SP += 12
+       rets = [sp++];
+       cc = r0 == 0;
+       if cc jump 4f;                /* not on behalf of Linux, get out */
+#endif /* CONFIG_IPIPE */
        p2.l = lo(IPEND);
        p2.h = hi(IPEND);
 
@@ -827,6 +864,28 @@ ENTRY(_ret_from_exception)
        rts;
 ENDPROC(_ret_from_exception)
 
+#ifdef CONFIG_IPIPE
+
+_sync_root_irqs:
+       [--sp] = reti;          /* Reenable interrupts */
+       r0 = [sp++];
+       jump.l ___ipipe_sync_root
+
+_resume_kernel_from_int:
+       r0.l = _sync_root_irqs
+       r0.h = _sync_root_irqs
+       [--sp] = rets;
+       [--sp] = ( r7:4, p5:3 );
+       SP += -12;
+       call ___ipipe_call_irqtail
+       SP += 12;
+       ( r7:4, p5:3 ) = [sp++];
+       rets = [sp++];
+       rts
+#else
+#define _resume_kernel_from_int         2f
+#endif
+
 ENTRY(_return_from_int)
        /* If someone else already raised IRQ 15, do nothing.  */
        csync;
@@ -848,7 +907,7 @@ ENTRY(_return_from_int)
        r1 = r0 - r1;
        r2 = r0 & r1;
        cc = r2 == 0;
-       if !cc jump 2f;
+       if !cc jump _resume_kernel_from_int;
 
        /* Lower the interrupt level to 15.  */
        p0.l = lo(EVT15);
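
The branches added to _system_call dispatch on the value returned by __ipipe_syscall_root(): as read off the assembly above, 1 jumps to .Lsyscall_really_exit, -1 jumps to .Lresume_userspace, and anything else falls through to the normal syscall dispatch; .Lsyscall_resched additionally drains deferred root interrupts via ___ipipe_sync_root when TIF_IRQ_SYNC is set. The C sketch below only restates that control flow; the stub return values and the wrapper are hypothetical.

    #include <stdio.h>

    /* Stub standing in for __ipipe_syscall_root(); the return values and
     * their branch targets are taken from the assembly, the stub policy
     * (syscall 999 is owned elsewhere) is invented. */
    static int syscall_root_stub(long sysno)
    {
        return sysno == 999 ? 1 : 0;
    }

    static void system_call_model(long sysno)
    {
        switch (syscall_root_stub(sysno)) {
        case 1:                /* jump .Lsyscall_really_exit */
            printf("syscall %ld: exit without running the Linux handler\n", sysno);
            return;
        case -1:               /* jump .Lresume_userspace */
            printf("syscall %ld: take the userspace resume path\n", sysno);
            return;
        default:               /* fall through to the normal dispatch */
            printf("syscall %ld: normal Linux syscall dispatch\n", sysno);
            return;
        }
    }

    int main(void)
    {
        system_call_model(1);
        system_call_model(999);
        return 0;
    }
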
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 43c4eb9acb65e11b0ce1c0fdc480cb1849005a4d..0069c2dd462520db2a1abb7ad4c54108efcc5203 100644
@@ -235,6 +235,7 @@ ENDPROC(_evt_system_call)
 
 #ifdef CONFIG_IPIPE
 ENTRY(___ipipe_call_irqtail)
+       p0 = r0;
        r0.l = 1f;
        r0.h = 1f;
        reti = r0;
@@ -242,9 +243,6 @@ ENTRY(___ipipe_call_irqtail)
 1:
        [--sp] = rets;
        [--sp] = ( r7:4, p5:3 );
-       p0.l = ___ipipe_irq_tail_hook;
-       p0.h = ___ipipe_irq_tail_hook;
-       p0 = [p0];
        sp += -12;
        call (p0);
        sp += 12;
@@ -259,7 +257,7 @@ ENTRY(___ipipe_call_irqtail)
        p0.h = hi(EVT14);
        [p0] = r0;
        csync;
-       r0 = 0x401f;
+       r0 = 0x401f (z);
        sti r0;
        raise 14;
        [--sp] = reti;          /* IRQs on. */
@@ -277,11 +275,7 @@ ENTRY(___ipipe_call_irqtail)
        p0.h = _bfin_irq_flags;
        r0 = [p0];
        sti r0;
-#if 0 /* FIXME: this actually raises scheduling latencies */
-       /* Reenable interrupts */
-       [--sp] = reti;
-       r0 = [sp++];
-#endif
        rts;
 ENDPROC(___ipipe_call_irqtail)
+
 #endif /* CONFIG_IPIPE */
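
___ipipe_call_irqtail now receives the tail-hook address in R0 (p0 = r0) instead of loading ___ipipe_irq_tail_hook itself, matching the new C prototype asmlinkage void __ipipe_call_irqtail(unsigned long addr) and the updated call in __ipipe_run_irqtail(). The toy example below shows the same convention of passing a hook by address and calling through it; the names are illustrative only.

    #include <stdio.h>

    static void demo_tail_hook(void)
    {
        puts("irq tail hook runs");
    }

    /* Same calling convention as the reworked ___ipipe_call_irqtail:
     * the hook arrives as a plain address and is called indirectly. */
    static void call_irqtail(unsigned long addr)
    {
        void (*hook)(void) = (void (*)(void))addr;   /* "call (p0)" in the asm */
        hook();
    }

    int main(void)
    {
        call_irqtail((unsigned long)demo_tail_hook);
        return 0;
    }
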
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 202494568c6c0303b76cafb279b1b06f97fc9963..a7d7b2dd4059a7f5685cec710bb9409174c31589 100644
@@ -161,11 +161,15 @@ static void bfin_core_unmask_irq(unsigned int irq)
 
 static void bfin_internal_mask_irq(unsigned int irq)
 {
+       unsigned long flags;
+
 #ifdef CONFIG_BF53x
+       local_irq_save_hw(flags);
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
                             ~(1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
+       local_irq_save_hw(flags);
        mask_bank = SIC_SYSIRQ(irq) / 32;
        mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
@@ -175,15 +179,20 @@ static void bfin_internal_mask_irq(unsigned int irq)
                             ~(1 << mask_bit));
 #endif
 #endif
+       local_irq_restore_hw(flags);
 }
 
 static void bfin_internal_unmask_irq(unsigned int irq)
 {
+       unsigned long flags;
+
 #ifdef CONFIG_BF53x
+       local_irq_save_hw(flags);
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
                             (1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
+       local_irq_save_hw(flags);
        mask_bank = SIC_SYSIRQ(irq) / 32;
        mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
@@ -193,6 +202,7 @@ static void bfin_internal_unmask_irq(unsigned int irq)
                             (1 << mask_bit));
 #endif
 #endif
+       local_irq_restore_hw(flags);
 }
 
 #ifdef CONFIG_PM
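
Both bfin_internal_mask_irq() and bfin_internal_unmask_irq() now bracket their read-modify-write of SIC_IMASK with local_irq_save_hw()/local_irq_restore_hw(), i.e. real hardware masking: with the I-pipe, plain local_irq_* only toggles the virtual stall bit, so an interrupt taken between the read and the write-back could otherwise undo a concurrent update of the same register. The deterministic little model below shows how an unprotected read-modify-write loses an update; the register and the interrupt are simulated.

    #include <stdio.h>

    static unsigned int sic_imask;          /* stands in for the SIC_IMASK register */

    /* Simulated interrupt handler that clears bit 3 behind our back. */
    static void simulated_irq(void)
    {
        sic_imask &= ~(1u << 3);
    }

    int main(void)
    {
        sic_imask = 0xff;

        /* Unprotected read-modify-write of the shared register: */
        unsigned int tmp = sic_imask;       /* read                         */
        simulated_irq();                    /* interrupt lands mid-update   */
        sic_imask = tmp | (1u << 5);        /* write-back restores bit 3    */

        printf("SIC_IMASK = 0x%02x (the interrupt's update to bit 3 is lost)\n",
               sic_imask);
        return 0;
    }
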
@@ -390,7 +400,7 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-       _set_irq_handler(irq, handle_edge_irq);
+       _set_irq_handler(irq, handle_level_irq);
 #else
        struct irq_desc *desc = irq_desc + irq;
        /* May not call generic set_irq_handler() due to spinlock
@@ -1055,13 +1065,18 @@ int __init init_arch_irq(void)
 #endif
                default:
 #ifdef CONFIG_IPIPE
-       /*
-        * We want internal interrupt sources to be masked, because
-        * ISRs may trigger interrupts recursively (e.g. DMA), but
-        * interrupts are _not_ masked at CPU level. So let's handle
-        * them as level interrupts.
-        */
-                       set_irq_handler(irq, handle_level_irq);
+                       /*
+                        * We want internal interrupt sources to be
+                        * masked, because ISRs may trigger interrupts
+                        * recursively (e.g. DMA), but interrupts are
+                        * _not_ masked at CPU level. So let's handle
+                        * most of them as level interrupts, except
+                        * the timer interrupt which is special.
+                        */
+                       if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
+                               set_irq_handler(irq, handle_simple_irq);
+                       else
+                               set_irq_handler(irq, handle_level_irq);
 #else /* !CONFIG_IPIPE */
                        set_irq_handler(irq, handle_simple_irq);
 #endif /* !CONFIG_IPIPE */
@@ -1123,9 +1138,8 @@ int __init init_arch_irq(void)
 
 #ifdef CONFIG_IPIPE
        for (irq = 0; irq < NR_IRQS; irq++) {
-               struct irq_desc *desc = irq_desc + irq;
+               struct irq_desc *desc = irq_to_desc(irq);
                desc->ic_prio = __ipipe_get_irq_priority(irq);
-               desc->thr_prio = __ipipe_get_irqthread_priority(irq);
        }
 #endif /* CONFIG_IPIPE */
 
@@ -1208,76 +1222,21 @@ int __ipipe_get_irq_priority(unsigned irq)
        return IVG15;
 }
 
-int __ipipe_get_irqthread_priority(unsigned irq)
-{
-       int ient, prio;
-       int demux_irq;
-
-       /* The returned priority value is rescaled to [0..IVG13+1]
-        * with 0 being the lowest effective priority level. */
-
-       if (irq <= IRQ_CORETMR)
-               return IVG13 - irq + 1;
-
-       /* GPIO IRQs are given the priority of the demux
-        * interrupt. */
-       if (IS_GPIOIRQ(irq)) {
-#if defined(CONFIG_BF54x)
-               u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
-               demux_irq = (bank == 0 ? IRQ_PINT0 :
-                               bank == 1 ? IRQ_PINT1 :
-                               bank == 2 ? IRQ_PINT2 :
-                               IRQ_PINT3);
-#elif defined(CONFIG_BF561)
-               demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
-                               irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
-                               IRQ_PROG0_INTA);
-#elif defined(CONFIG_BF52x)
-               demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
-                               irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
-                               IRQ_PORTF_INTA);
-#else
-               demux_irq = irq;
-#endif
-               return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
-       }
-
-       /* The GPIO demux interrupt is given a lower priority
-        * than the GPIO IRQs, so that its threaded handler
-        * unmasks the interrupt line after the decoded IRQs
-        * have been processed. */
-       prio = PRIO_GPIODEMUX(irq);
-       /* demux irq? */
-       if (prio != -1)
-               return IVG13 - prio;
-
-       for (ient = 0; ient < NR_PERI_INTS; ient++) {
-               struct ivgx *ivg = ivg_table + ient;
-               if (ivg->irqno == irq) {
-                       for (prio = 0; prio <= IVG13-IVG7; prio++) {
-                               if (ivg7_13[prio].ifirst <= ivg &&
-                                   ivg7_13[prio].istop > ivg)
-                                       return IVG7 - prio;
-                       }
-               }
-       }
-
-       return 0;
-}
-
 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
 #ifdef CONFIG_DO_IRQ_L1
 __attribute__((l1_text))
 #endif
 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 {
+       struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+       struct ipipe_domain *this_domain = ipipe_current_domain;
        struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
        struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-       int irq;
+       int irq, s;
 
        if (likely(vec == EVT_IVTMR_P)) {
                irq = IRQ_CORETMR;
-               goto handle_irq;
+               goto core_tick;
        }
 
        SSYNC();
@@ -1319,24 +1278,39 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
        irq = ivg->irqno;
 
        if (irq == IRQ_SYSTMR) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#else
                bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+#endif
                /* This is basically what we need from the register frame. */
                __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
                __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
-               if (!ipipe_root_domain_p)
-                       __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
-               else
+               if (this_domain != ipipe_root_domain)
                        __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+               else
+                       __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
        }
 
-handle_irq:
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#endif
+       if (this_domain == ipipe_root_domain) {
+               s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+               barrier();
+       }
 
        ipipe_trace_irq_entry(irq);
        __ipipe_handle_irq(irq, regs);
-       ipipe_trace_irq_exit(irq);
+       ipipe_trace_irq_exit(irq);
 
-       if (ipipe_root_domain_p)
-               return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+       if (this_domain == ipipe_root_domain) {
+               set_thread_flag(TIF_IRQ_SYNC);
+               if (!s) {
+                       __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+                       return !test_bit(IPIPE_STALL_FLAG, &p->status);
+               }
+       }
 
        return 0;
 }