source "drivers/dca/Kconfig"
+ source "drivers/regulator/Kconfig"
+
source "drivers/uio/Kconfig"
+if ARCH_OMAP
+source "drivers/cbus/Kconfig"
+source "drivers/dsp/dspgateway/Kconfig"
+endif
+
endmenu
source "fs/Kconfig"
--- /dev/null
+ /*
+ * arch/arm/include/asm/pgtable.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+ #ifndef _ASMARM_PGTABLE_H
+ #define _ASMARM_PGTABLE_H
+
+ #include <asm-generic/4level-fixup.h>
+ #include <asm/proc-fns.h>
+
+ #ifndef CONFIG_MMU
+
+ #include "pgtable-nommu.h"
+
+ #else
+
+ #include <asm/memory.h>
+ #include <asm/arch/vmalloc.h>
+ #include <asm/pgtable-hwdef.h>
+
+ /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+ #ifndef VMALLOC_START
+ #define VMALLOC_OFFSET (8*1024*1024)
+ #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+ #endif
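+
+ /*
+ * Illustrative example (not part of the original header): on a board with
+ * 128MB of RAM mapped at PAGE_OFFSET 0xc0000000, high_memory ends up at
+ * about 0xc8000000, so the default VMALLOC_START above works out to
+ *
+ *	(0xc8000000 + 0x00800000) & ~0x007fffff == 0xc8800000
+ *
+ * i.e. the vmalloc area starts 8MB above the end of lowmem, rounded up to
+ * an 8MB boundary.
+ */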
+
+ /*
+ * Hardware-wise, we have a two level page table structure, where the first
+ * level has 4096 entries, and the second level has 256 entries. Each entry
+ * is one 32-bit word. Most of the bits in the second level entry are used
+ * by hardware, and there aren't any "accessed" and "dirty" bits.
+ *
+ * Linux on the other hand has a three level page table structure, which can
+ * be wrapped to fit a two level page table structure easily - using the PGD
+ * and PTE only. However, Linux also expects one "PTE" table per page, and
+ * at least a "dirty" bit.
+ *
+ * Therefore, we tweak the implementation slightly - we tell Linux that we
+ * have 2048 entries in the first level, each of which is 8 bytes (iow, two
+ * hardware pointers to the second level.) The second level contains two
+ * hardware PTE tables arranged contiguously, followed by Linux versions
+ * which contain the state information Linux needs. We, therefore, end up
+ * with 512 entries in the "PTE" level.
+ *
+ * This leads to the page tables having the following layout:
+ *
+ * pgd pte
+ * | |
+ * +--------+ +0
+ * | |-----> +------------+ +0
+ * +- - - - + +4 | h/w pt 0 |
+ * | |-----> +------------+ +1024
+ * +--------+ +8 | h/w pt 1 |
+ * | | +------------+ +2048
+ * +- - - - + | Linux pt 0 |
+ * | | +------------+ +3072
+ * +--------+ | Linux pt 1 |
+ * | | +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE. This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set. Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE. Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the translation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+ #define PTRS_PER_PTE 512
+ #define PTRS_PER_PMD 1
+ #define PTRS_PER_PGD 2048
+
+ /*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+ #define PMD_SHIFT 21
+ #define PGDIR_SHIFT 21
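+
+ /*
+ * Worked example (illustrative, not from the original header): with
+ * PGDIR_SHIFT == 21 and PTRS_PER_PTE == 512, a virtual address such as
+ * 0x40201000 is resolved as
+ *
+ *	pgd_index = 0x40201000 >> 21           == 0x201  (2MB per pgd entry)
+ *	pte_index = (0x40201000 >> 12) & 0x1ff == 0x001
+ *
+ * so each Linux pgd entry covers two of the hardware's 1MB first-level
+ * descriptors, as described in the layout comment above.
+ */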
+
+ #define LIBRARY_TEXT_START 0x0c000000
+
+ #ifndef __ASSEMBLY__
+ extern void __pte_error(const char *file, int line, unsigned long val);
+ extern void __pmd_error(const char *file, int line, unsigned long val);
+ extern void __pgd_error(const char *file, int line, unsigned long val);
+
+ #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+ #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+ #endif /* !__ASSEMBLY__ */
+
+ #define PMD_SIZE (1UL << PMD_SHIFT)
+ #define PMD_MASK (~(PMD_SIZE-1))
+ #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+ /*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+ #define FIRST_USER_ADDRESS PAGE_SIZE
+
+ #define FIRST_USER_PGD_NR 1
+ #define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+
+ /*
+ * section address mask and size definitions.
+ */
+ #define SECTION_SHIFT 20
+ #define SECTION_SIZE (1UL << SECTION_SHIFT)
+ #define SECTION_MASK (~(SECTION_SIZE-1))
+
+ /*
+ * ARMv6 supersection address mask and size definitions.
+ */
+ #define SUPERSECTION_SHIFT 24
+ #define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
+ #define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
+
+ /*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
+ */
+ #define L_PTE_PRESENT (1 << 0)
+ #define L_PTE_FILE (1 << 1) /* only when !PRESENT */
+ #define L_PTE_YOUNG (1 << 1)
+ #define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
+ #define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
+ #define L_PTE_USER (1 << 4)
+ #define L_PTE_WRITE (1 << 5)
+ #define L_PTE_EXEC (1 << 6)
+ #define L_PTE_DIRTY (1 << 7)
+ #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */
+
+ #ifndef __ASSEMBLY__
+
+ /*
+ * The pgprot_* and protection_map entries will be fixed up at runtime
+ * to include the cacheable and bufferable bits based on memory policy,
+ * as well as any architecture dependent bits like global/ASID and SMP
+ * shared mapping bits.
+ */
+ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+ #define _L_PTE_READ L_PTE_USER | L_PTE_EXEC
+
+ extern pgprot_t pgprot_user;
+ extern pgprot_t pgprot_kernel;
+
+ #define PAGE_NONE pgprot_user
+ #define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
+ #define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
+ L_PTE_WRITE)
+ #define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
+ #define PAGE_KERNEL pgprot_kernel
+
+ #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
+ #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+ #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
+ #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+
+ #endif /* __ASSEMBLY__ */
+
+ /*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version. These get translated into the best that the
+ * architecture can perform. Note that on most ARM hardware:
+ * 1) We cannot do execute protection
+ * 2) If we could do execute protection, then read is implied
+ * 3) write implies read permissions
+ */
+ #define __P000 __PAGE_NONE
+ #define __P001 __PAGE_READONLY
+ #define __P010 __PAGE_COPY
+ #define __P011 __PAGE_COPY
+ #define __P100 __PAGE_READONLY
+ #define __P101 __PAGE_READONLY
+ #define __P110 __PAGE_COPY
+ #define __P111 __PAGE_COPY
+
+ #define __S000 __PAGE_NONE
+ #define __S001 __PAGE_READONLY
+ #define __S010 __PAGE_SHARED
+ #define __S011 __PAGE_SHARED
+ #define __S100 __PAGE_READONLY
+ #define __S101 __PAGE_READONLY
+ #define __S110 __PAGE_SHARED
+ #define __S111 __PAGE_SHARED
+
+ #ifndef __ASSEMBLY__
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+ extern struct page *empty_zero_page;
+ #define ZERO_PAGE(vaddr) (empty_zero_page)
+
+ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+ #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+ #define pte_none(pte) (!pte_val(pte))
+ #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+ #define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+ #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+ #define pte_offset_map(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+ #define pte_offset_map_nested(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+ #define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+
+ #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+ #define set_pte_at(mm,addr,ptep,pteval) do { \
+ set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
+ } while (0)
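+
+ /*
+ * Note (added commentary, not in the original header): the PTE_EXT_NG
+ * ("not global") extension bit is requested only for user addresses
+ * (addr < TASK_SIZE), so user mappings are tagged per-ASID while kernel
+ * mappings remain global across context switches.
+ */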
+
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+ #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+ #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
+ #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+ #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+ #define pte_special(pte) (0)
+
+ /*
+ * The following only works if pte_present() is not true.
+ */
+ #define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
+ #define pte_to_pgoff(x) (pte_val(x) >> 2)
+ #define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
+
+ #define PTE_FILE_MAX_BITS 30
+
+ #define PTE_BIT_FUNC(fn,op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+ PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
+ PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
+ PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
+ PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
+ PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
+ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
+
+ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+ /*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+ #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
+ #define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
+
+ #define pmd_none(pmd) (!pmd_val(pmd))
+ #define pmd_present(pmd) (pmd_val(pmd))
+ #define pmd_bad(pmd) (pmd_val(pmd) & 2)
+ #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+
+ #define copy_pmd(pmdpd,pmdps) \
+ do { \
+ pmdpd[0] = pmdps[0]; \
+ pmdpd[1] = pmdps[1]; \
+ flush_pmd_entry(pmdpd); \
+ } while (0)
+
+ #define pmd_clear(pmdp) \
+ do { \
+ pmdp[0] = __pmd(0); \
+ pmdp[1] = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
+ unsigned long ptr;
+
+ ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
+ ptr += PTRS_PER_PTE * sizeof(void *);
+
+ return __va(ptr);
+ }
+
+ #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
+
+ /*
+ * Permanent address of a page. We never have highmem, so this is trivial.
+ */
+ #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
+
+ /*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+ #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
+
+ /*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+ #define pgd_none(pgd) (0)
+ #define pgd_bad(pgd) (0)
+ #define pgd_present(pgd) (1)
+ #define pgd_clear(pgdp) do { } while (0)
+ #define set_pgd(pgd,pgdp) do { } while (0)
+
+ /* to find an entry in a page-table-directory */
+ #define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+
+ #define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+
+ /* to find an entry in a kernel page-table-directory */
+ #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+ /* Find an entry in the second-level page table.. */
+ #define pmd_offset(dir, addr) ((pmd_t *)(dir))
+
+ /* Find an entry in the third-level page table.. */
+ #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+ /* Encode and decode a swap entry.
+ *
+ * We support up to 32GB of swap on 4k machines
+ */
+ #define __swp_type(x) (((x).val >> 2) & 0x7f)
+ #define __swp_offset(x) ((x).val >> 9)
+ #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
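+
+ /*
+ * Arithmetic behind the "32GB" claim above (illustrative only): the swap
+ * type occupies bits 2..8 (7 bits) and the offset starts at bit 9, leaving
+ * 23 offset bits in a 32-bit entry.  With 4kB pages that is
+ * 2^23 * 4kB == 32GB of addressable swap.
+ */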
+
+ /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+ /* FIXME: this is not correct */
+ #define kern_addr_valid(addr) (1)
+
+ #include <asm-generic/pgtable.h>
+
+ /*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+ #define HAVE_ARCH_UNMAPPED_AREA
+
+ /*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+ #define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+ #define pgtable_cache_init() do { } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* CONFIG_MMU */
+
+ #endif /* _ASMARM_PGTABLE_H */
--- /dev/null
+/*
+ * linux/arch/arm/mach-omap2/board-n800.c
+ *
+ * Copyright (C) 2005-2007 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@nokia.com>
+ *
+ * Modified from mach-omap2/board-generic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/tsc2301.h>
+#include <linux/spi/tsc2005.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/i2c/lm8323.h>
+#include <linux/i2c/menelaus.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+#include <asm/arch/mcspi.h>
+#include <asm/arch/lcd_mipid.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/gpio-switch.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/blizzard.h>
+
+#include <../drivers/cbus/tahvo.h>
+#include <../drivers/media/video/tcm825x.h>
+
+#define N800_BLIZZARD_POWERDOWN_GPIO 15
+#define N800_STI_GPIO 62
+#define N800_KEYB_IRQ_GPIO 109
+#define N800_DAV_IRQ_GPIO 103
+#define N800_TSC2301_RESET_GPIO 118
+
+#ifdef CONFIG_MACH_NOKIA_N810
+static s16 rx44_keymap[LM8323_KEYMAP_SIZE] = {
+ [0x01] = KEY_Q,
+ [0x02] = KEY_K,
+ [0x03] = KEY_O,
+ [0x04] = KEY_P,
+ [0x05] = KEY_BACKSPACE,
+ [0x06] = KEY_A,
+ [0x07] = KEY_S,
+ [0x08] = KEY_D,
+ [0x09] = KEY_F,
+ [0x0a] = KEY_G,
+ [0x0b] = KEY_H,
+ [0x0c] = KEY_J,
+
+ [0x11] = KEY_W,
+ [0x12] = KEY_F4,
+ [0x13] = KEY_L,
+ [0x14] = KEY_APOSTROPHE,
+ [0x16] = KEY_Z,
+ [0x17] = KEY_X,
+ [0x18] = KEY_C,
+ [0x19] = KEY_V,
+ [0x1a] = KEY_B,
+ [0x1b] = KEY_N,
+ [0x1c] = KEY_LEFTSHIFT, /* Actually, this is both shift keys */
+ [0x1f] = KEY_F7,
+
+ [0x21] = KEY_E,
+ [0x22] = KEY_SEMICOLON,
+ [0x23] = KEY_MINUS,
+ [0x24] = KEY_EQUAL,
+ [0x2b] = KEY_FN,
+ [0x2c] = KEY_M,
+ [0x2f] = KEY_F8,
+
+ [0x31] = KEY_R,
+ [0x32] = KEY_RIGHTCTRL,
+ [0x34] = KEY_SPACE,
+ [0x35] = KEY_COMMA,
+ [0x37] = KEY_UP,
+ [0x3c] = KEY_COMPOSE,
+ [0x3f] = KEY_F6,
+
+ [0x41] = KEY_T,
+ [0x44] = KEY_DOT,
+ [0x46] = KEY_RIGHT,
+ [0x4f] = KEY_F5,
+ [0x51] = KEY_Y,
+ [0x53] = KEY_DOWN,
+ [0x55] = KEY_ENTER,
+ [0x5f] = KEY_ESC,
+
+ [0x61] = KEY_U,
+ [0x64] = KEY_LEFT,
+
+ [0x71] = KEY_I,
+ [0x75] = KEY_KPENTER,
+};
+
+static struct lm8323_platform_data lm8323_pdata = {
+ .repeat = 0, /* Repeat is handled in userspace for now. */
+ .keymap = rx44_keymap,
+
+ .name = "Internal keyboard",
+ .pwm1_name = "keyboard",
+ .pwm2_name = "cover",
+};
+#endif
+
+void __init nokia_n800_init_irq(void)
+{
+ omap2_init_common_hw(NULL);
+ omap_init_irq();
+ omap_gpio_init();
+
+#ifdef CONFIG_OMAP_STI
+ if (omap_request_gpio(N800_STI_GPIO) < 0) {
+ printk(KERN_ERR "Failed to request GPIO %d for STI\n",
+ N800_STI_GPIO);
+ return;
+ }
+
+ omap_set_gpio_direction(N800_STI_GPIO, 0);
+ omap_set_gpio_dataout(N800_STI_GPIO, 0);
+#endif
+}
+
+#if defined(CONFIG_MENELAUS) && defined(CONFIG_SENSORS_TMP105)
+
+static int n800_tmp105_set_power(int enable)
+{
+ return menelaus_set_vaux(enable ? 2800 : 0);
+}
+
+#else
+
+#define n800_tmp105_set_power NULL
+
+#endif
+
+static struct omap_uart_config n800_uart_config __initdata = {
+ .enabled_uarts = (1 << 0) | (1 << 2),
+};
+
+#include "../../../drivers/cbus/retu.h"
+
+static struct omap_fbmem_config n800_fbmem0_config __initdata = {
+ .size = 752 * 1024,
+};
+
+static struct omap_fbmem_config n800_fbmem1_config __initdata = {
+ .size = 752 * 1024,
+};
+
+static struct omap_fbmem_config n800_fbmem2_config __initdata = {
+ .size = 752 * 1024,
+};
+
+static struct omap_tmp105_config n800_tmp105_config __initdata = {
+ .tmp105_irq_pin = 125,
+ .set_power = n800_tmp105_set_power,
+};
+
+static void mipid_shutdown(struct mipid_platform_data *pdata)
+{
+ if (pdata->nreset_gpio != -1) {
+ pr_info("shutdown LCD\n");
+ omap_set_gpio_dataout(pdata->nreset_gpio, 0);
+ msleep(120);
+ }
+}
+
+static struct mipid_platform_data n800_mipid_platform_data = {
+ .shutdown = mipid_shutdown,
+};
+
+static void __init mipid_dev_init(void)
+{
+ const struct omap_lcd_config *conf;
+
+ conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
+ if (conf != NULL) {
+ n800_mipid_platform_data.nreset_gpio = conf->nreset_gpio;
+ n800_mipid_platform_data.data_lines = conf->data_lines;
+ }
+}
+
+static struct {
+ struct clk *sys_ck;
+} blizzard;
+
+static int blizzard_get_clocks(void)
+{
+ blizzard.sys_ck = clk_get(0, "osc_ck");
+ if (IS_ERR(blizzard.sys_ck)) {
+ printk(KERN_ERR "can't get Blizzard clock\n");
+ return PTR_ERR(blizzard.sys_ck);
+ }
+ return 0;
+}
+
+static unsigned long blizzard_get_clock_rate(struct device *dev)
+{
+ return clk_get_rate(blizzard.sys_ck);
+}
+
+static void blizzard_enable_clocks(int enable)
+{
+ if (enable)
+ clk_enable(blizzard.sys_ck);
+ else
+ clk_disable(blizzard.sys_ck);
+}
+
+static void blizzard_power_up(struct device *dev)
+{
+ /* Vcore to 1.475V */
+ tahvo_set_clear_reg_bits(0x07, 0, 0xf);
+ msleep(10);
+
+ blizzard_enable_clocks(1);
+ omap_set_gpio_dataout(N800_BLIZZARD_POWERDOWN_GPIO, 1);
+}
+
+static void blizzard_power_down(struct device *dev)
+{
+ omap_set_gpio_dataout(N800_BLIZZARD_POWERDOWN_GPIO, 0);
+ blizzard_enable_clocks(0);
+
+ /* Vcore to 1.005V */
+ tahvo_set_clear_reg_bits(0x07, 0xf, 0);
+}
+
+static struct blizzard_platform_data n800_blizzard_data = {
+ .power_up = blizzard_power_up,
+ .power_down = blizzard_power_down,
+ .get_clock_rate = blizzard_get_clock_rate,
+ .te_connected = 1,
+};
+
+static void __init blizzard_dev_init(void)
+{
+ int r;
+
+ r = omap_request_gpio(N800_BLIZZARD_POWERDOWN_GPIO);
+ if (r < 0)
+ return;
+ omap_set_gpio_direction(N800_BLIZZARD_POWERDOWN_GPIO, 0);
+ omap_set_gpio_dataout(N800_BLIZZARD_POWERDOWN_GPIO, 1);
+
+ blizzard_get_clocks();
+ omapfb_set_ctrl_platform_data(&n800_blizzard_data);
+}
+
+static struct omap_mmc_config n800_mmc_config __initdata = {
+ .mmc [0] = {
+ .enabled = 1,
+ .wire4 = 1,
+ },
+};
+
+extern struct omap_mmc_platform_data n800_mmc_data;
+
+static struct omap_board_config_kernel n800_config[] __initdata = {
+ { OMAP_TAG_UART, &n800_uart_config },
+ { OMAP_TAG_FBMEM, &n800_fbmem0_config },
+ { OMAP_TAG_FBMEM, &n800_fbmem1_config },
+ { OMAP_TAG_FBMEM, &n800_fbmem2_config },
+ { OMAP_TAG_TMP105, &n800_tmp105_config },
+ { OMAP_TAG_MMC, &n800_mmc_config },
+};
+
+static struct tsc2301_platform_data tsc2301_config = {
+ .reset_gpio = N800_TSC2301_RESET_GPIO,
+ .keymap = {
+ -1, /* Event for bit 0 */
+ KEY_UP, /* Event for bit 1 (up) */
+ KEY_F5, /* Event for bit 2 (home) */
+ -1, /* Event for bit 3 */
+ KEY_LEFT, /* Event for bit 4 (left) */
+ KEY_ENTER, /* Event for bit 5 (enter) */
+ KEY_RIGHT, /* Event for bit 6 (right) */
+ -1, /* Event for bit 7 */
+ KEY_ESC, /* Event for bit 8 (cycle) */
+ KEY_DOWN, /* Event for bit 9 (down) */
+ KEY_F4, /* Event for bit 10 (menu) */
+ -1, /* Event for bit 11 */
+ KEY_F8, /* Event for bit 12 (Zoom-) */
+ KEY_F6, /* Event for bit 13 (FS) */
+ KEY_F7, /* Event for bit 14 (Zoom+) */
+ -1, /* Event for bit 15 */
+ },
+ .kp_rep = 0,
+ .keyb_name = "Internal keypad",
+};
+
+static void tsc2301_dev_init(void)
+{
+ int r;
+ int gpio = N800_KEYB_IRQ_GPIO;
+
+ r = gpio_request(gpio, "tsc2301 KBD IRQ");
+ if (r >= 0) {
+ gpio_direction_input(gpio);
+ tsc2301_config.keyb_int = OMAP_GPIO_IRQ(gpio);
+ } else {
+ printk(KERN_ERR "unable to get KBD GPIO");
+ }
+
+ gpio = N800_DAV_IRQ_GPIO;
+ r = gpio_request(gpio, "tsc2301 DAV IRQ");
+ if (r >= 0) {
+ gpio_direction_input(gpio);
+ tsc2301_config.dav_int = OMAP_GPIO_IRQ(gpio);
+ } else {
+ printk(KERN_ERR "unable to get DAV GPIO");
+ }
+}
+
+static int __init tea5761_dev_init(void)
+{
+ const struct omap_tea5761_config *info;
+ int enable_gpio = 0;
+
+ info = omap_get_config(OMAP_TAG_TEA5761, struct omap_tea5761_config);
+ if (info)
+ enable_gpio = info->enable_gpio;
+
+ if (enable_gpio) {
+ pr_debug("Enabling tea5761 at GPIO %d\n",
+ enable_gpio);
+
+ if (omap_request_gpio(enable_gpio) < 0) {
+ printk(KERN_ERR "Can't request GPIO %d\n",
+ enable_gpio);
+ return -ENODEV;
+ }
+
+ omap_set_gpio_direction(enable_gpio, 0);
+ udelay(50);
+ omap_set_gpio_dataout(enable_gpio, 1);
+ }
+
+ return 0;
+}
+
+static struct omap2_mcspi_device_config tsc2301_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
+static struct omap2_mcspi_device_config mipid_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
+static struct omap2_mcspi_device_config cx3110x_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
+#ifdef CONFIG_TOUCHSCREEN_TSC2005
+static struct tsc2005_platform_data tsc2005_config = {
+ .reset_gpio = 94,
+ .dav_gpio = 106
+};
+
+static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+#endif
+
+static struct spi_board_info n800_spi_board_info[] __initdata = {
+ {
+ .modalias = "lcd_mipid",
+ .bus_num = 1,
+ .chip_select = 1,
+ .max_speed_hz = 4000000,
+ .controller_data= &mipid_mcspi_config,
+ .platform_data = &n800_mipid_platform_data,
+ }, {
+ .modalias = "cx3110x",
+ .bus_num = 2,
+ .chip_select = 0,
+ .max_speed_hz = 48000000,
+ .controller_data= &cx3110x_mcspi_config,
+ },
+ {
+ .modalias = "tsc2301",
+ .bus_num = 1,
+ .chip_select = 0,
+ .max_speed_hz = 6000000,
+ .controller_data= &tsc2301_mcspi_config,
+ .platform_data = &tsc2301_config,
+ },
+};
+
+static struct spi_board_info n810_spi_board_info[] __initdata = {
+ {
+ .modalias = "lcd_mipid",
+ .bus_num = 1,
+ .chip_select = 1,
+ .max_speed_hz = 4000000,
+ .controller_data = &mipid_mcspi_config,
+ .platform_data = &n800_mipid_platform_data,
+ },
+ {
+ .modalias = "cx3110x",
+ .bus_num = 2,
+ .chip_select = 0,
+ .max_speed_hz = 48000000,
+ .controller_data = &cx3110x_mcspi_config,
+ },
+ {
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+ .platform_data = &tsc2005_config,
+ },
+};
+
+static void __init tsc2005_set_config(void)
+{
+ const struct omap_lcd_config *conf;
+
+ conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
+ if (conf != NULL) {
+#ifdef CONFIG_TOUCHSCREEN_TSC2005
+ if (strcmp(conf->panel_name, "lph8923") == 0) {
+ tsc2005_config.ts_x_plate_ohm = 180;
+ tsc2005_config.ts_hw_avg = 0;
+ tsc2005_config.ts_ignore_last = 0;
+ tsc2005_config.ts_touch_pressure = 1500;
+ tsc2005_config.ts_stab_time = 100;
+ tsc2005_config.ts_pressure_max = 2048;
+ tsc2005_config.ts_pressure_fudge = 2;
+ tsc2005_config.ts_x_max = 4096;
+ tsc2005_config.ts_x_fudge = 4;
+ tsc2005_config.ts_y_max = 4096;
+ tsc2005_config.ts_y_fudge = 7;
+ } else if (strcmp(conf->panel_name, "ls041y3") == 0) {
+ tsc2005_config.ts_x_plate_ohm = 280;
+ tsc2005_config.ts_hw_avg = 0;
+ tsc2005_config.ts_ignore_last = 0;
+ tsc2005_config.ts_touch_pressure = 1500;
+ tsc2005_config.ts_stab_time = 1000;
+ tsc2005_config.ts_pressure_max = 2048;
+ tsc2005_config.ts_pressure_fudge = 2;
+ tsc2005_config.ts_x_max = 4096;
+ tsc2005_config.ts_x_fudge = 4;
+ tsc2005_config.ts_y_max = 4096;
+ tsc2005_config.ts_y_fudge = 7;
+ } else {
+ printk(KERN_ERR "Unknown panel type, set default "
+ "touchscreen configuration\n");
+ tsc2005_config.ts_x_plate_ohm = 200;
+ tsc2005_config.ts_stab_time = 100;
+ }
+#endif
+ }
+}
+
+#if defined(CONFIG_CBUS_RETU) && defined(CONFIG_LEDS_OMAP_PWM)
+
+void retu_keypad_led_set_power(struct omap_pwm_led_platform_data *self,
+ int on_off)
+{
+ if (on_off) {
+ retu_write_reg(RETU_REG_CTRL_SET, 1 << 6);
+ msleep(2);
+ retu_write_reg(RETU_REG_CTRL_SET, 1 << 3);
+ } else {
+ retu_write_reg(RETU_REG_CTRL_CLR, (1 << 6) | (1 << 3));
+ }
+}
+
+static struct omap_pwm_led_platform_data n800_keypad_led_data = {
+ .name = "keypad",
+ .intensity_timer = 10,
+ .blink_timer = 9,
+ .set_power = retu_keypad_led_set_power,
+};
+
+static struct platform_device n800_keypad_led_device = {
+ .name = "omap_pwm_led",
+ .id = -1,
+ .dev = {
+ .platform_data = &n800_keypad_led_data,
+ },
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_TSC2301)
+static void __init n800_ts_set_config(void)
+{
+ const struct omap_lcd_config *conf;
+
+ conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
+ if (conf != NULL) {
+ if (strcmp(conf->panel_name, "lph8923") == 0) {
+ tsc2301_config.ts_x_plate_ohm = 180;
+ tsc2301_config.ts_hw_avg = 8;
+ tsc2301_config.ts_max_pressure = 2048;
+ tsc2301_config.ts_touch_pressure = 400;
+ tsc2301_config.ts_stab_time = 100;
+ tsc2301_config.ts_pressure_fudge = 2;
+ tsc2301_config.ts_x_max = 4096;
+ tsc2301_config.ts_x_fudge = 4;
+ tsc2301_config.ts_y_max = 4096;
+ tsc2301_config.ts_y_fudge = 7;
+ } else if (strcmp(conf->panel_name, "ls041y3") == 0) {
+ tsc2301_config.ts_x_plate_ohm = 280;
+ tsc2301_config.ts_hw_avg = 8;
+ tsc2301_config.ts_touch_pressure = 400;
+ tsc2301_config.ts_max_pressure = 2048;
+ tsc2301_config.ts_stab_time = 1000;
+ tsc2301_config.ts_pressure_fudge = 2;
+ tsc2301_config.ts_x_max = 4096;
+ tsc2301_config.ts_x_fudge = 4;
+ tsc2301_config.ts_y_max = 4096;
+ tsc2301_config.ts_y_fudge = 7;
+ } else {
+ printk(KERN_ERR "Unknown panel type, set default "
+ "touchscreen configuration\n");
+ tsc2301_config.ts_x_plate_ohm = 200;
+ tsc2301_config.ts_stab_time = 100;
+ }
+ }
+}
+#else
+static inline void n800_ts_set_config(void)
+{
+}
+#endif
+
+static struct omap_gpio_switch n800_gpio_switches[] __initdata = {
+ {
+ .name = "bat_cover",
+ .gpio = -1,
+ .debounce_rising = 100,
+ .debounce_falling = 0,
+ .notify = n800_mmc_slot1_cover_handler,
+ .notify_data = NULL,
+ }, {
+ .name = "headphone",
+ .gpio = -1,
+ .debounce_rising = 200,
+ .debounce_falling = 200,
+ }, {
+ .name = "cam_act",
+ .gpio = -1,
+ .debounce_rising = 200,
+ .debounce_falling = 200,
+ }, {
+ .name = "cam_turn",
+ .gpio = -1,
+ .debounce_rising = 100,
+ .debounce_falling = 100,
+ },
+};
+
+static struct platform_device *n800_devices[] __initdata = {
+#if defined(CONFIG_CBUS_RETU) && defined(CONFIG_LEDS_OMAP_PWM)
+ &n800_keypad_led_device,
+#endif
+};
+
+#ifdef CONFIG_MENELAUS
+static int n800_auto_sleep_regulators(void)
+{
+ u32 val;
+ int ret;
+
+ val = EN_VPLL_SLEEP | EN_VMMC_SLEEP \
+ | EN_VAUX_SLEEP | EN_VIO_SLEEP \
+ | EN_VMEM_SLEEP | EN_DC3_SLEEP \
+ | EN_VC_SLEEP | EN_DC2_SLEEP;
+
+ ret = menelaus_set_regulator_sleep(1, val);
+ if (ret < 0) {
+ printk(KERN_ERR "Could not set regulators to sleep on "
+ "menelaus: %u\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int n800_auto_voltage_scale(void)
+{
+ int ret;
+
+ ret = menelaus_set_vcore_hw(1400, 1050);
+ if (ret < 0) {
+ printk(KERN_ERR "Could not set VCORE voltage on "
+ "menelaus: %u\n", ret);
+ return ret;
+ }
+ return 0;
+}
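+
+/*
+ * Note (added commentary): menelaus_set_vcore_hw() takes the hardware-
+ * controlled VCORE roof and floor in millivolts, so the call above is
+ * expected to let VCORE move between 1.05V and 1.4V under hardware control
+ * (assumption based on the Menelaus driver interface, not on this patch).
+ */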
+
+static int n800_menelaus_init(struct device *dev)
+{
+ int ret;
+
+ ret = n800_auto_voltage_scale();
+ if (ret < 0)
+ return ret;
+ ret = n800_auto_sleep_regulators();
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static struct menelaus_platform_data n800_menelaus_platform_data = {
+ .late_init = n800_menelaus_init,
+};
+#endif
+
+static struct i2c_board_info __initdata n800_i2c_board_info_1[] = {
+ {
+ I2C_BOARD_INFO("menelaus", 0x72),
+ .irq = INT_24XX_SYS_NIRQ,
+ .platform_data = &n800_menelaus_platform_data,
+ },
+};
+
+extern struct tcm825x_platform_data n800_tcm825x_platform_data;
+
+static struct i2c_board_info __initdata_or_module n8x0_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO(TCM825X_NAME, TCM825X_I2C_ADDR),
+#if defined(CONFIG_VIDEO_TCM825X) || defined(CONFIG_VIDEO_TCM825X_MODULE)
+ .platform_data = &n800_tcm825x_platform_data,
+#endif
+ },
+ {
+ I2C_BOARD_INFO("tsl2563", 0x29),
+ },
+ {
+ I2C_BOARD_INFO("lp5521", 0x32),
+ },
+};
+
+
+static struct i2c_board_info __initdata_or_module n800_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("tea5761", 0x10),
+ },
+};
+
+static struct i2c_board_info __initdata_or_module n810_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("lm8323", 0x45),
+ .irq = OMAP_GPIO_IRQ(109),
+ .platform_data = &lm8323_pdata,
+ },
+};
+
+void __init nokia_n800_common_init(void)
+{
+ platform_add_devices(n800_devices, ARRAY_SIZE(n800_devices));
+
+ n800_flash_init();
+ n800_mmc_init();
+ n800_bt_init();
+ n800_dsp_init();
+ n800_usb_init();
+ n800_cam_init();
+ if (machine_is_nokia_n800())
+ spi_register_board_info(n800_spi_board_info,
+ ARRAY_SIZE(n800_spi_board_info));
+ if (machine_is_nokia_n810()) {
+ tsc2005_set_config();
+ spi_register_board_info(n810_spi_board_info,
+ ARRAY_SIZE(n810_spi_board_info));
+ }
+ omap_serial_init();
+ omap_register_i2c_bus(1, 400, n800_i2c_board_info_1,
+ ARRAY_SIZE(n800_i2c_board_info_1));
+ omap_register_i2c_bus(2, 400, n8x0_i2c_board_info_2,
+ ARRAY_SIZE(n8x0_i2c_board_info_2));
+ if (machine_is_nokia_n800())
+ i2c_register_board_info(2, n800_i2c_board_info_2,
+ ARRAY_SIZE(n800_i2c_board_info_2));
+ if (machine_is_nokia_n810())
+ i2c_register_board_info(2, n810_i2c_board_info_2,
+ ARRAY_SIZE(n810_i2c_board_info_2));
+
+ mipid_dev_init();
+ blizzard_dev_init();
+}
+
+static void __init nokia_n800_init(void)
+{
+ nokia_n800_common_init();
+
+ n800_audio_init(&tsc2301_config);
+ n800_ts_set_config();
+ tsc2301_dev_init();
+ tea5761_dev_init();
+ omap_register_gpio_switches(n800_gpio_switches,
+ ARRAY_SIZE(n800_gpio_switches));
+}
+
+void __init nokia_n800_map_io(void)
+{
+ omap_board_config = n800_config;
+ omap_board_config_size = ARRAY_SIZE(n800_config);
+
+ omap2_set_globals_242x();
+ omap2_map_common_io();
+}
+
+MACHINE_START(NOKIA_N800, "Nokia N800")
+ .phys_io = 0x48000000,
+ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,
+ .boot_params = 0x80000100,
+ .map_io = nokia_n800_map_io,
+ .init_irq = nokia_n800_init_irq,
+ .init_machine = nokia_n800_init,
+ .timer = &omap_timer,
+MACHINE_END
--- /dev/null
+/*
+ * linux/arch/arm/plat-omap/gpio-switch.c
+ *
+ * Copyright (C) 2004-2006 Nokia Corporation
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>
+ * and Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/irqs.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/board.h>
+#include <asm/arch/gpio-switch.h>
+
+struct gpio_switch {
+ char name[14];
+ u16 gpio;
+ unsigned flags:4;
+ unsigned type:4;
+ unsigned state:1;
+ unsigned both_edges:1;
+
+ u16 debounce_rising;
+ u16 debounce_falling;
+
+ void (* notify)(void *data, int state);
+ void *notify_data;
+
+ struct work_struct work;
+ struct timer_list timer;
+ struct platform_device pdev;
+
+ struct list_head node;
+};
+
+static LIST_HEAD(gpio_switches);
+static struct platform_device *gpio_sw_platform_dev;
+static struct platform_driver gpio_sw_driver;
+
+static const struct omap_gpio_switch *board_gpio_sw_table;
+static int board_gpio_sw_count;
+
+static const char *cover_str[2] = { "open", "closed" };
+static const char *connection_str[2] = { "disconnected", "connected" };
+static const char *activity_str[2] = { "inactive", "active" };
+
+/*
+ * GPIO switch state default debounce delay in ms
+ */
+#define OMAP_GPIO_SW_DEFAULT_DEBOUNCE 10
+
+static const char **get_sw_str(struct gpio_switch *sw)
+{
+ switch (sw->type) {
+ case OMAP_GPIO_SWITCH_TYPE_COVER:
+ return cover_str;
+ case OMAP_GPIO_SWITCH_TYPE_CONNECTION:
+ return connection_str;
+ case OMAP_GPIO_SWITCH_TYPE_ACTIVITY:
+ return activity_str;
+ default:
+ BUG();
+ return NULL;
+ }
+}
+
+static const char *get_sw_type(struct gpio_switch *sw)
+{
+ switch (sw->type) {
+ case OMAP_GPIO_SWITCH_TYPE_COVER:
+ return "cover";
+ case OMAP_GPIO_SWITCH_TYPE_CONNECTION:
+ return "connection";
+ case OMAP_GPIO_SWITCH_TYPE_ACTIVITY:
+ return "activity";
+ default:
+ BUG();
+ return NULL;
+ }
+}
+
+static void print_sw_state(struct gpio_switch *sw, int state)
+{
+ const char **str;
+
+ str = get_sw_str(sw);
+ if (str != NULL)
+ printk(KERN_INFO "%s (GPIO %d) is now %s\n", sw->name, sw->gpio, str[state]);
+}
+
+static int gpio_sw_get_state(struct gpio_switch *sw)
+{
+ int state;
+
+ state = omap_get_gpio_datain(sw->gpio);
+ if (sw->flags & OMAP_GPIO_SWITCH_FLAG_INVERTED)
+ state = !state;
+
+ return state;
+}
+
+static ssize_t gpio_sw_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct gpio_switch *sw = dev_get_drvdata(dev);
+ const char **str;
+ char state[16];
+ int enable;
+
+ if (!(sw->flags & OMAP_GPIO_SWITCH_FLAG_OUTPUT))
+ return -EPERM;
+
+ if (sscanf(buf, "%15s", state) != 1)
+ return -EINVAL;
+
+ str = get_sw_str(sw);
+ if (strcmp(state, str[0]) == 0)
+ sw->state = enable = 0;
+ else if (strcmp(state, str[1]) == 0)
+ sw->state = enable = 1;
+ else
+ return -EINVAL;
+
+ if (sw->flags & OMAP_GPIO_SWITCH_FLAG_INVERTED)
+ enable = !enable;
+ omap_set_gpio_dataout(sw->gpio, enable);
+
+ return count;
+}
+
+static ssize_t gpio_sw_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gpio_switch *sw = dev_get_drvdata(dev);
+ const char **str;
+
+ str = get_sw_str(sw);
+ return sprintf(buf, "%s\n", str[sw->state]);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, gpio_sw_state_show,
+ gpio_sw_state_store);
+
+static ssize_t gpio_sw_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gpio_switch *sw = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", get_sw_type(sw));
+}
+
+static DEVICE_ATTR(type, S_IRUGO, gpio_sw_type_show, NULL);
+
+static ssize_t gpio_sw_direction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gpio_switch *sw = dev_get_drvdata(dev);
+ int is_output;
+
+ is_output = sw->flags & OMAP_GPIO_SWITCH_FLAG_OUTPUT;
+ return sprintf(buf, "%s\n", is_output ? "output" : "input");
+}
+
+static DEVICE_ATTR(direction, S_IRUGO, gpio_sw_direction_show, NULL);
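+
+/*
+ * Usage sketch (illustrative; the exact sysfs path depends on the platform
+ * device layout): each switch registers its own platform device named after
+ * the switch, carrying "state", "type" and "direction" attributes, e.g.
+ *
+ *	# cat /sys/devices/platform/gpio-switch/headphone/state
+ *	connected
+ *
+ * Writing "connected"/"disconnected" (or "open"/"closed" for cover switches)
+ * to "state" is only permitted for switches configured with
+ * OMAP_GPIO_SWITCH_FLAG_OUTPUT.
+ */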
+
+
+static irqreturn_t gpio_sw_irq_handler(int irq, void *arg)
+{
+ struct gpio_switch *sw = arg;
+ unsigned long timeout;
+ int state;
+
+ if (!sw->both_edges) {
+ if (omap_get_gpio_datain(sw->gpio))
+ set_irq_type(OMAP_GPIO_IRQ(sw->gpio), IRQ_TYPE_EDGE_FALLING);
+ else
+ set_irq_type(OMAP_GPIO_IRQ(sw->gpio), IRQ_TYPE_EDGE_RISING);
+ }
+
+ state = gpio_sw_get_state(sw);
+ if (sw->state == state)
+ return IRQ_HANDLED;
+
+ if (state)
+ timeout = sw->debounce_rising;
+ else
+ timeout = sw->debounce_falling;
+ if (!timeout)
+ schedule_work(&sw->work);
+ else
+ mod_timer(&sw->timer, jiffies + msecs_to_jiffies(timeout));
+
+ return IRQ_HANDLED;
+}
+
+static void gpio_sw_timer(unsigned long arg)
+{
+ struct gpio_switch *sw = (struct gpio_switch *) arg;
+
+ schedule_work(&sw->work);
+}
+
+static void gpio_sw_handler(struct work_struct *work)
+{
+ struct gpio_switch *sw = container_of(work, struct gpio_switch, work);
+ int state;
+
+ state = gpio_sw_get_state(sw);
+ if (sw->state == state)
+ return;
+
+ sw->state = state;
+ if (sw->notify != NULL)
+ sw->notify(sw->notify_data, state);
+ sysfs_notify(&sw->pdev.dev.kobj, NULL, "state");
+ print_sw_state(sw, state);
+}
+
+static int __init can_do_both_edges(struct gpio_switch *sw)
+{
+ if (!cpu_class_is_omap1())
+ return 1;
+ if (OMAP_GPIO_IS_MPUIO(sw->gpio))
+ return 0;
+ else
+ return 1;
+}
+
+static void gpio_sw_release(struct device *dev)
+{
+}
+
+static int __init new_switch(struct gpio_switch *sw)
+{
+ int r, direction, trigger;
+
+ switch (sw->type) {
+ case OMAP_GPIO_SWITCH_TYPE_COVER:
+ case OMAP_GPIO_SWITCH_TYPE_CONNECTION:
+ case OMAP_GPIO_SWITCH_TYPE_ACTIVITY:
+ break;
+ default:
+ printk(KERN_ERR "invalid GPIO switch type: %d\n", sw->type);
+ return -EINVAL;
+ }
+
+ sw->pdev.name = sw->name;
+ sw->pdev.id = -1;
+
+ sw->pdev.dev.parent = &gpio_sw_platform_dev->dev;
+ sw->pdev.dev.driver = &gpio_sw_driver.driver;
+ sw->pdev.dev.release = gpio_sw_release;
+
+ r = platform_device_register(&sw->pdev);
+ if (r) {
+ printk(KERN_ERR "gpio-switch: platform device registration "
+ "failed for %s", sw->name);
+ return r;
+ }
+ dev_set_drvdata(&sw->pdev.dev, sw);
+
+ r = omap_request_gpio(sw->gpio);
+ if (r < 0) {
+ platform_device_unregister(&sw->pdev);
+ return r;
+ }
+
+ /* input: 1, output: 0 */
+ direction = !(sw->flags & OMAP_GPIO_SWITCH_FLAG_OUTPUT);
+ omap_set_gpio_direction(sw->gpio, direction);
+
+ sw->state = gpio_sw_get_state(sw);
+
+ r = 0;
+ r |= device_create_file(&sw->pdev.dev, &dev_attr_state);
+ r |= device_create_file(&sw->pdev.dev, &dev_attr_type);
+ r |= device_create_file(&sw->pdev.dev, &dev_attr_direction);
+ if (r)
+ printk(KERN_ERR "gpio-switch: attribute file creation "
+ "failed for %s\n", sw->name);
+
+ if (!direction)
+ return 0;
+
+ if (can_do_both_edges(sw)) {
+ trigger = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ sw->both_edges = 1;
+ } else {
+ if (omap_get_gpio_datain(sw->gpio))
+ trigger = IRQF_TRIGGER_FALLING;
+ else
+ trigger = IRQF_TRIGGER_RISING;
+ }
+ r = request_irq(OMAP_GPIO_IRQ(sw->gpio), gpio_sw_irq_handler,
+ IRQF_SHARED | trigger, sw->name, sw);
+ if (r < 0) {
+ printk(KERN_ERR "gpio-switch: request_irq() failed "
+ "for GPIO %d\n", sw->gpio);
+ platform_device_unregister(&sw->pdev);
+ omap_free_gpio(sw->gpio);
+ return r;
+ }
+
+ INIT_WORK(&sw->work, gpio_sw_handler);
+ init_timer(&sw->timer);
+
+ sw->timer.function = gpio_sw_timer;
+ sw->timer.data = (unsigned long)sw;
+
+ list_add(&sw->node, &gpio_switches);
+
+ return 0;
+}
+
+static int __init add_atag_switches(void)
+{
+ const struct omap_gpio_switch_config *cfg;
+ struct gpio_switch *sw;
+ int i, r;
+
+ for (i = 0; ; i++) {
+ cfg = omap_get_nr_config(OMAP_TAG_GPIO_SWITCH,
+ struct omap_gpio_switch_config, i);
+ if (cfg == NULL)
+ break;
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (sw == NULL) {
+ printk(KERN_ERR "gpio-switch: kmalloc failed\n");
+ return -ENOMEM;
+ }
+ strncpy(sw->name, cfg->name, sizeof(cfg->name));
+ sw->gpio = cfg->gpio;
+ sw->flags = cfg->flags;
+ sw->type = cfg->type;
+ sw->debounce_rising = OMAP_GPIO_SW_DEFAULT_DEBOUNCE;
+ sw->debounce_falling = OMAP_GPIO_SW_DEFAULT_DEBOUNCE;
+ if ((r = new_switch(sw)) < 0) {
+ kfree(sw);
+ return r;
+ }
+ }
+ return 0;
+}
+
+static struct gpio_switch * __init find_switch(int gpio, const char *name)
+{
+ struct gpio_switch *sw;
+
+ list_for_each_entry(sw, &gpio_switches, node) {
+ if ((gpio < 0 || sw->gpio != gpio) &&
+ (name == NULL || strcmp(sw->name, name) != 0))
+ continue;
+
+ if (gpio < 0 || name == NULL)
+ goto no_check;
+
+ if (strcmp(sw->name, name) != 0)
+ printk("gpio-switch: name mismatch for %d (%s, %s)\n",
+ gpio, name, sw->name);
+ else if (sw->gpio != gpio)
+ printk("gpio-switch: GPIO mismatch for %s (%d, %d)\n",
+ name, gpio, sw->gpio);
+no_check:
+ return sw;
+ }
+ return NULL;
+}
+
+static int __init add_board_switches(void)
+{
+ int i;
+
+ for (i = 0; i < board_gpio_sw_count; i++) {
+ const struct omap_gpio_switch *cfg;
+ struct gpio_switch *sw;
+ int r;
+
+ cfg = board_gpio_sw_table + i;
+ if (strlen(cfg->name) > sizeof(sw->name) - 1)
+ return -EINVAL;
+ /* Check whether we only update an existing switch
+ * or add a new switch. */
+ sw = find_switch(cfg->gpio, cfg->name);
+ if (sw != NULL) {
+ sw->debounce_rising = cfg->debounce_rising;
+ sw->debounce_falling = cfg->debounce_falling;
+ sw->notify = cfg->notify;
+ sw->notify_data = cfg->notify_data;
+ continue;
+ } else {
+ if (cfg->gpio < 0 || cfg->name == NULL) {
+ printk("gpio-switch: required switch not "
+ "found (%d, %s)\n", cfg->gpio,
+ cfg->name);
+ continue;
+ }
+ }
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (sw == NULL) {
+ printk(KERN_ERR "gpio-switch: kmalloc failed\n");
+ return -ENOMEM;
+ }
+ strlcpy(sw->name, cfg->name, sizeof(sw->name));
+ sw->gpio = cfg->gpio;
+ sw->flags = cfg->flags;
+ sw->type = cfg->type;
+ sw->debounce_rising = cfg->debounce_rising;
+ sw->debounce_falling = cfg->debounce_falling;
+ sw->notify = cfg->notify;
+ sw->notify_data = cfg->notify_data;
+ if ((r = new_switch(sw)) < 0) {
+ kfree(sw);
+ return r;
+ }
+ }
+ return 0;
+}
+
+static void gpio_sw_cleanup(void)
+{
+ struct gpio_switch *sw = NULL, *old = NULL;
+
+ list_for_each_entry(sw, &gpio_switches, node) {
+ if (old != NULL)
+ kfree(old);
+ flush_scheduled_work();
+ del_timer_sync(&sw->timer);
+
+ free_irq(OMAP_GPIO_IRQ(sw->gpio), sw);
+
+ device_remove_file(&sw->pdev.dev, &dev_attr_state);
+ device_remove_file(&sw->pdev.dev, &dev_attr_type);
+ device_remove_file(&sw->pdev.dev, &dev_attr_direction);
+
+ platform_device_unregister(&sw->pdev);
+ omap_free_gpio(sw->gpio);
+ old = sw;
+ }
+ kfree(old);
+}
+
+static void __init report_initial_state(void)
+{
+ struct gpio_switch *sw;
+
+ list_for_each_entry(sw, &gpio_switches, node) {
+ int state;
+
+ state = omap_get_gpio_datain(sw->gpio);
+ if (sw->flags & OMAP_GPIO_SWITCH_FLAG_INVERTED)
+ state = !state;
+ if (sw->notify != NULL)
+ sw->notify(sw->notify_data, state);
+ print_sw_state(sw, state);
+ }
+}
+
+static int gpio_sw_remove(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver gpio_sw_driver = {
+ .remove = gpio_sw_remove,
+ .driver = {
+ .name = "gpio-switch",
+ },
+};
+
+void __init omap_register_gpio_switches(const struct omap_gpio_switch *tbl,
+ int count)
+{
+ BUG_ON(board_gpio_sw_table != NULL);
+
+ board_gpio_sw_table = tbl;
+ board_gpio_sw_count = count;
+}
+
+static int __init gpio_sw_init(void)
+{
+ int r;
+
+ printk(KERN_INFO "OMAP GPIO switch handler initializing\n");
+
+ r = platform_driver_register(&gpio_sw_driver);
+ if (r)
+ return r;
+
+ gpio_sw_platform_dev = platform_device_register_simple("gpio-switch",
+ -1, NULL, 0);
+ if (IS_ERR(gpio_sw_platform_dev)) {
+ r = PTR_ERR(gpio_sw_platform_dev);
+ goto err1;
+ }
+
+ r = add_atag_switches();
+ if (r < 0)
+ goto err2;
+
+ r = add_board_switches();
+ if (r < 0)
+ goto err2;
+
+ report_initial_state();
+
+ return 0;
+err2:
+ gpio_sw_cleanup();
+ platform_device_unregister(gpio_sw_platform_dev);
+err1:
+ platform_driver_unregister(&gpio_sw_driver);
+ return r;
+}
+
+static void __exit gpio_sw_exit(void)
+{
+ gpio_sw_cleanup();
+ platform_device_unregister(gpio_sw_platform_dev);
+ platform_driver_unregister(&gpio_sw_driver);
+}
+
+#ifndef MODULE
+late_initcall(gpio_sw_init);
+#else
+module_init(gpio_sw_init);
+#endif
+module_exit(gpio_sw_exit);
+
+MODULE_AUTHOR("Juha Yrjölä <juha.yrjola@nokia.com>, Paul Mundt <paul.mundt@nokia.com>");
+MODULE_DESCRIPTION("GPIO switch driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * linux/drivers/bluetooth/brf6150/brf6150.c
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Ville Tervo <ville.tervo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/serial_reg.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/board.h>
+#include <asm/arch/irqs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#include "brf6150.h"
+
+#if 0
+#define NBT_DBG(fmt, arg...) printk("%s: " fmt "" , __FUNCTION__ , ## arg)
+#else
+#define NBT_DBG(...)
+#endif
+
+#if 0
+#define NBT_DBG_FW(fmt, arg...) printk("%s: " fmt "" , __FUNCTION__ , ## arg)
+#else
+#define NBT_DBG_FW(...)
+#endif
+
+#if 0
+#define NBT_DBG_POWER(fmt, arg...) printk("%s: " fmt "" , __FUNCTION__ , ## arg)
+#else
+#define NBT_DBG_POWER(...)
+#endif
+
+#if 0
+#define NBT_DBG_TRANSFER(fmt, arg...) printk("%s: " fmt "" , __FUNCTION__ , ## arg)
+#else
+#define NBT_DBG_TRANSFER(...)
+#endif
+
+#if 0
+#define NBT_DBG_TRANSFER_NF(fmt, arg...) printk(fmt "" , ## arg)
+#else
+#define NBT_DBG_TRANSFER_NF(...)
+#endif
+
+#define PM_TIMEOUT (2000)
+
+static void brf6150_device_release(struct device *dev);
+static struct brf6150_info *exit_info;
+
+static struct platform_device brf6150_device = {
+ .name = BT_DEVICE,
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .release = brf6150_device_release,
+ }
+};
+
+static struct device_driver brf6150_driver = {
+ .name = BT_DRIVER,
+ .bus = &platform_bus_type,
+};
+
+static inline void brf6150_outb(struct brf6150_info *info, unsigned int offset, u8 val)
+{
+ outb(val, info->uart_base + (offset << 2));
+}
+
+static inline u8 brf6150_inb(struct brf6150_info *info, unsigned int offset)
+{
+ return inb(info->uart_base + (offset << 2));
+}
+
+static void brf6150_set_rts(struct brf6150_info *info, int active)
+{
+ u8 b;
+
+ b = brf6150_inb(info, UART_MCR);
+ if (active)
+ b |= UART_MCR_RTS;
+ else
+ b &= ~UART_MCR_RTS;
+ brf6150_outb(info, UART_MCR, b);
+}
+
+static void brf6150_wait_for_cts(struct brf6150_info *info, int active,
+ int timeout_ms)
+{
+ int okay;
+ unsigned long timeout;
+
+ okay = 0;
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ for (;;) {
+ int state;
+
+ state = brf6150_inb(info, UART_MSR) & UART_MSR_CTS;
+ if (active) {
+ if (state)
+ break;
+ } else {
+ if (!state)
+ break;
+ }
+ if (time_after(jiffies, timeout))
+ break;
+ }
+}
+
+static inline void brf6150_set_auto_ctsrts(struct brf6150_info *info, int on)
+{
+ u8 lcr, b;
+
+ lcr = brf6150_inb(info, UART_LCR);
+ brf6150_outb(info, UART_LCR, 0xbf);
+ b = brf6150_inb(info, UART_EFR);
+ if (on)
+ b |= UART_EFR_CTS | UART_EFR_RTS;
+ else
+ b &= ~(UART_EFR_CTS | UART_EFR_RTS);
+ brf6150_outb(info, UART_EFR, b);
+ brf6150_outb(info, UART_LCR, lcr);
+}
+
+static inline void brf6150_enable_pm_rx(struct brf6150_info *info)
+{
+ if (info->pm_enabled) {
+ info->rx_pm_enabled = 1;
+ }
+}
+
+static inline void brf6150_disable_pm_rx(struct brf6150_info *info)
+{
+ if (info->pm_enabled) {
+ info->rx_pm_enabled = 0;
+ }
+}
+
+static void brf6150_enable_pm_tx(struct brf6150_info *info)
+{
+ if (info->pm_enabled) {
+ mod_timer(&info->pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT));
+ info->tx_pm_enabled = 1;
+ }
+}
+
+static void brf6150_disable_pm_tx(struct brf6150_info *info)
+{
+ if (info->pm_enabled) {
+ info->tx_pm_enabled = 0;
+ omap_set_gpio_dataout(info->btinfo->bt_wakeup_gpio, 1);
+ }
+ if (omap_get_gpio_datain(info->btinfo->host_wakeup_gpio))
+ tasklet_schedule(&info->tx_task);
+}
+
+static void brf6150_pm_timer(unsigned long data)
+{
+ struct brf6150_info *info;
+
+ info = (struct brf6150_info *)data;
+ if (info->tx_pm_enabled && info->rx_pm_enabled && !test_bit(HCI_INQUIRY, &info->hdev->flags))
+ omap_set_gpio_dataout(info->btinfo->bt_wakeup_gpio, 0);
+ else
+ mod_timer(&info->pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT));
+}
+
+static int brf6150_change_speed(struct brf6150_info *info, unsigned long speed)
+{
+ unsigned int divisor;
+ u8 lcr, mdr1;
+
+ NBT_DBG("Setting speed %lu\n", speed);
+
+ if (speed >= 460800) {
+ divisor = UART_CLOCK / 13 / speed;
+ mdr1 = 3;
+ } else {
+ divisor = UART_CLOCK / 16 / speed;
+ mdr1 = 0;
+ }
+
+ brf6150_outb(info, UART_OMAP_MDR1, 7); /* Make sure UART mode is disabled */
+ lcr = brf6150_inb(info, UART_LCR);
+ brf6150_outb(info, UART_LCR, UART_LCR_DLAB); /* Set DLAB */
+ brf6150_outb(info, UART_DLL, divisor & 0xff); /* Set speed */
+ brf6150_outb(info, UART_DLM, divisor >> 8);
+ brf6150_outb(info, UART_LCR, lcr);
+ brf6150_outb(info, UART_OMAP_MDR1, mdr1); /* Make sure UART mode is enabled */
+
+ return 0;
+}
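+
+/*
+ * Divisor example (illustrative; assumes UART_CLOCK in brf6150.h is the
+ * 48MHz OMAP UART functional clock): at 921600 baud the >= 460800 branch is
+ * taken, giving divisor = 48000000 / 13 / 921600 = 4 with MDR1 = 3 (UART 13x
+ * mode); at 115200 baud, divisor = 48000000 / 16 / 115200 = 26 with MDR1 = 0
+ * (UART 16x mode).
+ */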
+
+/* Firmware handling */
+static int brf6150_open_firmware(struct brf6150_info *info)
+{
+ int err;
+
+ info->fw_pos = 0;
+ err = request_firmware(&info->fw_entry, "brf6150fw.bin", &brf6150_device.dev);
+
+ return err;
+}
+
+static struct sk_buff *brf6150_read_fw_cmd(struct brf6150_info *info, int how)
+{
+ struct sk_buff *skb;
+ unsigned int cmd_len;
+
+ if (info->fw_pos >= info->fw_entry->size) {
+ return NULL;
+ }
+
+ cmd_len = info->fw_entry->data[info->fw_pos++];
+ if (!cmd_len)
+ return NULL;
+
+ if (info->fw_pos + cmd_len > info->fw_entry->size) {
+ printk(KERN_WARNING "Corrupted firmware image\n");
+ return NULL;
+ }
+
+ skb = bt_skb_alloc(cmd_len, how);
+ if (!skb) {
+ printk(KERN_WARNING "Cannot reserve memory for buffer\n");
+ return NULL;
+ }
+ memcpy(skb_put(skb, cmd_len), &info->fw_entry->data[info->fw_pos], cmd_len);
+
+ info->fw_pos += cmd_len;
+
+ return skb;
+}
+
+static int brf6150_close_firmware(struct brf6150_info *info)
+{
+ release_firmware(info->fw_entry);
+ return 0;
+}
+
+static int brf6150_send_alive_packet(struct brf6150_info *info)
+{
+ struct sk_buff *skb;
+
+ NBT_DBG("Sending alive packet\n");
+ skb = brf6150_read_fw_cmd(info, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING "Cannot read alive command");
+ return -1;
+ }
+
+ clk_enable(info->uart_ck);
+ skb_queue_tail(&info->txq, skb);
+ tasklet_schedule(&info->tx_task);
+
+ NBT_DBG("Alive packet sent\n");
+ return 0;
+}
+
+static void brf6150_alive_packet(struct brf6150_info *info, struct sk_buff *skb)
+{
+ NBT_DBG("Received alive packet\n");
+ if (skb->data[1] == 0xCC) {
+ complete(&info->init_completion);
+ }
+
+ kfree_skb(skb);
+}
+
+static int brf6150_send_negotiation(struct brf6150_info *info)
+{
+ struct sk_buff *skb;
+ NBT_DBG("Sending negotiation..\n");
+
+ brf6150_change_speed(info, INIT_SPEED);
+
+ skb = brf6150_read_fw_cmd(info, GFP_KERNEL);
+
+ if (!skb) {
+ printk(KERN_WARNING "Cannot read negoatiation message");
+ return -1;
+ }
+
+ clk_enable(info->uart_ck);
+ skb_queue_tail(&info->txq, skb);
+ tasklet_schedule(&info->tx_task);
+
+
+ NBT_DBG("Negotiation sent\n");
+ return 0;
+}
+
+static void brf6150_negotiation_packet(struct brf6150_info *info,
+ struct sk_buff *skb)
+{
+ if (skb->data[1] == 0x20) {
+ /* Change to operational settings */
+ brf6150_set_rts(info, 0);
+ brf6150_wait_for_cts(info, 0, 100);
+ brf6150_change_speed(info, MAX_BAUD_RATE);
+ brf6150_set_rts(info, 1);
+ brf6150_wait_for_cts(info, 1, 100);
+ brf6150_set_auto_ctsrts(info, 1);
+ brf6150_send_alive_packet(info);
+ } else {
+ printk(KERN_WARNING "Could not negotiate brf6150 settings\n");
+ }
+ kfree_skb(skb);
+}
+
+static int brf6150_get_hdr_len(u8 pkt_type)
+{
+ long retval;
+
+ switch (pkt_type) {
+ case H4_EVT_PKT:
+ retval = HCI_EVENT_HDR_SIZE;
+ break;
+ case H4_ACL_PKT:
+ retval = HCI_ACL_HDR_SIZE;
+ break;
+ case H4_SCO_PKT:
+ retval = HCI_SCO_HDR_SIZE;
+ break;
+ case H4_NEG_PKT:
+ retval = 9;
+ break;
+ case H4_ALIVE_PKT:
+ retval = 3;
+ break;
+ default:
+ printk(KERN_ERR "brf6150: Unknown H4 packet");
+ retval = -1;
+ break;
+ }
+
+ return retval;
+}
+
+static unsigned int brf6150_get_data_len(struct brf6150_info *info,
+ struct sk_buff *skb)
+{
+ long retval = -1;
+ struct hci_event_hdr *evt_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct hci_sco_hdr *sco_hdr;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_EVT_PKT:
+ evt_hdr = (struct hci_event_hdr *)skb->data;
+ retval = evt_hdr->plen;
+ break;
+ case H4_ACL_PKT:
+ acl_hdr = (struct hci_acl_hdr *)skb->data;
+ retval = le16_to_cpu(acl_hdr->dlen);
+ break;
+ case H4_SCO_PKT:
+ sco_hdr = (struct hci_sco_hdr *)skb->data;
+ retval = sco_hdr->dlen;
+ break;
+ case H4_NEG_PKT:
+ retval = 0;
+ break;
+ case H4_ALIVE_PKT:
+ retval = 0;
+ break;
+ }
+
+ return retval;
+}
+
+static void brf6150_parse_fw_event(struct brf6150_info *info)
+{
+ struct hci_fw_event *ev;
+
+ if (bt_cb(info->rx_skb)->pkt_type != H4_EVT_PKT) {
+ printk(KERN_WARNING "Got non event fw packet.\n");
+ info->fw_error = 1;
+ return;
+ }
+
+ ev = (struct hci_fw_event *)info->rx_skb->data;
+ if (ev->hev.evt != HCI_EV_CMD_COMPLETE) {
+ printk(KERN_WARNING "Got non cmd complete fw event\n");
+ info->fw_error = 1;
+ return;
+ }
+
+ if (ev->status != 0) {
+ printk(KERN_WARNING "Got error status from fw command\n");
+ info->fw_error = 1;
+ return;
+ }
+
+ complete(&info->fw_completion);
+}
+
+static inline void brf6150_recv_frame(struct brf6150_info *info,
+ struct sk_buff *skb)
+{
+ if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) {
+ NBT_DBG("fw_event\n");
+ brf6150_parse_fw_event(info);
+ kfree_skb(skb);
+ } else {
+ hci_recv_frame(skb);
+ if (!(brf6150_inb(info, UART_LSR) & UART_LSR_DR))
+ brf6150_enable_pm_rx(info);
+ NBT_DBG("Frame sent to upper layer\n");
+ }
+
+}
+
+static inline void brf6150_rx(struct brf6150_info *info)
+{
+ u8 byte;
+
+ NBT_DBG_TRANSFER("rx_tasklet woke up\ndata ");
+
+ while (brf6150_inb(info, UART_LSR) & UART_LSR_DR) {
+ if (info->rx_skb == NULL) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
+ printk(KERN_WARNING "brf6150: Can't allocate memory for new packet\n");
+ return;
+ }
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_skb->dev = (void *)info->hdev;
+ brf6150_disable_pm_rx(info);
+ clk_enable(info->uart_ck);
+ }
+
+ byte = brf6150_inb(info, UART_RX);
+ if (info->garbage_bytes) {
+ info->garbage_bytes--;
+ info->hdev->stat.err_rx++;
+ continue;
+ }
+ info->hdev->stat.byte_rx++;
+ NBT_DBG_TRANSFER_NF("0x%.2x ", byte);
+ switch (info->rx_state) {
+ case WAIT_FOR_PKT_TYPE:
+ bt_cb(info->rx_skb)->pkt_type = byte;
+ info->rx_count = brf6150_get_hdr_len(byte);
+ if (info->rx_count >= 0) {
+ info->rx_state = WAIT_FOR_HEADER;
+ } else {
+ info->hdev->stat.err_rx++;
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ clk_disable(info->uart_ck);
+ }
+ break;
+ case WAIT_FOR_HEADER:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ if (info->rx_count == 0) {
+ info->rx_count = brf6150_get_data_len(info, info->rx_skb);
+ if (info->rx_count > skb_tailroom(info->rx_skb)) {
+ printk(KERN_WARNING "brf6150: Frame is %ld bytes too long.\n",
+ info->rx_count - skb_tailroom(info->rx_skb));
+ info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb);
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ clk_disable(info->uart_ck);
+ break;
+ }
+ info->rx_state = WAIT_FOR_DATA;
+ if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
+ brf6150_negotiation_packet(info, info->rx_skb);
+ info->rx_skb = NULL;
+ clk_disable(info->uart_ck);
+ return;
+ }
+ if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
+ brf6150_alive_packet(info, info->rx_skb);
+ info->rx_skb = NULL;
+ clk_disable(info->uart_ck);
+ return;
+ }
+ }
+ break;
+ case WAIT_FOR_DATA:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ if (info->rx_count == 0) {
+ brf6150_recv_frame(info, info->rx_skb);
+ info->rx_skb = NULL;
+ clk_disable(info->uart_ck);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ NBT_DBG_TRANSFER_NF("\n");
+}
+
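+/*
+ * Push as much of the skb at the head of the TX queue into the UART
+ * FIFO as it will take. A partially sent skb is put back at the head
+ * of the queue and the THRI interrupt is re-enabled so transmission
+ * resumes when the FIFO drains. If the queue is empty, TX power
+ * management is re-enabled instead.
+ */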
+static void brf6150_tx_tasklet(unsigned long data)
+{
+ unsigned int sent = 0;
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct brf6150_info *info = (struct brf6150_info *)data;
+
+ NBT_DBG_TRANSFER("tx_tasklet woke up\n data ");
+
+ skb = skb_dequeue(&info->txq);
+ if (!skb) {
+ /* No data in buffer */
+ brf6150_enable_pm_tx(info);
+ return;
+ }
+
+ /* Copy data to tx fifo */
+ while (!(brf6150_inb(info, UART_OMAP_SSR) & UART_OMAP_SSR_TXFULL) &&
+ (sent < skb->len)) {
+ NBT_DBG_TRANSFER_NF("0x%.2x ", skb->data[sent]);
+ brf6150_outb(info, UART_TX, skb->data[sent]);
+ sent++;
+ }
+
+ info->hdev->stat.byte_tx += sent;
+ NBT_DBG_TRANSFER_NF("\n");
+ if (skb->len == sent) {
+ kfree_skb(skb);
+ clk_disable(info->uart_ck);
+ } else {
+ skb_pull(skb, sent);
+ skb_queue_head(&info->txq, skb);
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ brf6150_outb(info, UART_IER, brf6150_inb(info, UART_IER) | UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
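+/*
+ * UART interrupt handler: dispatch on the interrupt identification
+ * register. Modem-status and line-status interrupts are only
+ * acknowledged, RX data is handled inline, and TX work is deferred
+ * to the tasklet.
+ */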
+static irqreturn_t brf6150_interrupt(int irq, void *data)
+{
+ struct brf6150_info *info = (struct brf6150_info *)data;
+ u8 iir, msr;
+ int ret;
+ unsigned long flags;
+
+ ret = IRQ_NONE;
+
+ clk_enable(info->uart_ck);
+ iir = brf6150_inb(info, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ printk(KERN_WARNING "brf6150: Interrupt but no reason, iir 0x%.2x\n", iir);
+ clk_disable(info->uart_ck);
+ return IRQ_HANDLED;
+ }
+
+ NBT_DBG("In interrupt handler iir 0x%.2x\n", iir);
+
+ iir &= UART_IIR_ID;
+
+ if (iir == UART_IIR_MSI) {
+ msr = brf6150_inb(info, UART_MSR);
+ ret = IRQ_HANDLED;
+ }
+ if (iir == UART_IIR_RLSI) {
+ brf6150_inb(info, UART_RX);
+ brf6150_inb(info, UART_LSR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_RDI) {
+ brf6150_rx(info);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_THRI) {
+ spin_lock_irqsave(&info->lock, flags);
+ brf6150_outb(info, UART_IER, brf6150_inb(info, UART_IER) & ~UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+ tasklet_schedule(&info->tx_task);
+ ret = IRQ_HANDLED;
+ }
+
+ clk_disable(info->uart_ck);
+ return ret;
+}
+
+static irqreturn_t brf6150_wakeup_interrupt(int irq, void *dev_inst)
+{
+ struct brf6150_info *info = dev_inst;
+ int should_wakeup;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ should_wakeup = omap_get_gpio_datain(info->btinfo->host_wakeup_gpio);
+ NBT_DBG_POWER("gpio interrupt %d\n", should_wakeup);
+ if (should_wakeup) {
+ clk_enable(info->uart_ck);
+ brf6150_set_auto_ctsrts(info, 1);
+ brf6150_rx(info);
+ tasklet_schedule(&info->tx_task);
+ } else {
+ brf6150_set_auto_ctsrts(info, 0);
+ brf6150_set_rts(info, 0);
+ clk_disable(info->uart_ck);
+ }
+
+ spin_unlock_irqrestore(&info->lock, flags);
+ return IRQ_HANDLED;
+}
+
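+/*
+ * Soft-reset the UART (with a timeout), then program 8-bit characters,
+ * plain UART mode, FIFO and flow-control trigger levels, and leave
+ * only the receive-data interrupt enabled.
+ */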
+static int brf6150_init_uart(struct brf6150_info *info)
+{
+ int count = 0;
+
+ /* Reset the UART */
+ brf6150_outb(info, UART_OMAP_SYSC, UART_SYSC_OMAP_RESET);
+ while (!(brf6150_inb(info, UART_OMAP_SYSS) & UART_SYSS_RESETDONE)) {
+ if (count++ > 100) {
+ printk(KERN_ERR "brf6150: UART reset timeout\n");
+ return -1;
+ }
+ udelay(1);
+ }
+
+ /* Enable and setup FIFO */
+ brf6150_outb(info, UART_LCR, UART_LCR_WLEN8);
+ brf6150_outb(info, UART_OMAP_MDR1, 0x00); /* Make sure UART mode is enabled */
+ brf6150_outb(info, UART_OMAP_SCR, 0x00);
+ brf6150_outb(info, UART_EFR, brf6150_inb(info, UART_EFR) | UART_EFR_ECB);
+ brf6150_outb(info, UART_MCR, brf6150_inb(info, UART_MCR) | UART_MCR_TCRTLR);
+ brf6150_outb(info, UART_TI752_TLR, 0xff);
+ brf6150_outb(info, UART_TI752_TCR, 0x1f);
+ brf6150_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ brf6150_outb(info, UART_IER, UART_IER_RDI);
+
+ return 0;
+}
+
+static int brf6150_reset(struct brf6150_info *info)
+{
+ omap_set_gpio_dataout(info->btinfo->bt_wakeup_gpio, 0);
+ omap_set_gpio_dataout(info->btinfo->reset_gpio, 0);
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(msecs_to_jiffies(10));
+ omap_set_gpio_dataout(info->btinfo->bt_wakeup_gpio, 1);
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(msecs_to_jiffies(100));
+ omap_set_gpio_dataout(info->btinfo->reset_gpio, 1);
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(msecs_to_jiffies(100));
+
+ return 0;
+}
+
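+/*
+ * Feed the firmware to the chip one command at a time: each command is
+ * queued for the TX tasklet and the next one is not sent until the
+ * matching command-complete event (see brf6150_parse_fw_event())
+ * arrives or a one-second timeout expires.
+ */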
+static int brf6150_send_firmware(struct brf6150_info *info)
+{
+ struct sk_buff *skb;
+
+ init_completion(&info->fw_completion);
+ info->fw_error = 0;
+
+ while ((skb = brf6150_read_fw_cmd(info, GFP_KERNEL)) != NULL) {
+ clk_enable(info->uart_ck);
+ skb_queue_tail(&info->txq, skb);
+ tasklet_schedule(&info->tx_task);
+
+ if (!wait_for_completion_timeout(&info->fw_completion, HZ)) {
+ return -1;
+ }
+
+ if (info->fw_error) {
+ return -1;
+ }
+ }
+ NBT_DBG_FW("Firmware sent\n");
+
+ return 0;
+
+}
+
+/* hci callback functions */
+static int brf6150_hci_flush(struct hci_dev *hdev)
+{
+ struct brf6150_info *info;
+ info = hdev->driver_data;
+
+ skb_queue_purge(&info->txq);
+
+ return 0;
+}
+
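+/*
+ * Bring the chip up: initialize the UART, reset the chip, negotiate
+ * the link, download the firmware and only then enable power
+ * management and mark the HCI device as running. The host-wakeup GPIO
+ * interrupt is kept edge-disabled until the firmware has been sent.
+ */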
+static int brf6150_hci_open(struct hci_dev *hdev)
+{
+ struct brf6150_info *info;
+ int err;
+
+ info = hdev->driver_data;
+
+ if (test_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ if (brf6150_open_firmware(info) < 0) {
+ printk(KERN_ERR "brf6150: Cannot open firmware\n");
+ return -1;
+ }
+
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_count = 0;
+ info->garbage_bytes = 0;
+ info->rx_skb = NULL;
+ info->pm_enabled = 0;
- set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQT_BOTHEDGE);
++ set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQ_TYPE_NONE);
+ init_completion(&info->fw_completion);
+
+ clk_enable(info->uart_ck);
+
+ brf6150_init_uart(info);
+ brf6150_set_auto_ctsrts(info, 0);
+ brf6150_set_rts(info, 0);
+ brf6150_reset(info);
+ brf6150_wait_for_cts(info, 1, 10);
+ brf6150_set_rts(info, 1);
+ if (brf6150_send_negotiation(info)) {
+ brf6150_close_firmware(info);
+ return -1;
+ }
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion, HZ)) {
+ brf6150_close_firmware(info);
+ clk_disable(info->uart_ck);
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ return -1;
+ }
+ brf6150_set_auto_ctsrts(info, 1);
+
+ err = brf6150_send_firmware(info);
+ brf6150_close_firmware(info);
+ if (err < 0)
+ printk(KERN_ERR "brf6150: Sending firmware failed. Bluetooth won't work properly\n");
+
- set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQT_NOEDGE);
++ set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQ_TYPE_EDGE_BOTH);
+ info->pm_enabled = 1;
+ set_bit(HCI_RUNNING, &hdev->flags);
+ return 0;
+}
+
+static int brf6150_hci_close(struct hci_dev *hdev)
+{
+ struct brf6150_info *info = hdev->driver_data;
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ brf6150_hci_flush(hdev);
+ clk_disable(info->uart_ck);
+ del_timer_sync(&info->pm_timer);
+ omap_set_gpio_dataout(info->btinfo->bt_wakeup_gpio, 0);
- set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQT_NOEDGE);
++ set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static void brf6150_hci_destruct(struct hci_dev *hdev)
+{
+}
+
+static int brf6150_hci_send_frame(struct sk_buff *skb)
+{
+ struct brf6150_info *info;
+ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
+
+ if (!hdev) {
+ printk(KERN_WARNING "brf6150: Frame for unknown device\n");
+ return -ENODEV;
+ }
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ printk(KERN_WARNING "brf6150: Frame for non-running device\n");
+ return -EIO;
+ }
+
+ info = hdev->driver_data;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ /* Push frame type to skb */
+ clk_enable(info->uart_ck);
+ *skb_push(skb, 1) = bt_cb(skb)->pkt_type;
+ skb_queue_tail(&info->txq, skb);
+
+ brf6150_disable_pm_tx(info);
+
+ return 0;
+}
+
+static int brf6150_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void brf6150_device_release(struct device *dev)
+{
+}
+
+static int brf6150_register_hdev(struct brf6150_info *info)
+{
+ struct hci_dev *hdev;
+
+ /* Initialize and register HCI device */
+
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ printk(KERN_WARNING "brf6150: Can't allocate memory for device\n");
+ return -ENOMEM;
+ }
+ info->hdev = hdev;
+
+ hdev->type = HCI_UART;
+ hdev->driver_data = info;
+
+ hdev->open = brf6150_hci_open;
+ hdev->close = brf6150_hci_close;
+ hdev->destruct = brf6150_hci_destruct;
+ hdev->flush = brf6150_hci_flush;
+ hdev->send = brf6150_hci_send_frame;
+ hdev->ioctl = brf6150_hci_ioctl;
+
+ hdev->owner = THIS_MODULE;
+
+ if (hci_register_dev(hdev) < 0) {
+ printk(KERN_WARNING "brf6150: Can't register HCI device %s.\n", hdev->name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init brf6150_init(void)
+{
+ struct brf6150_info *info;
+ int irq, err;
+
+ info = kzalloc(sizeof(struct brf6150_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ brf6150_device.dev.driver_data = info;
+ init_completion(&info->init_completion);
+ init_completion(&info->fw_completion);
+ info->pm_enabled = 0;
+ info->rx_pm_enabled = 0;
+ info->tx_pm_enabled = 0;
+ info->garbage_bytes = 0;
+ tasklet_init(&info->tx_task, brf6150_tx_tasklet, (unsigned long)info);
+ spin_lock_init(&info->lock);
+ skb_queue_head_init(&info->txq);
+ init_timer(&info->pm_timer);
+ info->pm_timer.function = brf6150_pm_timer;
+ info->pm_timer.data = (unsigned long)info;
+ exit_info = NULL;
+
+ info->btinfo = omap_get_config(OMAP_TAG_NOKIA_BT, struct omap_bluetooth_config);
+ if (info->btinfo == NULL) {
+ kfree(info);
+ return -ENODEV;
+ }
+
+ NBT_DBG("RESET gpio: %d\n", info->btinfo->reset_gpio);
+ NBT_DBG("BTWU gpio: %d\n", info->btinfo->bt_wakeup_gpio);
+ NBT_DBG("HOSTWU gpio: %d\n", info->btinfo->host_wakeup_gpio);
+ NBT_DBG("Uart: %d\n", info->btinfo->bt_uart);
+ NBT_DBG("sysclk: %d\n", info->btinfo->bt_sysclk);
+
+ err = omap_request_gpio(info->btinfo->reset_gpio);
+ if (err < 0)
+ {
+ printk(KERN_WARNING "Cannot get GPIO line %d\n",
+ info->btinfo->reset_gpio);
+ kfree(info);
+ return err;
+ }
+
+ err = omap_request_gpio(info->btinfo->bt_wakeup_gpio);
+ if (err < 0)
+ {
+ printk(KERN_WARNING "Cannot get GPIO line %d\n",
+ info->btinfo->bt_wakeup_gpio);
+ omap_free_gpio(info->btinfo->reset_gpio);
+ kfree(info);
+ return err;
+ }
+
+ err = omap_request_gpio(info->btinfo->host_wakeup_gpio);
+ if (err < 0)
+ {
+ printk(KERN_WARNING "Cannot get GPIO line %d\n",
+ info->btinfo->host_wakeup_gpio);
+ omap_free_gpio(info->btinfo->reset_gpio);
+ omap_free_gpio(info->btinfo->bt_wakeup_gpio);
+ kfree(info);
+ return err;
+ }
+
+ omap_set_gpio_direction(info->btinfo->reset_gpio, 0);
+ omap_set_gpio_direction(info->btinfo->bt_wakeup_gpio, 0);
+ omap_set_gpio_direction(info->btinfo->host_wakeup_gpio, 1);
++ set_irq_type(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), IRQ_TYPE_NONE);
+
+ switch (info->btinfo->bt_uart) {
+ case 1:
+ irq = INT_UART1;
+ info->uart_ck = clk_get(NULL, "uart1_ck");
+ info->uart_base = io_p2v((unsigned long)OMAP_UART1_BASE);
+ break;
+ case 2:
+ irq = INT_UART2;
+ info->uart_ck = clk_get(NULL, "uart2_ck");
+ info->uart_base = io_p2v((unsigned long)OMAP_UART2_BASE);
+ break;
+ case 3:
+ irq = INT_UART3;
+ info->uart_ck = clk_get(NULL, "uart3_ck");
+ info->uart_base = io_p2v((unsigned long)OMAP_UART3_BASE);
+ break;
+ default:
+ printk(KERN_ERR "brf6150: No UART defined\n");
+ err = -ENODEV;
+ goto cleanup;
+ }
+
+ info->irq = irq;
+ err = request_irq(irq, brf6150_interrupt, 0, "brf6150", (void *)info);
+ if (err < 0) {
+ printk(KERN_ERR "brf6150: unable to get IRQ %d\n", irq);
+ goto cleanup;
+ }
+
+ err = request_irq(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio),
+ brf6150_wakeup_interrupt, 0, "brf6150_wkup", (void *)info);
+ if (err < 0) {
+ printk(KERN_ERR "brf6150: unable to get wakeup IRQ %d\n",
+ OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio));
+ free_irq(irq, (void *)info);
+ goto cleanup;
+ }
+
+ /* Register with LDM */
+ if (platform_device_register(&brf6150_device)) {
+ printk(KERN_ERR "failed to register brf6150 device\n");
+ err = -ENODEV;
+ goto cleanup_irq;
+ }
+ /* Register the driver with LDM */
+ if (driver_register(&brf6150_driver)) {
+ printk(KERN_WARNING "failed to register brf6150 driver\n");
+ platform_device_unregister(&brf6150_device);
+ err = -ENODEV;
+ goto cleanup_irq;
+ }
+
+ if (brf6150_register_hdev(info) < 0) {
+ printk(KERN_WARNING "failed to register brf6150 hci device\n");
+ platform_device_unregister(&brf6150_device);
+ driver_unregister(&brf6150_driver);
+ err = -ENODEV;
+ goto cleanup_irq;
+ }
+
+ exit_info = info;
+ return 0;
+
+cleanup_irq:
+ free_irq(irq, (void *)info);
+ free_irq(OMAP_GPIO_IRQ(info->btinfo->host_wakeup_gpio), (void *)info);
+cleanup:
+ omap_free_gpio(info->btinfo->reset_gpio);
+ omap_free_gpio(info->btinfo->bt_wakeup_gpio);
+ omap_free_gpio(info->btinfo->host_wakeup_gpio);
+ kfree(info);
+
+ return err;
+}
+
+static void __exit brf6150_exit(void)
+{
+ brf6150_hci_close(exit_info->hdev);
+ hci_unregister_dev(exit_info->hdev);
+ hci_free_dev(exit_info->hdev);
+ omap_free_gpio(exit_info->btinfo->reset_gpio);
+ omap_free_gpio(exit_info->btinfo->bt_wakeup_gpio);
+ omap_free_gpio(exit_info->btinfo->host_wakeup_gpio);
+ free_irq(exit_info->irq, (void *)exit_info);
+ free_irq(OMAP_GPIO_IRQ(exit_info->btinfo->host_wakeup_gpio), (void *)exit_info);
+ kfree(exit_info);
+}
+
+module_init(brf6150_init);
+module_exit(brf6150_exit);
+
+MODULE_DESCRIPTION("brf6150 hci driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ville Tervo <ville.tervo@nokia.com>");
--- /dev/null
+/**
+ * drivers/cbus/retu.c
+ *
+ * Support functions for Retu ASIC
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
+ * David Weinehall <david.weinehall@nokia.com>, and
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/uaccess.h>
+
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/board.h>
+
+#include "cbus.h"
+#include "retu.h"
+
+#define RETU_ID 0x01
+#define PFX "retu: "
+
+static int retu_initialized;
+static int retu_irq_pin;
+static int retu_is_vilma;
+
+static struct tasklet_struct retu_tasklet;
+spinlock_t retu_lock = SPIN_LOCK_UNLOCKED;
+
+static struct completion device_release;
+
+struct retu_irq_handler_desc {
+ int (*func)(unsigned long);
+ unsigned long arg;
+ char name[8];
+};
+
+static struct retu_irq_handler_desc retu_irq_handlers[MAX_RETU_IRQ_HANDLERS];
+
+/**
+ * retu_read_reg - Read a value from a register in Retu
+ * @reg: the register to read from
+ *
+ * This function returns the contents of the specified register
+ */
+int retu_read_reg(int reg)
+{
+ BUG_ON(!retu_initialized);
+ return cbus_read_reg(cbus_host, RETU_ID, reg);
+}
+
+/**
+ * retu_write_reg - Write a value to a register in Retu
+ * @reg: the register to write to
+ * @val: the value to write to the register
+ *
+ * This function writes a value to the specified register
+ */
+void retu_write_reg(int reg, u16 val)
+{
+ BUG_ON(!retu_initialized);
+ cbus_write_reg(cbus_host, RETU_ID, reg, val);
+}
+
+void retu_set_clear_reg_bits(int reg, u16 set, u16 clear)
+{
+ unsigned long flags;
+ u16 w;
+
+ spin_lock_irqsave(&retu_lock, flags);
+ w = retu_read_reg(reg);
+ w &= ~clear;
+ w |= set;
+ retu_write_reg(reg, w);
+ spin_unlock_irqrestore(&retu_lock, flags);
+}
+
+#define ADC_MAX_CHAN_NUMBER 13
+
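+/*
+ * Read one sample from the given Retu ADC channel: the channel number
+ * is written to bits 10..13 of ADCR and the 10-bit result is read back
+ * from the same register. On Vilma, bit 13 of ADCR is written
+ * afterwards, apparently to trigger the next conversion.
+ */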
+int retu_read_adc(int channel)
+{
+ unsigned long flags;
+ int res;
+
+ if (channel < 0 || channel > ADC_MAX_CHAN_NUMBER)
+ return -EINVAL;
+
+ spin_lock_irqsave(&retu_lock, flags);
+
+ if ((channel == 8) && retu_is_vilma) {
+ int scr = retu_read_reg(RETU_REG_ADCSCR);
+ int ch = (retu_read_reg(RETU_REG_ADCR) >> 10) & 0xf;
+ if (((scr & 0xff) != 0) && (ch != 8))
+ retu_write_reg (RETU_REG_ADCSCR, (scr & ~0xff));
+ }
+
+ /* Select the channel and read result */
+ retu_write_reg(RETU_REG_ADCR, channel << 10);
+ res = retu_read_reg(RETU_REG_ADCR) & 0x3ff;
+
+ if (retu_is_vilma)
+ retu_write_reg(RETU_REG_ADCR, (1 << 13));
+
+ /* Unlock retu */
+ spin_unlock_irqrestore(&retu_lock, flags);
+
+ return res;
+}
+
+
+static u16 retu_disable_bogus_irqs(u16 mask)
+{
+ int i;
+
+ for (i = 0; i < MAX_RETU_IRQ_HANDLERS; i++) {
+ if (mask & (1 << i))
+ continue;
+ if (retu_irq_handlers[i].func != NULL)
+ continue;
+ /* an IRQ was enabled but we don't have a handler for it */
+ printk(KERN_INFO PFX "disabling bogus IRQ %d\n", i);
+ mask |= (1 << i);
+ }
+ return mask;
+}
+
+/*
+ * Disable given RETU interrupt
+ */
+void retu_disable_irq(int id)
+{
+ unsigned long flags;
+ u16 mask;
+
+ spin_lock_irqsave(&retu_lock, flags);
+ mask = retu_read_reg(RETU_REG_IMR);
+ mask |= 1 << id;
+ mask = retu_disable_bogus_irqs(mask);
+ retu_write_reg(RETU_REG_IMR, mask);
+ spin_unlock_irqrestore(&retu_lock, flags);
+}
+
+/*
+ * Enable given RETU interrupt
+ */
+void retu_enable_irq(int id)
+{
+ unsigned long flags;
+ u16 mask;
+
+ if (id == 3) {
+ printk("Enabling Retu IRQ %d\n", id);
+ dump_stack();
+ }
+ spin_lock_irqsave(&retu_lock, flags);
+ mask = retu_read_reg(RETU_REG_IMR);
+ mask &= ~(1 << id);
+ mask = retu_disable_bogus_irqs(mask);
+ retu_write_reg(RETU_REG_IMR, mask);
+ spin_unlock_irqrestore(&retu_lock, flags);
+}
+
+/*
+ * Acknowledge given RETU interrupt
+ */
+void retu_ack_irq(int id)
+{
+ retu_write_reg(RETU_REG_IDR, 1 << id);
+}
+
+/*
+ * RETU interrupt handler. Only schedules the tasklet.
+ */
+static irqreturn_t retu_irq_handler(int irq, void *dev_id)
+{
+ tasklet_schedule(&retu_tasklet);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Tasklet handler: read the pending (IDR) and unmasked (~IMR) Retu
+ * interrupt bits and dispatch to the registered handler for each set
+ * bit. Handlers must acknowledge their interrupt themselves.
+ */
+static void retu_tasklet_handler(unsigned long data)
+{
+ struct retu_irq_handler_desc *hnd;
+ u16 id;
+ u16 im;
+ int i;
+
+ for (;;) {
+ id = retu_read_reg(RETU_REG_IDR);
+ im = ~retu_read_reg(RETU_REG_IMR);
+ id &= im;
+
+ if (!id)
+ break;
+
+ for (i = 0; id != 0; i++, id >>= 1) {
+ if (!(id & 1))
+ continue;
+ hnd = &retu_irq_handlers[i];
+ if (hnd->func == NULL) {
+ /* Spurious retu interrupt - disable and ack it */
+ printk(KERN_INFO "Spurious Retu interrupt "
+ "(id %d)\n", i);
+ retu_disable_irq(i);
+ retu_ack_irq(i);
+ continue;
+ }
+ hnd->func(hnd->arg);
+ /*
+ * Don't acknowledge the interrupt here
+ * It must be done explicitly
+ */
+ }
+ }
+}
+
+/*
+ * Register the handler for a given RETU interrupt source.
+ */
+int retu_request_irq(int id, void *irq_handler, unsigned long arg, char *name)
+{
+ struct retu_irq_handler_desc *hnd;
+
+ if (irq_handler == NULL || id >= MAX_RETU_IRQ_HANDLERS ||
+ name == NULL) {
+ printk(KERN_ERR PFX "Invalid arguments to %s\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ hnd = &retu_irq_handlers[id];
+ if (hnd->func != NULL) {
+ printk(KERN_ERR PFX "IRQ %d already reserved\n", id);
+ return -EBUSY;
+ }
+ printk(KERN_INFO PFX "Registering interrupt %d for device %s\n",
+ id, name);
+ hnd->func = irq_handler;
+ hnd->arg = arg;
+ strlcpy(hnd->name, name, sizeof(hnd->name));
+
+ retu_ack_irq(id);
+ retu_enable_irq(id);
+
+ return 0;
+}
+
+/*
+ * Unregister the handler for a given RETU interrupt source.
+ */
+void retu_free_irq(int id)
+{
+ struct retu_irq_handler_desc *hnd;
+
+ if (id >= MAX_RETU_IRQ_HANDLERS) {
+ printk(KERN_ERR PFX "Invalid argument to %s\n",
+ __FUNCTION__);
+ return;
+ }
+ hnd = &retu_irq_handlers[id];
+ if (hnd->func == NULL) {
+ printk(KERN_ERR PFX "IRQ %d already freed\n", id);
+ return;
+ }
+
+ retu_disable_irq(id);
+ hnd->func = NULL;
+}
+
+/**
+ * retu_power_off - Shut down power to system
+ *
+ * This function puts the system in power off state
+ */
+static void retu_power_off(void)
+{
+ /* Ignore power button state */
+ retu_write_reg(RETU_REG_CC1, retu_read_reg(RETU_REG_CC1) | 2);
+ /* Expire watchdog immediately */
+ retu_write_reg(RETU_REG_WATCHDOG, 0);
+ /* Wait for poweroff*/
+ for (;;);
+}
+
+/**
+ * retu_probe - Probe for Retu ASIC
+ * @dev: the Retu device
+ *
+ * Probe for the Retu ASIC and allocate memory
+ * for its device-struct if found
+ */
+static int __devinit retu_probe(struct device *dev)
+{
+ const struct omap_em_asic_bb5_config * em_asic_config;
+ int rev, ret;
+
+ /* Prepare tasklet */
+ tasklet_init(&retu_tasklet, retu_tasklet_handler, 0);
+
+ em_asic_config = omap_get_config(OMAP_TAG_EM_ASIC_BB5,
+ struct omap_em_asic_bb5_config);
+ if (em_asic_config == NULL) {
+ printk(KERN_ERR PFX "Unable to retrieve config data\n");
+ return -ENODATA;
+ }
+
+ retu_irq_pin = em_asic_config->retu_irq_gpio;
+
+ if ((ret = omap_request_gpio(retu_irq_pin)) < 0) {
+ printk(KERN_ERR PFX "Unable to reserve IRQ GPIO\n");
+ return ret;
+ }
+
+ /* Set the pin as input */
+ omap_set_gpio_direction(retu_irq_pin, 1);
+
+ /* Rising edge triggers the IRQ */
- set_irq_type(OMAP_GPIO_IRQ(retu_irq_pin), IRQT_RISING);
++ set_irq_type(OMAP_GPIO_IRQ(retu_irq_pin), IRQ_TYPE_EDGE_RISING);
+
+ retu_initialized = 1;
+
+ rev = retu_read_reg(RETU_REG_ASICR) & 0xff;
+ if (rev & (1 << 7))
+ retu_is_vilma = 1;
+
+ printk(KERN_INFO "%s v%d.%d found\n", retu_is_vilma ? "Vilma" : "Retu",
+ (rev >> 4) & 0x07, rev & 0x0f);
+
+ /* Mask all RETU interrupts */
+ retu_write_reg(RETU_REG_IMR, 0xffff);
+
+ ret = request_irq(OMAP_GPIO_IRQ(retu_irq_pin), retu_irq_handler, 0,
+ "retu", 0);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "Unable to register IRQ handler\n");
+ omap_free_gpio(retu_irq_pin);
+ return ret;
+ }
+ set_irq_wake(OMAP_GPIO_IRQ(retu_irq_pin), 1);
+
+ /* Register power off function */
+ pm_power_off = retu_power_off;
+
+#ifdef CONFIG_CBUS_RETU_USER
+ /* Initialize user-space interface */
+ ret = retu_user_init();
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to initialize driver\n");
+ free_irq(OMAP_GPIO_IRQ(retu_irq_pin), 0);
+ omap_free_gpio(retu_irq_pin);
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+
+static int retu_remove(struct device *dev)
+{
+#ifdef CONFIG_CBUS_RETU_USER
+ retu_user_cleanup();
+#endif
+ /* Mask all RETU interrupts */
+ retu_write_reg(RETU_REG_IMR, 0xffff);
+ free_irq(OMAP_GPIO_IRQ(retu_irq_pin), 0);
+ omap_free_gpio(retu_irq_pin);
+ tasklet_kill(&retu_tasklet);
+
+ return 0;
+}
+
+static void retu_device_release(struct device *dev)
+{
+ complete(&device_release);
+}
+
+static struct device_driver retu_driver = {
+ .name = "retu",
+ .bus = &platform_bus_type,
+ .probe = retu_probe,
+ .remove = retu_remove,
+};
+
+static struct platform_device retu_device = {
+ .name = "retu",
+ .id = -1,
+ .dev = {
+ .release = retu_device_release,
+ }
+};
+
+/**
+ * retu_init - initialise Retu driver
+ *
+ * Initialise the Retu driver and return 0 if everything worked ok
+ */
+static int __init retu_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Retu/Vilma driver initialising\n");
+
+ init_completion(&device_release);
+
+ if ((ret = driver_register(&retu_driver)) < 0)
+ return ret;
+
+ if ((ret = platform_device_register(&retu_device)) < 0) {
+ driver_unregister(&retu_driver);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Cleanup
+ */
+static void __exit retu_exit(void)
+{
+ platform_device_unregister(&retu_device);
+ driver_unregister(&retu_driver);
+ wait_for_completion(&device_release);
+}
+
+EXPORT_SYMBOL(retu_request_irq);
+EXPORT_SYMBOL(retu_free_irq);
+EXPORT_SYMBOL(retu_enable_irq);
+EXPORT_SYMBOL(retu_disable_irq);
+EXPORT_SYMBOL(retu_ack_irq);
+EXPORT_SYMBOL(retu_read_reg);
+EXPORT_SYMBOL(retu_write_reg);
+
+subsys_initcall(retu_init);
+module_exit(retu_exit);
+
+MODULE_DESCRIPTION("Retu ASIC control");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Juha Yrjölä, David Weinehall, and Mikko Ylinen");
--- /dev/null
+/**
+ * drivers/cbus/tahvo.c
+ *
+ * Support functions for Tahvo ASIC
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
+ * David Weinehall <david.weinehall@nokia.com>, and
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/uaccess.h>
+
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/board.h>
+
+#include "cbus.h"
+#include "tahvo.h"
+
+#define TAHVO_ID 0x02
+#define PFX "tahvo: "
+
+static int tahvo_initialized;
+static int tahvo_irq_pin;
+static int tahvo_is_betty;
+
+static struct tasklet_struct tahvo_tasklet;
+spinlock_t tahvo_lock = SPIN_LOCK_UNLOCKED;
+
+static struct completion device_release;
+
+struct tahvo_irq_handler_desc {
+ int (*func)(unsigned long);
+ unsigned long arg;
+ char name[8];
+};
+
+static struct tahvo_irq_handler_desc tahvo_irq_handlers[MAX_TAHVO_IRQ_HANDLERS];
+
+/**
+ * tahvo_read_reg - Read a value from a register in Tahvo
+ * @reg: the register to read from
+ *
+ * This function returns the contents of the specified register
+ */
+int tahvo_read_reg(int reg)
+{
+ BUG_ON(!tahvo_initialized);
+ return cbus_read_reg(cbus_host, TAHVO_ID, reg);
+}
+
+/**
+ * tahvo_write_reg - Write a value to a register in Tahvo
+ * @reg: the register to write to
+ * @val: the value to write to the register
+ *
+ * This function writes a value to the specified register
+ */
+void tahvo_write_reg(int reg, u16 val)
+{
+ BUG_ON(!tahvo_initialized);
+ cbus_write_reg(cbus_host, TAHVO_ID, reg, val);
+}
+
+/**
+ * tahvo_set_clear_reg_bits - set and clear register bits atomically
+ * @reg: the register to write to
+ * @set: the bits to set
+ * @clear: the bits to clear
+ *
+ * This function sets and clears the specified Tahvo register bits atomically
+ */
+void tahvo_set_clear_reg_bits(int reg, u16 set, u16 clear)
+{
+ unsigned long flags;
+ u16 w;
+
+ spin_lock_irqsave(&tahvo_lock, flags);
+ w = tahvo_read_reg(reg);
+ w &= ~clear;
+ w |= set;
+ tahvo_write_reg(reg, w);
+ spin_unlock_irqrestore(&tahvo_lock, flags);
+}
+
+/*
+ * Disable given TAHVO interrupt
+ */
+void tahvo_disable_irq(int id)
+{
+ unsigned long flags;
+ u16 mask;
+
+ spin_lock_irqsave(&tahvo_lock, flags);
+ mask = tahvo_read_reg(TAHVO_REG_IMR);
+ mask |= 1 << id;
+ tahvo_write_reg(TAHVO_REG_IMR, mask);
+ spin_unlock_irqrestore(&tahvo_lock, flags);
+}
+
+/*
+ * Enable given TAHVO interrupt
+ */
+void tahvo_enable_irq(int id)
+{
+ unsigned long flags;
+ u16 mask;
+
+ spin_lock_irqsave(&tahvo_lock, flags);
+ mask = tahvo_read_reg(TAHVO_REG_IMR);
+ mask &= ~(1 << id);
+ tahvo_write_reg(TAHVO_REG_IMR, mask);
+ spin_unlock_irqrestore(&tahvo_lock, flags);
+}
+
+/*
+ * Acknowledge given TAHVO interrupt
+ */
+void tahvo_ack_irq(int id)
+{
+ tahvo_write_reg(TAHVO_REG_IDR, 1 << id);
+}
+
+static int tahvo_7bit_backlight;
+
+int tahvo_get_backlight_level(void)
+{
+ int mask;
+
+ if (tahvo_7bit_backlight)
+ mask = 0x7f;
+ else
+ mask = 0x0f;
+ return tahvo_read_reg(TAHVO_REG_LEDPWMR) & mask;
+}
+
+int tahvo_get_max_backlight_level(void)
+{
+ if (tahvo_7bit_backlight)
+ return 0x7f;
+ else
+ return 0x0f;
+}
+
+void tahvo_set_backlight_level(int level)
+{
+ int max_level;
+
+ max_level = tahvo_get_max_backlight_level();
+ if (level > max_level)
+ level = max_level;
+ tahvo_write_reg(TAHVO_REG_LEDPWMR, level);
+}
+
+/*
+ * TAHVO interrupt handler. Only schedules the tasklet.
+ */
+static irqreturn_t tahvo_irq_handler(int irq, void *dev_id)
+{
+ tasklet_schedule(&tahvo_tasklet);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Tasklet handler
+ */
+static void tahvo_tasklet_handler(unsigned long data)
+{
+ struct tahvo_irq_handler_desc *hnd;
+ u16 id;
+ u16 im;
+ int i;
+
+ for (;;) {
+ id = tahvo_read_reg(TAHVO_REG_IDR);
+ im = ~tahvo_read_reg(TAHVO_REG_IMR);
+ id &= im;
+
+ if (!id)
+ break;
+
+ for (i = 0; id != 0; i++, id >>= 1) {
+ if (!(id & 1))
+ continue;
+ hnd = &tahvo_irq_handlers[i];
+ if (hnd->func == NULL) {
+ /* Spurious tahvo interrupt - disable and ack it */
+ printk(KERN_INFO "Spurious Tahvo interrupt "
+ "(id %d)\n", i);
+ tahvo_disable_irq(i);
+ tahvo_ack_irq(i);
+ continue;
+ }
+ hnd->func(hnd->arg);
+ /*
+ * Don't acknowledge the interrupt here
+ * It must be done explicitly
+ */
+ }
+ }
+}
+
+/*
+ * Register the handler for a given TAHVO interrupt source.
+ */
+int tahvo_request_irq(int id, void *irq_handler, unsigned long arg, char *name)
+{
+ struct tahvo_irq_handler_desc *hnd;
+
+ if (irq_handler == NULL || id >= MAX_TAHVO_IRQ_HANDLERS ||
+ name == NULL) {
+ printk(KERN_ERR PFX "Invalid arguments to %s\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ hnd = &tahvo_irq_handlers[id];
+ if (hnd->func != NULL) {
+ printk(KERN_ERR PFX "IRQ %d already reserved\n", id);
+ return -EBUSY;
+ }
+ printk(KERN_INFO PFX "Registering interrupt %d for device %s\n",
+ id, name);
+ hnd->func = irq_handler;
+ hnd->arg = arg;
+ strlcpy(hnd->name, name, sizeof(hnd->name));
+
+ tahvo_ack_irq(id);
+ tahvo_enable_irq(id);
+
+ return 0;
+}
+
+/*
+ * Unregister the handler for a given TAHVO interrupt source.
+ */
+void tahvo_free_irq(int id)
+{
+ struct tahvo_irq_handler_desc *hnd;
+
+ if (id >= MAX_TAHVO_IRQ_HANDLERS) {
+ printk(KERN_ERR PFX "Invalid argument to %s\n",
+ __FUNCTION__);
+ return;
+ }
+ hnd = &tahvo_irq_handlers[id];
+ if (hnd->func == NULL) {
+ printk(KERN_ERR PFX "IRQ %d already freed\n", id);
+ return;
+ }
+
+ tahvo_disable_irq(id);
+ hnd->func = NULL;
+}
+
+/**
+ * tahvo_probe - Probe for Tahvo ASIC
+ * @dev: the Tahvo device
+ *
+ * Probe for the Tahvo ASIC and allocate memory
+ * for its device-struct if found
+ */
+static int __devinit tahvo_probe(struct device *dev)
+{
+ const struct omap_em_asic_bb5_config * em_asic_config;
+ int rev, id, ret;
+
+ /* Prepare tasklet */
+ tasklet_init(&tahvo_tasklet, tahvo_tasklet_handler, 0);
+
+ em_asic_config = omap_get_config(OMAP_TAG_EM_ASIC_BB5,
+ struct omap_em_asic_bb5_config);
+ if (em_asic_config == NULL) {
+ printk(KERN_ERR PFX "Unable to retrieve config data\n");
+ return -ENODATA;
+ }
+
+ tahvo_initialized = 1;
+
+ rev = tahvo_read_reg(TAHVO_REG_ASICR);
+
+ id = (rev >> 8) & 0xff;
+ if (id == 0x03) {
+ if ((rev & 0xff) >= 0x50)
+ tahvo_7bit_backlight = 1;
+ } else if (id == 0x0b) {
+ tahvo_is_betty = 1;
+ tahvo_7bit_backlight = 1;
+ } else {
+ printk(KERN_ERR "Tahvo/Betty chip not found\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "%s v%d.%d found\n", tahvo_is_betty ? "Betty" : "Tahvo",
+ (rev >> 4) & 0x0f, rev & 0x0f);
+
+ tahvo_irq_pin = em_asic_config->tahvo_irq_gpio;
+
+ if ((ret = omap_request_gpio(tahvo_irq_pin)) < 0) {
+ printk(KERN_ERR PFX "Unable to reserve IRQ GPIO\n");
+ return ret;
+ }
+
+ /* Set the pin as input */
+ omap_set_gpio_direction(tahvo_irq_pin, 1);
+
+ /* Rising edge triggers the IRQ */
- set_irq_type(OMAP_GPIO_IRQ(tahvo_irq_pin), IRQT_RISING);
++ set_irq_type(OMAP_GPIO_IRQ(tahvo_irq_pin), IRQ_TYPE_EDGE_RISING);
+
+ /* Mask all TAHVO interrupts */
+ tahvo_write_reg(TAHVO_REG_IMR, 0xffff);
+
+ ret = request_irq(OMAP_GPIO_IRQ(tahvo_irq_pin), tahvo_irq_handler, 0,
+ "tahvo", 0);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "Unable to register IRQ handler\n");
+ omap_free_gpio(tahvo_irq_pin);
+ return ret;
+ }
+#ifdef CONFIG_CBUS_TAHVO_USER
+ /* Initialize user-space interface */
+ ret = tahvo_user_init();
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to initialize driver\n");
+ free_irq(OMAP_GPIO_IRQ(tahvo_irq_pin), 0);
+ omap_free_gpio(tahvo_irq_pin);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+static int tahvo_remove(struct device *dev)
+{
+#ifdef CONFIG_CBUS_TAHVO_USER
+ tahvo_user_cleanup();
+#endif
+ /* Mask all TAHVO interrupts */
+ tahvo_write_reg(TAHVO_REG_IMR, 0xffff);
+ free_irq(OMAP_GPIO_IRQ(tahvo_irq_pin), 0);
+ omap_free_gpio(tahvo_irq_pin);
+ tasklet_kill(&tahvo_tasklet);
+
+ return 0;
+}
+
+static void tahvo_device_release(struct device *dev)
+{
+ complete(&device_release);
+}
+
+static struct device_driver tahvo_driver = {
+ .name = "tahvo",
+ .bus = &platform_bus_type,
+ .probe = tahvo_probe,
+ .remove = tahvo_remove,
+};
+
+static struct platform_device tahvo_device = {
+ .name = "tahvo",
+ .id = -1,
+ .dev = {
+ .release = tahvo_device_release,
+ }
+};
+
+/**
+ * tahvo_init - initialise Tahvo driver
+ *
+ * Initialise the Tahvo driver and return 0 if everything worked ok
+ */
+static int __init tahvo_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Tahvo/Betty driver initialising\n");
+
+ init_completion(&device_release);
+
+ if ((ret = driver_register(&tahvo_driver)) < 0)
+ return ret;
+
+ if ((ret = platform_device_register(&tahvo_device)) < 0) {
+ driver_unregister(&tahvo_driver);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Cleanup
+ */
+static void __exit tahvo_exit(void)
+{
+ platform_device_unregister(&tahvo_device);
+ driver_unregister(&tahvo_driver);
+ wait_for_completion(&device_release);
+}
+
+EXPORT_SYMBOL(tahvo_request_irq);
+EXPORT_SYMBOL(tahvo_free_irq);
+EXPORT_SYMBOL(tahvo_enable_irq);
+EXPORT_SYMBOL(tahvo_disable_irq);
+EXPORT_SYMBOL(tahvo_ack_irq);
+EXPORT_SYMBOL(tahvo_read_reg);
+EXPORT_SYMBOL(tahvo_write_reg);
+EXPORT_SYMBOL(tahvo_get_backlight_level);
+EXPORT_SYMBOL(tahvo_get_max_backlight_level);
+EXPORT_SYMBOL(tahvo_set_backlight_level);
+
+subsys_initcall(tahvo_init);
+module_exit(tahvo_exit);
+
+MODULE_DESCRIPTION("Tahvo ASIC control");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Juha Yrjölä, David Weinehall, and Mikko Ylinen");
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_OMAP_SHA1_MD5) += omap-sha1-md5.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
--- /dev/null
+/*
+ * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
+ *
+ * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/arch/mailbox.h>
+#include <asm/arch/dsp.h>
+#include "uaccess_dsp.h"
+#include "dsp_mbcmd.h"
+#include "dsp.h"
+#include "ipbuf.h"
+#include "proclist.h"
+
+/*
+ * devstate: task device state machine
+ * NOTASK: task is not attached.
+ * ATTACHED: task is attached.
+ * GARBAGE: task is detached. waiting for all processes to close this device.
+ * ADDREQ: requesting for tadd
+ * DELREQ: requesting for tdel. no process is opening this device.
+ * FREEZED: task is attached, but reserved to be killed.
+ * ADDFAIL: tadd failed.
+ * ADDING: tadd in process.
+ * DELING: tdel in process.
+ * KILLING: tkill in process.
+ */
+#define TASKDEV_ST_NOTASK 0x00000001
+#define TASKDEV_ST_ATTACHED 0x00000002
+#define TASKDEV_ST_GARBAGE 0x00000004
+#define TASKDEV_ST_INVALID 0x00000008
+#define TASKDEV_ST_ADDREQ 0x00000100
+#define TASKDEV_ST_DELREQ 0x00000200
+#define TASKDEV_ST_FREEZED 0x00000400
+#define TASKDEV_ST_ADDFAIL 0x00001000
+#define TASKDEV_ST_ADDING 0x00010000
+#define TASKDEV_ST_DELING 0x00020000
+#define TASKDEV_ST_KILLING 0x00040000
+#define TASKDEV_ST_STATE_MASK 0x7fffffff
+#define TASKDEV_ST_STALE 0x80000000
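+/* TASKDEV_ST_STALE sits outside TASKDEV_ST_STATE_MASK, so a device can
+ * be flagged stale without clobbering the state bits above. */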
+
+static struct {
+ long state;
+ char *name;
+} devstate_desc[] = {
+ { TASKDEV_ST_NOTASK, "notask" },
+ { TASKDEV_ST_ATTACHED, "attached" },
+ { TASKDEV_ST_GARBAGE, "garbage" },
+ { TASKDEV_ST_INVALID, "invalid" },
+ { TASKDEV_ST_ADDREQ, "addreq" },
+ { TASKDEV_ST_DELREQ, "delreq" },
+ { TASKDEV_ST_FREEZED, "freezed" },
+ { TASKDEV_ST_ADDFAIL, "addfail" },
+ { TASKDEV_ST_ADDING, "adding" },
+ { TASKDEV_ST_DELING, "deling" },
+ { TASKDEV_ST_KILLING, "killing" },
+};
+
+static char *devstate_name(long state)
+{
+ int i;
+ int max = ARRAY_SIZE(devstate_desc);
+
+ for (i = 0; i < max; i++) {
+ if (state & devstate_desc[i].state)
+ return devstate_desc[i].name;
+ }
+ return "unknown";
+}
+
+struct rcvdt_bk_struct {
+ struct ipblink link;
+ unsigned int rp;
+};
+
+struct taskdev {
+ struct bus_type *bus;
+ struct device dev; /* Generic device interface */
+
+ long state;
+ struct rw_semaphore state_sem;
+ wait_queue_head_t state_wait_q;
+ struct mutex usecount_lock;
+ unsigned int usecount;
+ char name[TNM_LEN];
+ struct file_operations fops;
+ spinlock_t proc_list_lock;
+ struct list_head proc_list;
+ struct dsptask *task;
+
+ /* read stuff */
+ wait_queue_head_t read_wait_q;
+ struct mutex read_mutex;
+ spinlock_t read_lock;
+ union {
+ struct kfifo *fifo; /* for active word */
+ struct rcvdt_bk_struct bk;
+ } rcvdt;
+
+ /* write stuff */
+ wait_queue_head_t write_wait_q;
+ struct mutex write_mutex;
+ spinlock_t wsz_lock;
+ size_t wsz;
+
+ /* tctl stuff */
+ wait_queue_head_t tctl_wait_q;
+ struct mutex tctl_mutex;
+ int tctl_stat;
+ int tctl_ret; /* return value for tctl_show() */
+
+ /* device lock */
+ struct mutex lock;
+ pid_t lock_pid;
+};
+
+#define to_taskdev(n) container_of(n, struct taskdev, dev)
+
+struct dsptask {
+ enum {
+ TASK_ST_ERR = 0,
+ TASK_ST_READY,
+ TASK_ST_CFGREQ
+ } state;
+ u8 tid;
+ char name[TNM_LEN];
+ u16 ttyp;
+ struct taskdev *dev;
+
+ /* read stuff */
+ struct ipbuf_p *ipbuf_pvt_r;
+
+ /* write stuff */
+ struct ipbuf_p *ipbuf_pvt_w;
+
+ /* mmap stuff */
+ void *map_base;
+ size_t map_length;
+};
+
+#define sndtyp_acv(ttyp) ((ttyp) & TTYP_ASND)
+#define sndtyp_psv(ttyp) (!((ttyp) & TTYP_ASND))
+#define sndtyp_bk(ttyp) ((ttyp) & TTYP_BKDM)
+#define sndtyp_wd(ttyp) (!((ttyp) & TTYP_BKDM))
+#define sndtyp_pvt(ttyp) ((ttyp) & TTYP_PVDM)
+#define sndtyp_gbl(ttyp) (!((ttyp) & TTYP_PVDM))
+#define rcvtyp_acv(ttyp) ((ttyp) & TTYP_ARCV)
+#define rcvtyp_psv(ttyp) (!((ttyp) & TTYP_ARCV))
+#define rcvtyp_bk(ttyp) ((ttyp) & TTYP_BKMD)
+#define rcvtyp_wd(ttyp) (!((ttyp) & TTYP_BKMD))
+#define rcvtyp_pvt(ttyp) ((ttyp) & TTYP_PVMD)
+#define rcvtyp_gbl(ttyp) (!((ttyp) & TTYP_PVMD))
+
+static inline int has_taskdev_lock(struct taskdev *dev);
+static int dsp_rmdev_minor(unsigned char minor);
+static int taskdev_init(struct taskdev *dev, char *name, unsigned char minor);
+static void taskdev_delete(unsigned char minor);
+static int taskdev_attach_task(struct taskdev *dev, struct dsptask *task);
+static int dsp_tdel_bh(struct taskdev *dev, u16 type);
+
+static struct bus_type dsptask_bus = {
+ .name = "dsptask",
+};
+
+static struct class *dsp_task_class;
+static DEFINE_MUTEX(devmgr_lock);
+static struct taskdev *taskdev[TASKDEV_MAX];
+static struct dsptask *dsptask[TASKDEV_MAX];
+static DEFINE_MUTEX(cfg_lock);
+static u16 cfg_cmd;
+static u8 cfg_tid;
+static DECLARE_WAIT_QUEUE_HEAD(cfg_wait_q);
+static u8 n_task; /* static task count */
+static void *heap;
+
+#define is_dynamic_task(tid) ((tid) >= n_task)
+
+#define devstate_read_lock(dev, devstate) \
+ devstate_read_lock_timeout(dev, devstate, 0)
+#define devstate_read_unlock(dev) up_read(&(dev)->state_sem)
+#define devstate_write_lock(dev, devstate) \
+ devstate_write_lock_timeout(dev, devstate, 0)
+#define devstate_write_unlock(dev) up_write(&(dev)->state_sem)
+
+static ssize_t devname_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t devstate_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t proc_list_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t taskname_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t ttyp_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t fifosz_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static int fifosz_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t fifocnt_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t ipblink_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t wsz_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+static ssize_t mmap_show(struct device *d, struct device_attribute *attr,
+ char *buf);
+
+#define __ATTR_RW(_name,_mode) { \
+ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
+
+static struct device_attribute dev_attr_devname = __ATTR_RO(devname);
+static struct device_attribute dev_attr_devstate = __ATTR_RO(devstate);
+static struct device_attribute dev_attr_proc_list = __ATTR_RO(proc_list);
+static struct device_attribute dev_attr_taskname = __ATTR_RO(taskname);
+static struct device_attribute dev_attr_ttyp = __ATTR_RO(ttyp);
+static struct device_attribute dev_attr_fifosz = __ATTR_RW(fifosz, 0666);
+static struct device_attribute dev_attr_fifocnt = __ATTR_RO(fifocnt);
+static struct device_attribute dev_attr_ipblink = __ATTR_RO(ipblink);
+static struct device_attribute dev_attr_wsz = __ATTR_RO(wsz);
+static struct device_attribute dev_attr_mmap = __ATTR_RO(mmap);
+
+static inline void set_taskdev_state(struct taskdev *dev, int state)
+{
+ pr_debug("omapdsp: devstate: CHANGE %s[%d]:\"%s\"->\"%s\"\n",
+ dev->name,
+ (dev->task ? dev->task->tid : -1),
+ devstate_name(dev->state),
+ devstate_name(state));
+ dev->state = state;
+}
+
+/*
+ * devstate_read_lock_timeout()
+ * devstate_write_lock_timeout():
+ * timeout != 0: dev->state can be different from what you want.
+ * timeout == 0: no timeout
+ */
+#define BUILD_DEVSTATE_LOCK_TIMEOUT(rw) \
+static int devstate_##rw##_lock_timeout(struct taskdev *dev, long devstate, \
+ int timeout) \
+{ \
+ DEFINE_WAIT(wait); \
+ down_##rw(&dev->state_sem); \
+ while (!(dev->state & devstate)) { \
+ up_##rw(&dev->state_sem); \
+ prepare_to_wait(&dev->state_wait_q, &wait, TASK_INTERRUPTIBLE); \
+ if (!timeout) \
+ timeout = MAX_SCHEDULE_TIMEOUT; \
+ timeout = schedule_timeout(timeout); \
+ finish_wait(&dev->state_wait_q, &wait); \
+ if (timeout == 0) \
+ return -ETIME; \
+ if (signal_pending(current)) \
+ return -EINTR; \
+ down_##rw(&dev->state_sem); \
+ } \
+ return 0; \
+}
+BUILD_DEVSTATE_LOCK_TIMEOUT(read)
+BUILD_DEVSTATE_LOCK_TIMEOUT(write)
+
+#define BUILD_DEVSTATE_LOCK_AND_TEST(rw) \
+static int devstate_##rw##_lock_and_test(struct taskdev *dev, long devstate) \
+{ \
+ down_##rw(&dev->state_sem); \
+ if (dev->state & devstate) \
+ return 1; /* success */ \
+ /* failure */ \
+ up_##rw(&dev->state_sem); \
+ return 0; \
+}
+BUILD_DEVSTATE_LOCK_AND_TEST(read)
+BUILD_DEVSTATE_LOCK_AND_TEST(write)
+
+static int taskdev_lock_interruptible(struct taskdev *dev,
+ struct mutex *lock)
+{
+ int ret;
+
+ if (has_taskdev_lock(dev))
+ ret = mutex_lock_interruptible(lock);
+ else {
+ if ((ret = mutex_lock_interruptible(&dev->lock)) != 0)
+ return ret;
+ ret = mutex_lock_interruptible(lock);
+ mutex_unlock(&dev->lock);
+ }
+
+ return ret;
+}
+
+static int taskdev_lock_and_statelock_attached(struct taskdev *dev,
+ struct mutex *lock)
+{
+ int ret;
+
+ if (!devstate_read_lock_and_test(dev, TASKDEV_ST_ATTACHED))
+ return -ENODEV;
+
+ if ((ret = taskdev_lock_interruptible(dev, lock)) != 0)
+ devstate_read_unlock(dev);
+
+ return ret;
+}
+
+static inline void taskdev_unlock_and_stateunlock(struct taskdev *dev,
+ struct mutex *lock)
+{
+ mutex_unlock(lock);
+ devstate_read_unlock(dev);
+}
+
+/*
+ * taskdev_flush_buf()
+ * must be called under state_lock(ATTACHED) and dev->read_mutex.
+ */
+static int taskdev_flush_buf(struct taskdev *dev)
+{
+ u16 ttyp = dev->task->ttyp;
+
+ if (sndtyp_wd(ttyp)) {
+ /* word receiving */
+ kfifo_reset(dev->rcvdt.fifo);
+ } else {
+ /* block receiving */
+ struct rcvdt_bk_struct *rcvdt = &dev->rcvdt.bk;
+
+ if (sndtyp_gbl(ttyp))
+ ipblink_flush(&rcvdt->link);
+ else {
+ ipblink_flush_pvt(&rcvdt->link);
+ release_ipbuf_pvt(dev->task->ipbuf_pvt_r);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * taskdev_set_fifosz()
+ * must be called under dev->read_mutex.
+ */
+static int taskdev_set_fifosz(struct taskdev *dev, unsigned long sz)
+{
+ u16 ttyp = dev->task->ttyp;
+
+ if (!(sndtyp_wd(ttyp) && sndtyp_acv(ttyp))) {
+ printk(KERN_ERR
+ "omapdsp: buffer size can be changed only for "
+ "active word sending task.\n");
+ return -EINVAL;
+ }
+ if ((sz == 0) || (sz & 1)) {
+ printk(KERN_ERR "omapdsp: illegal buffer size! (%ld)\n"
+ "It must be a non-zero even value.\n", sz);
+ return -EINVAL;
+ }
+
+ if (kfifo_len(dev->rcvdt.fifo)) {
+ printk(KERN_ERR "omapdsp: buffer is not empty!\n");
+ return -EIO;
+ }
+
+ kfifo_free(dev->rcvdt.fifo);
+ dev->rcvdt.fifo = kfifo_alloc(sz, GFP_KERNEL, &dev->read_lock);
+ if (IS_ERR(dev->rcvdt.fifo)) {
+ printk(KERN_ERR
+ "omapdsp: unable to change receive buffer size. "
+ "(%ld bytes for %s)\n", sz, dev->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline int has_taskdev_lock(struct taskdev *dev)
+{
+ return (dev->lock_pid == current->pid);
+}
+
+static int taskdev_lock(struct taskdev *dev)
+{
+ if (mutex_lock_interruptible(&dev->lock))
+ return -EINTR;
+ dev->lock_pid = current->pid;
+ return 0;
+}
+
+static int taskdev_unlock(struct taskdev *dev)
+{
+ if (!has_taskdev_lock(dev)) {
+ printk(KERN_ERR
+ "omapdsp: an illegal process attempted to "
+ "unlock the dsptask lock!\n");
+ return -EINVAL;
+ }
+ dev->lock_pid = 0;
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
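+/*
+ * Ask the DSP for the configuration of task <tid> via a TCFG mailbox
+ * command, then sanity-check the reported task type, private ipbuf
+ * addresses and mmap area before the task device is exposed.
+ */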
+static int dsp_task_config(struct dsptask *task, u8 tid)
+{
+ u16 ttyp;
+ int ret;
+
+ task->tid = tid;
+ dsptask[tid] = task;
+
+ /* TCFG request */
+ task->state = TASK_ST_CFGREQ;
+ if (mutex_lock_interruptible(&cfg_lock)) {
+ ret = -EINTR;
+ goto fail_out;
+ }
+ cfg_cmd = MBOX_CMD_DSP_TCFG;
+ mbcompose_send_and_wait(TCFG, tid, 0, &cfg_wait_q);
+ cfg_cmd = 0;
+ mutex_unlock(&cfg_lock);
+
+ if (task->state != TASK_ST_READY) {
+ printk(KERN_ERR "omapdsp: task %d configuration error!\n", tid);
+ ret = -EINVAL;
+ goto fail_out;
+ }
+
+ if (strlen(task->name) <= 1)
+ sprintf(task->name, "%d", tid);
+ pr_info("omapdsp: task %d: name %s\n", tid, task->name);
+
+ ttyp = task->ttyp;
+
+ /*
+ * task info sanity check
+ */
+
+ /* task type check */
+ if (rcvtyp_psv(ttyp) && rcvtyp_pvt(ttyp)) {
+ printk(KERN_ERR "omapdsp: illegal task type(0x%04x), tid=%d\n",
+ ttyp, tid);
+ ret = -EINVAL;
+ goto fail_out;
+ }
+
+ /* private buffer address check */
+ if (sndtyp_pvt(ttyp) &&
+ (ipbuf_p_validate(task->ipbuf_pvt_r, DIR_D2A) < 0)) {
+ ret = -EINVAL;
+ goto fail_out;
+ }
+ if (rcvtyp_pvt(ttyp) &&
+ (ipbuf_p_validate(task->ipbuf_pvt_w, DIR_A2D) < 0)) {
+ ret = -EINVAL;
+ goto fail_out;
+ }
+
+ /* mmap buffer configuration check */
+ if ((task->map_length > 0) &&
+ (((unsigned long)task->map_base & (PAGE_SIZE - 1)) ||
+ (task->map_length & (PAGE_SIZE - 1)) ||
+ (dsp_mem_type(task->map_base, task->map_length) != MEM_TYPE_EXTERN))) {
+ printk(KERN_ERR
+ "omapdsp: illegal mmap buffer address(0x%p) or "
+ "length(0x%x).\n"
+ " It needs to be page-aligned and located at "
+ "external memory.\n",
+ task->map_base, task->map_length);
+ ret = -EINVAL;
+ goto fail_out;
+ }
+
+ return 0;
+
+fail_out:
+ dsptask[tid] = NULL;
+ return ret;
+}
+
+static void dsp_task_init(struct dsptask *task)
+{
+ mbcompose_send(TCTL, task->tid, TCTL_TINIT);
+}
+
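+/*
+ * Called once the DSP has reported its static task count: task and
+ * device descriptors are carved out of a single kzalloc'd heap, and
+ * each static task is configured, bound to a task device and started.
+ */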
+int dsp_task_config_all(u8 n)
+{
+ int i, ret;
+ struct taskdev *devheap;
+ struct dsptask *taskheap;
+ size_t devheapsz, taskheapsz;
+
+ pr_info("omapdsp: found %d task(s)\n", n);
+ if (n == 0)
+ return 0;
+
+ /*
+ * reducing kmalloc!
+ */
+ devheapsz = sizeof(struct taskdev) * n;
+ taskheapsz = sizeof(struct dsptask) * n;
+ heap = kzalloc(devheapsz + taskheapsz, GFP_KERNEL);
+ if (heap == NULL)
+ return -ENOMEM;
+ devheap = heap;
+ taskheap = heap + devheapsz;
+
+ n_task = n;
+ for (i = 0; i < n; i++) {
+ struct taskdev *dev = &devheap[i];
+ struct dsptask *task = &taskheap[i];
+
+ if ((ret = dsp_task_config(task, i)) < 0)
+ return ret;
+ if ((ret = taskdev_init(dev, task->name, i)) < 0)
+ return ret;
+ if ((ret = taskdev_attach_task(dev, task)) < 0)
+ return ret;
+ dsp_task_init(task);
+ pr_info("omapdsp: taskdev %s enabled.\n", dev->name);
+ }
+
+ return 0;
+}
+
+static void dsp_task_unconfig(struct dsptask *task)
+{
+ dsptask[task->tid] = NULL;
+}
+
+void dsp_task_unconfig_all(void)
+{
+ unsigned char minor;
+ u8 tid;
+ struct dsptask *task;
+
+ for (minor = 0; minor < n_task; minor++) {
+ /*
+ * taskdev[minor] can be NULL in case of
+ * configuration failure
+ */
+ if (taskdev[minor])
+ taskdev_delete(minor);
+ }
+ for (; minor < TASKDEV_MAX; minor++) {
+ if (taskdev[minor])
+ dsp_rmdev_minor(minor);
+ }
+
+ for (tid = 0; tid < n_task; tid++) {
+ /*
+ * dsptask[tid] can be NULL in case of
+ * configuration failure
+ */
+ task = dsptask[tid];
+ if (task)
+ dsp_task_unconfig(task);
+ }
+ for (; tid < TASKDEV_MAX; tid++) {
+ task = dsptask[tid];
+ if (task) {
+ /*
+ * on-demand tasks should be deleted in
+ * rmdev_minor(), but just in case.
+ */
+ dsp_task_unconfig(task);
+ kfree(task);
+ }
+ }
+
+ if (heap) {
+ kfree(heap);
+ heap = NULL;
+ }
+
+ n_task = 0;
+}
+
+static struct device_driver dsptask_driver = {
+ .name = "dsptask",
+ .bus = &dsptask_bus,
+};
+
+u8 dsp_task_count(void)
+{
+ return n_task;
+}
+
+int dsp_taskmod_busy(void)
+{
+ struct taskdev *dev;
+ unsigned char minor;
+ unsigned int usecount;
+
+ for (minor = 0; minor < TASKDEV_MAX; minor++) {
+ dev = taskdev[minor];
+ if (dev == NULL)
+ continue;
+ if ((usecount = dev->usecount) > 0) {
+ printk("dsp_taskmod_busy(): %s: usecount=%d\n",
+ dev->name, usecount);
+ return 1;
+ }
+/*
+ if ((dev->state & (TASKDEV_ST_ADDREQ |
+ TASKDEV_ST_DELREQ)) {
+*/
+ if (dev->state & TASKDEV_ST_ADDREQ) {
+ printk("dsp_taskmod_busy(): %s is in %s\n",
+ dev->name, devstate_name(dev->state));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * DSP task device file operations
+ */
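+/*
+ * read() for an active word-sending task: words pushed by the DSP are
+ * buffered in a kfifo, so the reader sleeps until data is available
+ * and then copies it to user space.
+ */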
+static ssize_t dsp_task_read_wd_acv(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+
+
+ prepare_to_wait(&dev->read_wait_q, &wait, TASK_INTERRUPTIBLE);
+ if (kfifo_len(dev->rcvdt.fifo) == 0)
+ schedule();
+ finish_wait(&dev->read_wait_q, &wait);
+ if (kfifo_len(dev->rcvdt.fifo) == 0) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+
+ ret = kfifo_get_to_user(dev->rcvdt.fifo, buf, count);
+
+ up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ return ret;
+}
+
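+/*
+ * read() for an active block-sending task: data arrives in IPBUF
+ * blocks (global or task-private); partially consumed blocks are
+ * tracked with rcvdt->rp so a later read() continues where the
+ * previous one stopped.
+ */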
+static ssize_t dsp_task_read_bk_acv(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ struct rcvdt_bk_struct *rcvdt = &dev->rcvdt.bk;
+ ssize_t ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ } else if ((int)buf & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: buf should be word aligned for "
+ "dsp_task_read().\n");
+ return -EINVAL;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+
+ prepare_to_wait(&dev->read_wait_q, &wait, TASK_INTERRUPTIBLE);
+ if (ipblink_empty(&rcvdt->link))
+ schedule();
+ finish_wait(&dev->read_wait_q, &wait);
+ if (ipblink_empty(&rcvdt->link)) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+ /* copy from delayed IPBUF */
+ if (sndtyp_pvt(dev->task->ttyp)) {
+ /* private */
+ if (!ipblink_empty(&rcvdt->link)) {
+ struct ipbuf_p *ipbp = dev->task->ipbuf_pvt_r;
+ unsigned char *base, *src;
+ size_t bkcnt;
+
+ if (dsp_mem_enable(ipbp) < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ base = MKVIRT(ipbp->ah, ipbp->al);
+ bkcnt = ((unsigned long)ipbp->c) * 2 - rcvdt->rp;
+ if (dsp_address_validate(base, bkcnt,
+ "task %s read buffer",
+ dev->task->name) < 0) {
+ ret = -EINVAL;
+ goto pv_out1;
+ }
+ if (dsp_mem_enable(base) < 0) {
+ ret = -EBUSY;
+ goto pv_out1;
+ }
+ src = base + rcvdt->rp;
+ if (bkcnt > count) {
+ if (copy_to_user_dsp(buf, src, count)) {
+ ret = -EFAULT;
+ goto pv_out2;
+ }
+ ret = count;
+ rcvdt->rp += count;
+ } else {
+ if (copy_to_user_dsp(buf, src, bkcnt)) {
+ ret = -EFAULT;
+ goto pv_out2;
+ }
+ ret = bkcnt;
+ ipblink_del_pvt(&rcvdt->link);
+ release_ipbuf_pvt(ipbp);
+ rcvdt->rp = 0;
+ }
+ pv_out2:
+ dsp_mem_disable(src);
+ pv_out1:
+ dsp_mem_disable(ipbp);
+ }
+ } else {
+ /* global */
+ if (dsp_mem_enable_ipbuf() < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ while (!ipblink_empty(&rcvdt->link)) {
+ unsigned char *src;
+ size_t bkcnt;
+ struct ipbuf_head *ipb_h = bid_to_ipbuf(rcvdt->link.top);
+
+ src = ipb_h->p->d + rcvdt->rp;
+ bkcnt = ((unsigned long)ipb_h->p->c) * 2 - rcvdt->rp;
+ if (bkcnt > count) {
+ if (copy_to_user_dsp(buf, src, count)) {
+ ret = -EFAULT;
+ goto gb_out;
+ }
+ ret += count;
+ rcvdt->rp += count;
+ break;
+ } else {
+ if (copy_to_user_dsp(buf, src, bkcnt)) {
+ ret = -EFAULT;
+ goto gb_out;
+ }
+ ret += bkcnt;
+ buf += bkcnt;
+ count -= bkcnt;
+ ipblink_del_top(&rcvdt->link);
+ unuse_ipbuf(ipb_h);
+ rcvdt->rp = 0;
+ }
+ }
+ gb_out:
+ dsp_mem_disable_ipbuf();
+ }
+
+ up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ return ret;
+}
+
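+/*
+ * read() for a passive word-sending task: a WDREQ mailbox command asks
+ * the DSP for exactly one word, which then shows up in the kfifo.
+ */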
+static ssize_t dsp_task_read_wd_psv(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ int ret = 0;
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ } else {
+ /* force! */
+ count = 2;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+
+ mbcompose_send_and_wait(WDREQ, dev->task->tid, 0, &dev->read_wait_q);
+
+ if (kfifo_len(dev->rcvdt.fifo) == 0) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+ ret = kfifo_get_to_user(dev->rcvdt.fifo, buf, count);
+
+up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ return ret;
+}
+
+static ssize_t dsp_task_read_bk_psv(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ struct rcvdt_bk_struct *rcvdt = &dev->rcvdt.bk;
+ int ret = 0;
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ } else if ((int)buf & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: buf should be word aligned for "
+ "dsp_task_read().\n");
+ return -EINVAL;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+
+ mbcompose_send_and_wait(BKREQ, dev->task->tid, count/2,
+ &dev->read_wait_q);
+
+ if (ipblink_empty(&rcvdt->link)) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+ /*
+ * We will not receive more than requested count.
+ */
+ if (sndtyp_pvt(dev->task->ttyp)) {
+ /* private */
+ struct ipbuf_p *ipbp = dev->task->ipbuf_pvt_r;
+ size_t rcvcnt;
+ void *src;
+
+ if (dsp_mem_enable(ipbp) < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ src = MKVIRT(ipbp->ah, ipbp->al);
+ rcvcnt = ((unsigned long)ipbp->c) * 2;
+ if (dsp_address_validate(src, rcvcnt, "task %s read buffer",
+ dev->task->name) < 0) {
+ ret = -EINVAL;
+ goto pv_out1;
+ }
+ if (dsp_mem_enable(src) < 0) {
+ ret = -EBUSY;
+ goto pv_out1;
+ }
+ if (count > rcvcnt)
+ count = rcvcnt;
+ if (copy_to_user_dsp(buf, src, count)) {
+ ret = -EFAULT;
+ goto pv_out2;
+ }
+ ipblink_del_pvt(&rcvdt->link);
+ release_ipbuf_pvt(ipbp);
+ ret = count;
+pv_out2:
+ dsp_mem_disable(src);
+pv_out1:
+ dsp_mem_disable(ipbp);
+ } else {
+ /* global */
+ struct ipbuf_head *ipb_h = bid_to_ipbuf(rcvdt->link.top);
+ size_t rcvcnt;
+
+ if (dsp_mem_enable_ipbuf() < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ rcvcnt = ((unsigned long)ipb_h->p->c) * 2;
+ if (count > rcvcnt)
+ count = rcvcnt;
+ if (copy_to_user_dsp(buf, ipb_h->p->d, count)) {
+ ret = -EFAULT;
+ goto gb_out;
+ }
+ ipblink_del_top(&rcvdt->link);
+ unuse_ipbuf(ipb_h);
+ ret = count;
+gb_out:
+ dsp_mem_disable_ipbuf();
+ }
+
+up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ return ret;
+}
+
+static ssize_t dsp_task_write_wd(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ u16 wd;
+ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ } else {
+ /* force! */
+ count = 2;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->write_mutex))
+ return -ENODEV;
+
+ prepare_to_wait(&dev->write_wait_q, &wait, TASK_INTERRUPTIBLE);
+ if (dev->wsz == 0)
+ schedule();
+ finish_wait(&dev->write_wait_q, &wait);
+ if (dev->wsz == 0) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+ if (copy_from_user(&wd, buf, count)) {
+ ret = -EFAULT;
+ goto up_out;
+ }
+
+ spin_lock(&dev->wsz_lock);
+ if (mbcompose_send(WDSND, dev->task->tid, wd) < 0) {
+ spin_unlock(&dev->wsz_lock);
+ goto up_out;
+ }
+ ret = count;
+ if (rcvtyp_acv(dev->task->ttyp))
+ dev->wsz = 0;
+ spin_unlock(&dev->wsz_lock);
+
+ up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->write_mutex);
+ return ret;
+}
+
+static ssize_t dsp_task_write_bk(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (count == 0) {
+ return 0;
+ } else if (count & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: odd count is illegal for DSP task device.\n");
+ return -EINVAL;
+ } else if ((int)buf & 0x1) {
+ printk(KERN_ERR
+ "omapdsp: buf should be word aligned for "
+ "dsp_task_write().\n");
+ return -EINVAL;
+ }
+
+ if (taskdev_lock_and_statelock_attached(dev, &dev->write_mutex))
+ return -ENODEV;
+
+ prepare_to_wait(&dev->write_wait_q, &wait, TASK_INTERRUPTIBLE);
+ if (dev->wsz == 0)
+ schedule();
+ finish_wait(&dev->write_wait_q, &wait);
+ if (dev->wsz == 0) {
+ /* failure */
+ if (signal_pending(current))
+ ret = -EINTR;
+ goto up_out;
+ }
+
+ if (count > dev->wsz)
+ count = dev->wsz;
+
+ if (rcvtyp_pvt(dev->task->ttyp)) {
+ /* private */
+ struct ipbuf_p *ipbp = dev->task->ipbuf_pvt_w;
+ unsigned char *dst;
+
+ if (dsp_mem_enable(ipbp) < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ dst = MKVIRT(ipbp->ah, ipbp->al);
+ if (dsp_address_validate(dst, count, "task %s write buffer",
+ dev->task->name) < 0) {
+ ret = -EINVAL;
+ goto pv_out1;
+ }
+ if (dsp_mem_enable(dst) < 0) {
+ ret = -EBUSY;
+ goto pv_out1;
+ }
+ if (copy_from_user_dsp(dst, buf, count)) {
+ ret = -EFAULT;
+ goto pv_out2;
+ }
+ ipbp->c = count/2;
+ ipbp->s = dev->task->tid;
+ spin_lock(&dev->wsz_lock);
+ if (mbcompose_send(BKSNDP, dev->task->tid, 0) == 0) {
+ if (rcvtyp_acv(dev->task->ttyp))
+ dev->wsz = 0;
+ ret = count;
+ }
+ spin_unlock(&dev->wsz_lock);
+ pv_out2:
+ dsp_mem_disable(dst);
+ pv_out1:
+ dsp_mem_disable(ipbp);
+ } else {
+ /* global */
+ struct ipbuf_head *ipb_h;
+
+ if (dsp_mem_enable_ipbuf() < 0) {
+ ret = -EBUSY;
+ goto up_out;
+ }
+ if ((ipb_h = get_free_ipbuf(dev->task->tid)) == NULL)
+ goto gb_out;
+ if (copy_from_user_dsp(ipb_h->p->d, buf, count)) {
+ release_ipbuf(ipb_h);
+ ret = -EFAULT;
+ goto gb_out;
+ }
+ ipb_h->p->c = count/2;
+ ipb_h->p->sa = dev->task->tid;
+ spin_lock(&dev->wsz_lock);
+ if (mbcompose_send(BKSND, dev->task->tid, ipb_h->bid) == 0) {
+ if (rcvtyp_acv(dev->task->ttyp))
+ dev->wsz = 0;
+ ret = count;
+ ipb_bsycnt_inc(&ipbcfg);
+ } else
+ release_ipbuf(ipb_h);
+ spin_unlock(&dev->wsz_lock);
+ gb_out:
+ dsp_mem_disable_ipbuf();
+ }
+
+ up_out:
+ taskdev_unlock_and_stateunlock(dev, &dev->write_mutex);
+ return ret;
+}
+
+static unsigned int dsp_task_poll(struct file * file, poll_table * wait)
+{
+ unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ struct dsptask *task = dev->task;
+ unsigned int mask = 0;
+
+ if (!devstate_read_lock_and_test(dev, TASKDEV_ST_ATTACHED))
+ return 0;
+ poll_wait(file, &dev->read_wait_q, wait);
+ poll_wait(file, &dev->write_wait_q, wait);
+ if (sndtyp_psv(task->ttyp) ||
+ (sndtyp_wd(task->ttyp) && kfifo_len(dev->rcvdt.fifo)) ||
+ (sndtyp_bk(task->ttyp) && !ipblink_empty(&dev->rcvdt.bk.link)))
+ mask |= POLLIN | POLLRDNORM;
+ if (dev->wsz)
+ mask |= POLLOUT | POLLWRNORM;
+ devstate_read_unlock(dev);
+
+ return mask;
+}
+
+static int dsp_tctl_issue(struct taskdev *dev, u16 cmd, int argc, u16 argv[])
+{
+ int tctl_argc;
+ struct mb_exarg mbarg, *mbargp;
+ int interactive;
+ u8 tid;
+ int ret = 0;
+
+ if (cmd < 0x8000) {
+ /*
+ * 0x0000 - 0x7fff
+ * system reserved TCTL commands
+ */
+ switch (cmd) {
+ case TCTL_TEN:
+ case TCTL_TDIS:
+ tctl_argc = 0;
+ interactive = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /*
+ * 0x8000 - 0xffff
+ * user-defined TCTL commands
+ */
+ else if (cmd < 0x8100) {
+ /* 0x8000-0x80ff: no arg, non-interactive */
+ tctl_argc = 0;
+ interactive = 0;
+ } else if (cmd < 0x8200) {
+ /* 0x8100-0x81ff: 1 arg, non-interactive */
+ tctl_argc = 1;
+ interactive = 0;
+ } else if (cmd < 0x9000) {
+ /* 0x8200-0x8fff: reserved */
+ return -EINVAL;
+ } else if (cmd < 0x9100) {
+ /* 0x9000-0x90ff: no arg, interactive */
+ tctl_argc = 0;
+ interactive = 1;
+ } else if (cmd < 0x9200) {
+ /* 0x9100-0x91ff: 1 arg, interactive */
+ tctl_argc = 1;
+ interactive = 1;
+ } else {
+ /* 0x9200-0xffff: reserved */
+ return -EINVAL;
+ }
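+
+ /*
+ * Example decode (command values are illustrative only): cmd 0x8123
+ * falls in the 0x8100-0x81ff range, so tctl_argc = 1 and
+ * interactive = 0; cmd 0x9001 falls in 0x9000-0x90ff, so
+ * tctl_argc = 0 and interactive = 1.
+ */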
+
+ /*
+ * if argc < 0, use tctl_argc as is.
+ * if argc >= 0, check arg count.
+ */
+ if ((argc >= 0) && (argc != tctl_argc))
+ return -EINVAL;
+
+ /*
+ * issue TCTL
+ */
+ if (taskdev_lock_interruptible(dev, &dev->tctl_mutex))
+ return -EINTR;
+
+ tid = dev->task->tid;
+ if (tctl_argc > 0) {
+ mbarg.argc = tctl_argc;
+ mbarg.tid = tid;
+ mbarg.argv = argv;
+ mbargp = &mbarg;
+ } else
+ mbargp = NULL;
+
+ if (interactive) {
+ dev->tctl_stat = -EINVAL;
+
+ mbcompose_send_and_wait_exarg(TCTL, tid, cmd, mbargp,
+ &dev->tctl_wait_q);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ goto up_out;
+ }
+ if ((ret = dev->tctl_stat) < 0) {
+ printk(KERN_ERR "omapdsp: TCTL not responding.\n");
+ goto up_out;
+ }
+ } else
+ mbcompose_send_exarg(TCTL, tid, cmd, mbargp);
+
+up_out:
+ mutex_unlock(&dev->tctl_mutex);
+ return ret;
+}
+
+static int dsp_task_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor = MINOR(inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ int ret;
+
+ if (cmd < 0x10000) {
+ /* issue TCTL */
+ u16 mbargv[1];
+
+ mbargv[0] = arg & 0xffff;
+ return dsp_tctl_issue(dev, cmd, -1, mbargv);
+ }
+
+ /* non TCTL ioctls */
+ switch (cmd) {
+
+ case TASK_IOCTL_LOCK:
+ ret = taskdev_lock(dev);
+ break;
+
+ case TASK_IOCTL_UNLOCK:
+ ret = taskdev_unlock(dev);
+ break;
+
+ case TASK_IOCTL_BFLSH:
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+ ret = taskdev_flush_buf(dev);
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ break;
+
+ case TASK_IOCTL_SETBSZ:
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+ ret = taskdev_set_fifosz(dev, arg);
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+ break;
+
+ case TASK_IOCTL_GETNAME:
+ ret = 0;
+ if (copy_to_user((void __user *)arg, dev->name,
+ strlen(dev->name) + 1))
+ ret = -EFAULT;
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+
+ }
+
+ return ret;
+}
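+
+ /*
+ * User-space usage sketch (assumes udev exposes the class device as
+ * /dev/dsptaskN; the TCTL command value 0x9000 is only an example of a
+ * user-defined interactive command):
+ *
+ * int fd = open("/dev/dsptask0", O_RDWR);
+ * char name[64];
+ * ioctl(fd, TASK_IOCTL_GETNAME, name); /# fetch device name #/
+ * int stat = ioctl(fd, 0x9000, 0); /# issue TCTL; returns tctl_stat #/
+ *
+ * (Nested comment markers are written as /# #/ above.)
+ * Commands below 0x10000 are forwarded to dsp_tctl_issue() above.
+ */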
+
+static void dsp_task_mmap_open(struct vm_area_struct *vma)
+{
+ struct taskdev *dev = (struct taskdev *)vma->vm_private_data;
+ struct dsptask *task;
+ size_t len = vma->vm_end - vma->vm_start;
+
+ BUG_ON(!(dev->state & TASKDEV_ST_ATTACHED));
+ task = dev->task;
+ omap_mmu_exmap_use(&dsp_mmu, task->map_base, len);
+}
+
+static void dsp_task_mmap_close(struct vm_area_struct *vma)
+{
+ struct taskdev *dev = (struct taskdev *)vma->vm_private_data;
+ struct dsptask *task;
+ size_t len = vma->vm_end - vma->vm_start;
+
+ BUG_ON(!(dev->state & TASKDEV_ST_ATTACHED));
+ task = dev->task;
+ omap_mmu_exmap_unuse(&dsp_mmu, task->map_base, len);
+}
+
+/*
+ * On demand page allocation is not allowed. The mapping area is defined by
+ * corresponding DSP tasks.
+ */
+static int dsp_task_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return VM_FAULT_NOPAGE;
+}
+
+static struct vm_operations_struct dsp_task_vm_ops = {
+ .open = dsp_task_mmap_open,
+ .close = dsp_task_mmap_close,
+ .fault = dsp_task_mmap_fault,
+};
+
+static int dsp_task_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ void *tmp_vadr;
+ unsigned long tmp_padr, tmp_vmadr, off;
+ size_t req_len, tmp_len;
+ unsigned int minor = MINOR(filp->f_dentry->d_inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+ struct dsptask *task;
+ int ret = 0;
+
+ if (!devstate_read_lock_and_test(dev, TASKDEV_ST_ATTACHED))
+ return -ENODEV;
+ task = dev->task;
+
+ /*
+ * Don't swap this area out
+ * Don't dump this area to a core file
+ */
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+
+ /* Do not cache this area */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ req_len = vma->vm_end - vma->vm_start;
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ tmp_vmadr = vma->vm_start;
+ tmp_vadr = task->map_base + off;
+ do {
+ tmp_padr = omap_mmu_virt_to_phys(&dsp_mmu, tmp_vadr, &tmp_len);
+ if (tmp_padr == 0) {
+ printk(KERN_ERR
+ "omapdsp: task %s: illegal address "
+ "for mmap: %p", task->name, tmp_vadr);
+ /* partial mapping will be cleared in upper layer */
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ if (tmp_len > req_len)
+ tmp_len = req_len;
+
+ pr_debug("omapdsp: mmap info: "
+ "vmadr = %08lx, padr = %08lx, len = %x\n",
+ tmp_vmadr, tmp_padr, tmp_len);
+ if (remap_pfn_range(vma, tmp_vmadr, tmp_padr >> PAGE_SHIFT,
+ tmp_len, vma->vm_page_prot) != 0) {
+ printk(KERN_ERR
+ "omapdsp: task %s: remap_page_range() failed.\n",
+ task->name);
+ /* partial mapping will be cleared in upper layer */
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ req_len -= tmp_len;
+ tmp_vmadr += tmp_len;
+ tmp_vadr += tmp_len;
+ } while (req_len);
+
+ vma->vm_ops = &dsp_task_vm_ops;
+ vma->vm_private_data = dev;
+ omap_mmu_exmap_use(&dsp_mmu, task->map_base, vma->vm_end - vma->vm_start);
+
+unlock_out:
+ devstate_read_unlock(dev);
+ return ret;
+}
+
+static int dsp_task_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = MINOR(inode->i_rdev);
+ struct taskdev *dev;
+ int ret = 0;
+
+ if ((minor >= TASKDEV_MAX) || ((dev = taskdev[minor]) == NULL))
+ return -ENODEV;
+
+ restart:
+ mutex_lock(&dev->usecount_lock);
+ down_write(&dev->state_sem);
+
+ /* state can be NOTASK, ATTACHED/FREEZED, KILLING, GARBAGE or INVALID here. */
+ switch (dev->state & TASKDEV_ST_STATE_MASK) {
+ case TASKDEV_ST_NOTASK:
+ break;
+ case TASKDEV_ST_ATTACHED:
+ goto attached;
+
+ case TASKDEV_ST_INVALID:
+ up_write(&dev->state_sem);
+ mutex_unlock(&dev->usecount_lock);
+ return -ENODEV;
+
+ case TASKDEV_ST_FREEZED:
+ case TASKDEV_ST_KILLING:
+ case TASKDEV_ST_GARBAGE:
+ case TASKDEV_ST_DELREQ:
+ /* kill is in progress; wait until the state becomes NOTASK. */
+ up_write(&dev->state_sem);
+ mutex_unlock(&dev->usecount_lock);
+ if (devstate_write_lock(dev, TASKDEV_ST_NOTASK) < 0)
+ return -EINTR;
+ devstate_write_unlock(dev);
+ goto restart;
+ }
+
+ /* NOTASK */
+ set_taskdev_state(dev, TASKDEV_ST_ADDREQ);
+ /* wake up twch daemon for tadd */
+ dsp_twch_touch();
+ up_write(&dev->state_sem);
+ if (devstate_write_lock(dev, TASKDEV_ST_ATTACHED |
+ TASKDEV_ST_ADDFAIL) < 0) {
+ /* cancelled */
+ if (!devstate_write_lock_and_test(dev, TASKDEV_ST_ADDREQ)) {
+ mutex_unlock(&dev->usecount_lock);
+ /* out of control ??? */
+ return -EINTR;
+ }
+ set_taskdev_state(dev, TASKDEV_ST_NOTASK);
+ ret = -EINTR;
+ goto change_out;
+ }
+ if (dev->state & TASKDEV_ST_ADDFAIL) {
+ printk(KERN_ERR "omapdsp: task attach failed for %s!\n",
+ dev->name);
+ ret = -EBUSY;
+ set_taskdev_state(dev, TASKDEV_ST_NOTASK);
+ goto change_out;
+ }
+
+ attached:
+ ret = proc_list_add(&dev->proc_list_lock,
+ &dev->proc_list, current, file);
+ if (ret)
+ goto out;
+
+ dev->usecount++;
+ file->f_op = &dev->fops;
+ up_write(&dev->state_sem);
+ mutex_unlock(&dev->usecount_lock);
+
+#ifdef DSP_PTE_FREE /* not used currently. */
+ dsp_map_update(current);
+ dsp_cur_users_add(current);
+#endif /* DSP_PTE_FREE */
+ return 0;
+
+ change_out:
+ wake_up_interruptible_all(&dev->state_wait_q);
+ out:
+ up_write(&dev->state_sem);
+ mutex_unlock(&dev->usecount_lock);
+ return ret;
+}
+
+static int dsp_task_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = MINOR(inode->i_rdev);
+ struct taskdev *dev = taskdev[minor];
+
+#ifdef DSP_PTE_FREE /* not used currently. */
+ dsp_cur_users_del(current);
+#endif /* DSP_PTE_FREE */
+
+ if (has_taskdev_lock(dev))
+ taskdev_unlock(dev);
+
+ proc_list_del(&dev->proc_list_lock, &dev->proc_list, current, file);
+ mutex_lock(&dev->usecount_lock);
+ if (--dev->usecount > 0) {
+ /* other processes are using this device. no state change. */
+ mutex_unlock(&dev->usecount_lock);
+ return 0;
+ }
+
+ /* usecount == 0 */
+ down_write(&dev->state_sem);
+
+ /* state can be ATTACHED/FREEZED, KILLING or GARBAGE here. */
+ switch (dev->state & TASKDEV_ST_STATE_MASK) {
+
+ case TASKDEV_ST_KILLING:
+ break;
+
+ case TASKDEV_ST_GARBAGE:
+ set_taskdev_state(dev, TASKDEV_ST_NOTASK);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ break;
+
+ case TASKDEV_ST_ATTACHED:
+ case TASKDEV_ST_FREEZED:
+ if (is_dynamic_task(minor)) {
+ set_taskdev_state(dev, TASKDEV_ST_DELREQ);
+ /* wake up twch daemon for tdel */
+ dsp_twch_touch();
+ }
+ break;
+
+ }
+
+ up_write(&dev->state_sem);
+ mutex_unlock(&dev->usecount_lock);
+ return 0;
+}
+
+/*
+ * mkdev / rmdev
+ */
+int dsp_mkdev(char *name)
+{
+ struct taskdev *dev;
+ int status;
+ unsigned char minor;
+ int ret;
+
+ if (dsp_cfgstat_get_stat() != CFGSTAT_READY) {
+ printk(KERN_ERR "omapdsp: dsp has not been configured.\n");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&devmgr_lock))
+ return -EINTR;
+
+ /* naming check */
+ for (minor = 0; minor < TASKDEV_MAX; minor++) {
+ if (taskdev[minor] && !strcmp(taskdev[minor]->name, name)) {
+ printk(KERN_ERR
+ "omapdsp: task device name %s is already "
+ "in use.\n", name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* find free minor number */
+ for (minor = n_task; minor < TASKDEV_MAX; minor++) {
+ if (taskdev[minor] == NULL)
+ goto do_make;
+ }
+ printk(KERN_ERR "omapdsp: Too many task devices.\n");
+ ret = -EBUSY;
+ goto out;
+
+do_make:
+ if ((dev = kzalloc(sizeof(struct taskdev), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if ((status = taskdev_init(dev, name, minor)) < 0) {
+ kfree(dev);
+ ret = status;
+ goto out;
+ }
+ ret = minor;
+
+out:
+ mutex_unlock(&devmgr_lock);
+ return ret;
+}
+
+int dsp_rmdev(char *name)
+{
+ unsigned char minor;
+ int status;
+ int ret;
+
+ if (dsp_cfgstat_get_stat() != CFGSTAT_READY) {
+ printk(KERN_ERR "omapdsp: dsp has not been configured.\n");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&devmgr_lock))
+ return -EINTR;
+
+ /* find in dynamic devices */
+ for (minor = n_task; minor < TASKDEV_MAX; minor++) {
+ if (taskdev[minor] && !strcmp(taskdev[minor]->name, name))
+ goto do_remove;
+ }
+
+ /* find in static devices */
+ for (minor = 0; minor < n_task; minor++) {
+ if (taskdev[minor] && !strcmp(taskdev[minor]->name, name)) {
+ printk(KERN_ERR
+ "omapdsp: task device %s is static.\n", name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ printk(KERN_ERR "omapdsp: task device %s not found.\n", name);
+ return -EINVAL;
+
+do_remove:
+ ret = minor;
+ if ((status = dsp_rmdev_minor(minor)) < 0)
+ ret = status;
+out:
+ mutex_unlock(&devmgr_lock);
+ return ret;
+}
+
+static int dsp_rmdev_minor(unsigned char minor)
+{
+ struct taskdev *dev = taskdev[minor];
+
+ while (!down_write_trylock(&dev->state_sem)) {
+ down_read(&dev->state_sem);
+ if (dev->state & (TASKDEV_ST_ATTACHED |
+ TASKDEV_ST_FREEZED)) {
+ /*
+ * task is working. kill it.
+ * ATTACHED -> FREEZED can be changed under
+ * down_read of state_sem.
+ */
+ set_taskdev_state(dev, TASKDEV_ST_FREEZED);
+ wake_up_interruptible_all(&dev->read_wait_q);
+ wake_up_interruptible_all(&dev->write_wait_q);
+ wake_up_interruptible_all(&dev->tctl_wait_q);
+ }
+ up_read(&dev->state_sem);
+ schedule();
+ }
+
+ switch (dev->state & TASKDEV_ST_STATE_MASK) {
+
+ case TASKDEV_ST_NOTASK:
+ case TASKDEV_ST_INVALID:
+ /* fine */
+ goto notask;
+
+ case TASKDEV_ST_ATTACHED:
+ case TASKDEV_ST_FREEZED:
+ /* task is working. kill it. */
+ set_taskdev_state(dev, TASKDEV_ST_KILLING);
+ up_write(&dev->state_sem);
+ dsp_tdel_bh(dev, TDEL_KILL);
+ goto invalidate;
+
+ case TASKDEV_ST_ADDREQ:
+ /* open() is waiting. drain it. */
+ set_taskdev_state(dev, TASKDEV_ST_ADDFAIL);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ break;
+
+ case TASKDEV_ST_DELREQ:
+ /* nobody is waiting. */
+ set_taskdev_state(dev, TASKDEV_ST_NOTASK);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ break;
+
+ case TASKDEV_ST_ADDING:
+ case TASKDEV_ST_DELING:
+ case TASKDEV_ST_KILLING:
+ case TASKDEV_ST_GARBAGE:
+ case TASKDEV_ST_ADDFAIL:
+ /* transient state. wait for a moment. */
+ break;
+
+ }
+
+ up_write(&dev->state_sem);
+
+invalidate:
+ /* wait for some time and hope the state is settled */
+ devstate_read_lock_timeout(dev, TASKDEV_ST_NOTASK, 5 * HZ);
+ if (!(dev->state & TASKDEV_ST_NOTASK)) {
+ printk(KERN_WARNING
+ "omapdsp: illegal device state (%s) on rmdev %s.\n",
+ devstate_name(dev->state), dev->name);
+ }
+notask:
+ set_taskdev_state(dev, TASKDEV_ST_INVALID);
+ devstate_read_unlock(dev);
+
+ taskdev_delete(minor);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct file_operations dsp_task_fops = {
+ .owner = THIS_MODULE,
+ .poll = dsp_task_poll,
+ .ioctl = dsp_task_ioctl,
+ .open = dsp_task_open,
+ .release = dsp_task_release,
+};
+
+static void dsptask_dev_release(struct device *dev)
+{
+}
+
+static int taskdev_init(struct taskdev *dev, char *name, unsigned char minor)
+{
+ int ret;
+ struct device *task_dev;
+
+ taskdev[minor] = dev;
+
+ spin_lock_init(&dev->proc_list_lock);
+ INIT_LIST_HEAD(&dev->proc_list);
+ init_waitqueue_head(&dev->read_wait_q);
+ init_waitqueue_head(&dev->write_wait_q);
+ init_waitqueue_head(&dev->tctl_wait_q);
+ mutex_init(&dev->read_mutex);
+ mutex_init(&dev->write_mutex);
+ mutex_init(&dev->tctl_mutex);
+ mutex_init(&dev->lock);
+ spin_lock_init(&dev->wsz_lock);
+ dev->tctl_ret = -EINVAL;
+ dev->lock_pid = 0;
+
+ strncpy(dev->name, name, TNM_LEN);
+ dev->name[TNM_LEN-1] = '\0';
+ set_taskdev_state(dev, (minor < n_task) ? TASKDEV_ST_ATTACHED : TASKDEV_ST_NOTASK);
+ dev->usecount = 0;
+ mutex_init(&dev->usecount_lock);
+ memcpy(&dev->fops, &dsp_task_fops, sizeof(struct file_operations));
+
+ dev->dev.parent = omap_dsp->dev;
+ dev->dev.bus = &dsptask_bus;
+ sprintf(dev->dev.bus_id, "dsptask%d", minor);
+ dev->dev.release = dsptask_dev_release;
+ ret = device_register(&dev->dev);
+ if (ret) {
+ printk(KERN_ERR "device_register failed: %d\n", ret);
+ return ret;
+ }
+ ret = device_create_file(&dev->dev, &dev_attr_devname);
+ if (ret)
+ goto fail_create_devname;
+ ret = device_create_file(&dev->dev, &dev_attr_devstate);
+ if (ret)
+ goto fail_create_devstate;
+ ret = device_create_file(&dev->dev, &dev_attr_proc_list);
+ if (ret)
+ goto fail_create_proclist;
+
+ task_dev = device_create(dsp_task_class, NULL,
+ MKDEV(OMAP_DSP_TASK_MAJOR, minor),
+ "dsptask%d", (int)minor);
+
+ if (unlikely(IS_ERR(task_dev))) {
+ ret = -EINVAL;
+ goto fail_create_taskclass;
+ }
+
+ init_waitqueue_head(&dev->state_wait_q);
+ init_rwsem(&dev->state_sem);
+
+ return 0;
+
+ fail_create_taskclass:
+ device_remove_file(&dev->dev, &dev_attr_proc_list);
+ fail_create_proclist:
+ device_remove_file(&dev->dev, &dev_attr_devstate);
+ fail_create_devstate:
+ device_remove_file(&dev->dev, &dev_attr_devname);
+ fail_create_devname:
+ device_unregister(&dev->dev);
+ return ret;
+}
+
+static void taskdev_delete(unsigned char minor)
+{
+ struct taskdev *dev = taskdev[minor];
+
+ if (!dev)
+ return;
+ device_remove_file(&dev->dev, &dev_attr_devname);
+ device_remove_file(&dev->dev, &dev_attr_devstate);
+ device_remove_file(&dev->dev, &dev_attr_proc_list);
+ device_destroy(dsp_task_class, MKDEV(OMAP_DSP_TASK_MAJOR, minor));
+ device_unregister(&dev->dev);
+ proc_list_flush(&dev->proc_list_lock, &dev->proc_list);
+ taskdev[minor] = NULL;
+}
+
+static int taskdev_attach_task(struct taskdev *dev, struct dsptask *task)
+{
+ u16 ttyp = task->ttyp;
+ int ret;
+
+ dev->fops.read =
+ sndtyp_acv(ttyp) ?
+ sndtyp_wd(ttyp) ? dsp_task_read_wd_acv:
+ /* sndtyp_bk */ dsp_task_read_bk_acv:
+ /* sndtyp_psv */
+ sndtyp_wd(ttyp) ? dsp_task_read_wd_psv:
+ /* sndtyp_bk */ dsp_task_read_bk_psv;
+ if (sndtyp_wd(ttyp)) {
+ /* word */
+ size_t fifosz = sndtyp_psv(ttyp) ? 2:32; /* passive:active */
+
+ dev->rcvdt.fifo = kfifo_alloc(fifosz, GFP_KERNEL,
+ &dev->read_lock);
+ if (IS_ERR(dev->rcvdt.fifo)) {
+ printk(KERN_ERR
+ "omapdsp: unable to allocate receive buffer. "
+ "(%d bytes for %s)\n", fifosz, dev->name);
+ return -ENOMEM;
+ }
+ } else {
+ /* block */
+ INIT_IPBLINK(&dev->rcvdt.bk.link);
+ dev->rcvdt.bk.rp = 0;
+ }
+
+ dev->fops.write =
+ rcvtyp_wd(ttyp) ? dsp_task_write_wd:
+ /* rcvbyp_bk */ dsp_task_write_bk;
+ dev->wsz = rcvtyp_acv(ttyp) ? 0 : /* active */
+ rcvtyp_wd(ttyp) ? 2 : /* passive word */
+ ipbcfg.lsz*2; /* passive block */
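+
+ /*
+ * Example of the mapping above: a task that sends actively in blocks
+ * and receives passively in words gets dsp_task_read_bk_acv() and
+ * dsp_task_write_wd(), with an initial write size (wsz) of 2 bytes.
+ */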
+
+ if (task->map_length)
+ dev->fops.mmap = dsp_task_mmap;
+
+ ret = device_create_file(&dev->dev, &dev_attr_taskname);
+ if (unlikely(ret))
+ goto fail_create_taskname;
+ ret = device_create_file(&dev->dev, &dev_attr_ttyp);
+ if (unlikely(ret))
+ goto fail_create_ttyp;
+ ret = device_create_file(&dev->dev, &dev_attr_wsz);
+ if (unlikely(ret))
+ goto fail_create_wsz;
+ if (task->map_length) {
+ ret = device_create_file(&dev->dev, &dev_attr_mmap);
+ if (unlikely(ret))
+ goto fail_create_mmap;
+ }
+ if (sndtyp_wd(ttyp)) {
+ ret = device_create_file(&dev->dev, &dev_attr_fifosz);
+ if (unlikely(ret))
+ goto fail_create_fifosz;
+ ret = device_create_file(&dev->dev, &dev_attr_fifocnt);
+ if (unlikely(ret))
+ goto fail_create_fifocnt;
+ } else {
+ ret = device_create_file(&dev->dev, &dev_attr_ipblink);
+ if (unlikely(ret))
+ goto fail_create_ipblink;
+ }
+
+ dev->task = task;
+ task->dev = dev;
+
+ return 0;
+
+ fail_create_fifocnt:
+ device_remove_file(&dev->dev, &dev_attr_fifosz);
+ fail_create_ipblink:
+ fail_create_fifosz:
+ if (task->map_length)
+ device_remove_file(&dev->dev, &dev_attr_mmap);
+ fail_create_mmap:
+ device_remove_file(&dev->dev, &dev_attr_wsz);
+ fail_create_wsz:
+ device_remove_file(&dev->dev, &dev_attr_ttyp);
+ fail_create_ttyp:
+ device_remove_file(&dev->dev, &dev_attr_taskname);
+ fail_create_taskname:
+ if (task->map_length)
+ dev->fops.mmap = NULL;
+
+ dev->fops.write = NULL;
+ dev->wsz = 0;
+
+ dev->fops.read = NULL;
+ taskdev_flush_buf(dev);
+
+ if (sndtyp_wd(ttyp))
+ kfifo_free(dev->rcvdt.fifo);
+
+ dev->task = NULL;
+
+ return ret;
+}
+
+static void taskdev_detach_task(struct taskdev *dev)
+{
+ u16 ttyp = dev->task->ttyp;
+
+ device_remove_file(&dev->dev, &dev_attr_taskname);
+ device_remove_file(&dev->dev, &dev_attr_ttyp);
+ if (sndtyp_wd(ttyp)) {
+ device_remove_file(&dev->dev, &dev_attr_fifosz);
+ device_remove_file(&dev->dev, &dev_attr_fifocnt);
+ } else
+ device_remove_file(&dev->dev, &dev_attr_ipblink);
+ device_remove_file(&dev->dev, &dev_attr_wsz);
+ if (dev->task->map_length) {
+ device_remove_file(&dev->dev, &dev_attr_mmap);
+ dev->fops.mmap = NULL;
+ }
+
+ dev->fops.read = NULL;
+ taskdev_flush_buf(dev);
+ if (sndtyp_wd(ttyp))
+ kfifo_free(dev->rcvdt.fifo);
+
+ dev->fops.write = NULL;
+ dev->wsz = 0;
+
+ pr_info("omapdsp: taskdev %s disabled.\n", dev->name);
+ dev->task = NULL;
+}
+
+/*
+ * tadd / tdel / tkill
+ */
+static int dsp_tadd(struct taskdev *dev, dsp_long_t adr)
+{
+ struct dsptask *task;
+ struct mb_exarg arg;
+ u8 tid, tid_response;
+ u16 argv[2];
+ int ret = 0;
+
+ if (!devstate_write_lock_and_test(dev, TASKDEV_ST_ADDREQ)) {
+ printk(KERN_ERR
+ "omapdsp: taskdev %s is not requesting for tadd. "
+ "(state is %s)\n", dev->name, devstate_name(dev->state));
+ return -EINVAL;
+ }
+ set_taskdev_state(dev, TASKDEV_ST_ADDING);
+ devstate_write_unlock(dev);
+
+ if (adr == TADD_ABORTADR) {
+ /* aborting tadd intentionally */
+ pr_info("omapdsp: tadd address is ABORTADR.\n");
+ goto fail_out;
+ }
+ if (adr >= DSPSPACE_SIZE) {
+ printk(KERN_ERR
+ "omapdsp: illegal address 0x%08x for tadd\n", adr);
+ ret = -EINVAL;
+ goto fail_out;
+ }
+
+ adr >>= 1; /* word address */
+ argv[0] = adr >> 16; /* addrh */
+ argv[1] = adr & 0xffff; /* addrl */
+
+ if (mutex_lock_interruptible(&cfg_lock)) {
+ ret = -EINTR;
+ goto fail_out;
+ }
+ cfg_tid = TID_ANON;
+ cfg_cmd = MBOX_CMD_DSP_TADD;
+ arg.tid = TID_ANON;
+ arg.argc = 2;
+ arg.argv = argv;
+
+ if (dsp_mem_sync_inc() < 0) {
+ printk(KERN_ERR "omapdsp: memory sync failed!\n");
+ ret = -EBUSY;
+ goto fail_out;
+ }
+ mbcompose_send_and_wait_exarg(TADD, 0, 0, &arg, &cfg_wait_q);
+
+ tid = cfg_tid;
+ cfg_tid = TID_ANON;
+ cfg_cmd = 0;
+ mutex_unlock(&cfg_lock);
+
+ if (tid == TID_ANON) {
+ printk(KERN_ERR "omapdsp: tadd failed!\n");
+ ret = -EINVAL;
+ goto fail_out;
+ }
+ if ((tid < n_task) || dsptask[tid]) {
+ printk(KERN_ERR "omapdsp: illegal tid (%d)!\n", tid);
+ ret = -EINVAL;
+ goto fail_out;
+ }
+ if ((task = kzalloc(sizeof(struct dsptask), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto del_out;
+ }
+
+ if ((ret = dsp_task_config(task, tid)) < 0)
+ goto free_out;
+
+ if (strcmp(dev->name, task->name)) {
+ printk(KERN_ERR
+ "omapdsp: task name (%s) doesn't match with "
+ "device name (%s).\n", task->name, dev->name);
+ ret = -EINVAL;
+ goto free_out;
+ }
+
+ if ((ret = taskdev_attach_task(dev, task)) < 0)
+ goto free_out;
+
+ dsp_task_init(task);
+ pr_info("omapdsp: taskdev %s enabled.\n", dev->name);
+ set_taskdev_state(dev, TASKDEV_ST_ATTACHED);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ return 0;
+
+free_out:
+ kfree(task);
+
+del_out:
+ printk(KERN_ERR "omapdsp: deleting the task...\n");
+
+ set_taskdev_state(dev, TASKDEV_ST_DELING);
+
+ if (mutex_lock_interruptible(&cfg_lock)) {
+ printk(KERN_ERR "omapdsp: aborting tdel process. "
+ "DSP side could be corrupted.\n");
+ goto fail_out;
+ }
+ cfg_tid = TID_ANON;
+ cfg_cmd = MBOX_CMD_DSP_TDEL;
+ mbcompose_send_and_wait(TDEL, tid, TDEL_KILL, &cfg_wait_q);
+ tid_response = cfg_tid;
+ cfg_tid = TID_ANON;
+ cfg_cmd = 0;
+ mutex_unlock(&cfg_lock);
+
+ if (tid_response != tid)
+ printk(KERN_ERR "omapdsp: tdel failed. "
+ "DSP side could be corrupted.\n");
+
+fail_out:
+ set_taskdev_state(dev, TASKDEV_ST_ADDFAIL);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ return ret;
+}
+
+int dsp_tadd_minor(unsigned char minor, dsp_long_t adr)
+{
+ struct taskdev *dev;
+ int status;
+ int ret;
+
+ if (mutex_lock_interruptible(&devmgr_lock))
+ return -EINTR;
+
+ if ((minor >= TASKDEV_MAX) || ((dev = taskdev[minor]) == NULL)) {
+ printk(KERN_ERR
+ "omapdsp: no task device with minor %d\n", minor);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = minor;
+ if ((status = dsp_tadd(dev, adr)) < 0)
+ ret = status;
+
+out:
+ mutex_unlock(&devmgr_lock);
+ return ret;
+}
+
+static int dsp_tdel(struct taskdev *dev)
+{
+ if (!devstate_write_lock_and_test(dev, TASKDEV_ST_DELREQ)) {
+ printk(KERN_ERR
+ "omapdsp: taskdev %s is not requesting for tdel. "
+ "(state is %s)\n", dev->name, devstate_name(dev->state));
+ return -EINVAL;
+ }
+ set_taskdev_state(dev, TASKDEV_ST_DELING);
+ devstate_write_unlock(dev);
+
+ return dsp_tdel_bh(dev, TDEL_SAFE);
+}
+
+int dsp_tdel_minor(unsigned char minor)
+{
+ struct taskdev *dev;
+ int status;
+ int ret;
+
+ if (mutex_lock_interruptible(&devmgr_lock))
+ return -EINTR;
+
+ if ((minor >= TASKDEV_MAX) || ((dev = taskdev[minor]) == NULL)) {
+ printk(KERN_ERR
+ "omapdsp: no task device with minor %d\n", minor);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = minor;
+ if ((status = dsp_tdel(dev)) < 0)
+ ret = status;
+
+out:
+ mutex_unlock(&devmgr_lock);
+ return ret;
+}
+
+static int dsp_tkill(struct taskdev *dev)
+{
+ while (!down_write_trylock(&dev->state_sem)) {
+ if (!devstate_read_lock_and_test(dev, (TASKDEV_ST_ATTACHED |
+ TASKDEV_ST_FREEZED))) {
+ printk(KERN_ERR
+ "omapdsp: task has not been attached for "
+ "taskdev %s\n", dev->name);
+ return -EINVAL;
+ }
+ /* ATTACHED -> FREEZED can be changed under read semaphore. */
+ set_taskdev_state(dev, TASKDEV_ST_FREEZED);
+ wake_up_interruptible_all(&dev->read_wait_q);
+ wake_up_interruptible_all(&dev->write_wait_q);
+ wake_up_interruptible_all(&dev->tctl_wait_q);
+ devstate_read_unlock(dev);
+ schedule();
+ }
+
+ if (!(dev->state & (TASKDEV_ST_ATTACHED |
+ TASKDEV_ST_FREEZED))) {
+ printk(KERN_ERR
+ "omapdsp: task has not been attached for taskdev %s\n",
+ dev->name);
+ devstate_write_unlock(dev);
+ return -EINVAL;
+ }
+ if (!is_dynamic_task(dev->task->tid)) {
+ printk(KERN_ERR "omapdsp: task %s is not a dynamic task.\n",
+ dev->name);
+ devstate_write_unlock(dev);
+ return -EINVAL;
+ }
+ set_taskdev_state(dev, TASKDEV_ST_KILLING);
+ devstate_write_unlock(dev);
+
+ return dsp_tdel_bh(dev, TDEL_KILL);
+}
+
+int dsp_tkill_minor(unsigned char minor)
+{
+ struct taskdev *dev;
+ int status;
+ int ret;
+
+ if (mutex_lock_interruptible(&devmgr_lock))
+ return -EINTR;
+
+ if ((minor >= TASKDEV_MAX) || ((dev = taskdev[minor]) == NULL)) {
+ printk(KERN_ERR
+ "omapdsp: no task device with minor %d\n", minor);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = minor;
+ if ((status = dsp_tkill(dev)) < 0)
+ ret = status;
+
+out:
+ mutex_unlock(&devmgr_lock);
+ return ret;
+}
+
+static int dsp_tdel_bh(struct taskdev *dev, u16 type)
+{
+ struct dsptask *task;
+ u8 tid, tid_response;
+ int ret = 0;
+
+ task = dev->task;
+ tid = task->tid;
+ if (mutex_lock_interruptible(&cfg_lock)) {
+ if (type == TDEL_SAFE) {
+ set_taskdev_state(dev, TASKDEV_ST_DELREQ);
+ return -EINTR;
+ } else {
+ tid_response = TID_ANON;
+ ret = -EINTR;
+ goto detach_out;
+ }
+ }
+ cfg_tid = TID_ANON;
+ cfg_cmd = MBOX_CMD_DSP_TDEL;
+ mbcompose_send_and_wait(TDEL, tid, type, &cfg_wait_q);
+ tid_response = cfg_tid;
+ cfg_tid = TID_ANON;
+ cfg_cmd = 0;
+ mutex_unlock(&cfg_lock);
+
+detach_out:
+ taskdev_detach_task(dev);
+ dsp_task_unconfig(task);
+ kfree(task);
+
+ if (tid_response != tid) {
+ printk(KERN_ERR "omapdsp: %s failed!\n",
+ (type == TDEL_SAFE) ? "tdel" : "tkill");
+ ret = -EINVAL;
+ }
+ down_write(&dev->state_sem);
+ set_taskdev_state(dev, (dev->usecount > 0) ? TASKDEV_ST_GARBAGE :
+ TASKDEV_ST_NOTASK);
+ wake_up_interruptible_all(&dev->state_wait_q);
+ up_write(&dev->state_sem);
+
+ return ret;
+}
+
+/*
+ * state inquiry
+ */
+long taskdev_state_stale(unsigned char minor)
+{
+ if (taskdev[minor]) {
+ long state = taskdev[minor]->state;
+ taskdev[minor]->state |= TASKDEV_ST_STALE;
+ return state;
+ } else
+ return TASKDEV_ST_NOTASK;
+}
+
+/*
+ * functions called from mailbox interrupt routine
+ */
+void mbox_wdsnd(struct mbcmd *mb)
+{
+ unsigned int n;
+ u8 tid = mb->cmd_l;
+ u16 data = mb->data;
+ struct dsptask *task = dsptask[tid];
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: WDSND with illegal tid! %d\n", tid);
+ return;
+ }
+ if (sndtyp_bk(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: WDSND from block sending task! (task%d)\n", tid);
+ return;
+ }
+ if (sndtyp_psv(task->ttyp) &&
+ !waitqueue_active(&task->dev->read_wait_q)) {
+ printk(KERN_WARNING
+ "mbox: WDSND from passive sending task (task%d) "
+ "without request!\n", tid);
+ return;
+ }
+
+ n = kfifo_put(task->dev->rcvdt.fifo, (unsigned char *)&data,
+ sizeof(data));
+ if (n != sizeof(data))
+ printk(KERN_WARNING "Receive FIFO(%d) is full\n", tid);
+
+ wake_up_interruptible(&task->dev->read_wait_q);
+}
+
+void mbox_wdreq(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ struct dsptask *task = dsptask[tid];
+ struct taskdev *dev;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: WDREQ with illegal tid! %d\n", tid);
+ return;
+ }
+ if (rcvtyp_psv(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: WDREQ from passive receiving task! (task%d)\n",
+ tid);
+ return;
+ }
+
+ dev = task->dev;
+ spin_lock(&dev->wsz_lock);
+ dev->wsz = 2;
+ spin_unlock(&dev->wsz_lock);
+ wake_up_interruptible(&dev->write_wait_q);
+}
+
+void mbox_bksnd(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ u16 bid = mb->data;
+ struct dsptask *task = dsptask[tid];
+ struct ipbuf_head *ipb_h;
+ u16 cnt;
+
+ if (bid >= ipbcfg.ln) {
+ printk(KERN_ERR "mbox: BKSND with illegal bid! %d\n", bid);
+ return;
+ }
+ ipb_h = bid_to_ipbuf(bid);
+ ipb_bsycnt_dec(&ipbcfg);
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: BKSND with illegal tid! %d\n", tid);
+ goto unuse_ipbuf_out;
+ }
+ if (sndtyp_wd(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKSND from word sending task! (task%d)\n", tid);
+ goto unuse_ipbuf_out;
+ }
+ if (sndtyp_pvt(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKSND from private sending task! (task%d)\n", tid);
+ goto unuse_ipbuf_out;
+ }
+ if (sync_with_dsp(&ipb_h->p->sd, tid, 10) < 0) {
+ printk(KERN_ERR "mbox: BKSND - IPBUF sync failed!\n");
+ return;
+ }
+
+ /* should be done in DSP, but just in case. */
+ ipb_h->p->next = BID_NULL;
+
+ cnt = ipb_h->p->c;
+ if (cnt > ipbcfg.lsz) {
+ printk(KERN_ERR "mbox: BKSND cnt(%d) > ipbuf line size(%d)!\n",
+ cnt, ipbcfg.lsz);
+ goto unuse_ipbuf_out;
+ }
+
+ if (cnt == 0) {
+ /* 0-byte send from DSP */
+ unuse_ipbuf_nowait(ipb_h);
+ goto done;
+ }
+ ipblink_add_tail(&task->dev->rcvdt.bk.link, bid);
+ /* keep the incoming bid and return an alternative line to the DSP. */
+ balance_ipbuf();
+
+done:
+ wake_up_interruptible(&task->dev->read_wait_q);
+ return;
+
+unuse_ipbuf_out:
+ unuse_ipbuf_nowait(ipb_h);
+ return;
+}
+
+void mbox_bkreq(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ u16 cnt = mb->data;
+ struct dsptask *task = dsptask[tid];
+ struct taskdev *dev;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: BKREQ with illegal tid! %d\n", tid);
+ return;
+ }
+ if (rcvtyp_wd(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQ from word receiving task! (task%d)\n", tid);
+ return;
+ }
+ if (rcvtyp_pvt(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQ from private receiving task! (task%d)\n",
+ tid);
+ return;
+ }
+ if (rcvtyp_psv(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQ from passive receiving task! (task%d)\n",
+ tid);
+ return;
+ }
+
+ dev = task->dev;
+ spin_lock(&dev->wsz_lock);
+ dev->wsz = cnt*2;
+ spin_unlock(&dev->wsz_lock);
+ wake_up_interruptible(&dev->write_wait_q);
+}
+
+void mbox_bkyld(struct mbcmd *mb)
+{
+ u16 bid = mb->data;
+ struct ipbuf_head *ipb_h;
+
+ if (bid >= ipbcfg.ln) {
+ printk(KERN_ERR "mbox: BKYLD with illegal bid! %d\n", bid);
+ return;
+ }
+ ipb_h = bid_to_ipbuf(bid);
+
+ /* should be done in DSP, but just in case. */
+ ipb_h->p->next = BID_NULL;
+
+ /* we don't need to sync with DSP */
+ ipb_bsycnt_dec(&ipbcfg);
+ release_ipbuf(ipb_h);
+}
+
+void mbox_bksndp(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ struct dsptask *task = dsptask[tid];
+ struct ipbuf_p *ipbp;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: BKSNDP with illegal tid! %d\n", tid);
+ return;
+ }
+ if (sndtyp_wd(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKSNDP from word sending task! (task%d)\n", tid);
+ return;
+ }
+ if (sndtyp_gbl(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKSNDP from non-private sending task! (task%d)\n",
+ tid);
+ return;
+ }
+
+ /*
+ * we should not have a delayed block at this point, because the
+ * read() routine releases the buffer lock and the DSP cannot send
+ * the next data until then.
+ */
+
+ ipbp = task->ipbuf_pvt_r;
+ if (sync_with_dsp(&ipbp->s, tid, 10) < 0) {
+ printk(KERN_ERR "mbox: BKSNDP - IPBUF sync failed!\n");
+ return;
+ }
+ pr_debug("mbox: ipbuf_pvt_r->a = 0x%08lx\n",
+ MKLONG(ipbp->ah, ipbp->al));
+ ipblink_add_pvt(&task->dev->rcvdt.bk.link);
+ wake_up_interruptible(&task->dev->read_wait_q);
+}
+
+void mbox_bkreqp(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ struct dsptask *task = dsptask[tid];
+ struct taskdev *dev;
+ struct ipbuf_p *ipbp;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: BKREQP with illegal tid! %d\n", tid);
+ return;
+ }
+ if (rcvtyp_wd(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQP from word receiving task! (task%d)\n", tid);
+ return;
+ }
+ if (rcvtyp_gbl(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQP from non-private receiving task! (task%d)\n", tid);
+ return;
+ }
+ if (rcvtyp_psv(task->ttyp)) {
+ printk(KERN_ERR
+ "mbox: BKREQP from passive receiving task! (task%d)\n", tid);
+ return;
+ }
+
+ ipbp = task->ipbuf_pvt_w;
+ if (sync_with_dsp(&ipbp->s, TID_FREE, 10) < 0) {
+ printk(KERN_ERR "mbox: BKREQP - IPBUF sync failed!\n");
+ return;
+ }
+ pr_debug("mbox: ipbuf_pvt_w->a = 0x%08lx\n",
+ MKLONG(ipbp->ah, ipbp->al));
+ dev = task->dev;
+ spin_lock(&dev->wsz_lock);
+ dev->wsz = ipbp->c*2;
+ spin_unlock(&dev->wsz_lock);
+ wake_up_interruptible(&dev->write_wait_q);
+}
+
+void mbox_tctl(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ struct dsptask *task = dsptask[tid];
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: TCTL with illegal tid! %d\n", tid);
+ return;
+ }
+
+ if (!waitqueue_active(&task->dev->tctl_wait_q)) {
+ printk(KERN_WARNING "mbox: unexpected TCTL from DSP!\n");
+ return;
+ }
+
+ task->dev->tctl_stat = mb->data;
+ wake_up_interruptible(&task->dev->tctl_wait_q);
+}
+
+void mbox_tcfg(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ struct dsptask *task = dsptask[tid];
+ u16 *tnm;
+ volatile u16 *buf;
+ int i;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: TCFG with illegal tid! %d\n", tid);
+ return;
+ }
+ if ((task->state != TASK_ST_CFGREQ) || (cfg_cmd != MBOX_CMD_DSP_TCFG)) {
+ printk(KERN_WARNING "mbox: unexpected TCFG from DSP!\n");
+ return;
+ }
+
+ if (dsp_mem_enable(ipbuf_sys_da) < 0) {
+ printk(KERN_ERR "mbox: TCFG - ipbuf_sys_da read failed!\n");
+ dsp_mem_disable(ipbuf_sys_da);
+ goto out;
+ }
+ if (sync_with_dsp(&ipbuf_sys_da->s, tid, 10) < 0) {
+ printk(KERN_ERR "mbox: TCFG - IPBUF sync failed!\n");
+ dsp_mem_disable(ipbuf_sys_da);
+ goto out;
+ }
+
+ /*
+ * read configuration data on system IPBUF
+ */
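+ /*
+ * Word layout of the configuration data, as consumed below:
+ * d[0] ttyp
+ * d[1..2] ipbuf_pvt_r address (high, low)
+ * d[3..4] ipbuf_pvt_w address (high, low)
+ * d[5..6] map_base address (high, low)
+ * d[7..8] map_length in words (high, low)
+ * d[9..10] task name string address (high, low)
+ */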
+ buf = ipbuf_sys_da->d;
+ task->ttyp = buf[0];
+ task->ipbuf_pvt_r = MKVIRT(buf[1], buf[2]);
+ task->ipbuf_pvt_w = MKVIRT(buf[3], buf[4]);
+ task->map_base = MKVIRT(buf[5], buf[6]);
+ task->map_length = MKLONG(buf[7], buf[8]) << 1; /* word -> byte */
+ tnm = MKVIRT(buf[9], buf[10]);
+ release_ipbuf_pvt(ipbuf_sys_da);
+ dsp_mem_disable(ipbuf_sys_da);
+
+ /*
+ * copy task name string
+ */
+ if (dsp_address_validate(tnm, TNM_LEN, "task name buffer") < 0) {
+ task->name[0] = '\0';
+ goto out;
+ }
+
+ for (i = 0; i < TNM_LEN-1; i++) {
+ /* avoiding byte access */
+ u16 tmp = tnm[i];
+ task->name[i] = tmp & 0x00ff;
+ if (!tmp)
+ break;
+ }
+ task->name[TNM_LEN-1] = '\0';
+
+ task->state = TASK_ST_READY;
+out:
+ wake_up_interruptible(&cfg_wait_q);
+}
+
+void mbox_tadd(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+
+ if ((!waitqueue_active(&cfg_wait_q)) || (cfg_cmd != MBOX_CMD_DSP_TADD)) {
+ printk(KERN_WARNING "mbox: unexpected TADD from DSP!\n");
+ return;
+ }
+ cfg_tid = tid;
+ wake_up_interruptible(&cfg_wait_q);
+}
+
+void mbox_tdel(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+
+ if ((!waitqueue_active(&cfg_wait_q)) || (cfg_cmd != MBOX_CMD_DSP_TDEL)) {
+ printk(KERN_WARNING "mbox: unexpected TDEL from DSP!\n");
+ return;
+ }
+ cfg_tid = tid;
+ wake_up_interruptible(&cfg_wait_q);
+}
+
+void mbox_err_fatal(u8 tid)
+{
+ struct dsptask *task = dsptask[tid];
+ struct taskdev *dev;
+
+ if ((tid >= TASKDEV_MAX) || (task == NULL)) {
+ printk(KERN_ERR "mbox: FATAL ERR with illegal tid! %d\n", tid);
+ return;
+ }
+
+ /* wake up waiting processes */
+ dev = task->dev;
+ wake_up_interruptible_all(&dev->read_wait_q);
+ wake_up_interruptible_all(&dev->write_wait_q);
+ wake_up_interruptible_all(&dev->tctl_wait_q);
+}
+
+static u16 *dbg_buf;
+static u16 dbg_buf_sz, dbg_line_sz;
+static int dbg_rp;
+
+int dsp_dbg_config(u16 *buf, u16 sz, u16 lsz)
+{
+#ifdef OLD_BINARY_SUPPORT
+ if ((mbox_revision == MBREV_3_0) || (mbox_revision == MBREV_3_2)) {
+ dbg_buf = NULL;
+ dbg_buf_sz = 0;
+ dbg_line_sz = 0;
+ dbg_rp = 0;
+ return 0;
+ }
+#endif
+
+ if (dsp_address_validate(buf, sz, "debug buffer") < 0)
+ return -1;
+
+ if (lsz > sz) {
+ printk(KERN_ERR
+ "omapdsp: dbg_buf lsz (%d) is greater than its "
+ "buffer size (%d)\n", lsz, sz);
+ return -1;
+ }
+
+ dbg_buf = buf;
+ dbg_buf_sz = sz;
+ dbg_line_sz = lsz;
+ dbg_rp = 0;
+
+ return 0;
+}
+
+void dsp_dbg_stop(void)
+{
+ dbg_buf = NULL;
+}
+
+#ifdef OLD_BINARY_SUPPORT
+static void mbox_dbg_old(struct mbcmd *mb);
+#endif
+
+void mbox_dbg(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ int cnt = mb->data;
+ char s[80], *s_end = &s[79], *p;
+ u16 *src;
+ int i;
+
+#ifdef OLD_BINARY_SUPPORT
+ if ((mbox_revision == MBREV_3_0) || (mbox_revision == MBREV_3_2)) {
+ mbox_dbg_old(mb);
+ return;
+ }
+#endif
+
+ if (((tid >= TASKDEV_MAX) || (dsptask[tid] == NULL)) &&
+ (tid != TID_ANON)) {
+ printk(KERN_ERR "mbox: DBG with illegal tid! %d\n", tid);
+ return;
+ }
+ if (dbg_buf == NULL) {
+ printk(KERN_ERR "mbox: DBG command received, but "
+ "dbg_buf has not been configured yet.\n");
+ return;
+ }
+
+ if (dsp_mem_enable(dbg_buf) < 0)
+ return;
+
+ src = &dbg_buf[dbg_rp];
+ p = s;
+ for (i = 0; i < cnt; i++) {
+ u16 tmp;
+ /*
+ * Be careful not to read dbg_buf with 1-byte accesses, since it
+ * might be placed in DARAM/SARAM and that can cause an unexpected
+ * byteswap.
+ * For example,
+ * *(p++) = *(src++) & 0xff;
+ * causes 1-byte access!
+ */
+ tmp = *src++;
+ *(p++) = tmp & 0xff;
+ if (*(p-1) == '\n') {
+ *p = '\0';
+ pr_info("%s", s);
+ p = s;
+ continue;
+ }
+ if (p == s_end) {
+ *p = '\0';
+ pr_info("%s\n", s);
+ p = s;
+ continue;
+ }
+ }
+ if (p > s) {
+ *p = '\0';
+ pr_info("%s\n", s);
+ }
+ if ((dbg_rp += cnt + 1) > dbg_buf_sz - dbg_line_sz)
+ dbg_rp = 0;
+
+ dsp_mem_disable(dbg_buf);
+}
+
+#ifdef OLD_BINARY_SUPPORT
+static void mbox_dbg_old(struct mbcmd *mb)
+{
+ u8 tid = mb->cmd_l;
+ char s[80], *s_end = &s[79], *p;
+ u16 *src;
+ volatile u16 *buf;
+ int cnt;
+ int i;
+
+ if (((tid >= TASKDEV_MAX) || (dsptask[tid] == NULL)) &&
+ (tid != TID_ANON)) {
+ printk(KERN_ERR "mbox: DBG with illegal tid! %d\n", tid);
+ return;
+ }
+ if (dsp_mem_enable(ipbuf_sys_da) < 0) {
+ printk(KERN_ERR "mbox: DBG - ipbuf_sys_da read failed!\n");
+ return;
+ }
+ if (sync_with_dsp(&ipbuf_sys_da->s, tid, 10) < 0) {
+ printk(KERN_ERR "mbox: DBG - IPBUF sync failed!\n");
+ goto out1;
+ }
+ buf = ipbuf_sys_da->d;
+ cnt = buf[0];
+ src = MKVIRT(buf[1], buf[2]);
+ if (dsp_address_validate(src, cnt, "dbg buffer") < 0)
+ goto out2;
+
+ if (dsp_mem_enable(src) < 0)
+ goto out2;
+
+ p = s;
+ for (i = 0; i < cnt; i++) {
+ u16 tmp;
+ /*
+ * Be careful not to read the ipbuf with 1-byte accesses, since it
+ * might be placed in DARAM/SARAM and that can cause an unexpected
+ * byteswap.
+ * For example,
+ * *(p++) = *(src++) & 0xff;
+ * causes 1-byte access!
+ */
+ tmp = *src++;
+ *(p++) = tmp & 0xff;
+ if (*(p-1) == '\n') {
+ *p = '\0';
+ pr_info("%s", s);
+ p = s;
+ continue;
+ }
+ if (p == s_end) {
+ *p = '\0';
+ pr_info("%s\n", s);
+ p = s;
+ continue;
+ }
+ }
+ if (p > s) {
+ *p = '\0';
+ pr_info("%s\n", s);
+ }
+
+ dsp_mem_disable(src);
+out2:
+ release_ipbuf_pvt(ipbuf_sys_da);
+out1:
+ dsp_mem_disable(ipbuf_sys_da);
+}
+#endif /* OLD_BINARY_SUPPORT */
+
+/*
+ * sysfs files: for each device
+ */
+
+/* devname */
+static ssize_t devname_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", to_taskdev(d)->name);
+}
+
+/* devstate */
+static ssize_t devstate_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", devstate_name(to_taskdev(d)->state));
+}
+
+/* proc_list */
+static ssize_t proc_list_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct taskdev *dev;
+ struct proc_list *pl;
+ int len = 0;
+
+ dev = to_taskdev(d);
+ spin_lock(&dev->proc_list_lock);
+ list_for_each_entry(pl, &dev->proc_list, list_head) {
+ /* need to lock tasklist_lock before calling
+ * find_task_by_pid_type. */
+ read_lock(&tasklist_lock);
+ if (find_task_by_pid_type_ns(PIDTYPE_PID, pl->pid, &init_pid_ns) != NULL)
+ len += sprintf(buf + len, "%d\n", pl->pid);
+ read_unlock(&tasklist_lock);
+ }
+ spin_unlock(&dev->proc_list_lock);
+
+ return len;
+}
+
+/* taskname */
+static ssize_t taskname_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct taskdev *dev = to_taskdev(d);
+ int len;
+
+ if (!devstate_read_lock_and_test(dev, TASKDEV_ST_ATTACHED))
+ return -ENODEV;
+
+ len = sprintf(buf, "%s\n", dev->task->name);
+
+ devstate_read_unlock(dev);
+ return len;
+}
+
+/* ttyp */
+static ssize_t ttyp_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct taskdev *dev = to_taskdev(d);
+ u16 ttyp;
+ int len = 0;
+
+ if (!devstate_read_lock_and_test(dev, TASKDEV_ST_ATTACHED))
+ return -ENODEV;
+
+ ttyp = dev->task->ttyp;
+ len += sprintf(buf + len, "0x%04x\n", ttyp);
+ len += sprintf(buf + len, "%s %s send\n",
+ (sndtyp_acv(ttyp)) ? "active" :
+ "passive",
+ (sndtyp_wd(ttyp)) ? "word" :
+ (sndtyp_pvt(ttyp)) ? "private block" :
+ "global block");
+ len += sprintf(buf + len, "%s %s receive\n",
+ (rcvtyp_acv(ttyp)) ? "active" :
+ "passive",
+ (rcvtyp_wd(ttyp)) ? "word" :
+ (rcvtyp_pvt(ttyp)) ? "private block" :
+ "global block");
+
+ devstate_read_unlock(dev);
+ return len;
+}
+
+/* fifosz */
+static ssize_t fifosz_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct kfifo *fifo = to_taskdev(d)->rcvdt.fifo;
+ return sprintf(buf, "%d\n", fifo->size);
+}
+
+static ssize_t fifosz_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct taskdev *dev = to_taskdev(d);
+ unsigned long fifosz;
+ int ret;
+
+ fifosz = simple_strtol(buf, NULL, 10);
+ if (taskdev_lock_and_statelock_attached(dev, &dev->read_mutex))
+ return -ENODEV;
+ ret = taskdev_set_fifosz(dev, fifosz);
+ taskdev_unlock_and_stateunlock(dev, &dev->read_mutex);
+
+ return (ret < 0) ? ret : strlen(buf);
+}
+
+/* fifocnt */
+static ssize_t fifocnt_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct kfifo *fifo = to_taskdev(d)->rcvdt.fifo;
+ return sprintf(buf, "%d\n", kfifo_len(fifo));
+}
+
+/* ipblink */
+static inline char *bid_name(u16 bid)
+{
+ static char s[6];
+
+ switch (bid) {
+ case BID_NULL:
+ return "NULL";
+ case BID_PVT:
+ return "PRIVATE";
+ default:
+ sprintf(s, "%d", bid);
+ return s;
+ }
+}
+
+static ssize_t ipblink_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct rcvdt_bk_struct *rcvdt = &to_taskdev(d)->rcvdt.bk;
+ int len;
+
+ spin_lock(&rcvdt->link.lock);
+ len = sprintf(buf, "top %s\ntail %s\n",
+ bid_name(rcvdt->link.top), bid_name(rcvdt->link.tail));
+ spin_unlock(&rcvdt->link.lock);
+
+ return len;
+}
+
+/* wsz */
+static ssize_t wsz_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", to_taskdev(d)->wsz);
+}
+
+/* mmap */
+static ssize_t mmap_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct dsptask *task = to_taskdev(d)->task;
+ return sprintf(buf, "0x%p 0x%x\n", task->map_base, task->map_length);
+}
+
+/*
+ * called from ipbuf_show()
+ */
+int ipbuf_is_held(u8 tid, u16 bid)
+{
+ struct dsptask *task = dsptask[tid];
+ struct ipblink *link;
+ u16 b;
+ int ret = 0;
+
+ if (task == NULL)
+ return 0;
+
+ link = &task->dev->rcvdt.bk.link;
+ spin_lock(&link->lock);
+ ipblink_for_each(b, link) {
+ if (b == bid) { /* found */
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock(&link->lock);
+
+ return ret;
+}
+
+int __init dsp_taskmod_init(void)
+{
+ int retval;
+
+ memset(taskdev, 0, sizeof(void *) * TASKDEV_MAX);
+ memset(dsptask, 0, sizeof(void *) * TASKDEV_MAX);
+
+ retval = register_chrdev(OMAP_DSP_TASK_MAJOR, "dsptask",
+ &dsp_task_fops);
+ if (retval < 0) {
+ printk(KERN_ERR
+ "omapdsp: failed to register task device: %d\n", retval);
+ return retval;
+ }
+
+ retval = bus_register(&dsptask_bus);
+ if (retval) {
+ printk(KERN_ERR
+ "omapdsp: failed to register DSP task bus: %d\n",
+ retval);
+ unregister_chrdev(OMAP_DSP_TASK_MAJOR, "dsptask");
+ return -EINVAL;
+ }
+ retval = driver_register(&dsptask_driver);
+ if (retval) {
+ printk(KERN_ERR
+ "omapdsp: failed to register DSP task driver: %d\n",
+ retval);
+ bus_unregister(&dsptask_bus);
+ unregister_chrdev(OMAP_DSP_TASK_MAJOR, "dsptask");
+ return -EINVAL;
+ }
+ dsp_task_class = class_create(THIS_MODULE, "dsptask");
+ if (IS_ERR(dsp_task_class)) {
+ printk(KERN_ERR "omapdsp: failed to create DSP task class\n");
+ driver_unregister(&dsptask_driver);
+ bus_unregister(&dsptask_bus);
+ unregister_chrdev(OMAP_DSP_TASK_MAJOR, "dsptask");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dsp_taskmod_exit(void)
+{
+ class_destroy(dsp_task_class);
+ driver_unregister(&dsptask_driver);
+ bus_unregister(&dsptask_bus);
+ unregister_chrdev(OMAP_DSP_TASK_MAJOR, "dsptask");
+}
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
- I2C interface on the Texas Instruments OMAP1/2 family of processors.
- Like OMAP1510/1610/1710/5912 and OMAP242x.
+ I2C interface on the Texas Instruments OMAP1/2/3 family of
+ processors, such as the OMAP1510/1610/1710/5912, OMAP242x,
+ OMAP34x and OMAP35x.
For details see http://www.ti.com/omap.
-
- config I2C_PARPORT
- tristate "Parallel port adapter"
- depends on PARPORT
- select I2C_ALGOBIT
- help
- This supports parallel port I2C adapters such as the ones made by
- Philips or Velleman, Analog Devices evaluation boards, and more.
- Basically any adapter using the parallel port as an I2C bus with
- no extra chipset is supported by this driver, or could be.
-
- This driver is a replacement for (and was inspired by) an older
- driver named i2c-philips-par. The new driver supports more devices,
- and makes it easier to add support for new devices.
-
- An adapter type parameter is now mandatory. Please read the file
- Documentation/i2c/busses/i2c-parport for details.
-
- Another driver exists, named i2c-parport-light, which doesn't depend
- on the parport driver. This is meant for embedded systems. Don't say
- Y here if you intend to say Y or M there.
-
- This support is also available as a module. If so, the module
- will be called i2c-parport.
-
- config I2C_PARPORT_LIGHT
- tristate "Parallel port adapter (light)"
- select I2C_ALGOBIT
- help
- This supports parallel port I2C adapters such as the ones made by
- Philips or Velleman, Analog Devices evaluation boards, and more.
- Basically any adapter using the parallel port as an I2C bus with
- no extra chipset is supported by this driver, or could be.
-
- This driver is a light version of i2c-parport. It doesn't depend
- on the parport driver, and uses direct I/O access instead. This
- might be preferred on embedded systems where wasting memory for
- the clean but heavy parport handling is not an option. The
- drawback is a reduced portability and the impossibility to
- daisy-chain other parallel port devices.
-
- Don't say Y here if you said Y or M to i2c-parport. Saying M to
- both is possible but both modules should not be loaded at the same
- time.
-
- This support is also available as a module. If so, the module
- will be called i2c-parport-light.
-
+config I2C2_OMAP_BEAGLE
+ bool "Enable I2C2 for OMAP3 BeagleBoard"
+ depends on ARCH_OMAP && MACH_OMAP3_BEAGLE
+ select OMAP_MUX
+ default n
+ help
+ Say Y here if you want to enable I2C bus 2 on the OMAP3-based
+ BeagleBoard.
+ I2C2 on the BeagleBoard is routed to the expansion connector, so it
+ is unused unless something is connected there. Because the internal
+ OMAP3 pull-up resistors are not strong enough, an enabled but unused
+ I2C2 bus produces error messages (e.g. I2C timeouts). Enable this
+ only if something is connected to I2C2 on the board's expansion
+ connector and that extension provides additional pull-up resistors
+ for the I2C2 bus.
+
config I2C_PASEMI
tristate "PA Semi SMBus interface"
depends on PPC_PASEMI && PCI
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
- obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
- obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
- obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
- obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
- obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
- obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
- obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
- obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
- obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
- obj-$(CONFIG_I2C_SAVAGE4) += i2c-savage4.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
- obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
- obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
- obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
- obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
- obj-$(CONFIG_I2C_STUB) += i2c-stub.o
+ obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
+
+ # External I2C/SMBus adapter drivers
+ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
+ obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
- obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
- obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
- obj-$(CONFIG_I2C_VIA) += i2c-via.o
- obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
+
+ # Graphics adapter I2C/DDC channel drivers
obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o
+
+ # Other I2C/SMBus bus drivers
+ obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
+ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+ obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
+ obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
+ obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
+ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
+ obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
EXTRA_CFLAGS += -DDEBUG
--- /dev/null
+/*
+ * twl4030_core.c - driver for TWL4030 PM and audio CODEC device
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+#include <linux/kthread.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl4030-gpio.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/i2c/twl4030-pwrirq.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+
+#include <asm/mach/irq.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+
+#define DRIVER_NAME "twl4030"
+
+/* Macro Definitions */
+#define TWL_CLIENT_STRING "TWL4030-ID"
+#define TWL_CLIENT_USED 1
+#define TWL_CLIENT_FREE 0
+
+/* IRQ Flags */
+#define FREE 0
+#define USED 1
+
+/* Primary Interrupt Handler on TWL4030 Registers */
+
+/* Register Definitions */
+
+#define REG_PIH_ISR_P1 (0x1)
+#define REG_PIH_ISR_P2 (0x2)
+#define REG_PIH_SIR (0x3)
+
+/* Triton Core internal information (BEGIN) */
+
+/* Last - for index max*/
+#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
+
+/* Slave address */
+#define TWL4030_NUM_SLAVES 0x04
+#define TWL4030_SLAVENUM_NUM0 0x00
+#define TWL4030_SLAVENUM_NUM1 0x01
+#define TWL4030_SLAVENUM_NUM2 0x02
+#define TWL4030_SLAVENUM_NUM3 0x03
+#define TWL4030_SLAVEID_ID0 0x48
+#define TWL4030_SLAVEID_ID1 0x49
+#define TWL4030_SLAVEID_ID2 0x4A
+#define TWL4030_SLAVEID_ID3 0x4B
+
+/* Base Address defns */
+/* USB ID */
+#define TWL4030_BASEADD_USB 0x0000
+/* AUD ID */
+#define TWL4030_BASEADD_AUDIO_VOICE 0x0000
+#define TWL4030_BASEADD_GPIO 0x0098
+
+#define TWL4030_BASEADD_INTBR 0x0085
+#define TWL4030_BASEADD_PIH 0x0080
+#define TWL4030_BASEADD_TEST 0x004C
+/* AUX ID */
+#define TWL4030_BASEADD_INTERRUPTS 0x00B9
+#define TWL4030_BASEADD_LED 0x00EE
+#define TWL4030_BASEADD_MADC 0x0000
+#define TWL4030_BASEADD_MAIN_CHARGE 0x0074
+#define TWL4030_BASEADD_PRECHARGE 0x00AA
+#define TWL4030_BASEADD_PWM0 0x00F8
+#define TWL4030_BASEADD_PWM1 0x00FB
+#define TWL4030_BASEADD_PWMA 0x00EF
+#define TWL4030_BASEADD_PWMB 0x00F1
+#define TWL4030_BASEADD_KEYPAD 0x00D2
+/* POWER ID */
+#define TWL4030_BASEADD_BACKUP 0x0014
+#define TWL4030_BASEADD_INT 0x002E
+#define TWL4030_BASEADD_PM_MASTER 0x0036
+#define TWL4030_BASEADD_PM_RECEIVER 0x005B
+#define TWL4030_BASEADD_RTC 0x001C
+#define TWL4030_BASEADD_SECURED_REG 0x0000
+
+/* TWL4030 BCI registers */
+#define TWL4030_INTERRUPTS_BCIIMR1A 0x2
+#define TWL4030_INTERRUPTS_BCIIMR2A 0x3
+#define TWL4030_INTERRUPTS_BCIIMR1B 0x6
+#define TWL4030_INTERRUPTS_BCIIMR2B 0x7
+#define TWL4030_INTERRUPTS_BCIISR1A 0x0
+#define TWL4030_INTERRUPTS_BCIISR2A 0x1
+#define TWL4030_INTERRUPTS_BCIISR1B 0x4
+#define TWL4030_INTERRUPTS_BCIISR2B 0x5
+
+/* TWL4030 keypad registers */
+#define TWL4030_KEYPAD_KEYP_IMR1 0x12
+#define TWL4030_KEYPAD_KEYP_IMR2 0x14
+#define TWL4030_KEYPAD_KEYP_ISR1 0x11
+#define TWL4030_KEYPAD_KEYP_ISR2 0x13
+
+
+/* Triton Core internal information (END) */
+
+/* Few power values */
+#define R_CFG_BOOT 0x05
+#define R_PROTECT_KEY 0x0E
+
+/* access control */
+#define KEY_UNLOCK1 0xce
+#define KEY_UNLOCK2 0xec
+#define KEY_LOCK 0x00
+
+#define HFCLK_FREQ_19p2_MHZ (1 << 0)
+#define HFCLK_FREQ_26_MHZ (2 << 0)
+#define HFCLK_FREQ_38p4_MHZ (3 << 0)
+#define HIGH_PERF_SQ (1 << 3)
+
+/* on I2C-1 for 2430SDP */
+#define CONFIG_I2C_TWL4030_ID 1
+
+/* SIH_CTRL registers that aren't defined elsewhere */
+#define TWL4030_INTERRUPTS_BCISIHCTRL 0x0d
+#define TWL4030_MADC_MADC_SIH_CTRL 0x67
+#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17
+
+#define TWL4030_SIH_CTRL_COR_MASK (1 << 2)
+
+/**
+ * struct twl4030_mod_iregs - TWL module IMR/ISR regs to mask/clear at init
+ * @mod_no: TWL4030 module number (e.g., TWL4030_MODULE_GPIO)
+ * @sih_ctrl: address of module SIH_CTRL register
+ * @reg_cnt: number of IMR/ISR regs
+ * @imrs: pointer to array of TWL module interrupt mask register indices
+ * @isrs: pointer to array of TWL module interrupt status register indices
+ *
+ * Ties together TWL4030 modules and lists of IMR/ISR registers to mask/clear
+ * during twl_init_irq().
+ */
+struct twl4030_mod_iregs {
+ const u8 mod_no;
+ const u8 sih_ctrl;
+ const u8 reg_cnt;
+ const u8 *imrs;
+ const u8 *isrs;
+};
+
+/* TWL4030 INT module interrupt mask registers */
+static const u8 __initconst twl4030_int_imr_regs[] = {
+ TWL4030_INT_PWR_IMR1,
+ TWL4030_INT_PWR_IMR2,
+};
+
+/* TWL4030 INT module interrupt status registers */
+static const u8 __initconst twl4030_int_isr_regs[] = {
+ TWL4030_INT_PWR_ISR1,
+ TWL4030_INT_PWR_ISR2,
+};
+
+/* TWL4030 INTERRUPTS module interrupt mask registers */
+static const u8 __initconst twl4030_interrupts_imr_regs[] = {
+ TWL4030_INTERRUPTS_BCIIMR1A,
+ TWL4030_INTERRUPTS_BCIIMR1B,
+ TWL4030_INTERRUPTS_BCIIMR2A,
+ TWL4030_INTERRUPTS_BCIIMR2B,
+};
+
+/* TWL4030 INTERRUPTS module interrupt status registers */
+static const u8 __initconst twl4030_interrupts_isr_regs[] = {
+ TWL4030_INTERRUPTS_BCIISR1A,
+ TWL4030_INTERRUPTS_BCIISR1B,
+ TWL4030_INTERRUPTS_BCIISR2A,
+ TWL4030_INTERRUPTS_BCIISR2B,
+};
+
+/* TWL4030 MADC module interrupt mask registers */
+static const u8 __initconst twl4030_madc_imr_regs[] = {
+ TWL4030_MADC_IMR1,
+ TWL4030_MADC_IMR2,
+};
+
+/* TWL4030 MADC module interrupt status registers */
+static const u8 __initconst twl4030_madc_isr_regs[] = {
+ TWL4030_MADC_ISR1,
+ TWL4030_MADC_ISR2,
+};
+
+/* TWL4030 keypad module interrupt mask registers */
+static const u8 __initconst twl4030_keypad_imr_regs[] = {
+ TWL4030_KEYPAD_KEYP_IMR1,
+ TWL4030_KEYPAD_KEYP_IMR2,
+};
+
+/* TWL4030 keypad module interrupt status registers */
+static const u8 __initconst twl4030_keypad_isr_regs[] = {
+ TWL4030_KEYPAD_KEYP_ISR1,
+ TWL4030_KEYPAD_KEYP_ISR2,
+};
+
+/* TWL4030 GPIO module interrupt mask registers */
+static const u8 __initconst twl4030_gpio_imr_regs[] = {
+ REG_GPIO_IMR1A,
+ REG_GPIO_IMR1B,
+ REG_GPIO_IMR2A,
+ REG_GPIO_IMR2B,
+ REG_GPIO_IMR3A,
+ REG_GPIO_IMR3B,
+};
+
+/* TWL4030 GPIO module interrupt status registers */
+static const u8 __initconst twl4030_gpio_isr_regs[] = {
+ REG_GPIO_ISR1A,
+ REG_GPIO_ISR1B,
+ REG_GPIO_ISR2A,
+ REG_GPIO_ISR2B,
+ REG_GPIO_ISR3A,
+ REG_GPIO_ISR3B,
+};
+
+/* TWL4030 modules that have IMR/ISR registers that must be masked/cleared */
+static const struct twl4030_mod_iregs __initconst twl4030_mod_regs[] = {
+ {
+ .mod_no = TWL4030_MODULE_INT,
+ .sih_ctrl = TWL4030_INT_PWR_SIH_CTRL,
+ .reg_cnt = ARRAY_SIZE(twl4030_int_imr_regs),
+ .imrs = twl4030_int_imr_regs,
+ .isrs = twl4030_int_isr_regs,
+ },
+ {
+ .mod_no = TWL4030_MODULE_INTERRUPTS,
+ .sih_ctrl = TWL4030_INTERRUPTS_BCISIHCTRL,
+ .reg_cnt = ARRAY_SIZE(twl4030_interrupts_imr_regs),
+ .imrs = twl4030_interrupts_imr_regs,
+ .isrs = twl4030_interrupts_isr_regs,
+ },
+ {
+ .mod_no = TWL4030_MODULE_MADC,
+ .sih_ctrl = TWL4030_MADC_MADC_SIH_CTRL,
+ .reg_cnt = ARRAY_SIZE(twl4030_madc_imr_regs),
+ .imrs = twl4030_madc_imr_regs,
+ .isrs = twl4030_madc_isr_regs,
+ },
+ {
+ .mod_no = TWL4030_MODULE_KEYPAD,
+ .sih_ctrl = TWL4030_KEYPAD_KEYP_SIH_CTRL,
+ .reg_cnt = ARRAY_SIZE(twl4030_keypad_imr_regs),
+ .imrs = twl4030_keypad_imr_regs,
+ .isrs = twl4030_keypad_isr_regs,
+ },
+ {
+ .mod_no = TWL4030_MODULE_GPIO,
+ .sih_ctrl = REG_GPIO_SIH_CTRL,
+ .reg_cnt = ARRAY_SIZE(twl4030_gpio_imr_regs),
+ .imrs = twl4030_gpio_imr_regs,
+ .isrs = twl4030_gpio_isr_regs,
+ },
+};
+
+
+/* Helper functions */
+static int
+twl4030_detect_client(struct i2c_adapter *adapter, unsigned char sid);
+static int twl4030_attach_adapter(struct i2c_adapter *adapter);
+static int twl4030_detach_client(struct i2c_client *client);
+static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc);
+
+static void twl_init_irq(void);
+
+/* Data Structures */
+/* Tracks whether the T2 (TWL4030) IRQ subsystem has been activated */
+static unsigned char twl_irq_used = FREE;
+static struct completion irq_event;
+
+/* Per-slave data for each of the four TWL4030 I2C slave IDs */
+struct twl4030_client {
+ struct i2c_client client;
+ const char client_name[sizeof(TWL_CLIENT_STRING) + 1];
+ const unsigned char address;
+ const char adapter_index;
+ unsigned char inuse;
+
+ /* at most two i2c_msg structs are needed, for a read transfer */
+ struct i2c_msg xfer_msg[2];
+
+ /* To lock access to xfer_msg */
+ struct mutex xfer_lock;
+};
+
+/* Module Mapping */
+struct twl4030mapping {
+ unsigned char sid; /* Slave ID */
+ unsigned char base; /* base address */
+};
+
+/* mapping the module id to slave id and base address */
+static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+ { TWL4030_SLAVENUM_NUM0, TWL4030_BASEADD_USB },
+ { TWL4030_SLAVENUM_NUM1, TWL4030_BASEADD_AUDIO_VOICE },
+ { TWL4030_SLAVENUM_NUM1, TWL4030_BASEADD_GPIO },
+ { TWL4030_SLAVENUM_NUM1, TWL4030_BASEADD_INTBR },
+ { TWL4030_SLAVENUM_NUM1, TWL4030_BASEADD_PIH },
+ { TWL4030_SLAVENUM_NUM1, TWL4030_BASEADD_TEST },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_KEYPAD },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_MADC },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_INTERRUPTS },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_LED },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_MAIN_CHARGE },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_PRECHARGE },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_PWM0 },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_PWM1 },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_PWMA },
+ { TWL4030_SLAVENUM_NUM2, TWL4030_BASEADD_PWMB },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_BACKUP },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_INT },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_PM_MASTER },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_PM_RECEIVER },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_RTC },
+ { TWL4030_SLAVENUM_NUM3, TWL4030_BASEADD_SECURED_REG },
+};
+
+static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES] = {
+ {
+ .address = TWL4030_SLAVEID_ID0,
+ .client_name = TWL_CLIENT_STRING "0",
+ .adapter_index = CONFIG_I2C_TWL4030_ID,
+ },
+ {
+ .address = TWL4030_SLAVEID_ID1,
+ .client_name = TWL_CLIENT_STRING "1",
+ .adapter_index = CONFIG_I2C_TWL4030_ID,
+ },
+ {
+ .address = TWL4030_SLAVEID_ID2,
+ .client_name = TWL_CLIENT_STRING "2",
+ .adapter_index = CONFIG_I2C_TWL4030_ID,
+ },
+ {
+ .address = TWL4030_SLAVEID_ID3,
+ .client_name = TWL_CLIENT_STRING "3",
+ .adapter_index = CONFIG_I2C_TWL4030_ID,
+ },
+};
+
+/* One client driver, four clients */
+static struct i2c_driver twl4030_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .attach_adapter = twl4030_attach_adapter,
+ .detach_client = twl4030_detach_client,
+};
+
+/*
+ * The TWL4030 PIH has no mask registers, hence these dummy functions
+ * for ack, mask and unmask.
+ */
+
+static void twl4030_i2c_ackirq(unsigned int irq) {}
+static void twl4030_i2c_disableint(unsigned int irq) {}
+static void twl4030_i2c_enableint(unsigned int irq) {}
+
+/* irq_chip shared by all TWL4030 module interrupts */
+static struct irq_chip twl4030_irq_chip = {
+ .name = "twl4030",
+ .ack = twl4030_i2c_ackirq,
+ .mask = twl4030_i2c_disableint,
+ .unmask = twl4030_i2c_enableint,
+};
+
+/* Global Functions */
+
+/**
+ * twl4030_i2c_write - Write num_bytes of data to a TWL4030 register
+ * @mod_no: module number
+ * @value: an array of num_bytes+1 containing the data to write
+ * @reg: register address (just the offset will do)
+ * @num_bytes: number of data bytes to transfer
+ *
+ * IMPORTANT: allocate 'value' with num_bytes+1 bytes; the valid data
+ * starts at offset 1, since offset 0 is overwritten with the register
+ * address.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes)
+{
+ int ret;
+ int sid;
+ struct twl4030_client *twl;
+ struct i2c_msg *msg;
+
+ if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ pr_err("Invalid module Number\n");
+ return -EPERM;
+ }
+ sid = twl4030_map[mod_no].sid;
+ twl = &twl4030_modules[sid];
+
+ if (unlikely(twl->inuse != TWL_CLIENT_USED)) {
+ pr_err("I2C Client[%d] is not initialized[%d]\n",
+ sid, __LINE__);
+ return -EPERM;
+ }
+ mutex_lock(&twl->xfer_lock);
+ /*
+ * [MSG1]: fill the register address data
+ * fill the data Tx buffer
+ */
+ msg = &twl->xfer_msg[0];
+ msg->addr = twl->address;
+ msg->len = num_bytes + 1;
+ msg->flags = 0;
+ msg->buf = value;
+ /* over write the first byte of buffer with the register address */
+ *value = twl4030_map[mod_no].base + reg;
+ ret = i2c_transfer(twl->client.adapter, twl->xfer_msg, 1);
+ mutex_unlock(&twl->xfer_lock);
+
+ /* i2c_transfer() returns the number of messages on success; map that to 0 */
+ if (ret >= 0)
+ ret = 0;
+ return ret;
+}
+EXPORT_SYMBOL(twl4030_i2c_write);
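+
+/*
+ * Illustrative sketch of the buffer convention described above (the
+ * buffer name and 'reg' are hypothetical): to write two data bytes,
+ * allocate three bytes and leave buf[0] free for the register address
+ * that twl4030_i2c_write() fills in:
+ *
+ *	u8 buf[3] = { 0, first_byte, second_byte };
+ *	twl4030_i2c_write(TWL4030_MODULE_GPIO, buf, reg, 2);
+ */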
+
+/**
+ * twl4030_i2c_read - Read num_bytes of data from a TWL4030 register
+ * @mod_no: module number
+ * @value: buffer of at least num_bytes to receive the data read
+ * @reg: register address (just the offset will do)
+ * @num_bytes: number of bytes to transfer
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes)
+{
+ int ret;
+ u8 val;
+ int sid;
+ struct twl4030_client *twl;
+ struct i2c_msg *msg;
+
+ if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ pr_err("Invalid module Number\n");
+ return -EPERM;
+ }
+ sid = twl4030_map[mod_no].sid;
+ twl = &twl4030_modules[sid];
+
+ if (unlikely(twl->inuse != TWL_CLIENT_USED)) {
+ pr_err("I2C Client[%d] is not initialized[%d]\n", sid,
+ __LINE__);
+ return -EPERM;
+ }
+ mutex_lock(&twl->xfer_lock);
+ /* [MSG1] fill the register address data */
+ msg = &twl->xfer_msg[0];
+ msg->addr = twl->address;
+ msg->len = 1;
+ msg->flags = 0; /* Read the register value */
+ val = twl4030_map[mod_no].base + reg;
+ msg->buf = &val;
+ /* [MSG2] fill the data rx buffer */
+ msg = &twl->xfer_msg[1];
+ msg->addr = twl->address;
+ msg->flags = I2C_M_RD; /* Read the register value */
+ msg->len = num_bytes; /* only n bytes */
+ msg->buf = value;
+ ret = i2c_transfer(twl->client.adapter, twl->xfer_msg, 2);
+ mutex_unlock(&twl->xfer_lock);
+
+ /* i2c_transfer() returns the number of messages on success; map that to 0 */
+ if (ret >= 0)
+ ret = 0;
+ return ret;
+}
+EXPORT_SYMBOL(twl4030_i2c_read);
+
+/**
+ * twl4030_i2c_write_u8 - Write an 8-bit register in the TWL4030
+ * @mod_no: module number
+ * @value: the 8-bit value to be written
+ * @reg: register address (just the offset will do)
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
+{
+
+ /* two bytes: offset 0 is reserved for the register address that
+  * twl4030_i2c_write() fills in, offset 1 carries the data */
+ u8 temp_buffer[2] = { 0 };
+ temp_buffer[1] = value;
+ return twl4030_i2c_write(mod_no, temp_buffer, reg, 1);
+}
+EXPORT_SYMBOL(twl4030_i2c_write_u8);
+
+/**
+ * twl4030_i2c_read_u8 - Read an 8-bit register from the TWL4030
+ * @mod_no: module number
+ * @value: pointer to receive the 8-bit value read
+ * @reg: register address (just the offset will do)
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
+{
+ return twl4030_i2c_read(mod_no, value, reg, 1);
+}
+EXPORT_SYMBOL(twl4030_i2c_read_u8);
+
+/* Helper Functions */
+
+/*
+ * do_twl4030_module_irq() is the desc->handle method for each of the twl4030
+ * module interrupts. It executes in kernel thread context.
+ * On entry, cpu interrupts are disabled.
+ */
+static void do_twl4030_module_irq(unsigned int irq, irq_desc_t *desc)
+{
+ struct irqaction *action;
+ const unsigned int cpu = smp_processor_id();
+
+ /*
+ * Earlier this was desc->triggered = 1;
+ */
+ desc->status |= IRQ_LEVEL;
+
+ /*
+ * The desc->handle method would normally call the desc->chip->ack
+ * method here, but we won't bother since our ack method is NULL.
+ */
+
+ if (!desc->depth) {
+ kstat_cpu(cpu).irqs[irq]++;
+
+ action = desc->action;
+ if (action) {
+ int ret;
+ int status = 0;
+ int retval = 0;
+
+ local_irq_enable();
+
+ do {
+ /* Call the ISR with cpu interrupts enabled */
+ ret = action->handler(irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ retval |= ret;
+ action = action->next;
+ } while (action);
+
+ if (status & IRQF_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+
+ local_irq_disable();
+
+ if (retval != IRQ_HANDLED)
+ printk(KERN_ERR "ISR for TWL4030 module"
+ " irq %d can't handle interrupt\n",
+ irq);
+
+ /*
+ * Here is where we should call the unmask method, but
+ * again we won't bother since it is NULL.
+ */
+ } else
+ printk(KERN_CRIT "TWL4030 module irq %d has no ISR"
+ " but can't be masked!\n", irq);
+ } else
+ printk(KERN_CRIT "TWL4030 module irq %d is disabled but can't"
+ " be masked!\n", irq);
+}
+
+/*
+ * twl4030_irq_thread() runs as a kernel thread. It queries the twl4030
+ * interrupt controller to see which modules are generating interrupt requests
+ * and then calls the desc->handle method for each module requesting service.
+ */
+static int twl4030_irq_thread(void *data)
+{
+ int irq = (int)data;
+ irq_desc_t *desc = irq_desc + irq;
+ static unsigned i2c_errors;
+ static const unsigned max_i2c_errors = 100;
+
+ daemonize("twl4030-irq");
+ current->flags |= PF_NOFREEZE;
+
+ while (!kthread_should_stop()) {
+ int ret;
+ int module_irq;
+ u8 pih_isr;
+
+ wait_for_completion_interruptible(&irq_event);
+
+ ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
+ if (ret) {
+ printk(KERN_WARNING "I2C error %d while reading TWL4030"
+ " PIH ISR register.\n", ret);
+ if (++i2c_errors >= max_i2c_errors) {
+ printk(KERN_ERR "Maximum I2C error count"
+ " exceeded. Terminating %s.\n",
+ __func__);
+ break;
+ }
+ continue;
+ }
+
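+ /*
+ * Each set bit in pih_isr corresponds to one TWL4030 module;
+ * bit N maps to Linux irq TWL4030_IRQ_BASE + N, whose handler
+ * is invoked below.
+ */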
+ for (module_irq = TWL4030_IRQ_BASE; 0 != pih_isr;
+ pih_isr >>= 1, module_irq++) {
+ if (pih_isr & 0x1) {
+ irq_desc_t *d = irq_desc + module_irq;
+
+ local_irq_disable();
+
+ d->handle_irq(module_irq, d);
+
+ local_irq_enable();
+ }
+ }
+
+ desc->chip->unmask(irq);
+ }
+
+ return 0;
+}
+
+/*
+ * do_twl4030_irq() is the desc->handle method for the twl4030 interrupt.
+ * This is a chained interrupt, so there is no desc->action method for it.
+ * Now we need to query the interrupt controller in the twl4030 to determine
+ * which module is generating the interrupt request. However, we can't do i2c
+ * transactions in interrupt context, so we must defer that work to a kernel
+ * thread. All we do here is acknowledge and mask the interrupt and wake up
+ * the kernel thread.
+ */
+static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc)
+{
+ const unsigned int cpu = smp_processor_id();
+
+ /*
+ * Earlier this was desc->triggered = 1;
+ */
+ desc->status |= IRQ_LEVEL;
+
+ /*
+ * Acknowledge, clear _AND_ disable the interrupt.
+ */
+ desc->chip->ack(irq);
+
+ if (!desc->depth) {
+ kstat_cpu(cpu).irqs[irq]++;
+
+ complete(&irq_event);
+ }
+}
+
+/* attach a client to the adapter */
+static int __init twl4030_detect_client(struct i2c_adapter *adapter,
+ unsigned char sid)
+{
+ int err = 0;
+ struct twl4030_client *twl;
+
+ if (unlikely(sid >= TWL4030_NUM_SLAVES)) {
+ pr_err("sid[%d] >= TWL4030_NUM_SLAVES[%d]\n", sid, TWL4030_NUM_SLAVES);
+ return -EPERM;
+ }
+
+ /* Check basic functionality */
+ err = i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WORD_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE);
+ if (!err) {
+ pr_err("SlaveID=%d functionality check failed\n", sid);
+ return -ENODEV;
+ }
+ twl = &twl4030_modules[sid];
+ if (unlikely(twl->inuse)) {
+ pr_err("Client %s is already in use\n", twl->client_name);
+ return -EPERM;
+ }
+
+ memset(&twl->client, 0, sizeof(struct i2c_client));
+
+ twl->client.addr = twl->address;
+ twl->client.adapter = adapter;
+ twl->client.driver = &twl4030_driver;
+
+ memcpy(&twl->client.name, twl->client_name,
+ sizeof(TWL_CLIENT_STRING) + 1);
+
+ pr_info("TWL4030: TRY attach Slave %s on Adapter %s [%x]\n",
+ twl->client_name, adapter->name, err);
+
+ err = i2c_attach_client(&twl->client);
+ if (err) {
+ pr_err("Couldn't attach Slave %s on Adapter"
+ " %s [%x]\n", twl->client_name, adapter->name, err);
+ } else {
+ twl->inuse = TWL_CLIENT_USED;
+ mutex_init(&twl->xfer_lock);
+ }
+
+ return err;
+}
+
+/* adapter callback */
+static int __init twl4030_attach_adapter(struct i2c_adapter *adapter)
+{
+ int i;
+ int ret = 0;
+ static int twl_i2c_adapter = 1;
+
+ for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
+ /* Check if I need to hook on to this adapter or not */
+ if (twl4030_modules[i].adapter_index == twl_i2c_adapter) {
+ ret = twl4030_detect_client(adapter, i);
+ if (ret)
+ goto free_client;
+ }
+ }
+ twl_i2c_adapter++;
+
+ /*
+ * Check if the PIH module is initialized, if yes, then init
+ * the T2 Interrupt subsystem
+ */
+ if ((twl4030_modules[twl4030_map[TWL4030_MODULE_PIH].sid].inuse ==
+ TWL_CLIENT_USED) && (twl_irq_used != USED)) {
+ twl_init_irq();
+ twl_irq_used = USED;
+ }
+ return 0;
+
+free_client:
+ pr_err("TWL_CLIENT[Idx=%d] registration failed[0x%x]\n", i, ret);
+
+ /* ignore current slave..it never got registered */
+ i--;
+ while (i >= 0) {
+ /* now remove all those from the current adapter... */
+ if (twl4030_modules[i].adapter_index == twl_i2c_adapter)
+ (void)twl4030_detach_client(&twl4030_modules[i].client);
+ i--;
+ }
+ return ret;
+}
+
+/* adapter's callback */
+static int twl4030_detach_client(struct i2c_client *client)
+{
+ int err;
+ err = i2c_detach_client(client);
+ if (err) {
+ pr_err("Client detach failed\n");
+ return err;
+ }
+ return 0;
+}
+
+static struct task_struct * __init start_twl4030_irq_thread(int irq)
+{
+ struct task_struct *thread;
+
+ init_completion(&irq_event);
+ thread = kthread_run(twl4030_irq_thread, (void *)irq,
+ "twl4030 irq %d", irq);
+ if (IS_ERR(thread))
+ pr_err("%s: could not create twl4030 irq %d thread!\n",
+ __func__, irq);
+
+ return thread;
+}
+
+/*
+ * These three functions should be part of the voltage framework; they
+ * are added here to complete the functionality for now.
+ */
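+/*
+ * Writing KEY_UNLOCK1 followed by KEY_UNLOCK2 to R_PROTECT_KEY unlocks
+ * the protected PM master registers (such as R_CFG_BOOT used below);
+ * writing KEY_LOCK protects them again.
+ */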
+static int __init protect_pm_master(void)
+{
+ int e = 0;
+
+ e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK,
+ R_PROTECT_KEY);
+ return e;
+}
+
+static int __init unprotect_pm_master(void)
+{
+ int e = 0;
+
+ e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1,
+ R_PROTECT_KEY);
+ e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2,
+ R_PROTECT_KEY);
+ return e;
+}
+
+static int __init power_companion_init(void)
+{
+ struct clk *osc;
+ u32 rate;
+ u8 ctrl = HFCLK_FREQ_26_MHZ;
+ int e = 0;
+
+ if (cpu_is_omap2430())
+ osc = clk_get(NULL, "osc_ck");
+ else
+ osc = clk_get(NULL, "osc_sys_ck");
+ if (IS_ERR(osc)) {
+ printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+ "using bootloader value (unknown osc rate)\n");
+ return 0;
+ }
+
+ rate = clk_get_rate(osc);
+ clk_put(osc);
+
+ switch (rate) {
+ case 19200000 : ctrl = HFCLK_FREQ_19p2_MHZ; break;
+ case 26000000 : ctrl = HFCLK_FREQ_26_MHZ; break;
+ case 38400000 : ctrl = HFCLK_FREQ_38p4_MHZ; break;
+ }
+
+ ctrl |= HIGH_PERF_SQ;
+ e |= unprotect_pm_master();
+ /* side effect: this write also enables the MADC and USB clocks */
+ e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
+ e |= protect_pm_master();
+
+ return e;
+}
+
+/**
+ * twl4030_i2c_clear_isr - clear TWL4030 SIH ISR regs via read + write
+ * @mod_no: TWL4030 module number
+ * @reg: register index to clear
+ * @cor: value of the <module>_SIH_CTRL.COR bit (1 or 0)
+ *
+ * Either reads (cor == 1) or writes (cor == 0) to a TWL4030 interrupt
+ * status register to ensure that any prior interrupts are cleared.
+ * Returns the status of the I2C read or write operation.
+ */
+static int __init twl4030_i2c_clear_isr(u8 mod_no, u8 reg, u8 cor)
+{
+ u8 tmp;
+
+ return (cor) ? twl4030_i2c_read_u8(mod_no, &tmp, reg) :
+ twl4030_i2c_write_u8(mod_no, 0xff, reg);
+}
+
+/**
+ * twl4030_read_cor_bit - are TWL module ISRs cleared by reads or writes?
+ * @mod_no: TWL4030 module number
+ * @reg: SIH_CTRL register index to read
+ *
+ * Returns 1 if the TWL4030 SIH interrupt status registers (ISRs) for
+ * the specified TWL module are cleared by reads, or 0 if cleared by
+ * writes.
+ */
+static int twl4030_read_cor_bit(u8 mod_no, u8 reg)
+{
+ u8 tmp = 0;
+
+ WARN_ON(twl4030_i2c_read_u8(mod_no, &tmp, reg) < 0);
+
+ tmp &= TWL4030_SIH_CTRL_COR_MASK;
+ tmp >>= __ffs(TWL4030_SIH_CTRL_COR_MASK);
+
+ return tmp;
+}
+
+/**
+ * twl4030_mask_clear_intrs - mask and clear all TWL4030 interrupts
+ * @t: pointer to twl4030_mod_iregs array
+ * @t_sz: number of elements in @t (i.e. ARRAY_SIZE(t))
+ *
+ * Mask all TWL4030 interrupt mask registers (IMRs) and clear all
+ * interrupt status registers (ISRs). No return value, but will WARN if
+ * any I2C operations fail.
+ */
+static void __init twl4030_mask_clear_intrs(const struct twl4030_mod_iregs *t,
+ const u8 t_sz)
+{
+ int i, j;
+
+ /*
+ * N.B. - further efficiency is possible here. Eight I2C
+ * operations on BCI and GPIO modules are avoidable if I2C
+ * burst read/write transactions were implemented. Would
+ * probably save about 1ms of boot time and a small amount of
+ * power.
+ */
+ for (i = 0; i < t_sz; i++) {
+ const struct twl4030_mod_iregs tmr = t[i];
+ int cor;
+
+ /* Are ISRs cleared by reads or writes? */
+ cor = twl4030_read_cor_bit(tmr.mod_no, tmr.sih_ctrl);
+ WARN_ON(cor < 0);
+
+ for (j = 0; j < tmr.reg_cnt; j++) {
+
+ /* Mask interrupts at the TWL4030 */
+ WARN_ON(twl4030_i2c_write_u8(tmr.mod_no, 0xff,
+ tmr.imrs[j]) < 0);
+
+ /* Clear TWL4030 ISRs */
+ WARN_ON(twl4030_i2c_clear_isr(tmr.mod_no,
+ tmr.isrs[j], cor) < 0);
+ }
+ }
+
+ return;
+}
+
+
+static void twl_init_irq(void)
+{
+ int i;
+ int res = 0;
+ char *msg = "Unable to register interrupt subsystem";
+ unsigned int irq_num;
+
+ /*
+ * Mask and clear all TWL4030 interrupts since initially we do
+ * not have any TWL4030 module interrupt handlers present
+ */
+ twl4030_mask_clear_intrs(twl4030_mod_regs,
+ ARRAY_SIZE(twl4030_mod_regs));
+
+ /* install an irq handler for each of the PIH modules */
+ for (i = TWL4030_IRQ_BASE; i < TWL4030_IRQ_END; i++) {
+ set_irq_chip(i, &twl4030_irq_chip);
+ set_irq_handler(i, do_twl4030_module_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ irq_num = (cpu_is_omap2430()) ? INT_24XX_SYS_NIRQ : INT_34XX_SYS_NIRQ;
+
+ /* install an irq handler to demultiplex the TWL4030 interrupt */
+ set_irq_data(irq_num, start_twl4030_irq_thread(irq_num));
+ set_irq_type(irq_num, IRQ_TYPE_EDGE_FALLING);
+ set_irq_chained_handler(irq_num, do_twl4030_irq);
+
+ res = power_companion_init();
+ if (res < 0)
+ pr_err("%s[%d][%d]\n", msg, res, __LINE__);
+}
+
+static int __init twl4030_init(void)
+{
+ return i2c_add_driver(&twl4030_driver);
+}
+
+static void __exit twl4030_exit(void)
+{
+ i2c_del_driver(&twl4030_driver);
+ twl_irq_used = FREE;
+}
+
+subsys_initcall(twl4030_init);
+module_exit(twl4030_exit);
+
+MODULE_ALIAS("i2c:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("I2C Core interface for TWL4030");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * TSC2301 keypad driver
+ *
+ * Copyright (C) 2005-2006 Nokia Corporation
+ *
+ * Written by Jarkko Oikarinen
+ * Rewritten by Juha Yrjola <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+
+#include <linux/spi/tsc2301.h>
+
+#define TSC2301_KEYBOARD_PRODUCT_ID 0x0051
+#define TSC2301_KEYBOARD_PRODUCT_VERSION 0x0001
+#define TSC2301_DEBOUNCE_TIME_2MS 0x0000
+#define TSC2301_DEBOUNCE_TIME_10MS 0x0800
+#define TSC2301_DEBOUNCE_TIME_20MS 0x1000
+#define TSC2301_DEBOUNCE_TIME_50MS 0x1800
+#define TSC2301_DEBOUNCE_TIME_60MS 0x2000
+#define TSC2301_DEBOUNCE_TIME_80MS 0x2800
+#define TSC2301_DEBOUNCE_TIME_100MS 0x3000
+#define TSC2301_DEBOUNCE_TIME_120MS 0x3800
+
+#define TSC2301_DEBOUNCE_TIME TSC2301_DEBOUNCE_TIME_20MS
+
+#define TSC2301_RELEASE_TIMEOUT 50
+
+struct tsc2301_kp {
+ struct input_dev *idev;
+ char phys[32];
+ spinlock_t lock;
+ struct mutex mutex;
+ struct timer_list timer;
+ u16 keys_pressed;
+ unsigned pending:1;
+ unsigned user_disabled:1;
+ unsigned disable_depth;
+
+ struct spi_transfer read_xfer[4];
+ struct spi_message read_msg;
+
+ u16 data;
+ u16 mask;
+
+ int irq;
+ s16 keymap[16];
+};
+
+static inline int tsc2301_kp_disabled(struct tsc2301 *tsc)
+{
+ return tsc->kp->disable_depth != 0;
+}
+
+static void tsc2301_kp_send_key_events(struct tsc2301 *tsc,
+ u16 prev_state,
+ u16 new_state)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+ u16 common, released, pressed;
+ int i;
+
+ common = prev_state & new_state;
+ released = common ^ prev_state;
+ pressed = common ^ new_state;
+ if (!released && !pressed)
+ return;
+ for (i = 0; i < 16 && (released || pressed); i++) {
+ if (released & 1) {
+ dev_dbg(&tsc->spi->dev, "key %d released\n", i);
+ input_report_key(kp->idev, kp->keymap[i], 0);
+ }
+ released >>= 1;
+ if (pressed & 1) {
+ dev_dbg(&tsc->spi->dev, "key %d pressed\n", i);
+ input_report_key(kp->idev, kp->keymap[i], 1);
+ }
+ pressed >>= 1;
+ }
+ input_sync(kp->idev);
+}
+
+static inline void _filter_out(struct tsc2301 *tsc, u16 prev_state,
+ u16 *new_state, int row1, int row2, u8 rect_pat)
+{
+ u16 mask;
+
+ mask = (rect_pat << (row1 * 4)) | (rect_pat << (row2 * 4));
+ mask &= ~prev_state;
+ *new_state &= ~mask;
+ dev_dbg(&tsc->spi->dev, "filtering ghost keys %02x\n", mask);
+}
+
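+/*
+ * Classic matrix-keypad ghosting: when keys at three corners of a
+ * rectangle in the scan matrix are pressed, the fourth corner reads as
+ * pressed too. The patterns in rect_pat below list the two-key column
+ * combinations that can form such a rectangle across two rows; any
+ * newly reported key that would complete one is filtered out.
+ */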
+static void tsc2301_filter_ghost_keys(struct tsc2301 *tsc, u16 prev_state,
+ u16 *new_state)
+{
+ int row1, row2;
+ u16 key_map;
+ u16 row1_map;
+ static const u8 rect_pat[] = {
+ 0x3, 0x5, 0x9, 0x6, 0xa, 0xc, 0,
+ };
+
+ key_map = *new_state;
+ for (row1 = 0; row1 < 4; row1++) {
+ row1_map = (key_map >> (row1 * 4)) & 0xf;
+ if (!row1_map)
+ continue;
+ for (row2 = row1 + 1; row2 < 4; row2++) {
+ u16 rect_map = (key_map >> (row2 * 4)) & 0xf;
+ const u8 *rp;
+
+ rect_map &= row1_map;
+ if (!rect_map)
+ continue;
+ for (rp = rect_pat; *rp; rp++)
+ if ((rect_map & *rp) == *rp)
+ _filter_out(tsc, prev_state, new_state,
+ row1, row2, *rp);
+ }
+ }
+}
+
+static void tsc2301_kp_timer(unsigned long arg)
+{
+ struct tsc2301 *tsc = (void *) arg;
+ struct tsc2301_kp *kp = tsc->kp;
+ unsigned long flags;
+
+ tsc2301_kp_send_key_events(tsc, kp->keys_pressed, 0);
+ spin_lock_irqsave(&kp->lock, flags);
+ kp->keys_pressed = 0;
+ spin_unlock_irqrestore(&kp->lock, flags);
+}
+
+static void tsc2301_kp_rx(void *arg)
+{
+ struct tsc2301 *tsc = arg;
+ struct tsc2301_kp *kp = tsc->kp;
+ unsigned long flags;
+ u16 kp_data;
+
+ kp_data = kp->data;
+ dev_dbg(&tsc->spi->dev, "KP data %04x\n", kp_data);
+
+ tsc2301_filter_ghost_keys(tsc, kp->keys_pressed, &kp_data);
+ tsc2301_kp_send_key_events(tsc, kp->keys_pressed, kp_data);
+ spin_lock_irqsave(&kp->lock, flags);
+ kp->keys_pressed = kp_data;
+ kp->pending = 0;
+ spin_unlock_irqrestore(&kp->lock, flags);
+}
+
+static irqreturn_t tsc2301_kp_irq_handler(int irq, void *dev_id)
+{
+ struct tsc2301 *tsc = dev_id;
+ struct tsc2301_kp *kp = tsc->kp;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&kp->lock, flags);
+ if (tsc2301_kp_disabled(tsc)) {
+ spin_unlock_irqrestore(&kp->lock, flags);
+ return IRQ_HANDLED;
+ }
+ kp->pending = 1;
+ spin_unlock_irqrestore(&kp->lock, flags);
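+ /*
+ * Safety net: if no further keypad data is read within
+ * TSC2301_RELEASE_TIMEOUT ms, tsc2301_kp_timer() reports all keys
+ * as released.
+ */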
+ mod_timer(&kp->timer,
+ jiffies + msecs_to_jiffies(TSC2301_RELEASE_TIMEOUT));
+ r = spi_async(tsc->spi, &tsc->kp->read_msg);
+ if (r)
+ dev_err(&tsc->spi->dev, "kp: spi_async() failed\n");
+ return IRQ_HANDLED;
+}
+
+static void tsc2301_kp_start_scan(struct tsc2301 *tsc)
+{
+ tsc2301_write_reg(tsc, TSC2301_REG_KPMASK, tsc->kp->mask);
+ tsc2301_write_reg(tsc, TSC2301_REG_KEY, TSC2301_DEBOUNCE_TIME);
+}
+
+static void tsc2301_kp_stop_scan(struct tsc2301 *tsc)
+{
+ tsc2301_write_reg(tsc, TSC2301_REG_KEY, 1 << 14);
+}
+
+/* Must be called with the mutex held */
+static void tsc2301_kp_enable(struct tsc2301 *tsc)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kp->lock, flags);
+ BUG_ON(!tsc2301_kp_disabled(tsc));
+ if (--kp->disable_depth != 0) {
+ spin_unlock_irqrestore(&kp->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ set_irq_type(kp->irq, IRQ_TYPE_EDGE_FALLING);
+ tsc2301_kp_start_scan(tsc);
+ enable_irq(kp->irq);
+}
+
+/* Must be called with the mutex held */
+static int tsc2301_kp_disable(struct tsc2301 *tsc, int release_keys)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kp->lock, flags);
+ if (kp->disable_depth++ != 0) {
+ spin_unlock_irqrestore(&kp->lock, flags);
+ goto out;
+ }
+ disable_irq_nosync(kp->irq);
+ set_irq_type(kp->irq, IRQ_TYPE_NONE);
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ while (kp->pending) {
+ msleep(1);
+ }
+
+ tsc2301_kp_stop_scan(tsc);
+out:
+ if (!release_keys)
+ del_timer(&kp->timer); /* else the pending timeout releases the keys */
+
+ return 0;
+}
+
+/* The following workaround is needed for a HW bug triggered by the
+ * following:
+ * 1. keep any key pressed
+ * 2. disable keypad
+ * 3. release all keys
+ * 4. reenable keypad
+ * 5. disable touch screen controller
+ *
+ * After this the keypad scanner will get stuck in busy state and won't
+ * report any interrupts for further keypresses. One way to recover is to
+ * restart the keypad scanner whenever we enable / disable the
+ * touchscreen controller.
+ */
+void tsc2301_kp_restart(struct tsc2301 *tsc)
+{
+ if (!tsc2301_kp_disabled(tsc)) {
+ tsc2301_kp_start_scan(tsc);
+ }
+}
+
+static ssize_t tsc2301_kp_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tsc2301 *tsc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", tsc2301_kp_disabled(tsc) ? 1 : 0);
+}
+
+static ssize_t tsc2301_kp_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsc2301 *tsc = dev_get_drvdata(dev);
+ struct tsc2301_kp *kp = tsc->kp;
+ char *endp;
+ int i;
+
+ i = simple_strtoul(buf, &endp, 10);
+ i = i ? 1 : 0;
+
+ mutex_lock(&kp->mutex);
+ if (i == kp->user_disabled) {
+ mutex_unlock(&kp->mutex);
+ return count;
+ }
+ kp->user_disabled = i;
+
+ if (i)
+ tsc2301_kp_disable(tsc, 1);
+ else
+ tsc2301_kp_enable(tsc);
+ mutex_unlock(&kp->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(disable_kp, 0664, tsc2301_kp_disable_show,
+ tsc2301_kp_disable_store);
+
+static const u16 tsc2301_kp_read_data = 0x8000 | TSC2301_REG_KPDATA;
+
+static void tsc2301_kp_setup_spi_xfer(struct tsc2301 *tsc)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+ struct spi_message *m = &kp->read_msg;
+ struct spi_transfer *x = &kp->read_xfer[0];
+
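+ /*
+ * The keypad read message is two 16-bit transfers: the command word
+ * requesting a read of TSC2301_REG_KPDATA, followed by the data
+ * clocked back into kp->data.
+ */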
+ spi_message_init(&kp->read_msg);
+
+ x->tx_buf = &tsc2301_kp_read_data;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+ x++;
+
+ x->rx_buf = &kp->data;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+
+ m->complete = tsc2301_kp_rx;
+ m->context = tsc;
+}
+
+#ifdef CONFIG_PM
+int tsc2301_kp_suspend(struct tsc2301 *tsc)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+
+ mutex_lock(&kp->mutex);
+ tsc2301_kp_disable(tsc, 1);
+ mutex_unlock(&kp->mutex);
+ return 0;
+}
+
+void tsc2301_kp_resume(struct tsc2301 *tsc)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+
+ mutex_lock(&kp->mutex);
+ tsc2301_kp_enable(tsc);
+ mutex_unlock(&kp->mutex);
+}
+#endif
+
+int __devinit tsc2301_kp_init(struct tsc2301 *tsc,
+ struct tsc2301_platform_data *pdata)
+{
+ struct input_dev *idev;
+ struct tsc2301_kp *kp;
+ int r, i;
+ u16 mask;
+
+ if (pdata->keyb_int < 0) {
+ dev_err(&tsc->spi->dev, "need kbirq\n");
+ return -EINVAL;
+ }
+
+ kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+ if (kp == NULL)
+ return -ENOMEM;
+ tsc->kp = kp;
+
+ kp->irq = pdata->keyb_int;
+ spin_lock_init(&kp->lock);
+ mutex_init(&kp->mutex);
+
+ init_timer(&kp->timer);
+ kp->timer.data = (unsigned long) tsc;
+ kp->timer.function = tsc2301_kp_timer;
+
+ idev = input_allocate_device();
+ if (idev == NULL) {
+ r = -ENOMEM;
+ goto err1;
+ }
+ if (pdata->keyb_name)
+ idev->name = pdata->keyb_name;
+ else
+ idev->name = "TSC2301 keypad";
+ snprintf(kp->phys, sizeof(kp->phys), "%s/input-kp", tsc->spi->dev.bus_id);
+ idev->phys = kp->phys;
+
+ mask = 0;
+ idev->evbit[0] = BIT(EV_KEY);
+ for (i = 0; i < 16; i++) {
+ if (pdata->keymap[i] > 0) {
+ set_bit(pdata->keymap[i], idev->keybit);
+ kp->keymap[i] = pdata->keymap[i];
+ } else {
+ kp->keymap[i] = -1;
+ mask |= 1 << i;
+ }
+ }
+
+ if (pdata->kp_rep)
+ set_bit(EV_REP, idev->evbit);
+
+ kp->idev = idev;
+
+ tsc2301_kp_setup_spi_xfer(tsc);
+
+ r = device_create_file(&tsc->spi->dev, &dev_attr_disable_kp);
+ if (r < 0)
+ goto err2;
+
+ tsc2301_kp_start_scan(tsc);
+
+ /* IRQ mode 0 is faulty, it can cause the KBIRQ to get stuck.
+ * Mode 2 deasserts the IRQ at:
+ * - HW or SW reset
+ * - Setting SCS flag in REG_KEY register
+ * - Releasing all keys
+ * - Reading the REG_KPDATA
+ */
+ tsc2301_write_kbc(tsc, 2);
+
+ tsc2301_write_reg(tsc, TSC2301_REG_KPMASK, mask);
+ kp->mask = mask;
+
+ set_irq_type(kp->irq, IRQ_TYPE_EDGE_FALLING);
+
+ r = request_irq(kp->irq, tsc2301_kp_irq_handler, IRQF_SAMPLE_RANDOM,
+ "tsc2301-kp", tsc);
+ if (r < 0) {
+ dev_err(&tsc->spi->dev, "unable to get kbirq IRQ\n");
+ goto err3;
+ }
+ set_irq_wake(kp->irq, 1);
+
+ /* Read KPDATA once so that any pending keypad interrupt is deasserted */
+ tsc2301_read_reg(tsc, TSC2301_REG_KPDATA);
+
+ r = input_register_device(idev);
+ if (r < 0) {
+ dev_err(&tsc->spi->dev, "can't register keypad device\n");
+ goto err4;
+ }
+
+ return 0;
+
+err4:
+ free_irq(kp->irq, tsc);
+err3:
+ tsc2301_kp_stop_scan(tsc);
+ device_remove_file(&tsc->spi->dev, &dev_attr_disable_kp);
+err2:
+ input_free_device(kp->idev);
+err1:
+ kfree(kp);
+ return r;
+}
+
+void __devexit tsc2301_kp_exit(struct tsc2301 *tsc)
+{
+ struct tsc2301_kp *kp = tsc->kp;
+
+ tsc2301_kp_disable(tsc, 1);
+ input_unregister_device(kp->idev);
+ free_irq(kp->irq, tsc);
+ device_remove_file(&tsc->spi->dev, &dev_attr_disable_kp);
+
+ kfree(kp);
+}
To compile this driver as a module, choose M here: the
module will be called touchwin.
+ config TOUCHSCREEN_ATMEL_TSADCC
+ tristate "Atmel Touchscreen Interface"
+ depends on ARCH_AT91SAM9RL
+ help
+ Say Y here if you have a 4-wire touchscreen connected to the
+ ADC Controller on your Atmel SoC (such as the AT91SAM9RL).
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_tsadcc.
+
+config TOUCHSCREEN_TSC2005
+ tristate "TSC2005 touchscreen support"
+ help
+ Say Y here if you are using the touchscreen features of the TSC2005.
+
+config TOUCHSCREEN_TSC2102
+ tristate "TSC 2102 based touchscreens"
+ depends on SPI_MASTER
+ select SPI_TSC2102
+ help
+ Say Y here if you have a touchscreen interface using the
+ TI TSC 2102 controller, and your board-specific initialization
+ code includes that in its table of SPI devices. Also make
+ sure the proper SPI controller is selected.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc2102_ts.
+
+config TOUCHSCREEN_TSC210X
+ tristate "TI TSC210x based touchscreens"
+ depends on SPI_MASTER
+ select SPI_TSC210X
+ help
+ Say Y here if you have a touchscreen interface using a
+ TI TSC210x controller, and your board-specific initialisation
+ code includes that in its table of SPI devices.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc210x_ts.
+
+config TOUCHSCREEN_TSC2301
+ tristate "TSC2301 touchscreen support"
+ depends on SPI_TSC2301
+ help
+ Say Y here if you are using the touchscreen features of the TSC2301.
+
config TOUCHSCREEN_UCB1400
tristate "Philips UCB1400 touchscreen"
select AC97_BUS
--- /dev/null
+/*
+ * drivers/media/video/omap24xxcam.c
+ *
+ * OMAP 2 camera block driver.
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2004 Texas Instruments.
+ * Copyright (C) 2007 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * Based on code from Andy Lowe <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/pci.h> /* needed for videobufs */
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+
+#include "omap24xxcam.h"
+
+#define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0)
+
+#define RESET_TIMEOUT_NS 10000
+
+static void omap24xxcam_reset(struct omap24xxcam_device *cam);
+static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam);
+static void omap24xxcam_device_unregister(struct v4l2_int_device *s);
+static int omap24xxcam_remove(struct platform_device *pdev);
+
+/* module parameters */
+static int video_nr = -1; /* video device minor (-1 ==> auto assign) */
+/*
+ * Maximum amount of memory to use for capture buffers.
+ * Default is 4800 KiB, enough to double-buffer a 1280x960 frame at 16 bpp.
+ */
+static int capture_mem = 1280 * 960 * 2 * 2;
+
+static struct v4l2_int_device omap24xxcam;
+
+/*
+ *
+ * Clocks.
+ *
+ */
+
+static void omap24xxcam_clock_put(struct omap24xxcam_device *cam)
+{
+ if (cam->ick != NULL && !IS_ERR(cam->ick))
+ clk_put(cam->ick);
+ if (cam->fck != NULL && !IS_ERR(cam->fck))
+ clk_put(cam->fck);
+
+ cam->ick = cam->fck = NULL;
+}
+
+static int omap24xxcam_clock_get(struct omap24xxcam_device *cam)
+{
+ int rval = 0;
+
+ cam->fck = clk_get(cam->dev, "cam_fck");
+ if (IS_ERR(cam->fck)) {
+ dev_err(cam->dev, "can't get cam_fck");
+ rval = PTR_ERR(cam->fck);
+ omap24xxcam_clock_put(cam);
+ return rval;
+ }
+
+ cam->ick = clk_get(cam->dev, "cam_ick");
+ if (IS_ERR(cam->ick)) {
+ dev_err(cam->dev, "can't get cam_ick");
+ rval = PTR_ERR(cam->ick);
+ omap24xxcam_clock_put(cam);
+ }
+
+ return rval;
+}
+
+static void omap24xxcam_clock_on(struct omap24xxcam_device *cam)
+{
+ clk_enable(cam->fck);
+ clk_enable(cam->ick);
+}
+
+static void omap24xxcam_clock_off(struct omap24xxcam_device *cam)
+{
+ clk_disable(cam->fck);
+ clk_disable(cam->ick);
+}
+
+/*
+ *
+ * Camera core
+ *
+ */
+
+/*
+ * Set xclk.
+ *
+ * To disable xclk, use value zero.
+ */
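+/*
+ * A rough example (assuming CAM_MCLK is 96 MHz): requesting a 12 MHz
+ * xclk programs a divisor of 8, while requesting the full CAM_MCLK
+ * rate selects the divider bypass.
+ */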
+static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam,
+ u32 xclk)
+{
+ if (xclk) {
+ u32 divisor = CAM_MCLK / xclk;
+
+ if (divisor == 1)
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+ CC_CTRL_XCLK,
+ CC_CTRL_XCLK_DIV_BYPASS);
+ else
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+ CC_CTRL_XCLK, divisor);
+ } else
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+ CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW);
+}
+
+static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam)
+{
+ /*
+ * Setting the camera core AUTOIDLE bit causes problems with frame
+ * synchronization, so we will clear the AUTOIDLE bit instead.
+ */
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG,
+ CC_SYSCONFIG_AUTOIDLE);
+
+ /* program the camera interface DMA packet size */
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA,
+ CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1));
+
+ /* enable camera core error interrupts */
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE,
+ CC_IRQENABLE_FW_ERR_IRQ
+ | CC_IRQENABLE_FSC_ERR_IRQ
+ | CC_IRQENABLE_SSC_ERR_IRQ
+ | CC_IRQENABLE_FIFO_OF_IRQ);
+}
+
+/*
+ * Enable the camera core.
+ *
+ * Data transfer to the camera DMA starts at the beginning of the next frame.
+ */
+static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam)
+{
+
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
+ cam->cc_ctrl);
+}
+
+/*
+ * Disable camera core.
+ *
+ * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The
+ * core internal state machines will be reset. Use
+ * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current
+ * frame completely.
+ */
+static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam)
+{
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
+ CC_CTRL_CC_RST);
+}
+
+/* Interrupt service routine for camera core interrupts. */
+static void omap24xxcam_core_isr(struct omap24xxcam_device *cam)
+{
+ u32 cc_irqstatus;
+ const u32 cc_irqstatus_err =
+ CC_IRQSTATUS_FW_ERR_IRQ
+ | CC_IRQSTATUS_FSC_ERR_IRQ
+ | CC_IRQSTATUS_SSC_ERR_IRQ
+ | CC_IRQSTATUS_FIFO_UF_IRQ
+ | CC_IRQSTATUS_FIFO_OF_IRQ;
+
+ cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET,
+ CC_IRQSTATUS);
+ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS,
+ cc_irqstatus);
+
+ if (cc_irqstatus & cc_irqstatus_err
+ && !atomic_read(&cam->in_reset)) {
+ dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n",
+ cc_irqstatus);
+ omap24xxcam_reset(cam);
+ }
+}
+
+/*
+ *
+ * videobuf_buffer handling.
+ *
+ * Memory for mmapped videobuf_buffers is not allocated
+ * conventionally, but with several alloc_pages() calls, building the
+ * scatterlist on our own. User-space buffers are handled
+ * normally.
+ *
+ */
+
+/*
+ * Free the memory-mapped buffer memory allocated for a
+ * videobuf_buffer and the associated scatterlist.
+ */
+static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
+{
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+ size_t alloc_size;
+ struct page *page;
+ int i;
+
+ if (dma->sglist == NULL)
+ return;
+
+ i = dma->sglen;
+ while (i) {
+ i--;
+ alloc_size = sg_dma_len(&dma->sglist[i]);
+ page = sg_page(&dma->sglist[i]);
+ do {
+ ClearPageReserved(page++);
+ } while (alloc_size -= PAGE_SIZE);
+ __free_pages(sg_page(&dma->sglist[i]),
+ get_order(sg_dma_len(&dma->sglist[i])));
+ }
+
+ kfree(dma->sglist);
+ dma->sglist = NULL;
+}
+
+/* Release all memory related to the videobuf_queue. */
+static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
+{
+ int i;
+
+ mutex_lock(&vbq->vb_lock);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == vbq->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
+ continue;
+ vbq->ops->buf_release(vbq, vbq->bufs[i]);
+ omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
+ kfree(vbq->bufs[i]);
+ vbq->bufs[i] = NULL;
+ }
+
+ mutex_unlock(&vbq->vb_lock);
+
+ videobuf_mmap_free(vbq);
+}
+
+/*
+ * Allocate a buffer for a video frame that is as physically contiguous
+ * as possible, and allocate and build a DMA scatter-gather list for it.
+ */
+static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
+{
+ unsigned int order;
+ size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
+ struct page *page;
+ int max_pages, err = 0, i = 0;
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ /*
+ * allocate maximum size scatter-gather list. Note this is
+ * overhead. We may not use as many entries as we allocate
+ */
+ max_pages = vb->bsize >> PAGE_SHIFT;
+ dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
+ if (dma->sglist == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ while (size) {
+ order = get_order(size);
+ /*
+ * do not over-allocate, even if we would get a larger
+ * contiguous chunk that way
+ */
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ /* try to allocate as many contiguous pages as possible */
+ page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ /* if allocation fails, try to allocate smaller amount */
+ while (page == NULL) {
+ order--;
+ page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ if (page == NULL && !order) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ size -= (PAGE_SIZE << order);
+
+ /* append allocated chunk of pages into scatter-gather list */
+ sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
+ dma->sglen++;
+ i++;
+
+ alloc_size = (PAGE_SIZE << order);
+
+ /* clear pages before giving them to user space */
+ memset(page_address(page), 0, alloc_size);
+
+ /* mark allocated pages reserved */
+ do {
+ SetPageReserved(page++);
+ } while (alloc_size -= PAGE_SIZE);
+ }
+ /*
+ * REVISIT: not fully correct to assign nr_pages == sglen but
+ * video-buf is passing nr_pages for e.g. unmap_sg calls
+ */
+ dma->nr_pages = dma->sglen;
+ dma->direction = PCI_DMA_FROMDEVICE;
+
+ return 0;
+
+out:
+ omap24xxcam_vbq_free_mmap_buffer(vb);
+ return err;
+}
+
+static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
+ unsigned int count)
+{
+ int i, err = 0;
+ struct omap24xxcam_fh *fh =
+ container_of(vbq, struct omap24xxcam_fh, vbq);
+
+ mutex_lock(&vbq->vb_lock);
+
+ for (i = 0; i < count; i++) {
+ err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
+ if (err)
+ goto out;
+ dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
+ videobuf_to_dma(vbq->bufs[i])->sglen, i);
+ }
+
+ mutex_unlock(&vbq->vb_lock);
+
+ return 0;
+out:
+ while (i) {
+ i--;
+ omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
+ }
+
+ mutex_unlock(&vbq->vb_lock);
+
+ return err;
+}
+
+/*
+ * This routine is called from interrupt context when a scatter-gather DMA
+ * transfer of a videobuf_buffer completes.
+ */
+static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
+ u32 csr, void *arg)
+{
+ struct omap24xxcam_device *cam =
+ container_of(sgdma, struct omap24xxcam_device, sgdma);
+ struct omap24xxcam_fh *fh = cam->streaming->private_data;
+ struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
+ const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
+ | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
+ | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+ if (--cam->sgdma_in_queue == 0)
+ omap24xxcam_core_disable(cam);
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+
+ do_gettimeofday(&vb->ts);
+ vb->field_count = atomic_add_return(2, &fh->field_count);
+ if (csr & csr_error) {
+ vb->state = VIDEOBUF_ERROR;
+ if (!atomic_read(&fh->cam->in_reset)) {
+ dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
+ omap24xxcam_reset(cam);
+ }
+ } else
+ vb->state = VIDEOBUF_DONE;
+ wake_up(&vb->done);
+}
+
+static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
+ struct videobuf_buffer *vb)
+{
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ /* wait for buffer, especially to get out of the sgdma queue */
+ videobuf_waiton(vb, 0, 0);
+ if (vb->memory == V4L2_MEMORY_MMAP) {
+ dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
+ dma->direction);
+ dma->direction = DMA_NONE;
+ } else {
+ videobuf_dma_unmap(vbq, videobuf_to_dma(vb));
+ videobuf_dma_free(videobuf_to_dma(vb));
+ }
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/*
+ * Limit the number of available kernel image capture buffers based on the
+ * number requested, the currently selected image size, and the maximum
+ * amount of memory permitted for kernel capture buffers.
+ */
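+/*
+ * For example, with the default capture_mem of 4800 kB and a 1280x960
+ * frame at 16 bits per pixel (about 2.4 MB per frame), at most two
+ * buffers are allowed.
+ */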
+static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
+ unsigned int *size)
+{
+ struct omap24xxcam_fh *fh = vbq->priv_data;
+
+ if (*cnt <= 0)
+ *cnt = VIDEO_MAX_FRAME; /* supply a default number of buffers */
+
+ if (*cnt > VIDEO_MAX_FRAME)
+ *cnt = VIDEO_MAX_FRAME;
+
+ *size = fh->pix.sizeimage;
+
+ /* accessing fh->cam->capture_mem is ok, it's constant */
+ while (*size * *cnt > fh->cam->capture_mem)
+ (*cnt)--;
+
+ return 0;
+}
+
+static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
+ struct videobuf_dmabuf *dma)
+{
+ int err = 0;
+
+ dma->direction = PCI_DMA_FROMDEVICE;
+ if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
+ kfree(dma->sglist);
+ dma->sglist = NULL;
+ dma->sglen = 0;
+ err = -EIO;
+ }
+
+ return err;
+}
+
+static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct omap24xxcam_fh *fh = vbq->priv_data;
+ int err = 0;
+
+ /*
+ * Accessing pix here is okay since it's constant while
+ * streaming is on (and we only get called then).
+ */
+ if (vb->baddr) {
+ /* This is a userspace buffer. */
+ if (fh->pix.sizeimage > vb->bsize) {
+ /* The buffer isn't big enough. */
+ err = -EINVAL;
+ } else
+ vb->size = fh->pix.sizeimage;
+ } else {
+ if (vb->state != VIDEOBUF_NEEDS_INIT) {
+ /*
+ * We have a kernel bounce buffer that has
+ * already been allocated.
+ */
+ if (fh->pix.sizeimage > vb->size) {
+ /*
+ * The image size has been changed to
+ * a larger size since this buffer was
+ * allocated, so we need to free and
+ * reallocate it.
+ */
+ omap24xxcam_vbq_release(vbq, vb);
+ vb->size = fh->pix.sizeimage;
+ }
+ } else {
+ /* We need to allocate a new kernel bounce buffer. */
+ vb->size = fh->pix.sizeimage;
+ }
+ }
+
+ if (err)
+ return err;
+
+ vb->width = fh->pix.width;
+ vb->height = fh->pix.height;
+ vb->field = field;
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ if (vb->memory == V4L2_MEMORY_MMAP)
+ /*
+ * we have built the scatter-gather list by ourself so
+ * do the scatter-gather mapping as well
+ */
+ err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
+ else
+ err = videobuf_iolock(vbq, vb, NULL);
+ }
+
+ if (!err)
+ vb->state = VIDEOBUF_PREPARED;
+ else
+ omap24xxcam_vbq_release(vbq, vb);
+
+ return err;
+}
+
+static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
+ struct videobuf_buffer *vb)
+{
+ struct omap24xxcam_fh *fh = vbq->priv_data;
+ struct omap24xxcam_device *cam = fh->cam;
+ enum videobuf_state state = vb->state;
+ unsigned long flags;
+ int err;
+
+ /*
+ * FIXME: We're marking the buffer active since we have no
+ * pretty way of marking it active exactly when the
+ * scatter-gather transfer starts.
+ */
+ vb->state = VIDEOBUF_ACTIVE;
+
+ err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
+ videobuf_to_dma(vb)->sglist,
+ videobuf_to_dma(vb)->sglen, vb->size,
+ omap24xxcam_vbq_complete, vb);
+
+ if (!err) {
+ spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+ if (++cam->sgdma_in_queue == 1
+ && !atomic_read(&cam->in_reset))
+ omap24xxcam_core_enable(cam);
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+ } else {
+ /*
+ * Oops. We're not supposed to get any errors here.
+ * The only way we could get an error is if we ran out
+ * of scatter-gather DMA slots, but we are supposed to
+ * have at least as many scatter-gather DMA slots as
+ * video buffers so that can't happen.
+ */
+ dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
+ dev_err(cam->dev, "likely a bug in the driver!\n");
+ vb->state = state;
+ }
+}
+
+static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
+ .buf_setup = omap24xxcam_vbq_setup,
+ .buf_prepare = omap24xxcam_vbq_prepare,
+ .buf_queue = omap24xxcam_vbq_queue,
+ .buf_release = omap24xxcam_vbq_release,
+};
+
+/*
+ *
+ * OMAP main camera system
+ *
+ */
+
+/*
+ * Reset camera block to power-on state.
+ */
+static void omap24xxcam_poweron_reset(const struct omap24xxcam_device *cam)
+{
+ int max_loop = RESET_TIMEOUT_NS;
+
+ /* Reset whole camera subsystem */
+ omap24xxcam_reg_out(cam->mmio_base,
+ CAM_SYSCONFIG,
+ CAM_SYSCONFIG_SOFTRESET);
+
+ /* Wait until the reset has completed, up to roughly RESET_TIMEOUT_NS ns */
+ while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
+ & CAM_SYSSTATUS_RESETDONE)
+ && --max_loop) {
+ ndelay(1);
+ }
+
+ if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
+ & CAM_SYSSTATUS_RESETDONE))
+ dev_err(cam->dev, "camera soft reset timeout\n");
+}
+
+/*
+ * (Re)initialise the camera block.
+ */
+static void omap24xxcam_hwinit(const struct omap24xxcam_device *cam)
+{
+ omap24xxcam_poweron_reset(cam);
+
+ /* set the camera subsystem autoidle bit */
+ omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
+ CAM_SYSCONFIG_AUTOIDLE);
+
+ /* set the camera MMU autoidle bit */
+ omap24xxcam_reg_out(cam->mmio_base,
+ CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
+ CAMMMU_SYSCONFIG_AUTOIDLE);
+
+ omap24xxcam_core_hwinit(cam);
+
+ omap24xxcam_dma_hwinit(&cam->sgdma.dma);
+}
+
+/*
+ * Callback for dma transfer stalling.
+ */
+static void omap24xxcam_stalled_dma_reset(unsigned long data)
+{
+ struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;
+
+ if (!atomic_read(&cam->in_reset)) {
+ dev_dbg(cam->dev, "dma stalled, resetting camera\n");
+ omap24xxcam_reset(cam);
+ }
+}
+
+/*
+ * Stop capture. Mark we're doing a reset, stop DMA transfers and
+ * core. (No new scatter-gather transfers will be queued whilst
+ * in_reset is non-zero.)
+ *
+ * If omap24xxcam_capture_stop is called from several places at
+ * once, only the first call will have an effect. Similarly, only
+ * the last call to omap24xxcam_capture_cont will have an effect.
+ *
+ * Serialisation is ensured by using cam->core_enable_disable_lock.
+ */
+static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+
+ if (atomic_inc_return(&cam->in_reset) != 1) {
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+ return;
+ }
+
+ omap24xxcam_core_disable(cam);
+
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+
+ omap24xxcam_sgdma_sync(&cam->sgdma);
+}
+
+/*
+ * Reset and continue streaming.
+ *
+ * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
+ * register is supposed to be sufficient to recover from a camera
+ * interface error, but it doesn't seem to be enough. If we only do
+ * that then subsequent image captures are out of sync by either one
+ * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
+ * entire camera subsystem prevents the problem with frame
+ * synchronization.
+ */
+static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+
+ if (atomic_read(&cam->in_reset) != 1)
+ goto out;
+
+ omap24xxcam_hwinit(cam);
+
+ omap24xxcam_sensor_if_enable(cam);
+
+ omap24xxcam_sgdma_process(&cam->sgdma);
+
+ if (cam->sgdma_in_queue)
+ omap24xxcam_core_enable(cam);
+
+out:
+ atomic_dec(&cam->in_reset);
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+}
+
+static ssize_t
+omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap24xxcam_device *cam = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", cam->streaming ? "active" : "inactive");
+}
+static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);
+
+/*
+ * Stop capture and restart it. I.e. reset the camera during use.
+ */
+static void omap24xxcam_reset(struct omap24xxcam_device *cam)
+{
+ omap24xxcam_capture_stop(cam);
+ omap24xxcam_capture_cont(cam);
+}
+
+/*
+ * The main interrupt handler.
+ */
+static irqreturn_t omap24xxcam_isr(int irq, void *arg)
+{
+ struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
+ u32 irqstatus;
+ unsigned int irqhandled = 0;
+
+ irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);
+
+ if (irqstatus &
+ (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
+ | CAM_IRQSTATUS_DMA_IRQ0)) {
+ omap24xxcam_dma_isr(&cam->sgdma.dma);
+ irqhandled = 1;
+ }
+ if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
+ omap24xxcam_core_isr(cam);
+ irqhandled = 1;
+ }
+ if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
+ dev_err(cam->dev, "unhandled camera MMU interrupt!\n");
+
+ return IRQ_RETVAL(irqhandled);
+}
+
+/*
+ *
+ * Sensor handling.
+ *
+ */
+
+/*
+ * Enable the external sensor interface. Try to negotiate interface
+ * parameters with the sensor and start using the new ones. The calls
+ * to sensor_if_enable and sensor_if_disable need not be balanced.
+ */
+static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
+{
+ int rval;
+ struct v4l2_ifparm p;
+
+ rval = vidioc_int_g_ifparm(cam->sdev, &p);
+ if (rval) {
+ dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n", rval);
+ return rval;
+ }
+
+ cam->if_type = p.if_type;
+
+ cam->cc_ctrl = CC_CTRL_CC_EN;
+
+ switch (p.if_type) {
+ case V4L2_IF_TYPE_BT656:
+ if (p.u.bt656.frame_start_on_rising_vs)
+ cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
+ if (p.u.bt656.bt_sync_correct)
+ cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
+ if (p.u.bt656.swap)
+ cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
+ if (p.u.bt656.latch_clk_inv)
+ cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
+ if (p.u.bt656.nobt_hs_inv)
+ cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
+ if (p.u.bt656.nobt_vs_inv)
+ cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;
+
+ switch (p.u.bt656.mode) {
+ case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
+ cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
+ break;
+ case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
+ cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
+ break;
+ case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
+ cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
+ break;
+ case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
+ cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
+ break;
+ case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
+ cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
+ break;
+ default:
+ dev_err(cam->dev,
+ "bt656 interface mode %d not supported\n",
+ p.u.bt656.mode);
+ return -EINVAL;
+ }
+ /*
+ * The clock rate that the sensor wants has changed.
+ * We have to adjust the xclk from OMAP 2 side to
+ * match the sensor's wish as closely as possible.
+ */
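+ /*
+ * Illustrative example (assuming CAM_MCLK were 96 MHz): a sensor
+ * requesting 10 MHz would get divisor 10, i.e. an xclk of 9.6 MHz,
+ * provided that stays within the sensor's clock_min/clock_max.
+ */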
+ if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
+ u32 xclk = p.u.bt656.clock_curr;
+ u32 divisor;
+
+ if (xclk == 0)
+ return -EINVAL;
+
+ if (xclk > CAM_MCLK)
+ xclk = CAM_MCLK;
+
+ divisor = CAM_MCLK / xclk;
+ if (divisor * xclk < CAM_MCLK)
+ divisor++;
+ if (CAM_MCLK / divisor < p.u.bt656.clock_min
+ && divisor > 1)
+ divisor--;
+ if (divisor > 30)
+ divisor = 30;
+
+ xclk = CAM_MCLK / divisor;
+
+ if (xclk < p.u.bt656.clock_min
+ || xclk > p.u.bt656.clock_max)
+ return -EINVAL;
+
+ cam->if_u.bt656.xclk = xclk;
+ }
+ omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
+ break;
+ default:
+ /* FIXME: how about other interfaces? */
+ dev_err(cam->dev, "interface type %d not supported\n",
+ p.if_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
+{
+ switch (cam->if_type) {
+ case V4L2_IF_TYPE_BT656:
+ omap24xxcam_core_xclk_set(cam, 0);
+ break;
+ }
+}
+
+/*
+ * Initialise the sensor hardware.
+ */
+static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
+{
+ int err = 0;
+ struct v4l2_int_device *sdev = cam->sdev;
+
+ omap24xxcam_clock_on(cam);
+ err = omap24xxcam_sensor_if_enable(cam);
+ if (err) {
+ dev_err(cam->dev, "sensor interface could not be enabled at "
+ "initialisation, %d\n", err);
+ cam->sdev = NULL;
+ goto out;
+ }
+
+ /* power up sensor during sensor initialization */
+ vidioc_int_s_power(sdev, 1);
+
+ err = vidioc_int_dev_init(sdev);
+ if (err) {
+ dev_err(cam->dev, "cannot initialize sensor, error %d\n", err);
+ /* Sensor init failed --- it's nonexistent to us! */
+ cam->sdev = NULL;
+ goto out;
+ }
+
+ dev_info(cam->dev, "sensor is %s\n", sdev->name);
+
+out:
+ omap24xxcam_sensor_if_disable(cam);
+ omap24xxcam_clock_off(cam);
+
+ vidioc_int_s_power(sdev, 0);
+
+ return err;
+}
+
+static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
+{
+ if (cam->sdev)
+ vidioc_int_dev_exit(cam->sdev);
+}
+
+static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
+{
+ omap24xxcam_sensor_if_disable(cam);
+ omap24xxcam_clock_off(cam);
+ vidioc_int_s_power(cam->sdev, 0);
+}
+
+/*
+ * Power-up and configure camera sensor. It's ready for capturing now.
+ */
+static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
+{
+ int rval;
+
+ omap24xxcam_clock_on(cam);
+
+ omap24xxcam_sensor_if_enable(cam);
+
+ rval = vidioc_int_s_power(cam->sdev, 1);
+ if (rval)
+ goto out;
+
+ rval = vidioc_int_init(cam->sdev);
+ if (rval)
+ goto out;
+
+ return 0;
+
+out:
+ omap24xxcam_sensor_disable(cam);
+
+ return rval;
+}
+
+static void omap24xxcam_sensor_reset_work(struct work_struct *work)
+{
+ struct omap24xxcam_device *cam =
+ container_of(work, struct omap24xxcam_device,
+ sensor_reset_work);
+
+ if (atomic_read(&cam->reset_disable))
+ return;
+
+ omap24xxcam_capture_stop(cam);
+
+ if (vidioc_int_reset(cam->sdev) == 0) {
+ vidioc_int_init(cam->sdev);
+ } else {
+ /* Can't reset it by vidioc_int_reset. */
+ omap24xxcam_sensor_disable(cam);
+ omap24xxcam_sensor_enable(cam);
+ }
+
+ omap24xxcam_capture_cont(cam);
+}
+
+/*
+ *
+ * IOCTL interface.
+ *
+ */
+
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+
+ strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
+ cap->version = OMAP24XXCAM_VERSION;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ rval = vidioc_int_enum_fmt_cap(cam->sdev, f);
+
+ return rval;
+}
+
+static int vidioc_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ rval = vidioc_int_g_fmt_cap(cam->sdev, f);
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_s_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming) {
+ rval = -EBUSY;
+ goto out;
+ }
+
+ rval = vidioc_int_s_fmt_cap(cam->sdev, f);
+
+out:
+ mutex_unlock(&cam->mutex);
+
+ if (!rval) {
+ mutex_lock(&ofh->vbq.vb_lock);
+ ofh->pix = f->fmt.pix;
+ mutex_unlock(&ofh->vbq.vb_lock);
+ }
+
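+ /*
+ * Regardless of the result, report the sensor's current format
+ * back to the caller.
+ */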
+ memset(f, 0, sizeof(*f));
+ vidioc_g_fmt_cap(file, fh, f);
+
+ return rval;
+}
+
+static int vidioc_try_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ rval = vidioc_int_try_fmt_cap(cam->sdev, f);
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *b)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming) {
+ mutex_unlock(&cam->mutex);
+ return -EBUSY;
+ }
+
+ omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
+ mutex_unlock(&cam->mutex);
+
+ rval = videobuf_reqbufs(&ofh->vbq, b);
+
+ /*
+ * Either videobuf_reqbufs failed or the buffers are not
+ * memory-mapped (which would need special attention).
+ */
+ if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
+ goto out;
+
+ rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
+ if (rval)
+ omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
+
+out:
+ return rval;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap24xxcam_fh *ofh = fh;
+
+ return videobuf_querybuf(&ofh->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap24xxcam_fh *ofh = fh;
+
+ return videobuf_qbuf(&ofh->vbq, b);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ struct videobuf_buffer *vb;
+ int rval;
+
+videobuf_dqbuf_again:
+ rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
+ if (rval)
+ goto out;
+
+ vb = ofh->vbq.bufs[b->index];
+
+ mutex_lock(&cam->mutex);
+ /* _needs_reset returns -EIO if reset is required. */
+ rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
+ mutex_unlock(&cam->mutex);
+ if (rval == -EIO)
+ schedule_work(&cam->sensor_reset_work);
+ else
+ rval = 0;
+
+out:
+ /*
+ * This is a hack. We don't want to show -EIO to user
+ * space. Requeue the buffer and try again, unless we are in
+ * non-blocking mode.
+ */
+ if (rval == -EIO) {
+ videobuf_qbuf(&ofh->vbq, b);
+ if (!(file->f_flags & O_NONBLOCK))
+ goto videobuf_dqbuf_again;
+ /*
+ * We don't have a videobuf_buffer now --- maybe next
+ * time...
+ */
+ rval = -EAGAIN;
+ }
+
+ return rval;
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming) {
+ rval = -EBUSY;
+ goto out;
+ }
+
+ rval = omap24xxcam_sensor_if_enable(cam);
+ if (rval) {
+ dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
+ goto out;
+ }
+
+ rval = videobuf_streamon(&ofh->vbq);
+ if (!rval) {
+ cam->streaming = file;
+ sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+ }
+
+out:
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ struct videobuf_queue *q = &ofh->vbq;
+ int rval;
+
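+ /*
+ * Keep the sensor reset worker from restarting capture while
+ * streaming is being torn down.
+ */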
+ atomic_inc(&cam->reset_disable);
+
+ flush_scheduled_work();
+
+ rval = videobuf_streamoff(q);
+ if (!rval) {
+ mutex_lock(&cam->mutex);
+ cam->streaming = NULL;
+ mutex_unlock(&cam->mutex);
+ sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+ }
+
+ atomic_dec(&cam->reset_disable);
+
+ return rval;
+}
+
+static int vidioc_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ if (inp->index > 0)
+ return -EINVAL;
+
+ strlcpy(inp->name, "camera", sizeof(inp->name));
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *a)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ rval = vidioc_int_queryctrl(cam->sdev, a);
+
+ return rval;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh,
+ struct v4l2_control *a)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ rval = vidioc_int_g_ctrl(cam->sdev, a);
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh,
+ struct v4l2_control *a)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ mutex_lock(&cam->mutex);
+ rval = vidioc_int_s_ctrl(cam->sdev, a);
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a) {
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ int rval;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&cam->mutex);
+ rval = vidioc_int_g_parm(cam->sdev, a);
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+static int vidioc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct omap24xxcam_fh *ofh = fh;
+ struct omap24xxcam_device *cam = ofh->cam;
+ struct v4l2_streamparm old_streamparm;
+ int rval;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming) {
+ rval = -EBUSY;
+ goto out;
+ }
+
+ old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
+ if (rval)
+ goto out;
+
+ rval = vidioc_int_s_parm(cam->sdev, a);
+ if (rval)
+ goto out;
+
+ rval = omap24xxcam_sensor_if_enable(cam);
+ /*
+ * Revert to old streaming parameters if enabling sensor
+ * interface with the new ones failed.
+ */
+ if (rval)
+ vidioc_int_s_parm(cam->sdev, &old_streamparm);
+
+out:
+ mutex_unlock(&cam->mutex);
+
+ return rval;
+}
+
+/*
+ *
+ * File operations.
+ *
+ */
+
+static unsigned int omap24xxcam_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct omap24xxcam_fh *fh = file->private_data;
+ struct omap24xxcam_device *cam = fh->cam;
+ struct videobuf_buffer *vb;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming != file) {
+ mutex_unlock(&cam->mutex);
+ return POLLERR;
+ }
+ mutex_unlock(&cam->mutex);
+
+ mutex_lock(&fh->vbq.vb_lock);
+ if (list_empty(&fh->vbq.stream)) {
+ mutex_unlock(&fh->vbq.vb_lock);
+ return POLLERR;
+ }
+ vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
+ mutex_unlock(&fh->vbq.vb_lock);
+
+ poll_wait(file, &vb->done, wait);
+
+ if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int omap24xxcam_mmap_buffers(struct file *file,
+ struct vm_area_struct *vma)
+{
+ struct omap24xxcam_fh *fh = file->private_data;
+ struct omap24xxcam_device *cam = fh->cam;
+ struct videobuf_queue *vbq = &fh->vbq;
+ unsigned int first, last, size, i, j;
+ int err = 0;
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming) {
+ mutex_unlock(&cam->mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&cam->mutex);
+ mutex_lock(&vbq->vb_lock);
+
+ /* look for first buffer to map */
+ for (first = 0; first < VIDEO_MAX_FRAME; first++) {
+ if (NULL == vbq->bufs[first])
+ continue;
+ if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
+ continue;
+ if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ /* look for last buffer to map */
+ for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
+ if (NULL == vbq->bufs[last])
+ continue;
+ if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
+ continue;
+ size += vbq->bufs[last]->bsize;
+ if (size == (vma->vm_end - vma->vm_start))
+ break;
+ }
+
+ size = 0;
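+ /* Remap each buffer's scatter-gather pages into the vma, back to back. */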
+ for (i = first; i <= last; i++) {
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);
+
+ for (j = 0; j < dma->sglen; j++) {
+ err = remap_pfn_range(
+ vma, vma->vm_start + size,
+ page_to_pfn(sg_page(&dma->sglist[j])),
+ sg_dma_len(&dma->sglist[j]), vma->vm_page_prot);
+ if (err)
+ goto out;
+ size += sg_dma_len(&dma->sglist[j]);
+ }
+ }
+
+out:
+ mutex_unlock(&vbq->vb_lock);
+
+ return err;
+}
+
+static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct omap24xxcam_fh *fh = file->private_data;
+ int rval;
+
+ /* let the video-buf mapper check arguments and set-up structures */
+ rval = videobuf_mmap_mapper(&fh->vbq, vma);
+ if (rval)
+ return rval;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* do mapping to our allocated buffers */
+ rval = omap24xxcam_mmap_buffers(file, vma);
+ /*
+ * In case of error, free vma->vm_private_data allocated by
+ * videobuf_mmap_mapper.
+ */
+ if (rval)
+ kfree(vma->vm_private_data);
+
+ return rval;
+}
+
+static int omap24xxcam_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct omap24xxcam_device *cam = omap24xxcam.priv;
+ struct omap24xxcam_fh *fh;
+ struct v4l2_format format;
+
+ if (!cam || !cam->vfd || (cam->vfd->minor != minor))
+ return -ENODEV;
+
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (fh == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cam->mutex);
+ if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
+ mutex_unlock(&cam->mutex);
+ goto out_try_module_get;
+ }
+
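+ /* The first user initialises the hardware and powers up the sensor. */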
+ if (atomic_inc_return(&cam->users) == 1) {
+ omap24xxcam_hwinit(cam);
+ if (omap24xxcam_sensor_enable(cam)) {
+ mutex_unlock(&cam->mutex);
+ goto out_omap24xxcam_sensor_enable;
+ }
+ }
+ mutex_unlock(&cam->mutex);
+
+ fh->cam = cam;
+ mutex_lock(&cam->mutex);
+ vidioc_int_g_fmt_cap(cam->sdev, &format);
+ mutex_unlock(&cam->mutex);
+ /* FIXME: how about fh->pix when there are more users? */
+ fh->pix = format.fmt.pix;
+
+ file->private_data = fh;
+
+ spin_lock_init(&fh->vbq_lock);
+
+ videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
+ &fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), fh);
+
+ return 0;
+
+out_omap24xxcam_sensor_enable:
+ omap24xxcam_poweron_reset(cam);
+ module_put(cam->sdev->module);
+
+out_try_module_get:
+ kfree(fh);
+
+ return -ENODEV;
+}
+
+static int omap24xxcam_release(struct inode *inode, struct file *file)
+{
+ struct omap24xxcam_fh *fh = file->private_data;
+ struct omap24xxcam_device *cam = fh->cam;
+
+ atomic_inc(&cam->reset_disable);
+
+ flush_scheduled_work();
+
+ /* stop streaming capture */
+ videobuf_streamoff(&fh->vbq);
+
+ mutex_lock(&cam->mutex);
+ if (cam->streaming == file) {
+ cam->streaming = NULL;
+ mutex_unlock(&cam->mutex);
+ sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+ } else {
+ mutex_unlock(&cam->mutex);
+ }
+
+ atomic_dec(&cam->reset_disable);
+
+ omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);
+
+ /*
+ * Make sure the reset work we might have scheduled is not
+ * pending! It may be run *only* if we have users. (And it may
+ * not be scheduled anymore since streaming is already
+ * disabled.)
+ */
+ flush_scheduled_work();
+
+ mutex_lock(&cam->mutex);
+ if (atomic_dec_return(&cam->users) == 0) {
+ omap24xxcam_sensor_disable(cam);
+ omap24xxcam_poweron_reset(cam);
+ }
+ mutex_unlock(&cam->mutex);
+
+ file->private_data = NULL;
+
+ module_put(cam->sdev->module);
+ kfree(fh);
+
+ return 0;
+}
+
+static struct file_operations omap24xxcam_fops = {
+ .llseek = no_llseek,
+ .ioctl = video_ioctl2,
+ .poll = omap24xxcam_poll,
+ .mmap = omap24xxcam_mmap,
+ .open = omap24xxcam_open,
+ .release = omap24xxcam_release,
+};
+
+/*
+ *
+ * Power management.
+ *
+ */
+
+#ifdef CONFIG_PM
+static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+ if (atomic_read(&cam->users) == 0)
+ return 0;
+
+ if (!atomic_read(&cam->reset_disable))
+ omap24xxcam_capture_stop(cam);
+
+ omap24xxcam_sensor_disable(cam);
+ omap24xxcam_poweron_reset(cam);
+
+ return 0;
+}
+
+static int omap24xxcam_resume(struct platform_device *pdev)
+{
+ struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+ if (atomic_read(&cam->users) == 0)
+ return 0;
+
+ omap24xxcam_hwinit(cam);
+ omap24xxcam_sensor_enable(cam);
+
+ if (!atomic_read(&cam->reset_disable))
+ omap24xxcam_capture_cont(cam);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/*
+ *
+ * Camera device (i.e. /dev/video).
+ *
+ */
+
+static int omap24xxcam_device_register(struct v4l2_int_device *s)
+{
+ struct omap24xxcam_device *cam = s->u.slave->master->priv;
+ struct video_device *vfd;
+ int rval;
+
+ /* We already have a slave. */
+ if (cam->sdev)
+ return -EBUSY;
+
+ cam->sdev = s;
+
+ if (device_create_file(cam->dev, &dev_attr_streaming) != 0) {
+ dev_err(cam->dev, "could not register sysfs entry\n");
+ rval = -EBUSY;
+ goto err;
+ }
+
+ /* initialize the video_device struct */
+ vfd = cam->vfd = video_device_alloc();
+ if (!vfd) {
+ dev_err(cam->dev, "could not allocate video device struct\n");
+ rval = -ENOMEM;
+ goto err;
+ }
+ vfd->release = video_device_release;
+
+ vfd->dev = cam->dev;
+
+ strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
+ vfd->type = VID_TYPE_CAPTURE | VID_TYPE_CHROMAKEY;
+ vfd->fops = &omap24xxcam_fops;
+ vfd->priv = cam;
+ vfd->minor = -1;
+
+ vfd->vidioc_querycap = vidioc_querycap;
+ vfd->vidioc_enum_fmt_cap = vidioc_enum_fmt_cap;
+ vfd->vidioc_g_fmt_cap = vidioc_g_fmt_cap;
+ vfd->vidioc_s_fmt_cap = vidioc_s_fmt_cap;
+ vfd->vidioc_try_fmt_cap = vidioc_try_fmt_cap;
+ vfd->vidioc_reqbufs = vidioc_reqbufs;
+ vfd->vidioc_querybuf = vidioc_querybuf;
+ vfd->vidioc_qbuf = vidioc_qbuf;
+ vfd->vidioc_dqbuf = vidioc_dqbuf;
+ vfd->vidioc_streamon = vidioc_streamon;
+ vfd->vidioc_streamoff = vidioc_streamoff;
+ vfd->vidioc_enum_input = vidioc_enum_input;
+ vfd->vidioc_g_input = vidioc_g_input;
+ vfd->vidioc_s_input = vidioc_s_input;
+ vfd->vidioc_queryctrl = vidioc_queryctrl;
+ vfd->vidioc_g_ctrl = vidioc_g_ctrl;
+ vfd->vidioc_s_ctrl = vidioc_s_ctrl;
+ vfd->vidioc_g_parm = vidioc_g_parm;
+ vfd->vidioc_s_parm = vidioc_s_parm;
+
+ omap24xxcam_hwinit(cam);
+
+ rval = omap24xxcam_sensor_init(cam);
+ if (rval)
+ goto err;
+
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
+ dev_err(cam->dev, "could not register V4L device\n");
+ vfd->minor = -1;
+ rval = -EBUSY;
+ goto err;
+ }
+
+ omap24xxcam_poweron_reset(cam);
+
+ dev_info(cam->dev, "registered device video%d\n", vfd->minor);
+
+ return 0;
+
+err:
+ omap24xxcam_device_unregister(s);
+
+ return rval;
+}
+
+static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
+{
+ struct omap24xxcam_device *cam = s->u.slave->master->priv;
+
+ omap24xxcam_sensor_exit(cam);
+
+ if (cam->vfd) {
+ if (cam->vfd->minor == -1) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(cam->vfd);
+ } else {
+ /*
+ * The unregister function will release the
+ * video_device struct as well as
+ * unregistering it.
+ */
+ video_unregister_device(cam->vfd);
+ }
+ cam->vfd = NULL;
+ }
+
+ device_remove_file(cam->dev, &dev_attr_streaming);
+
+ cam->sdev = NULL;
+}
+
+static struct v4l2_int_master omap24xxcam_master = {
+ .attach = omap24xxcam_device_register,
+ .detach = omap24xxcam_device_unregister,
+};
+
+static struct v4l2_int_device omap24xxcam = {
+ .module = THIS_MODULE,
+ .name = CAM_NAME,
+ .type = v4l2_int_type_master,
+ .u = {
+ .master = &omap24xxcam_master
+ },
+};
+
+/*
+ *
+ * Driver initialisation and deinitialisation.
+ *
+ */
+
+static int __init omap24xxcam_probe(struct platform_device *pdev)
+{
+ struct omap24xxcam_device *cam;
+ struct resource *mem;
+ int irq;
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam) {
+ dev_err(&pdev->dev, "could not allocate memory\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, cam);
+
+ cam->dev = &pdev->dev;
+
+ /*
+ * Impose a lower limit on the amount of memory allocated for
+ * capture: we require at least enough memory to double-buffer
+ * 16bpp QVGA (320 x 240 x 2 x 2 bytes, i.e. 300 KiB).
+ */
+ if (capture_mem < 320 * 240 * 2 * 2)
+ capture_mem = 320 * 240 * 2 * 2;
+ cam->capture_mem = capture_mem;
+
+ /* request the mem region for the camera registers */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(cam->dev, "no mem resource?\n");
+ goto err;
+ }
+ if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
+ pdev->name)) {
+ dev_err(cam->dev,
+ "cannot reserve camera register I/O region\n");
+ goto err;
+ }
+ cam->mmio_base_phys = mem->start;
+ cam->mmio_size = (mem->end - mem->start) + 1;
+
+ /* map the region */
+ cam->mmio_base = (unsigned long)
+ ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
+ if (!cam->mmio_base) {
+ dev_err(cam->dev, "cannot map camera register I/O region\n");
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(cam->dev, "no irq for camera?\n");
+ goto err;
+ }
+
+ /* install the interrupt service routine */
+ if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) {
+ dev_err(cam->dev,
+ "could not install interrupt service routine\n");
+ goto err;
+ }
+ cam->irq = irq;
+
+ if (omap24xxcam_clock_get(cam))
+ goto err;
+
+ INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work);
+
+ mutex_init(&cam->mutex);
+ spin_lock_init(&cam->core_enable_disable_lock);
+
+ omap24xxcam_sgdma_init(&cam->sgdma,
+ cam->mmio_base + CAMDMA_REG_OFFSET,
+ omap24xxcam_stalled_dma_reset,
+ (unsigned long)cam);
+
+ omap24xxcam.priv = cam;
+
+ if (v4l2_int_device_register(&omap24xxcam))
+ goto err;
+
+ return 0;
+
+err:
+ omap24xxcam_remove(pdev);
+ return -ENODEV;
+}
+
+static int omap24xxcam_remove(struct platform_device *pdev)
+{
+ struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+ if (!cam)
+ return 0;
+
+ if (omap24xxcam.priv != NULL)
+ v4l2_int_device_unregister(&omap24xxcam);
+ omap24xxcam.priv = NULL;
+
+ omap24xxcam_clock_put(cam);
+
+ if (cam->irq) {
+ free_irq(cam->irq, cam);
+ cam->irq = 0;
+ }
+
+ if (cam->mmio_base) {
+ iounmap((void *)cam->mmio_base);
+ cam->mmio_base = 0;
+ }
+
+ if (cam->mmio_base_phys) {
+ release_mem_region(cam->mmio_base_phys, cam->mmio_size);
+ cam->mmio_base_phys = 0;
+ }
+
+ kfree(cam);
+
+ return 0;
+}
+
+static struct platform_driver omap24xxcam_driver = {
+ .probe = omap24xxcam_probe,
+ .remove = omap24xxcam_remove,
+#ifdef CONFIG_PM
+ .suspend = omap24xxcam_suspend,
+ .resume = omap24xxcam_resume,
+#endif
+ .driver = {
+ .name = CAM_NAME,
+ },
+};
+
+/*
+ *
+ * Module initialisation and deinitialisation
+ *
+ */
+
+static int __init omap24xxcam_init(void)
+{
+ return platform_driver_register(&omap24xxcam_driver);
+}
+
+static void __exit omap24xxcam_cleanup(void)
+{
+ platform_driver_unregister(&omap24xxcam_driver);
+}
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
+MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
+MODULE_LICENSE("GPL");
+module_param(video_nr, int, 0);
+MODULE_PARM_DESC(video_nr,
+ "Minor number for video device (-1 ==> auto assign)");
+module_param(capture_mem, int, 0);
+MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
+ "buffers (default 4800kiB)");
+
+module_init(omap24xxcam_init);
+module_exit(omap24xxcam_cleanup);
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
- obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
- obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+obj-$(CONFIG_OMAP_STI) += sti/
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
+ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
+ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
+obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
obj-$(CONFIG_MMC_AT91) += at91_mci.o
+ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
+ obj-$(CONFIG_MMC_S3C) += s3cmci.o
+ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
- obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
+ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
+obj-$(CONFIG_MTD_NAND_OMAP) += omap-nand-flash.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP_HW) += omap-hw.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
--- /dev/null
+/*
+ * linux/drivers/mtd/onenand/omap2.c
+ *
+ * OneNAND driver for OMAP2 / OMAP3
+ *
+ * Copyright (C) 2005-2006 Nokia Corporation
+ *
+ * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjola
+ * IRQ and DMA support written by Timo Teras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+#include <asm/arch/gpmc.h>
+#include <asm/arch/onenand.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/gpmc.h>
+#include <asm/arch/pm.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/dma.h>
+
+#include <asm/arch/board.h>
+
+#define DRIVER_NAME "omap2-onenand"
+
+#define ONENAND_IO_SIZE SZ_128K
+#define ONENAND_BUFRAM_SIZE (1024 * 5)
+
+struct omap2_onenand {
+ struct platform_device *pdev;
+ int gpmc_cs;
+ unsigned long phys_base;
+ int gpio_irq;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct onenand_chip onenand;
+ struct completion irq_done;
+ struct completion dma_done;
+ int dma_channel;
+ int freq;
+ int (*setup)(void __iomem *base, int freq);
+};
+
+static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct omap2_onenand *c = data;
+
+ complete(&c->dma_done);
+}
+
+static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
+{
+ struct omap2_onenand *c = dev_id;
+
+ complete(&c->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
+{
+ return readw(c->onenand.base + reg);
+}
+
+static inline void write_reg(struct omap2_onenand *c, unsigned short value,
+ int reg)
+{
+ writew(value, c->onenand.base + reg);
+}
+
+static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
+{
+ printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
+ msg, state, ctrl, intr);
+}
+
+static void wait_warn(char *msg, int state, unsigned int ctrl,
+ unsigned int intr)
+{
+ printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
+ "intr 0x%04x\n", msg, state, ctrl, intr);
+}
+
+static int omap2_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ unsigned int intr = 0;
+ unsigned int ctrl;
+ unsigned long timeout;
+ u32 syscfg;
+
+ if (state == FL_RESETING) {
+ int i;
+
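+ /* After a reset command, poll the interrupt register for up to ~20 us. */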
+ for (i = 0; i < 20; i++) {
+ udelay(1);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ }
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ return -EIO;
+ }
+ if (!(intr & ONENAND_INT_RESET)) {
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+ return 0;
+ }
+
+ if (state != FL_READING) {
+ int result;
+
+ /* Turn interrupts on */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ syscfg |= ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+
+ INIT_COMPLETION(c->irq_done);
+ if (c->gpio_irq) {
+ result = omap_get_gpio_datain(c->gpio_irq);
+ if (result == -1) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return -EIO;
+ }
+ } else
+ result = 0;
+ if (result == 0) {
+ int retry_cnt = 0;
+retry:
+ result = wait_for_completion_timeout(&c->irq_done,
+ msecs_to_jiffies(20));
+ if (result == 0) {
+ /* Timeout after 20ms */
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ONGO) {
+ /*
+ * The operation still seems to be in progress,
+ * so give it some more time.
+ */
+ retry_cnt += 1;
+ if (retry_cnt < 3)
+ goto retry;
+ intr = read_reg(c,
+ ONENAND_REG_INTERRUPT);
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if ((intr & ONENAND_INT_MASTER) == 0)
+ wait_warn("timeout", state, ctrl, intr);
+ }
+ }
+ } else {
+ /* Turn interrupts off */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ syscfg &= ~ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ }
+ }
+
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+
+ if (intr & ONENAND_INT_READ) {
+ int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
+
+ if (ecc) {
+ unsigned int addr1, addr8;
+
+ addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
+ addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_ERR "onenand_wait: ECC error = "
+ "0x%04x, addr1 %#x, addr8 %#x\n",
+ ecc, addr1, addr8);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else if (ecc & ONENAND_ECC_1BIT_ALL) {
+ printk(KERN_NOTICE "onenand_wait: correctable "
+ "ECC error = 0x%04x, addr1 %#x, "
+ "addr8 %#x\n", ecc, addr1, addr8);
+ mtd->ecc_stats.corrected++;
+ }
+ }
+ } else if (state == FL_READING) {
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "onenand_wait: "
+ "Device is write protected!!!\n");
+ return -EIO;
+ }
+
+ if (ctrl & 0xFE9F)
+ wait_warn("unexpected controller status", state, ctrl, intr);
+
+ return 0;
+}
+
+static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ return mtd->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+ unsigned long timeout;
+ void *buf = (void *)buffer;
+ size_t xtra;
+ volatile unsigned *done;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+ goto out_copy;
+
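+ /*
+ * A vmalloc'ed buffer can only be DMA'd if it does not cross a
+ * page boundary; translate it to the address of its backing page.
+ */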
+ if (buf >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)buf & PAGE_MASK) !=
+ ((size_t)(buf + count - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(buf);
+ if (!p1)
+ goto out_copy;
+ buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+ }
+
+ xtra = count & 3;
+ if (xtra) {
+ count -= xtra;
+ memcpy(buf + count, this->base + bram_offset + count, xtra);
+ }
+
+ dma_src = c->phys_base + bram_offset;
+ dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dst)) {
++ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ goto out_copy;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count >> 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ INIT_COMPLETION(c->dma_done);
+ omap_start_dma(c->dma_channel);
+
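+ /*
+ * Busy-wait on the DMA completion flag instead of sleeping,
+ * giving up after 20 ms.
+ */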
+ timeout = jiffies + msecs_to_jiffies(20);
+ done = &c->dma_done.done;
+ while (time_before(jiffies, timeout))
+ if (*done)
+ break;
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!*done) {
+ dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ goto out_copy;
+ }
+
+ return 0;
+
+out_copy:
+ memcpy(buf, this->base + bram_offset, count);
+ return 0;
+}
+
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+ unsigned long timeout;
+ void *buf = (void *)buffer;
+ volatile unsigned *done;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+ goto out_copy;
+
+ /* panic_write() may be in an interrupt context */
+ if (in_interrupt())
+ goto out_copy;
+
+ if (buf >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)buf & PAGE_MASK) !=
+ ((size_t)(buf + count - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(buf);
+ if (!p1)
+ goto out_copy;
+ buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+ }
+
+ dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
+ dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(dma_dst)) {
++ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count >> 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ INIT_COMPLETION(c->dma_done);
+ omap_start_dma(c->dma_channel);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ done = &c->dma_done.done;
+ while (time_before(jiffies, timeout))
+ if (*done)
+ break;
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+
+ if (!*done) {
+ dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ goto out_copy;
+ }
+
+ return 0;
+
+out_copy:
+ memcpy(this->base + bram_offset, buf, count);
+ return 0;
+}
+
+#else
+
+int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count);
+
+int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count);
+
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
+
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /* DMA is not used. Revisit PM requirements before enabling it. */
+ if (1 || (c->dma_channel < 0) ||
+ ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+ (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+ memcpy(buffer, (__force void *)(this->base + bram_offset),
+ count);
+ return 0;
+ }
+
+ dma_src = c->phys_base + bram_offset;
+ dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
+ DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dst)) {
++ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count / 4, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ INIT_COMPLETION(c->dma_done);
+ omap_start_dma(c->dma_channel);
+ wait_for_completion(&c->dma_done);
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /* DMA is not used. Revisit PM requirements before enabling it. */
+ if (1 || (c->dma_channel < 0) ||
+ ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+ (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+ memcpy((__force void *)(this->base + bram_offset), buffer,
+ count);
+ return 0;
+ }
+
+ dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
+ DMA_TO_DEVICE);
+ dma_dst = c->phys_base + bram_offset;
++ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
+ count / 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ INIT_COMPLETION(c->dma_done);
+ omap_start_dma(c->dma_channel);
+ wait_for_completion(&c->dma_done);
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+#else
+
+int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count);
+
+int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count);
+
+#endif
+
+static struct platform_driver omap2_onenand_driver;
+
+static int __adjust_timing(struct device *dev, void *data)
+{
+ int ret = 0;
+ struct omap2_onenand *c;
+
+ c = dev_get_drvdata(dev);
+
+ BUG_ON(c->setup == NULL);
+
+ /* DMA is not in use so this is all that is needed */
+ /* Revisit for OMAP3! */
+ ret = c->setup(c->onenand.base, c->freq);
+
+ return ret;
+}
+
+int omap2_onenand_rephase(void)
+{
+ return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
+ NULL, __adjust_timing);
+}
+
+static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ /* With certain content in the buffer RAM, the OMAP boot ROM code
+ * can recognize the flash chip incorrectly. Zero it out before
+ * soft reset.
+ */
+ memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
+}
+
+static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+{
+ struct omap_onenand_platform_data *pdata;
+ struct omap2_onenand *c;
+ int r;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ init_completion(&c->irq_done);
+ init_completion(&c->dma_done);
+ c->gpmc_cs = pdata->cs;
+ c->gpio_irq = pdata->gpio_irq;
+ c->dma_channel = pdata->dma_channel;
+ if (c->dma_channel < 0) {
+ /* if -1, don't use DMA */
+ c->gpio_irq = 0;
+ }
+
+ r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
+ if (r < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto err_kfree;
+ }
+
+ if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
+ pdev->dev.driver->name) == NULL) {
+ dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
+ "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
+ r = -EBUSY;
+ goto err_free_cs;
+ }
+ c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
+ if (c->onenand.base == NULL) {
+ r = -ENOMEM;
+ goto err_release_mem_region;
+ }
+
+ if (pdata->onenand_setup != NULL) {
+ r = pdata->onenand_setup(c->onenand.base, c->freq);
+ if (r < 0) {
+ dev_err(&pdev->dev, "Onenand platform setup failed: "
+ "%d\n", r);
+ goto err_iounmap;
+ }
+ c->setup = pdata->onenand_setup;
+ }
+
+ if (c->gpio_irq) {
+ if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
+ dev_err(&pdev->dev, "Failed to request GPIO%d for "
+ "OneNAND\n", c->gpio_irq);
+ goto err_iounmap;
+ }
+ omap_set_gpio_direction(c->gpio_irq, 1);
+
+ if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
+ omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
+ pdev->dev.driver->name, c)) < 0)
+ goto err_release_gpio;
+ }
+
+ if (c->dma_channel >= 0) {
+ r = omap_request_dma(0, pdev->dev.driver->name,
+ omap2_onenand_dma_cb, (void *) c,
+ &c->dma_channel);
+ if (r == 0) {
+ omap_set_dma_write_mode(c->dma_channel,
+ OMAP_DMA_WRITE_NON_POSTED);
+ omap_set_dma_src_data_pack(c->dma_channel, 1);
+ omap_set_dma_src_burst_mode(c->dma_channel,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(c->dma_channel, 1);
+ omap_set_dma_dest_burst_mode(c->dma_channel,
+ OMAP_DMA_DATA_BURST_8);
+ } else {
+ dev_info(&pdev->dev,
+ "failed to allocate DMA for OneNAND, "
+ "using PIO instead\n");
+ c->dma_channel = -1;
+ }
+ }
+
+ dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
+ "base %p\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base);
+
+ c->pdev = pdev;
+ c->mtd.name = pdev->dev.bus_id;
+ c->mtd.priv = &c->onenand;
+ c->mtd.owner = THIS_MODULE;
+
+ if (c->dma_channel >= 0) {
+ struct onenand_chip *this = &c->onenand;
+
+ this->wait = omap2_onenand_wait;
+ if (cpu_is_omap34xx()) {
+ this->read_bufferram = omap3_onenand_read_bufferram;
+ this->write_bufferram = omap3_onenand_write_bufferram;
+ } else {
+ this->read_bufferram = omap2_onenand_read_bufferram;
+ this->write_bufferram = omap2_onenand_write_bufferram;
+ }
+ }
+
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
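+ /*
+ * Derive the sync clock frequency (in MHz) from the OneNAND
+ * version ID; it is used when the timings are re-adjusted via
+ * omap2_onenand_rephase().
+ */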
+ switch ((c->onenand.version_id >> 4) & 0xf) {
+ case 0:
+ c->freq = 40;
+ break;
+ case 1:
+ c->freq = 54;
+ break;
+ case 2:
+ c->freq = 66;
+ break;
+ case 3:
+ c->freq = 83;
+ break;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (pdata->parts != NULL)
+ r = add_mtd_partitions(&c->mtd, pdata->parts,
+ pdata->nr_parts);
+ else
+#endif
+ r = add_mtd_device(&c->mtd);
+ if (r < 0)
+ goto err_release_onenand;
+
+ platform_set_drvdata(pdev, c);
+
+ return 0;
+
+err_release_onenand:
+ onenand_release(&c->mtd);
+err_release_dma:
+ if (c->dma_channel != -1)
+ omap_free_dma(c->dma_channel);
+ if (c->gpio_irq)
+ free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+err_release_gpio:
+ if (c->gpio_irq)
+ omap_free_gpio(c->gpio_irq);
+err_iounmap:
+ iounmap(c->onenand.base);
+err_release_mem_region:
+ release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+err_free_cs:
+ gpmc_cs_free(c->gpmc_cs);
+err_kfree:
+ kfree(c);
+
+ return r;
+}
+
+static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ BUG_ON(c == NULL);
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (c->parts)
+ del_mtd_partitions(&c->mtd);
+ else
+ del_mtd_device(&c->mtd);
+#else
+ del_mtd_device(&c->mtd);
+#endif
+
+ onenand_release(&c->mtd);
+ if (c->dma_channel != -1)
+ omap_free_dma(c->dma_channel);
+ omap2_onenand_shutdown(pdev);
+ platform_set_drvdata(pdev, NULL);
+ if (c->gpio_irq) {
+ free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+ omap_free_gpio(c->gpio_irq);
+ }
+ iounmap(c->onenand.base);
+ release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+ kfree(c);
+
+ return 0;
+}
+
+static struct platform_driver omap2_onenand_driver = {
+ .probe = omap2_onenand_probe,
+ .remove = omap2_onenand_remove,
+ .shutdown = omap2_onenand_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap2_onenand_init(void)
+{
+ printk(KERN_INFO "OneNAND driver initializing\n");
+ return platform_driver_register(&omap2_onenand_driver);
+}
+
+static void __exit omap2_onenand_exit(void)
+{
+ platform_driver_unregister(&omap2_onenand_driver);
+}
+
+module_init(omap2_onenand_init);
+module_exit(omap2_onenand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on ARCH_PXA || SH_MAGIC_PANEL_R2 || ARCH_OMAP24XX || ARCH_OMAP34XX
- depends on ARCH_PXA || SUPERH
++ depends on ARCH_PXA || SUPERH || SH_MAGIC_PANEL_R2 || ARCH_OMAP24XX || ARCH_OMAP34XX
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#elif defined(CONFIG_ARCH_OMAP34XX)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+#elif defined(CONFIG_ARCH_OMAP24XX)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+ #else
+ /*
+ * Default configuration
+ */
+
+ #define SMC_DYNAMIC_BUS_CONFIG
#endif
+ /* Store this information for the driver. */
+ struct smc911x_local {
+ /*
+ * If I have to wait until the DMA is finished and ready to reload a
+ * packet, I will store the skbuff here. Then, the DMA will send it
+ * out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+
+ /* version/revision of the SMC911x chip */
+ u16 version;
+ u16 revision;
+
+ /* FIFO sizes */
+ int tx_fifo_kb;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int afc_cfg;
+
+ /* Contains the current active receive/phy mode */
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+
+ int tx_throttle;
+ spinlock_t lock;
+
+ struct net_device *netdev;
+
+ #ifdef SMC_USE_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ int rxdma;
+ int txdma;
+ int rxdma_active;
+ int txdma_active;
+ struct sk_buff *current_rx_skb;
+ struct sk_buff *current_tx_skb;
+ struct device *dev;
+ #endif
+ void __iomem *base;
+ #ifdef SMC_DYNAMIC_BUS_CONFIG
+ struct smc911x_platdata cfg;
+ #endif
+ };
/*
* Define the bus width specific IO macros
help
Say Y to enable support for the battery on the OLPC laptop.
+config BATTERY_BQ27x00
+ tristate "BQ27x00 battery driver"
+ help
+ Say Y here to enable support for batteries with a BQ27000 or BQ27200 chip.
+
+config BATTERY_BQ27000
+ bool "BQ27000 battery driver"
+ depends on BATTERY_BQ27x00
+ select W1
+ select W1_SLAVE_BQ27000
+ help
+ Say Y here to enable support for batteries with a BQ27000 (HDQ) chip.
+
+config BATTERY_BQ27200
+ bool "BQ27200 battery driver"
+ depends on BATTERY_BQ27x00
+ select I2C
+ select I2C_OMAP
+ help
+ Say Y here to enable support for batteries with a BQ27200 (I2C) chip.
+
+config TWL4030_BCI_BATTERY
+ tristate "OMAP TWL4030 BCI Battery driver"
+ depends on (MACH_OMAP_2430SDP || MACH_OMAP_3430SDP) && TWL4030_MADC
+ default y
+ help
+ Support for OMAP TWL4030 BCI Battery driver.
+ This driver can give support for TWL4030 Battery Charge Interface.
+
+ config BATTERY_TOSA
+ tristate "Sharp SL-6000 (tosa) battery"
+ depends on MACH_TOSA && MFD_TC6393XB
+ help
+ Say Y to enable support for the battery on the Sharp Zaurus
+ SL-6000 (tosa) models.
+
+ config BATTERY_PALMTX
+ tristate "Palm T|X battery"
+ depends on MACH_PALMTX
+ help
+ Say Y to enable support for the battery in Palm T|X.
+
endif # POWER_SUPPLY
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
+obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
+obj-$(CONFIG_TWL4030_BCI_BATTERY) += twl4030_bci_battery.o
+ obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
+ obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
This driver can also be built as a module. If so, the module
will be called at25.
+config SPI_TSC2101
+ depends on SPI_MASTER
+ tristate "TSC2101 chip support"
+ ---help---
+ Say Y here if you want support for the TSC2101 chip.
+ At the moment it provides a basic register read/write interface
+ as well as a way to enable the MCLK clock.
+
+config SPI_TSC2102
+ depends on SPI_MASTER
+ tristate "TSC2102 codec support"
+ ---help---
+ Say Y here if you want support for the TSC2102 chip. It
+ will be needed for the touchscreen driver on some boards.
+
+config SPI_TSC210X
+ depends on SPI_MASTER && EXPERIMENTAL
+ tristate "TI TSC210x (TSC2101/TSC2102) support"
+ help
+ Say Y here if you want support for the TSC210x chips. Some
+ boards use these for touchscreen and audio support.
+
+ These are members of a family of highly integrated PDA analog
+ interface circuits. They include a 12-bit ADC used for battery,
+ temperature, touchscreen, and other sensors. They also have
+ an audio DAC and amplifier, and in some models an audio ADC.
+ The audio support is highly chip-specific, but most of the
+ sensor support works the same.
+
+ Note that the device has to be present in the board's SPI
+ devices table for this driver to load. This driver doesn't
+ automatically enable touchscreen, sensors or audio
+ functionality - enable these in their respective menus.
+
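The note above about the board's SPI devices table can be made concrete with a small board-file sketch. Everything in it (the modalias string, bus number, chip select and clock rate) is an assumption for illustration, not taken from this patch; spi_register_board_info() is the standard way such entries are registered.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Illustrative entry only; all values below are assumptions. */
static struct spi_board_info example_board_spi_info[] __initdata = {
        {
                .modalias       = "tsc210x",    /* assumed driver name */
                .bus_num        = 1,            /* assumed SPI bus */
                .chip_select    = 0,
                .max_speed_hz   = 6000000,
        },
};

static void __init example_board_init_spi(void)
{
        spi_register_board_info(example_board_spi_info,
                                ARRAY_SIZE(example_board_spi_info));
}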
+config SPI_TSC2301
+ tristate "TSC2301 driver"
+ depends on SPI_MASTER
+ help
+ Say Y here if you have a TSC2301 chip connected to an SPI
+ bus on your board.
+
+ The TSC2301 is a highly integrated PDA analog interface circuit.
+ It contains a complete 12-bit A/D resistive touch screen
+ converter (ADC) including drivers, touch pressure measurement
+ capability, a keypad controller, and an 8-bit D/A converter (DAC) output
+ for LCD contrast control.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc2301.
+
+config SPI_TSC2301_AUDIO
+ boolean "TSC2301 audio support"
+ depends on SPI_TSC2301 && SND
+ help
+ Say Y here if you are using the audio features of the TSC2301.
+
config SPI_SPIDEV
tristate "User mode SPI device driver support"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This supports user mode SPI protocol drivers.
#define CN_VAL_CIFS 0x1
#define CN_W1_IDX 0x3 /* w1 communication */
#define CN_W1_VAL 0x1
+#define CN_IDX_SX1SND 0x4
+#define CN_VAL_SX1SND 0x1
#define CN_IDX_V86D 0x4
#define CN_VAL_V86D_UVESAFB 0x1
+ #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
- #define CN_NETLINK_USERS 5
+ #define CN_NETLINK_USERS 6
/*
* Maximum connector's message size.
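For context, the SX1SND index added above is meant to be used with the in-kernel connector API. The helper below is a hypothetical sketch, not code from this patch; it assumes the three-argument cn_netlink_send() found in kernels of this era and simply broadcasts a payload on the CN_IDX_SX1SND/CN_VAL_SX1SND channel.

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: broadcast a payload on the SX1SND channel. */
static int sx1snd_notify(const void *data, unsigned int len)
{
        struct cn_msg *m;
        int err;

        m = kzalloc(sizeof(*m) + len, GFP_ATOMIC);
        if (!m)
                return -ENOMEM;

        m->id.idx = CN_IDX_SX1SND;
        m->id.val = CN_VAL_SX1SND;
        m->len = len;
        memcpy(m->data, data, len);

        err = cn_netlink_send(m, 0, GFP_ATOMIC);
        kfree(m);
        return err;
}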
help
NETMAP is an implementation of static 1:1 NAT mapping of network
addresses. It maps the network address part, while keeping the host
- address part intact. It is similar to Fast NAT, except that
- Netfilter's connection tracking doesn't work well with Fast NAT.
+ address part intact.
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_NF_TARGET_IDLETIMER
+ tristate "IDLETIMER target support"
+ depends on IP_NF_IPTABLES
+ help
+ This option adds an `IDLETIMER' target. Each matching packet resets
+ the timer associated with the input and/or output interfaces. Timer
+ expiry causes a kobject uevent, and the idle timer can be read via sysfs.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+
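The help text above says the remaining idle time is exported through sysfs. The tiny userspace C sketch below only illustrates that idea; the sysfs path is an assumption (it depends on how this particular IDLETIMER implementation names its kobject), not something taken from the patch.

#include <stdio.h>

int main(void)
{
        char buf[64];
        /* hypothetical attribute path; adjust to the real timer name */
        FILE *f = fopen("/sys/class/xt_idletimer/timers/example", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("seconds until idle: %s", buf);
        fclose(f);
        return 0;
}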
To compile it as a module, choose M here. If unsure, say N.
config NF_NAT_SNMP_BASIC
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
- # if we don't select a security model, use the default capabilities
- ifneq ($(CONFIG_SECURITY),y)
+ # always enable default capabilities
obj-y += commoncap.o
- endif
# Object file lists
- obj-$(CONFIG_SECURITY) += security.o dummy.o inode.o
+ obj-$(CONFIG_SECURITY) += security.o capability.o inode.o
# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
- obj-$(CONFIG_SECURITY_SMACK) += commoncap.o smack/built-in.o
- obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o
- obj-$(CONFIG_SECURITY_ROOTPLUG) += commoncap.o root_plug.o
+ obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
+ obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
+obj-$(CONFIG_SECURITY_LOWMEM) += commoncap.o lowmem.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
Say Y or M if you want to support any AC97 codec attached to
the PXA2xx AC97 interface.
-endif # SND_ARM
+config SND_OMAP_AIC23
+ tristate "OMAP AIC23 alsa driver (osk5912)"
+ depends on ARCH_OMAP && SND
+ select SND_PCM
+ select I2C
+ select I2C_OMAP if ARCH_OMAP
+ select SENSORS_TLV320AIC23
+ help
+ Say Y here if you have an OSK platform board
+ and want to use its AIC23 audio chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-omap-aic23.
+
+config SND_OMAP_TSC2101
+ tristate "OMAP TSC2101 alsa driver"
+ depends on ARCH_OMAP && SND
+ select SND_PCM
+ select SPI_TSC2101
+ help
+ Say Y here if you have an OMAP platform board
+ and want to use its TSC2101 audio chip. The driver has
+ been tested with the H2 and the iPAQ h6300.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-omap-tsc2101.
+
+config SND_SX1
+ tristate "Siemens SX1 Egold alsa driver"
+ depends on ARCH_OMAP && SND
+ select SND_PCM
+ help
+ Say Y here if you have an OMAP310-based Siemens SX1.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-omap-sx1.
+
+config SND_OMAP_TSC2102
+ tristate "OMAP TSC2102 alsa driver"
+ depends on ARCH_OMAP && SND
+ select SND_PCM
+ select SPI_TSC2102
+ help
+ Say Y here if you have an OMAP platform board
+ and want to use its TSC2102 audio chip.
- endmenu
+ To compile this driver as a module, choose M here: the module
+ will be called snd-omap-tsc2102.
+
+config SND_OMAP24XX_EAC
+ tristate "Audio driver for OMAP24xx EAC"
+ depends on SND
+ help
+ Audio driver for the Enhanced Audio Controller (EAC) found in TI's
+ OMAP24xx processors.
+
+ Currently this contains only low-level support functions for
+ initializing the EAC hardware, creating an ALSA sound card
+ instance for it, and registering mixer controls implemented by a
+ codec driver. The PCM stream is expected to be under DSP
+ co-processor control.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-omap24xx-eac.
+
+endif # SND_ARM