From 0d66d3a7eab10f316e8c56b34b34e8f628cd9baa Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 1 Feb 2007 16:12:57 -0800 Subject: [PATCH] FB: Add framebuffer support from N800 tree Add framebuffer support from N800 tree Signed-off-by: Tony Lindgren --- drivers/video/omap/Kconfig | 7 + drivers/video/omap/Makefile | 1 + drivers/video/omap/blizzard.c | 1347 ++++++++++++++++++++++++++++++ drivers/video/omap/dispc.c | 6 +- drivers/video/omap/hwa742.c | 307 +++++-- drivers/video/omap/omapfb_main.c | 10 + drivers/video/omap/rfbi.c | 136 ++- drivers/video/omap/sossi.c | 236 +++++- 8 files changed, 1951 insertions(+), 99 deletions(-) create mode 100644 drivers/video/omap/blizzard.c diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig index 81256ae4a09..d84187faab3 100644 --- a/drivers/video/omap/Kconfig +++ b/drivers/video/omap/Kconfig @@ -18,6 +18,13 @@ config FB_OMAP_LCDC_HWA742 Say Y here if you want to have support for the external Epson HWA742 LCD controller. +config FB_OMAP_LCDC_BLIZZARD + bool "Epson Blizzard LCD controller support" + depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL + help + Say Y here if you want to have support for the external + Epson Blizzard LCD controller. + config FB_OMAP_MANUAL_UPDATE bool "Default to manual update mode" depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile index f4a10333f02..be258017799 100644 --- a/drivers/video/omap/Makefile +++ b/drivers/video/omap/Makefile @@ -13,6 +13,7 @@ objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o +objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o objs-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c new file mode 100644 index 00000000000..ee88063b7e4 --- /dev/null +++ b/drivers/video/omap/blizzard.c @@ -0,0 +1,1347 @@ +/* + * File: drivers/video/omap/blizzard.c + * + * Epson Blizzard LCD controller driver + * + * Copyright (C) 2004-2005 Nokia Corporation + * Authors: Juha Yrjola + * Imre Deak + * YUV support: Jussi Laako + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/blizzard.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME			"blizzard"
+
+#define BLIZZARD_REV_CODE		0x00
+#define BLIZZARD_CONFIG			0x02
+#define BLIZZARD_PLL_DIV		0x04
+#define BLIZZARD_PLL_LOCK_RANGE		0x06
+#define BLIZZARD_PLL_CLOCK_SYNTH_0	0x08
+#define BLIZZARD_PLL_CLOCK_SYNTH_1	0x0a
+#define BLIZZARD_PLL_MODE		0x0c
+#define BLIZZARD_CLK_SRC		0x0e
+#define BLIZZARD_MEM_BANK0_ACTIVATE	0x10
+#define BLIZZARD_MEM_BANK0_STATUS	0x14
+#define BLIZZARD_HDISP			0x2a
+#define BLIZZARD_HNDP			0x2c
+#define BLIZZARD_VDISP0			0x2e
+#define BLIZZARD_VDISP1			0x30
+#define BLIZZARD_VNDP			0x32
+#define BLIZZARD_HSW			0x34
+#define BLIZZARD_VSW			0x38
+#define BLIZZARD_DISPLAY_MODE		0x68
+#define BLIZZARD_INPUT_WIN_X_START_0	0x6c
+#define BLIZZARD_DATA_SOURCE_SELECT	0x8e
+#define BLIZZARD_DISP_MEM_DATA_PORT	0x90
+#define BLIZZARD_DISP_MEM_READ_ADDR0	0x92
+#define BLIZZARD_POWER_SAVE		0xE6
+#define BLIZZARD_NDISP_CTRL_STATUS	0xE8
+
+/* Data source select */
+/* For S1D13745 */
+#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND	0x00
+#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE	0x01
+/* For S1D13744 */
+#define BLIZZARD_SRC_WRITE_LCD			0x00
+#define BLIZZARD_SRC_BLT_LCD			0x06
+
+#define BLIZZARD_VERSION_S1D13745	0x01	/* Hailstorm */
+#define BLIZZARD_VERSION_S1D13744	0x02	/* Blizzard */
+
+#define BLIZZARD_AUTO_UPDATE_TIME	(HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE			24
+#define IRQ_REQ_POOL_SIZE		4
+
+#define REQ_FROM_IRQ_POOL		0x01
+
+#define REQ_COMPLETE			0
+#define REQ_PENDING			1
+
+struct update_param {
+	int plane;
+	int x, y, width, height;
+	int color_mode;
+	int flags;
+};
+
+struct blizzard_request {
+	struct list_head entry;
+	unsigned int flags;
+
+	int (*handler)(struct blizzard_request *req);
+	void (*complete)(void *data);
+	void *complete_data;
+
+	union {
+		struct update_param update;
+		struct completion *sync;
+	} par;
+};
+
+struct plane_info {
+	unsigned long offset;
+	int pos_x, pos_y;
+	int width, height;
+	int out_width, out_height;
+	int scr_width;
+	int color_mode;
+	int bpp;
+};
+
+struct blizzard_struct {
+	enum omapfb_update_mode update_mode;
+	enum omapfb_update_mode update_mode_before_suspend;
+
+	struct timer_list auto_update_timer;
+	int stop_auto_update;
+	struct omapfb_update_window auto_update_window;
+	int enabled_planes;
+	int vid_nonstd_color;
+	int vid_scaled;
+	int screen_width;
+	int screen_height;
+	unsigned te_connected:1;
+	unsigned vsync_only:1;
+
+	struct plane_info plane[OMAPFB_PLANE_NUM];
+
+	struct blizzard_request req_pool[REQ_POOL_SIZE];
+	struct list_head pending_req_list;
+	struct list_head free_req_list;
+	struct semaphore req_sema;
+	spinlock_t req_lock;
+
+	unsigned long sys_ck_rate;
+	struct extif_timings reg_timings, lut_timings;
+
+	u32 max_transmit_size;
+	u32 extif_clk_period;
+	int extif_clk_div;
+	unsigned long pix_tx_time;
+	unsigned long line_upd_time;
+
+	struct omapfb_device *fbdev;
+	struct lcd_ctrl_extif *extif;
+	struct lcd_ctrl *int_ctrl;
+
+	void (*power_up)(struct device *dev);
+	void (*power_down)(struct device *dev);
+
+	int version;
+} blizzard;
+
+struct lcd_ctrl blizzard_ctrl;
+
+static u8 blizzard_read_reg(u8 reg)
+{
+	u8 data;
+
+	blizzard.extif->set_bits_per_cycle(8);
+	blizzard.extif->write_command(&reg, 1);
+	blizzard.extif->read_data(&data, 1);
+
+	return data;
+}
+
+static void blizzard_write_reg(u8 reg, u8 val)
+{
+	blizzard.extif->set_bits_per_cycle(8);
+	blizzard.extif->write_command(&reg, 1);
+	blizzard.extif->write_data(&val, 1);
+}
+
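+/*
+ * Summary of the register access helpers above: Blizzard configuration
+ * registers are reached through the external interface using 8-bit
+ * cycles.  A one-byte command selects the register index, after which a
+ * single data byte is read from or written to that register.
+ */
+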
+static void blizzard_restart_sdram(void) +{ + unsigned long tmo; + + blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0); + udelay(50); + blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1); + tmo = jiffies + msecs_to_jiffies(200); + while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) { + if (time_after(jiffies, tmo)) { + dev_err(blizzard.fbdev->dev, + "s1d1374x: SDRAM not ready"); + break; + } + msleep(1); + } +} + +static void blizzard_stop_sdram(void) +{ + blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0); +} + +/* Wait until the last window was completely written into the controllers + * SDRAM and we can start transferring the next window. + */ +static void blizzard_wait_line_buffer(void) +{ + unsigned long tmo = jiffies + msecs_to_jiffies(30); + + while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) { + if (time_after(jiffies, tmo)) { + if (printk_ratelimit()) + dev_err(blizzard.fbdev->dev, + "s1d1374x: line buffer not ready\n"); + break; + } + } +} + +static void set_window_regs(int x_start, int y_start, int x_end, int y_end) +{ + u8 tmp[8]; + u8 cmd; + + x_end--; + y_end--; + tmp[0] = x_start; + tmp[1] = x_start >> 8; + tmp[2] = y_start; + tmp[3] = y_start >> 8; + tmp[4] = x_end; + tmp[5] = x_end >> 8; + tmp[6] = y_end; + tmp[7] = y_end >> 8; + + blizzard.extif->set_bits_per_cycle(8); + cmd = BLIZZARD_INPUT_WIN_X_START_0; + blizzard.extif->write_command(&cmd, 1); + blizzard.extif->write_data(tmp, 8); + blizzard.extif->write_data(tmp, 8); + + tmp[0] = 0x01; + tmp[1] = blizzard.version == BLIZZARD_VERSION_S1D13744 ? + BLIZZARD_SRC_WRITE_LCD : + BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE; + blizzard.extif->write_data(tmp, 2); +} + +static void enable_tearsync(int y, int width, int height, int screen_height, + int force_vsync) +{ + u8 b; + + b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS); + b |= 1 << 3; + blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b); + + if (likely(blizzard.vsync_only || force_vsync)) { + blizzard.extif->enable_tearsync(1, 0); + return; + } + + if (width * blizzard.pix_tx_time < blizzard.line_upd_time) { + blizzard.extif->enable_tearsync(1, 0); + return; + } + + if ((width * blizzard.pix_tx_time / 1000) * height < + (y + height) * (blizzard.line_upd_time / 1000)) { + blizzard.extif->enable_tearsync(1, 0); + return; + } + + blizzard.extif->enable_tearsync(1, y + 1); +} + +static void disable_tearsync(void) +{ + u8 b; + + blizzard.extif->enable_tearsync(0, 0); + b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS); + b &= ~(1 << 3); + blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b); + b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS); +} + +static inline void set_extif_timings(const struct extif_timings *t); + +static inline struct blizzard_request *alloc_req(void) +{ + unsigned long flags; + struct blizzard_request *req; + int req_flags = 0; + + if (!in_interrupt()) + down(&blizzard.req_sema); + else + req_flags = REQ_FROM_IRQ_POOL; + + spin_lock_irqsave(&blizzard.req_lock, flags); + BUG_ON(list_empty(&blizzard.free_req_list)); + req = list_entry(blizzard.free_req_list.next, + struct blizzard_request, entry); + list_del(&req->entry); + spin_unlock_irqrestore(&blizzard.req_lock, flags); + + INIT_LIST_HEAD(&req->entry); + req->flags = req_flags; + + return req; +} + +static inline void free_req(struct blizzard_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&blizzard.req_lock, flags); + + list_del(&req->entry); + list_add(&req->entry, &blizzard.free_req_list); + if (!(req->flags & REQ_FROM_IRQ_POOL)) + up(&blizzard.req_sema); + + 
spin_unlock_irqrestore(&blizzard.req_lock, flags); +} + +static void process_pending_requests(void) +{ + unsigned long flags; + + spin_lock_irqsave(&blizzard.req_lock, flags); + + while (!list_empty(&blizzard.pending_req_list)) { + struct blizzard_request *req; + void (*complete)(void *); + void *complete_data; + + req = list_entry(blizzard.pending_req_list.next, + struct blizzard_request, entry); + spin_unlock_irqrestore(&blizzard.req_lock, flags); + + if (req->handler(req) == REQ_PENDING) + return; + + complete = req->complete; + complete_data = req->complete_data; + free_req(req); + + if (complete) + complete(complete_data); + + spin_lock_irqsave(&blizzard.req_lock, flags); + } + + spin_unlock_irqrestore(&blizzard.req_lock, flags); +} + +static void submit_req_list(struct list_head *head) +{ + unsigned long flags; + int process = 1; + + spin_lock_irqsave(&blizzard.req_lock, flags); + if (likely(!list_empty(&blizzard.pending_req_list))) + process = 0; + list_splice_init(head, blizzard.pending_req_list.prev); + spin_unlock_irqrestore(&blizzard.req_lock, flags); + + if (process) + process_pending_requests(); +} + +static void request_complete(void *data) +{ + struct blizzard_request *req = (struct blizzard_request *)data; + void (*complete)(void *); + void *complete_data; + + complete = req->complete; + complete_data = req->complete_data; + + free_req(req); + + if (complete) + complete(complete_data); + + process_pending_requests(); +} + + +static int do_full_screen_update(struct blizzard_request *req) +{ + int i; + int flags; + + for (i = 0; i < 3; i++) { + struct plane_info *p = &blizzard.plane[i]; + if (!(blizzard.enabled_planes & (1 << i))) { + blizzard.int_ctrl->enable_plane(i, 0); + continue; + } + dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n", + p->width, p->height); + blizzard.int_ctrl->setup_plane(i, + OMAPFB_CHANNEL_OUT_LCD, p->offset, + p->scr_width, p->pos_x, p->pos_y, + p->width, p->height, + p->color_mode); + blizzard.int_ctrl->enable_plane(i, 1); + } + + dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n", + blizzard.screen_width, blizzard.screen_height); + blizzard_wait_line_buffer(); + flags = req->par.update.flags; + if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC) + enable_tearsync(0, blizzard.screen_width, + blizzard.screen_height, + blizzard.screen_height, + flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC); + else + disable_tearsync(); + + set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height); + + blizzard.extif->set_bits_per_cycle(16); + /* set_window_regs has left the register index at the right + * place, so no need to set it here. + */ + blizzard.extif->transfer_area(blizzard.screen_width, + blizzard.screen_height, + request_complete, req); + return REQ_PENDING; +} + +/* Setup all planes with an overlapping area with the update window. */ +static int do_partial_update(struct blizzard_request *req, int plane, + int x, int y, int w, int h) +{ + int i; + int gx1, gy1, gx2, gy2; + int flags; + + /* Global coordinates, relative to pixel 0,0 of the LCD */ + gx1 = x + blizzard.plane[plane].pos_x; + gy1 = y + blizzard.plane[plane].pos_y; + gx2 = gx1 + w; + gy2 = gy1 + h; + + for (i = 0; i < OMAPFB_PLANE_NUM; i++) { + struct plane_info *p = &blizzard.plane[i]; + int px1, py1; + int px2, py2; + int pw, ph; + int pposx, pposy; + unsigned long offset; + + if (!(blizzard.enabled_planes & (1 << i))) { + blizzard.int_ctrl->enable_plane(i, 0); + continue; + } + /* Plane coordinates */ + if (i == plane) { + /* Plane in which we are doing the update. 
+ * Local coordinates are the one in the update + * request. + */ + px1 = x; + py1 = y; + px2 = x + w; + py2 = y + h; + pposx = 0; + pposy = 0; + } else { + /* Check if this plane has an overlapping part */ + px1 = gx1 - p->pos_x; + py1 = gy1 - p->pos_y; + px2 = gx2 - p->pos_x; + py2 = gy2 - p->pos_y; + if (px1 >= p->width || py1 >= p->height || + px2 <= 0 || py2 <= 0) { + blizzard.int_ctrl->enable_plane(i, 0); + continue; + } + /* Calculate the coordinates for the overlapping + * part in the plane's local coordinates. + */ + pposx = -px1; + pposy = -py1; + if (px1 < 0) + px1 = 0; + if (py1 < 0) + py1 = 0; + if (px2 > p->width) + px2 = p->width; + if (py2 > p->height) + py2 = p->height; + if (pposx < 0) + pposx = 0; + if (pposy < 0) + pposy = 0; + } + pw = px2 - px1; + ph = py2 - py1; + offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8; +#ifdef VERBOSE + dev_dbg(blizzard.fbdev->dev, + "plane %d offset %#08lx pposx %d pposy %d " + "px1 %d py1 %d pw %d ph %d\n", + i, offset, pposx, pposy, px1, py1, pw, ph); +#endif + blizzard.int_ctrl->setup_plane(i, + OMAPFB_CHANNEL_OUT_LCD, offset, + p->scr_width, + pposx, pposy, pw, ph, + p->color_mode); + + blizzard.int_ctrl->enable_plane(i, 1); + } + + blizzard_wait_line_buffer(); + flags = req->par.update.flags; + if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC) + enable_tearsync(gy1, gx2 - gx1, gy2 - gy1, + blizzard.screen_height, + flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC); + else + disable_tearsync(); + + set_window_regs(gx1, gy1, gx2, gy2); + + blizzard.extif->set_bits_per_cycle(16); + /* set_window_regs has left the register index at the right + * place, so no need to set it here. + */ + blizzard.extif->transfer_area(w, h, request_complete, req); + + return REQ_PENDING; +} + +static int send_frame_handler(struct blizzard_request *req) +{ + struct update_param *par = &req->par.update; + int plane = par->plane; + +#ifdef VERBOSE + dev_dbg(blizzard.fbdev->dev, + "send_frame: x %d y %d w %d h %d color_mode %04x flags %04x " + "planes %01x\n", + par->x, par->y, par->width, par->height, + par->color_mode, par->flags, blizzard.enabled_planes); +#endif + + if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) || + (blizzard.enabled_planes & blizzard.vid_scaled)) + return do_full_screen_update(req); + + return do_partial_update(req, plane, par->x, par->y, + par->width, par->height); +} + +static void send_frame_complete(void *data) +{ +} + +#define ADD_PREQ(_x, _y, _w, _h) do { \ + req = alloc_req(); \ + req->handler = send_frame_handler; \ + req->complete = send_frame_complete; \ + req->par.update.plane = plane; \ + req->par.update.x = _x; \ + req->par.update.y = _y; \ + req->par.update.width = _w; \ + req->par.update.height = _h; \ + req->par.update.color_mode = color_mode;\ + req->par.update.flags = flags; \ + list_add_tail(&req->entry, req_head); \ +} while(0) + +static void create_req_list(int plane, + struct omapfb_update_window *win, + struct list_head *req_head) +{ + struct blizzard_request *req; + int x = (win->x & ~0x07); + int y = (win->y & ~0x07); + int width = ((win->x + win->width + 7) & ~0x07) - x; + int height = ((win->y + win->height + 7) & ~0x07) - y; + int color_mode; + int flags; + + flags = win->format & ~OMAPFB_FORMAT_MASK; + color_mode = win->format & OMAPFB_FORMAT_MASK; + + if (x & 1) { + ADD_PREQ(x, y, 1, height); + width--; + x++; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; + } + if (width & ~1) { + unsigned int xspan = width & ~1; + unsigned int ystart = y; + unsigned int yspan = height; + + if (xspan * height * 2 > 
blizzard.max_transmit_size) { + yspan = blizzard.max_transmit_size / (xspan * 2); + ADD_PREQ(x, ystart, xspan, yspan); + ystart += yspan; + yspan = height - yspan; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; + } + + ADD_PREQ(x, ystart, xspan, yspan); + x += xspan; + width -= xspan; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; + } + if (width) + ADD_PREQ(x, y, 1, height); +} + +static void auto_update_complete(void *data) +{ + if (!blizzard.stop_auto_update) + mod_timer(&blizzard.auto_update_timer, + jiffies + BLIZZARD_AUTO_UPDATE_TIME); +} + +static void blizzard_update_window_auto(unsigned long arg) +{ + LIST_HEAD(req_list); + struct blizzard_request *last; + struct omapfb_plane_struct *plane; + + plane = blizzard.fbdev->fb_info[0]->par; + create_req_list(plane->idx, + &blizzard.auto_update_window, &req_list); + last = list_entry(req_list.prev, struct blizzard_request, entry); + + last->complete = auto_update_complete; + last->complete_data = NULL; + + submit_req_list(&req_list); +} + +int blizzard_update_window_async(struct fb_info *fbi, + struct omapfb_update_window *win, + void (*complete_callback)(void *arg), + void *complete_callback_data) +{ + LIST_HEAD(req_list); + struct blizzard_request *last; + struct omapfb_plane_struct *plane = fbi->par; + + if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE)) + return -EINVAL; + if (unlikely(!blizzard.te_connected && + (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC))) + return -EINVAL; + + create_req_list(plane->idx, win, &req_list); + last = list_entry(req_list.prev, struct blizzard_request, entry); + + last->complete = complete_callback; + last->complete_data = (void *)complete_callback_data; + + submit_req_list(&req_list); + + return 0; +} +EXPORT_SYMBOL(blizzard_update_window_async); + +static int blizzard_setup_plane(int plane, int channel_out, + unsigned long offset, int screen_width, + int pos_x, int pos_y, int width, int height, + int color_mode) +{ + struct plane_info *p; + +#ifdef VERBOSE + dev_dbg(blizzard.fbdev->dev, + "plane %d ch_out %d offset %#08lx scr_width %d " + "pos_x %d pos_y %d width %d height %d color_mode %d\n", + plane, channel_out, offset, screen_width, + pos_x, pos_y, width, height, color_mode); +#endif + if ((unsigned)plane > OMAPFB_PLANE_NUM) + return -EINVAL; + p = &blizzard.plane[plane]; + + switch (color_mode) { + case OMAPFB_COLOR_YUV422: + case OMAPFB_COLOR_YUY422: + p->bpp = 16; + blizzard.vid_nonstd_color |= 1 << plane; + break; + case OMAPFB_COLOR_YUV420: + p->bpp = 12; + blizzard.vid_nonstd_color |= 1 << plane; + break; + case OMAPFB_COLOR_RGB565: + p->bpp = 16; + blizzard.vid_nonstd_color &= ~(1 << plane); + break; + default: + return -EINVAL; + } + + p->offset = offset; + p->pos_x = pos_x; + p->pos_y = pos_y; + p->width = width; + p->height = height; + p->scr_width = screen_width; + if (!p->out_width) + p->out_width = width; + if (!p->out_height) + p->out_height = height; + + p->color_mode = color_mode; + + return 0; +} + +static int blizzard_set_scale(int plane, int orig_w, int orig_h, + int out_w, int out_h) +{ + struct plane_info *p = &blizzard.plane[plane]; + int r; + + dev_dbg(blizzard.fbdev->dev, + "plane %d orig_w %d orig_h %d out_w %d out_h %d\n", + plane, orig_w, orig_h, out_w, out_h); + if ((unsigned)plane > OMAPFB_PLANE_NUM) + return -ENODEV; + + r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h); + if (r < 0) + return r; + + p->width = orig_w; + p->height = orig_h; + p->out_width = out_w; + p->out_height = out_h; + if (orig_w == out_w && orig_h == out_h) + blizzard.vid_scaled 
&= ~(1 << plane);
+	else
+		blizzard.vid_scaled |= 1 << plane;
+
+	return 0;
+}
+
+static int blizzard_enable_plane(int plane, int enable)
+{
+	if (enable)
+		blizzard.enabled_planes |= 1 << plane;
+	else
+		blizzard.enabled_planes &= ~(1 << plane);
+
+	return 0;
+}
+
+static int sync_handler(struct blizzard_request *req)
+{
+	complete(req->par.sync);
+	return REQ_COMPLETE;
+}
+
+static void blizzard_sync(void)
+{
+	LIST_HEAD(req_list);
+	struct blizzard_request *req;
+	struct completion comp;
+
+	req = alloc_req();
+
+	req->handler = sync_handler;
+	req->complete = NULL;
+	init_completion(&comp);
+	req->par.sync = &comp;
+
+	list_add(&req->entry, &req_list);
+	submit_req_list(&req_list);
+
+	wait_for_completion(&comp);
+}
+
+static void blizzard_bind_client(struct omapfb_notifier_block *nb)
+{
+	if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+	}
+}
+
+static int blizzard_set_update_mode(enum omapfb_update_mode mode)
+{
+	if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
+		     mode != OMAPFB_AUTO_UPDATE &&
+		     mode != OMAPFB_UPDATE_DISABLED))
+		return -EINVAL;
+
+	if (mode == blizzard.update_mode)
+		return 0;
+
+	dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
+		 mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+		 (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+	switch (blizzard.update_mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		blizzard.stop_auto_update = 1;
+		del_timer_sync(&blizzard.auto_update_timer);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	blizzard.update_mode = mode;
+	blizzard_sync();
+	blizzard.stop_auto_update = 0;
+
+	switch (mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		blizzard_update_window_auto(0);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	return 0;
+}
+
+static enum omapfb_update_mode blizzard_get_update_mode(void)
+{
+	return blizzard.update_mode;
+}
+
+static inline void set_extif_timings(const struct extif_timings *t)
+{
+	blizzard.extif->set_timings(t);
+}
+
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+	int bus_tick = blizzard.extif_clk_period * div;
+	return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+	struct extif_timings *t;
+	unsigned long systim;
+
+	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+	 * AccessTime 2 ns + 12.2 ns (regs),
+	 * WEOffTime = WEOnTime + 1 ns,
+	 * REOffTime = REOnTime + 12 ns (regs),
+	 * CSOffTime = REOffTime + 1 ns
+	 * ReadCycle = 2ns + 2*SYSCLK (regs),
+	 * WriteCycle = 2*SYSCLK + 2 ns,
+	 * CSPulseWidth = 10 ns */
+
+	systim = 1000000000 / (sysclk / 1000);
+	dev_dbg(blizzard.fbdev->dev,
+		"Blizzard systim %lu ps extif_clk_period %u div %d\n",
+		systim, blizzard.extif_clk_period, div);
+
+	t = &blizzard.reg_timings;
+	memset(t, 0, sizeof(*t));
+
+	t->clk_div = div;
+
+	t->cs_on_time = 0;
+	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+	t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
+	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if
(t->we_cycle_time < t->we_off_time) + t->we_cycle_time = t->we_off_time; + t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div); + if (t->re_cycle_time < t->re_off_time) + t->re_cycle_time = t->re_off_time; + t->cs_pulse_width = 0; + + dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n", + t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); + dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n", + t->we_on_time, t->we_off_time, t->re_cycle_time, + t->we_cycle_time); + dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n", + t->access_time, t->cs_pulse_width); + + return blizzard.extif->convert_timings(t); +} + +static int calc_lut_timing(unsigned long sysclk, int div) +{ + struct extif_timings *t; + unsigned long systim; + + /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns, + * AccessTime 2 ns + 4 * SYSCLK + 26 (lut), + * WEOffTime = WEOnTime + 1 ns, + * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut), + * CSOffTime = REOffTime + 1 ns + * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut), + * WriteCycle = 2*SYSCLK + 2 ns, + * CSPulseWidth = 10 ns */ + + systim = 1000000000 / (sysclk / 1000); + dev_dbg(blizzard.fbdev->dev, + "Blizzard systim %lu ps extif_clk_period %u div %d\n", + systim, blizzard.extif_clk_period, div); + + t = &blizzard.lut_timings; + memset(t, 0, sizeof(*t)); + + t->clk_div = div; + + t->cs_on_time = 0; + t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); + t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); + t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim + + 26000, div); + t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div); + t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim + + 26000, div); + t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div); + t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div); + if (t->we_cycle_time < t->we_off_time) + t->we_cycle_time = t->we_off_time; + t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div); + if (t->re_cycle_time < t->re_off_time) + t->re_cycle_time = t->re_off_time; + t->cs_pulse_width = 0; + + dev_dbg(blizzard.fbdev->dev, + "[lut]cson %d csoff %d reon %d reoff %d\n", + t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); + dev_dbg(blizzard.fbdev->dev, + "[lut]weon %d weoff %d recyc %d wecyc %d\n", + t->we_on_time, t->we_off_time, t->re_cycle_time, + t->we_cycle_time); + dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n", + t->access_time, t->cs_pulse_width); + + return blizzard.extif->convert_timings(t); +} + +static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div) +{ + int max_clk_div; + int div; + + blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div); + for (div = 1; div <= max_clk_div; div++) { + if (calc_reg_timing(sysclk, div) == 0) + break; + } + if (div > max_clk_div) { + dev_dbg(blizzard.fbdev->dev, "reg timing failed\n"); + goto err; + } + *extif_mem_div = div; + + for (div = 1; div <= max_clk_div; div++) { + if (calc_lut_timing(sysclk, div) == 0) + break; + } + + if (div > max_clk_div) + goto err; + + blizzard.extif_clk_div = div; + + return 0; +err: + dev_err(blizzard.fbdev->dev, "can't setup timings\n"); + return -1; +} + +static void calc_blizzard_clk_rates(unsigned long ext_clk, + unsigned long *sys_clk, unsigned long *pix_clk) +{ + int pix_clk_src; + int sys_div = 0, sys_mul = 0; + int pix_div; + + pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC); + pix_div = ((pix_clk_src >> 3) & 0x1f) + 
1; + if ((pix_clk_src & (0x3 << 1)) == 0) { + /* Source is the PLL */ + sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1; + sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0); + sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1) + & 0x0f) << 11); + *sys_clk = ext_clk * sys_mul / sys_div; + } else /* else source is ext clk, or oscillator */ + *sys_clk = ext_clk; + + *pix_clk = *sys_clk / pix_div; /* HZ */ + dev_dbg(blizzard.fbdev->dev, + "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n", + ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul); + dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n", + *sys_clk, *pix_clk); +} + +static int setup_tearsync(unsigned long pix_clk, int extif_div) +{ + int hdisp, vdisp; + int hndp, vndp; + int hsw, vsw; + int hs, vs; + int hs_pol_inv, vs_pol_inv; + int use_hsvs, use_ndp; + u8 b; + + hsw = blizzard_read_reg(BLIZZARD_HSW); + vsw = blizzard_read_reg(BLIZZARD_VSW); + hs_pol_inv = !(hsw & 0x80); + vs_pol_inv = !(vsw & 0x80); + hsw = hsw & 0x7f; + vsw = vsw & 0x3f; + + hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8; + vdisp = blizzard_read_reg(BLIZZARD_VDISP0) + + ((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8); + + hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f; + vndp = blizzard_read_reg(BLIZZARD_VNDP); + + /* time to transfer one pixel (16bpp) in ps */ + blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time; + if (blizzard.extif->get_max_tx_rate != NULL) { + /* The external interface might have a rate limitation, + * if so, we have to maximize our transfer rate. + */ + unsigned long min_tx_time; + unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate(); + + dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n", + max_tx_rate); + min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */ + if (blizzard.pix_tx_time < min_tx_time) + blizzard.pix_tx_time = min_tx_time; + } + + /* time to update one line in ps */ + blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000); + blizzard.line_upd_time *= 1000; + if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time) + /* transfer speed too low, we might have to use both + * HS and VS */ + use_hsvs = 1; + else + /* decent transfer speed, we'll always use only VS */ + use_hsvs = 0; + + if (use_hsvs && (hs_pol_inv || vs_pol_inv)) { + /* HS or'ed with VS doesn't work, use the active high + * TE signal based on HNDP / VNDP */ + use_ndp = 1; + hs_pol_inv = 0; + vs_pol_inv = 0; + hs = hndp; + vs = vndp; + } else { + /* Use HS or'ed with VS as a TE signal if both are needed + * or VNDP if only vsync is needed. */ + use_ndp = 0; + hs = hsw; + vs = vsw; + if (!use_hsvs) { + hs_pol_inv = 0; + vs_pol_inv = 0; + } + } + + hs = hs * 1000000 / (pix_clk / 1000); /* ps */ + hs *= 1000; + + vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */ + vs *= 1000; + + if (vs <= hs) + return -EDOM; + /* set VS to 120% of HS to minimize VS detection time */ + vs = hs * 12 / 10; + /* minimize HS too */ + if (hs > 10000) + hs = 10000; + + b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS); + b &= ~0x3; + b |= use_hsvs ? 1 : 0; + b |= (use_ndp && use_hsvs) ? 
0 : 2; + blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b); + + blizzard.vsync_only = !use_hsvs; + + dev_dbg(blizzard.fbdev->dev, + "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n", + pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time); + dev_dbg(blizzard.fbdev->dev, + "hs %d ps vs %d ps mode %d vsync_only %d\n", + hs, vs, b & 0x3, !use_hsvs); + + return blizzard.extif->setup_tearsync(1, hs, vs, + hs_pol_inv, vs_pol_inv, + extif_div); +} + +static unsigned long blizzard_get_caps(void) +{ + unsigned long caps; + + caps = OMAPFB_CAPS_MANUAL_UPDATE; + if (blizzard.te_connected) + caps |= OMAPFB_CAPS_TEARSYNC; + return caps; +} + +static void blizzard_suspend(void) +{ + u32 l; + unsigned long tmo; + + blizzard.update_mode_before_suspend = blizzard.update_mode; + /* the following will disable clocks as well */ + blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED); + + blizzard_stop_sdram(); + + l = blizzard_read_reg(BLIZZARD_POWER_SAVE); + /* Standby, Sleep. We assume we use an external clock. */ + l |= 0x03; + blizzard_write_reg(BLIZZARD_POWER_SAVE, l); + + tmo = jiffies + msecs_to_jiffies(100); + while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) { + if (time_after(jiffies, tmo)) { + dev_err(blizzard.fbdev->dev, + "s1d1374x: sleep timeout, stopping PLL manually\n"); + l = blizzard_read_reg(BLIZZARD_PLL_MODE); + l &= ~0x03; + /* Disable PLL, counter function */ + l |= 0x2; + blizzard_write_reg(BLIZZARD_PLL_MODE, l); + break; + } + msleep(1); + } + + if (blizzard.power_down != NULL) + blizzard.power_down(blizzard.fbdev->dev); +} + +static void blizzard_resume(void) +{ + u32 l; + + if (blizzard.power_up != NULL) + blizzard.power_up(blizzard.fbdev->dev); + + l = blizzard_read_reg(BLIZZARD_POWER_SAVE); + /* Standby, Sleep */ + l &= ~0x03; + blizzard_write_reg(BLIZZARD_POWER_SAVE, l); + + l = blizzard_read_reg(BLIZZARD_PLL_MODE); + l &= ~0x03; + /* Enable PLL, counter function */ + l |= 0x1; + blizzard_write_reg(BLIZZARD_PLL_MODE, l); + + while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7))) + msleep(1); + blizzard_restart_sdram(); + /* Enable display */ + blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01); + + /* the following will enable clocks as necessary */ + blizzard_set_update_mode(blizzard.update_mode_before_suspend); +} + +static int blizzard_init(struct omapfb_device *fbdev, int ext_mode, + struct omapfb_mem_desc *req_vram) +{ + int r = 0, i; + u8 rev, conf; + unsigned long ext_clk; + int extif_div; + unsigned long sys_clk, pix_clk; + struct omapfb_platform_data *omapfb_conf; + struct blizzard_platform_data *ctrl_conf; + + blizzard.fbdev = fbdev; + + BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl); + + blizzard.fbdev = fbdev; + blizzard.extif = fbdev->ext_if; + blizzard.int_ctrl = fbdev->int_ctrl; + + omapfb_conf = fbdev->dev->platform_data; + ctrl_conf = omapfb_conf->ctrl_platform_data; + if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) { + dev_err(fbdev->dev, "s1d1374x: missing platform data\n"); + r = -ENOENT; + goto err1; + } + + blizzard.power_down = ctrl_conf->power_down; + blizzard.power_up = ctrl_conf->power_up; + + spin_lock_init(&blizzard.req_lock); + + if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0) + goto err1; + + if ((r = blizzard.extif->init(fbdev)) < 0) + goto err2; + + blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key; + blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key; + + ext_clk = ctrl_conf->get_clock_rate(fbdev->dev); + if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0) + goto err3; + + 
set_extif_timings(&blizzard.reg_timings); + + if (blizzard.power_up != NULL) + blizzard.power_up(fbdev->dev); + + calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk); + + if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0) + goto err3; + set_extif_timings(&blizzard.reg_timings); + + if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) { + dev_err(fbdev->dev, + "controller not initialized by the bootloader\n"); + r = -ENODEV; + goto err3; + } + + if (ctrl_conf->te_connected) { + if ((r = setup_tearsync(pix_clk, extif_div)) < 0) + goto err3; + blizzard.te_connected = 1; + } + + rev = blizzard_read_reg(BLIZZARD_REV_CODE); + conf = blizzard_read_reg(BLIZZARD_CONFIG); + + switch (rev & 0xfc) { + case 0x9c: + blizzard.version = BLIZZARD_VERSION_S1D13744; + pr_info("omapfb: s1d13744 LCD controller rev %d " + "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07); + break; + case 0xa4: + blizzard.version = BLIZZARD_VERSION_S1D13745; + pr_info("omapfb: s1d13745 LCD controller rev %d " + "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07); + break; + default: + dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n", + rev); + r = -ENODEV; + goto err3; + } + + blizzard.max_transmit_size = blizzard.extif->max_transmit_size; + + blizzard.update_mode = OMAPFB_UPDATE_DISABLED; + + blizzard.auto_update_window.x = 0; + blizzard.auto_update_window.y = 0; + blizzard.auto_update_window.width = fbdev->panel->x_res; + blizzard.auto_update_window.height = fbdev->panel->y_res; + blizzard.auto_update_window.format = 0; + + blizzard.screen_width = fbdev->panel->x_res; + blizzard.screen_height = fbdev->panel->y_res; + + init_timer(&blizzard.auto_update_timer); + blizzard.auto_update_timer.function = blizzard_update_window_auto; + blizzard.auto_update_timer.data = 0; + + INIT_LIST_HEAD(&blizzard.free_req_list); + INIT_LIST_HEAD(&blizzard.pending_req_list); + for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++) + list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list); + BUG_ON(i <= IRQ_REQ_POOL_SIZE); + sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE); + + return 0; +err3: + if (blizzard.power_down != NULL) + blizzard.power_down(fbdev->dev); + blizzard.extif->cleanup(); +err2: + blizzard.int_ctrl->cleanup(); +err1: + return r; +} + +static void blizzard_cleanup(void) +{ + blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED); + blizzard.extif->cleanup(); + blizzard.int_ctrl->cleanup(); + if (blizzard.power_down != NULL) + blizzard.power_down(blizzard.fbdev->dev); +} + +struct lcd_ctrl blizzard_ctrl = { + .name = "blizzard", + .init = blizzard_init, + .cleanup = blizzard_cleanup, + .bind_client = blizzard_bind_client, + .get_caps = blizzard_get_caps, + .set_update_mode = blizzard_set_update_mode, + .get_update_mode = blizzard_get_update_mode, + .setup_plane = blizzard_setup_plane, + .set_scale = blizzard_set_scale, + .enable_plane = blizzard_enable_plane, + .update_window = blizzard_update_window_async, + .sync = blizzard_sync, + .suspend = blizzard_suspend, + .resume = blizzard_resume, +}; + diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index 1f00c289c01..c5acd3c7ca3 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -836,18 +836,14 @@ EXPORT_SYMBOL(omap_dispc_free_irq); static irqreturn_t omap_dispc_irq_handler(int irq, void *dev) { u32 stat = dispc_read_reg(DISPC_IRQSTATUS); - static int jabber; if (stat & DISPC_IRQ_FRAMEMASK) complete(&dispc.frame_done); if (stat & DISPC_IRQ_MASK_ERROR) { - if (jabber++ < 5) { + if (printk_ratelimit()) { 
dev_err(dispc.fbdev->dev, "irq error status %04x\n", stat & 0x7fff); - } else { - dev_err(dispc.fbdev->dev, "disable irq\n"); - dispc_write_reg(DISPC_IRQENABLE, 0); } } diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c index 5c961664cf9..67a7484e921 100644 --- a/drivers/video/omap/hwa742.c +++ b/drivers/video/omap/hwa742.c @@ -30,8 +30,7 @@ #include #include - -#define MODULE_NAME "hwa742" +#include #define HWA742_REV_CODE_REG 0x0 #define HWA742_CONFIG_REG 0x2 @@ -111,6 +110,8 @@ struct { struct timer_list auto_update_timer; int stop_auto_update; struct omapfb_update_window auto_update_window; + unsigned te_connected:1; + unsigned vsync_only:1; struct hwa742_request req_pool[REQ_POOL_SIZE]; struct list_head pending_req_list; @@ -118,7 +119,6 @@ struct { struct semaphore req_sema; spinlock_t req_lock; - struct clk *sys_ck; struct extif_timings reg_timings, lut_timings; int prev_color_mode; @@ -127,10 +127,16 @@ struct { u32 max_transmit_size; u32 extif_clk_period; + unsigned long pix_tx_time; + unsigned long line_upd_time; + struct omapfb_device *fbdev; struct lcd_ctrl_extif *extif; struct lcd_ctrl *int_ctrl; + + void (*power_up)(struct device *dev); + void (*power_down)(struct device *dev); } hwa742; struct lcd_ctrl hwa742_ctrl; @@ -196,6 +202,45 @@ static void set_format_regs(int conv, int transl, int flags) hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type); } +static void enable_tearsync(int y, int width, int height, int screen_height, + int force_vsync) +{ + u8 b; + + b = hwa742_read_reg(HWA742_NDP_CTRL); + b |= 1 << 2; + hwa742_write_reg(HWA742_NDP_CTRL, b); + + if (likely(hwa742.vsync_only || force_vsync)) { + hwa742.extif->enable_tearsync(1, 0); + return; + } + + if (width * hwa742.pix_tx_time < hwa742.line_upd_time) { + hwa742.extif->enable_tearsync(1, 0); + return; + } + + if ((width * hwa742.pix_tx_time / 1000) * height < + (y + height) * (hwa742.line_upd_time / 1000)) { + hwa742.extif->enable_tearsync(1, 0); + return; + } + + hwa742.extif->enable_tearsync(1, y + 1); +} + +static void disable_tearsync(void) +{ + u8 b; + + hwa742.extif->enable_tearsync(0, 0); + + b = hwa742_read_reg(HWA742_NDP_CTRL); + b &= ~(1 << 2); + hwa742_write_reg(HWA742_NDP_CTRL, b); +} + static inline struct hwa742_request *alloc_req(void) { unsigned long flags; @@ -309,7 +354,8 @@ static int send_frame_handler(struct hwa742_request *req) unsigned long offset; int color_mode = par->color_mode; int flags = par->flags; - int scr_width = 800; + int scr_width = hwa742.fbdev->panel->x_res; + int scr_height = hwa742.fbdev->panel->y_res; #ifdef VERBOSE dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d " @@ -343,6 +389,12 @@ static int send_frame_handler(struct hwa742_request *req) hwa742.prev_color_mode = color_mode; hwa742.prev_flags = flags; } + flags = req->par.update.flags; + if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC) + enable_tearsync(y, scr_width, h, scr_height, + flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC); + else + disable_tearsync(); set_window_regs(x, y, x + w, y + h); @@ -389,13 +441,14 @@ static void create_req_list(struct omapfb_update_window *win, int color_mode; int flags; - flags = win->format & OMAPFB_FORMAT_FLAG_DOUBLE; + flags = win->format & ~OMAPFB_FORMAT_MASK; color_mode = win->format & OMAPFB_FORMAT_MASK; if (x & 1) { ADD_PREQ(x, y, 1, height); width--; x++; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } if (width & ~1) { unsigned int xspan = width & ~1; @@ -407,11 +460,13 @@ static void create_req_list(struct omapfb_update_window *win, ADD_PREQ(x, ystart, xspan, 
yspan); ystart += yspan; yspan = height - yspan; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } ADD_PREQ(x, ystart, xspan, yspan); x += xspan; width -= xspan; + flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } if (width) ADD_PREQ(x, y, 1, height); @@ -452,7 +507,9 @@ int hwa742_update_window_async(struct fb_info *fbi, r = -EINVAL; goto out; } - if (unlikely(win->format & ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE))) { + if (unlikely(win->format & + ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE | + OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) { dev_dbg(hwa742.fbdev->dev, "invalid window flag"); r = -EINVAL; goto out; @@ -535,7 +592,7 @@ static int hwa742_set_update_mode(enum omapfb_update_mode mode) if (mode == hwa742.update_mode) return 0; - pr_info("omapfb: hwa742: setting update mode to %s\n", + dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n", mode == OMAPFB_UPDATE_DISABLED ? "disabled" : (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual")); @@ -677,7 +734,7 @@ static int calc_lut_timing(unsigned long sysclk, int div) return hwa742.extif->convert_timings(t); } -static int calc_extif_timings(unsigned long sysclk) +static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div) { int max_clk_div; int div; @@ -687,25 +744,163 @@ static int calc_extif_timings(unsigned long sysclk) if (calc_reg_timing(sysclk, div) == 0) break; } - if (div == max_clk_div) + if (div > max_clk_div) goto err; + *extif_mem_div = div; + for (div = 1; div < max_clk_div; div++) { if (calc_lut_timing(sysclk, div) == 0) break; } - if (div < max_clk_div) - return 0; + if (div > max_clk_div) + goto err; + + return 0; err: dev_err(hwa742.fbdev->dev, "can't setup timings\n"); return -1; } +static void calc_hwa742_clk_rates(unsigned long ext_clk, + unsigned long *sys_clk, unsigned long *pix_clk) +{ + int pix_clk_src; + int sys_div = 0, sys_mul = 0; + int pix_div; + + pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG); + pix_div = ((pix_clk_src >> 3) & 0x1f) + 1; + if ((pix_clk_src & (0x3 << 1)) == 0) { + /* Source is the PLL */ + sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1; + sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1; + *sys_clk = ext_clk * sys_mul / sys_div; + } else /* else source is ext clk, or oscillator */ + *sys_clk = ext_clk; + + *pix_clk = *sys_clk / pix_div; /* HZ */ + dev_dbg(hwa742.fbdev->dev, + "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n", + ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul); + dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n", + *sys_clk, *pix_clk); +} + + +static int setup_tearsync(unsigned long pix_clk, int extif_div) +{ + int hdisp, vdisp; + int hndp, vndp; + int hsw, vsw; + int hs, vs; + int hs_pol_inv, vs_pol_inv; + int use_hsvs, use_ndp; + u8 b; + + hsw = hwa742_read_reg(HWA742_HS_W_REG); + vsw = hwa742_read_reg(HWA742_VS_W_REG); + hs_pol_inv = !(hsw & 0x80); + vs_pol_inv = !(vsw & 0x80); + hsw = hsw & 0x7f; + vsw = vsw & 0x3f; + + hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8; + vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) + + ((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8); + + hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f; + vndp = hwa742_read_reg(HWA742_V_NDP_REG); + + /* time to transfer one pixel (16bpp) in ps */ + hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time; + if (hwa742.extif->get_max_tx_rate != NULL) { + /* The external interface might have a rate limitation, + * if so, we have to maximize our transfer rate. 
+ */ + unsigned long min_tx_time; + unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate(); + + dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n", + max_tx_rate); + min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */ + if (hwa742.pix_tx_time < min_tx_time) + hwa742.pix_tx_time = min_tx_time; + } + + /* time to update one line in ps */ + hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000); + hwa742.line_upd_time *= 1000; + if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time) + /* transfer speed too low, we might have to use both + * HS and VS */ + use_hsvs = 1; + else + /* decent transfer speed, we'll always use only VS */ + use_hsvs = 0; + + if (use_hsvs && (hs_pol_inv || vs_pol_inv)) { + /* HS or'ed with VS doesn't work, use the active high + * TE signal based on HNDP / VNDP */ + use_ndp = 1; + hs_pol_inv = 0; + vs_pol_inv = 0; + hs = hndp; + vs = vndp; + } else { + /* Use HS or'ed with VS as a TE signal if both are needed + * or VNDP if only vsync is needed. */ + use_ndp = 0; + hs = hsw; + vs = vsw; + if (!use_hsvs) { + hs_pol_inv = 0; + vs_pol_inv = 0; + } + } + + hs = hs * 1000000 / (pix_clk / 1000); /* ps */ + hs *= 1000; + + vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */ + vs *= 1000; + + if (vs <= hs) + return -EDOM; + /* set VS to 120% of HS to minimize VS detection time */ + vs = hs * 12 / 10; + /* minimize HS too */ + hs = 10000; + + b = hwa742_read_reg(HWA742_NDP_CTRL); + b &= ~0x3; + b |= use_hsvs ? 1 : 0; + b |= (use_ndp && use_hsvs) ? 0 : 2; + hwa742_write_reg(HWA742_NDP_CTRL, b); + + hwa742.vsync_only = !use_hsvs; + + dev_dbg(hwa742.fbdev->dev, + "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n", + pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time); + dev_dbg(hwa742.fbdev->dev, + "hs %d ps vs %d ps mode %d vsync_only %d\n", + hs, vs, (b & 0x3), !use_hsvs); + + return hwa742.extif->setup_tearsync(1, hs, vs, + hs_pol_inv, vs_pol_inv, extif_div); +} + static unsigned long hwa742_get_caps(void) { - return OMAPFB_CAPS_MANUAL_UPDATE; + unsigned long caps; + + caps = OMAPFB_CAPS_MANUAL_UPDATE; + if (hwa742.te_connected) + caps |= OMAPFB_CAPS_TEARSYNC; + return caps; } static void hwa742_suspend(void) @@ -714,13 +909,14 @@ static void hwa742_suspend(void) hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); /* Enable sleep mode */ hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1); - clk_disable(hwa742.sys_ck); + if (hwa742.power_down != NULL) + hwa742.power_down(hwa742.fbdev->dev); } static void hwa742_resume(void) { - if (clk_enable(hwa742.sys_ck) != 0) - dev_err(hwa742.fbdev->dev, "failed to enable SYS clock\n"); + if (hwa742.power_up != NULL) + hwa742.power_up(hwa742.fbdev->dev); /* Disable sleep mode */ hwa742_write_reg(HWA742_POWER_SAVE, 0); while (1) { @@ -738,22 +934,11 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, { int r = 0, i; u8 rev, conf; - unsigned long sysfreq; - int div, nd; - - hwa742.fbdev = fbdev; - - hwa742.sys_ck = clk_get(0, "bclk"); - if (IS_ERR(hwa742.sys_ck)) { - dev_err(fbdev->dev, "can't get SYS clock\n"); - return PTR_ERR(hwa742.sys_ck); - } - - if ((r = clk_enable(hwa742.sys_ck)) != 0) { - dev_err(fbdev->dev, "can't enable SYS clock\n"); - clk_put(hwa742.sys_ck); - return r; - } + unsigned long ext_clk; + unsigned long sys_clk, pix_clk; + int extif_mem_div; + struct omapfb_platform_data *omapfb_conf; + struct hwa742_platform_data *ctrl_conf; BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl); @@ -761,6 +946,18 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, hwa742.extif = 
fbdev->ext_if; hwa742.int_ctrl = fbdev->int_ctrl; + omapfb_conf = fbdev->dev->platform_data; + ctrl_conf = omapfb_conf->ctrl_platform_data; + + if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) { + dev_err(fbdev->dev, "HWA742: missing platform data\n"); + r = -ENOENT; + goto err1; + } + + hwa742.power_down = ctrl_conf->power_down; + hwa742.power_up = ctrl_conf->power_up; + spin_lock_init(&hwa742.req_lock); if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0) @@ -769,31 +966,40 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, if ((r = hwa742.extif->init(fbdev)) < 0) goto err2; - sysfreq = clk_get_rate(hwa742.sys_ck); - if ((r = calc_extif_timings(sysfreq)) < 0) + ext_clk = ctrl_conf->get_clock_rate(fbdev->dev); + if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0) goto err3; hwa742.extif->set_timings(&hwa742.reg_timings); + if (hwa742.power_up != NULL) + hwa742.power_up(fbdev->dev); - div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1; - - nd = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1; - - if ((r = calc_extif_timings(sysfreq / div * nd)) < 0) - goto err3; + calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk); + if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0) + goto err4; hwa742.extif->set_timings(&hwa742.reg_timings); rev = hwa742_read_reg(HWA742_REV_CODE_REG); if ((rev & 0xfc) != 0x80) { - dev_err(fbdev->dev, "invalid revision %02x\n", rev); + dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev); r = -ENODEV; - goto err3; + goto err4; } + if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) { - dev_err(hwa742.fbdev->dev, - "controller not initialized by the bootloader\n"); + dev_err(fbdev->dev, + "HWA742: controller not initialized by the bootloader\n"); r = -ENODEV; - goto err2; + goto err4; + } + + if (ctrl_conf->te_connected) { + if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) { + dev_err(hwa742.fbdev->dev, + "HWA742: can't setup tearing synchronization\n"); + goto err4; + } + hwa742.te_connected = 1; } hwa742.max_transmit_size = hwa742.extif->max_transmit_size; @@ -813,6 +1019,8 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, hwa742.prev_color_mode = -1; hwa742.prev_flags = 0; + hwa742.fbdev = fbdev; + INIT_LIST_HEAD(&hwa742.free_req_list); INIT_LIST_HEAD(&hwa742.pending_req_list); for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++) @@ -821,17 +1029,18 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE); conf = hwa742_read_reg(HWA742_CONFIG_REG); - pr_info("omapfb: hwa742 LCD controller rev. 
%d " + dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d " "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07); return 0; +err4: + if (hwa742.power_down != NULL) + hwa742.power_down(fbdev->dev); err3: hwa742.extif->cleanup(); err2: hwa742.int_ctrl->cleanup(); err1: - clk_disable(hwa742.sys_ck); - clk_put(hwa742.sys_ck); return r; } @@ -840,8 +1049,8 @@ static void hwa742_cleanup(void) hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); hwa742.extif->cleanup(); hwa742.int_ctrl->cleanup(); - clk_disable(hwa742.sys_ck); - clk_put(hwa742.sys_ck); + if (hwa742.power_down != NULL) + hwa742.power_down(hwa742.fbdev->dev); } struct lcd_ctrl hwa742_ctrl = { diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index a7cf760d44d..24709b76805 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -58,6 +58,7 @@ static struct caps_table_struct { const char *name; } omapfb_caps_table[] = { { OMAPFB_CAPS_MANUAL_UPDATE, "manual update" }, + { OMAPFB_CAPS_TEARSYNC, "tearing synchronization" }, { OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" }, }; @@ -69,6 +70,7 @@ static struct caps_table_struct { extern struct lcd_ctrl omap1_int_ctrl; extern struct lcd_ctrl omap2_int_ctrl; extern struct lcd_ctrl hwa742_ctrl; +extern struct lcd_ctrl blizzard_ctrl; static struct lcd_ctrl *ctrls[] = { #ifdef CONFIG_ARCH_OMAP1 @@ -80,6 +82,9 @@ static struct lcd_ctrl *ctrls[] = { #ifdef CONFIG_FB_OMAP_LCDC_HWA742 &hwa742_ctrl, #endif +#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD + &blizzard_ctrl, +#endif }; #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL @@ -954,6 +959,11 @@ static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, sizeof(p.color_key))) r = -EFAULT; break; + case OMAPFB_GET_CAPS: + p.caps = omapfb_get_caps(fbdev); + if (put_user(p.caps, (unsigned long __user *)arg)) + r = -EFAULT; + break; case OMAPFB_LCD_TEST: { int test_num; diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c index ebb317623b5..917f6161366 100644 --- a/drivers/video/omap/rfbi.c +++ b/drivers/video/omap/rfbi.c @@ -34,6 +34,9 @@ #include "dispc.h" +/* To work around an RFBI transfer rate limitation */ +#define OMAP_RFBI_RATE_LIMIT 1 + #define RFBI_BASE 0x48050800 #define RFBI_REVISION 0x0000 #define RFBI_SYSCONFIG 0x0010 @@ -67,6 +70,8 @@ static struct { struct omapfb_device *fbdev; struct clk *dss_ick; struct clk *dss1_fck; + unsigned tearsync_pin_cnt; + unsigned tearsync_mode; } rfbi; static inline void rfbi_write_reg(int idx, u32 val) @@ -181,6 +186,58 @@ static int ps_to_rfbi_ticks(int time, int div) return ret; } +#ifdef OMAP_RFBI_RATE_LIMIT +static unsigned long rfbi_get_max_tx_rate(void) +{ + unsigned long l4_rate, dss1_rate; + int min_l4_ticks = 0; + int i; + + /* According to TI this can't be calculated so make the + * adjustments for a couple of known frequencies and warn for + * others. 
+ */ + static const struct { + unsigned long l4_clk; /* HZ */ + unsigned long dss1_clk; /* HZ */ + unsigned long min_l4_ticks; + } ftab[] = { + { 55, 132, 7, }, /* 7.86 MPix/s */ + { 110, 110, 12, }, /* 9.16 MPix/s */ + { 110, 132, 10, }, /* 11 Mpix/s */ + { 120, 120, 10, }, /* 12 Mpix/s */ + { 133, 133, 10, }, /* 13.3 Mpix/s */ + }; + + l4_rate = rfbi.l4_khz / 1000; + dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000; + + for (i = 0; i < ARRAY_SIZE(ftab); i++) { + if (ftab[i].l4_clk == l4_rate && + ftab[i].dss1_clk == dss1_rate) { + min_l4_ticks = ftab[i].min_l4_ticks; + break; + } + } + if (i == ARRAY_SIZE(ftab)) { + /* Can't be sure, return anyway the maximum not + * rate-limited. This might cause a problem only for the + * tearing synchronisation. + */ + dev_err(rfbi.fbdev->dev, + "can't determine maximum RFBI transfer rate\n"); + return rfbi.l4_khz * 1000; + } + return rfbi.l4_khz * 1000 / min_l4_ticks; +} +#else +static int rfbi_get_max_tx_rate(void) +{ + return rfbi.l4_khz * 1000; +} +#endif + + static int rfbi_convert_timings(struct extif_timings *t) { u32 l; @@ -269,6 +326,76 @@ static int rfbi_convert_timings(struct extif_timings *t) return 0; } +static int rfbi_setup_tearsync(unsigned pin_cnt, + unsigned hs_pulse_time, unsigned vs_pulse_time, + int hs_pol_inv, int vs_pol_inv, int extif_div) +{ + int hs, vs; + int min; + u32 l; + + if (pin_cnt != 1 && pin_cnt != 2) + return -EINVAL; + + hs = ps_to_rfbi_ticks(hs_pulse_time, 1); + vs = ps_to_rfbi_ticks(vs_pulse_time, 1); + if (hs < 2) + return -EDOM; + if (pin_cnt == 2) + min = 2; + else + min = 4; + if (vs < min) + return -EDOM; + if (vs == hs) + return -EINVAL; + rfbi.tearsync_pin_cnt = pin_cnt; + dev_dbg(rfbi.fbdev->dev, + "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n", + pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv); + + rfbi_enable_clocks(1); + rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); + rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); + + l = rfbi_read_reg(RFBI_CONFIG0); + if (hs_pol_inv) + l &= ~(1 << 21); + else + l |= 1 << 21; + if (vs_pol_inv) + l &= ~(1 << 20); + else + l |= 1 << 20; + rfbi_enable_clocks(0); + + return 0; +} + +static int rfbi_enable_tearsync(int enable, unsigned line) +{ + u32 l; + + dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n", + enable, line, rfbi.tearsync_mode); + if (line > (1 << 11) - 1) + return -EINVAL; + + rfbi_enable_clocks(1); + l = rfbi_read_reg(RFBI_CONFIG0); + l &= ~(0x3 << 2); + if (enable) { + rfbi.tearsync_mode = rfbi.tearsync_pin_cnt; + l |= rfbi.tearsync_mode << 2; + } else + rfbi.tearsync_mode = 0; + rfbi_write_reg(RFBI_CONFIG0, l); + rfbi_write_reg(RFBI_LINE_NUMBER, line); + rfbi_enable_clocks(0); + + return 0; +} + static void rfbi_write_command(const void *buf, unsigned int len) { rfbi_enable_clocks(1); @@ -340,8 +467,10 @@ static void rfbi_transfer_area(int width, int height, rfbi_write_reg(RFBI_PIXEL_CNT, width * height); w = rfbi_read_reg(RFBI_CONTROL); - /* Enable, Internal trigger */ - rfbi_write_reg(RFBI_CONTROL, w | (1 << 0) | (1 << 4)); + w |= 1; /* enable */ + if (!rfbi.tearsync_mode) + w |= 1 << 4; /* internal trigger, reset by HW */ + rfbi_write_reg(RFBI_CONTROL, w); omap_dispc_enable_lcd_out(1); } @@ -443,6 +572,7 @@ const struct lcd_ctrl_extif omap2_ext_if = { .init = rfbi_init, .cleanup = rfbi_cleanup, .get_clk_info = rfbi_get_clk_info, + .get_max_tx_rate = rfbi_get_max_tx_rate, .set_bits_per_cycle = rfbi_set_bits_per_cycle, .convert_timings = rfbi_convert_timings, .set_timings = rfbi_set_timings, @@ -450,6 +580,8 @@ const struct lcd_ctrl_extif omap2_ext_if = { 
.read_data = rfbi_read_data, .write_data = rfbi_write_data, .transfer_area = rfbi_transfer_area, + .setup_tearsync = rfbi_setup_tearsync, + .enable_tearsync = rfbi_enable_tearsync, .max_transmit_size = (u32)~0, }; diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c index 8149de0b94e..71467b30348 100644 --- a/drivers/video/omap/sossi.c +++ b/drivers/video/omap/sossi.c @@ -49,6 +49,8 @@ #define DMA_LCD_CTRL 0xfffee3c4 #define DMA_LCD_LCH_CTRL 0xfffee3ea +#define CONF_SOSSI_RESET_R (1 << 23) + #define RD_ACCESS 0 #define WR_ACCESS 1 @@ -56,10 +58,16 @@ static struct { void __iomem *base; - unsigned long dpll_khz; + struct clk *fck; + unsigned long fck_hz; + spinlock_t lock; + int bus_pick_count; int bus_pick_width; + int tearsync_mode; + int tearsync_line; void (*lcdc_callback)(void *data); void *lcdc_callback_data; + int vsync_dma_pending; /* timing for read and write access */ int clk_div; u8 clk_tw0[2]; @@ -70,7 +78,6 @@ static struct { int last_access; struct omapfb_device *fbdev; - struct lcd_ctrl_extif *extif; } sossi; static inline u32 sossi_read_reg(int reg) @@ -113,17 +120,11 @@ static void sossi_clear_bits(int reg, u32 bits) sossi_write_reg(reg, sossi_read_reg(reg) & ~bits); } -#define MOD_CONF_CTRL_1 0xfffe1110 -#define CONF_SOSSI_RESET_R (1 << 23) -#define CONF_MOD_SOSSI_CLK_EN_R (1 << 16) - -static void sossi_dma_callback(void *data); - -#define KHZ_TO_PS(x) (1000000000 / (x)) +#define HZ_TO_PS(x) (1000000000 / (x / 1000)) static u32 ps_to_sossi_ticks(u32 ps, int div) { - u32 clk_period = KHZ_TO_PS(sossi.dpll_khz) * div; + u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div; return (clk_period + ps - 1) / clk_period; } @@ -220,18 +221,42 @@ static void _set_timing(int div, int tw0, int tw1) #ifdef VERBOSE dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n", - tw0 + 1, tw1 + 1, div + 1); + tw0 + 1, tw1 + 1, div); #endif - l = omap_readl(MOD_CONF_CTRL_1); - l &= ~(7 << 17); - l |= div << 17; - omap_writel(l, MOD_CONF_CTRL_1); - + clk_set_rate(sossi.fck, sossi.fck_hz / div); + clk_enable(sossi.fck); l = sossi_read_reg(SOSSI_INIT1_REG); l &= ~((0x0f << 20) | (0x3f << 24)); l |= (tw0 << 20) | (tw1 << 24); sossi_write_reg(SOSSI_INIT1_REG, l); + clk_disable(sossi.fck); +} + +static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width) +{ + u32 l; + + l = sossi_read_reg(SOSSI_INIT3_REG); + l &= ~0x3ff; + l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f); + sossi_write_reg(SOSSI_INIT3_REG, l); +} + +static void _set_tearsync_mode(int mode, unsigned line) +{ + u32 l; + + l = sossi_read_reg(SOSSI_TEARING_REG); + l &= ~(((1 << 11) - 1) << 15); + l |= line << 15; + l &= ~(0x3 << 26); + l |= mode << 26; + sossi_write_reg(SOSSI_TEARING_REG, l); + if (mode) + sossi_set_bits(SOSSI_INIT2_REG, 1 << 6); /* TE logic */ + else + sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6); } static inline void set_timing(int access) @@ -249,7 +274,6 @@ static void sossi_start_transfer(void) sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4); /* CS active low */ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30); - /* FIXME: locking? */ } static void sossi_stop_transfer(void) @@ -258,7 +282,6 @@ static void sossi_stop_transfer(void) sossi_set_bits(SOSSI_INIT2_REG, 1 << 4); /* CS active low */ sossi_set_bits(SOSSI_INIT1_REG, 1 << 30); - /* FIXME: locking? 
*/ } static void wait_end_of_write(void) @@ -313,7 +336,7 @@ static int sossi_convert_timings(struct extif_timings *t) if ((r = calc_wr_timings(t)) < 0) return r; - t->tim[4] = div - 1; + t->tim[4] = div; t->converted = 1; @@ -335,13 +358,12 @@ static void sossi_set_timings(const struct extif_timings *t) static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div) { - *clk_period = KHZ_TO_PS(sossi.dpll_khz); + *clk_period = HZ_TO_PS(sossi.fck_hz); *max_clk_div = 8; } static void sossi_set_bits_per_cycle(int bpc) { - u32 l; int bus_pick_count, bus_pick_width; /* We set explicitly the the bus_pick_count as well, although @@ -361,16 +383,79 @@ static void sossi_set_bits_per_cycle(int bpc) BUG(); return; } - l = sossi_read_reg(SOSSI_INIT3_REG); sossi.bus_pick_width = bus_pick_width; - l &= ~0x3ff; - l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f); - sossi_write_reg(SOSSI_INIT3_REG, l); + sossi.bus_pick_count = bus_pick_count; +} + +static int sossi_setup_tearsync(unsigned pin_cnt, + unsigned hs_pulse_time, unsigned vs_pulse_time, + int hs_pol_inv, int vs_pol_inv, int div) +{ + int hs, vs; + u32 l; + + if (pin_cnt != 1 || div < 1 || div > 8) + return -EINVAL; + + hs = ps_to_sossi_ticks(hs_pulse_time, div); + vs = ps_to_sossi_ticks(vs_pulse_time, div); + if (vs < 8 || vs <= hs || vs >= (1 << 12)) + return -EDOM; + vs /= 8; + vs--; + if (hs > 8) + hs = 8; + if (hs) + hs--; + + dev_dbg(sossi.fbdev->dev, + "setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n", + hs, vs, hs_pol_inv, vs_pol_inv); + + clk_enable(sossi.fck); + l = sossi_read_reg(SOSSI_TEARING_REG); + l &= ~((1 << 15) - 1); + l |= vs << 3; + l |= hs; + if (hs_pol_inv) + l |= 1 << 29; + else + l &= ~(1 << 29); + if (vs_pol_inv) + l |= 1 << 28; + else + l &= ~(1 << 28); + sossi_write_reg(SOSSI_TEARING_REG, l); + clk_disable(sossi.fck); + + return 0; +} + +static int sossi_enable_tearsync(int enable, unsigned line) +{ + int mode; + + dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line); + if (line >= 1 << 11) + return -EINVAL; + if (enable) { + if (line) + mode = 2; /* HS or VS */ + else + mode = 3; /* VS only */ + } else + mode = 0; + sossi.tearsync_line = line; + sossi.tearsync_mode = mode; + + return 0; } static void sossi_write_command(const void *data, unsigned int len) { + clk_enable(sossi.fck); set_timing(WR_ACCESS); + _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width); /* CMD#/DATA */ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18); set_cycles(len); @@ -378,11 +463,14 @@ static void sossi_write_command(const void *data, unsigned int len) send_data(data, len); sossi_stop_transfer(); wait_end_of_write(); + clk_disable(sossi.fck); } static void sossi_write_data(const void *data, unsigned int len) { + clk_enable(sossi.fck); set_timing(WR_ACCESS); + _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width); /* CMD#/DATA */ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18); set_cycles(len); @@ -390,6 +478,7 @@ static void sossi_write_data(const void *data, unsigned int len) send_data(data, len); sossi_stop_transfer(); wait_end_of_write(); + clk_disable(sossi.fck); } static void sossi_transfer_area(int width, int height, @@ -400,25 +489,44 @@ static void sossi_transfer_area(int width, int height, sossi.lcdc_callback = callback; sossi.lcdc_callback_data = data; + clk_enable(sossi.fck); set_timing(WR_ACCESS); + _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width); + _set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line); /* CMD#/DATA */ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18); 
set_cycles(width * height * sossi.bus_pick_width / 8); sossi_start_transfer(); - omap_enable_lcd_dma(); + if (sossi.tearsync_mode) { + /* Wait for the sync signal and start the transfer only + * then. We can't seem to be able to use HW sync DMA for + * this since LCD DMA shows huge latencies, as if it + * would ignore some of the DMA requests from SoSSI. + */ + unsigned long flags; + + spin_lock_irqsave(&sossi.lock, flags); + sossi.vsync_dma_pending++; + spin_unlock_irqrestore(&sossi.lock, flags); + } else + /* Just start the transfer right away. */ + omap_enable_lcd_dma(); } static void sossi_dma_callback(void *data) { omap_stop_lcd_dma(); sossi_stop_transfer(); + clk_disable(sossi.fck); sossi.lcdc_callback(sossi.lcdc_callback_data); } static void sossi_read_data(void *data, unsigned int len) { + clk_enable(sossi.fck); set_timing(RD_ACCESS); + _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width); /* CMD#/DATA */ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18); set_cycles(len); @@ -439,25 +547,51 @@ static void sossi_read_data(void *data, unsigned int len) data++; } sossi_stop_transfer(); + clk_disable(sossi.fck); +} + +static irqreturn_t sossi_match_irq(int irq, void *data) +{ + unsigned long flags; + + spin_lock_irqsave(&sossi.lock, flags); + if (sossi.vsync_dma_pending) { + sossi.vsync_dma_pending--; + omap_enable_lcd_dma(); + } + spin_unlock_irqrestore(&sossi.lock, flags); + return IRQ_HANDLED; } static int sossi_init(struct omapfb_device *fbdev) { u32 l, k; - struct clk *dpll_clk; + struct clk *fck; + struct clk *dpll1out_ck; int r; + sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE); sossi.fbdev = fbdev; + spin_lock_init(&sossi.lock); - sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE); - dpll_clk = clk_get(fbdev->dev, "ck_dpll1"); - if (IS_ERR(dpll_clk)) { - dev_err(fbdev->dev, "can't get dpll1 clock\n"); - return PTR_ERR(dpll_clk); + dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out"); + if (IS_ERR(dpll1out_ck)) { + dev_err(fbdev->dev, "can't get DPLL1OUT clock\n"); + return PTR_ERR(dpll1out_ck); } + /* We need the parent clock rate, which we might divide further + * depending on the timing requirements of the controller. See + * _set_timings. 
+ */ + sossi.fck_hz = clk_get_rate(dpll1out_ck); + clk_put(dpll1out_ck); - sossi.dpll_khz = clk_get_rate(dpll_clk) / 1000; - clk_put(dpll_clk); + fck = clk_get(fbdev->dev, "ck_sossi"); + if (IS_ERR(fck)) { + dev_err(fbdev->dev, "can't get SoSSI functional clock\n"); + return PTR_ERR(fck); + } + sossi.fck = fck; /* Reset and enable the SoSSI module */ l = omap_readl(MOD_CONF_CTRL_1); @@ -466,11 +600,10 @@ static int sossi_init(struct omapfb_device *fbdev) l &= ~CONF_SOSSI_RESET_R; omap_writel(l, MOD_CONF_CTRL_1); - l |= CONF_MOD_SOSSI_CLK_EN_R; - omap_writel(l, MOD_CONF_CTRL_1); - - omap_writel(omap_readl(ARM_IDLECT2) | (1 << 11), ARM_IDLECT2); - omap_writel(omap_readl(ARM_IDLECT1) | (1 << 6), ARM_IDLECT1); + clk_enable(sossi.fck); + l = omap_readl(ARM_IDLECT2); + l &= ~(1 << 8); /* DMACK_REQ */ + omap_writel(l, ARM_IDLECT2); l = sossi_read_reg(SOSSI_INIT2_REG); /* Enable and reset the SoSSI block */ @@ -487,17 +620,19 @@ static int sossi_init(struct omapfb_device *fbdev) if (l != 0x55555555 || k != 0xaaaaaaaa) { dev_err(fbdev->dev, "invalid SoSSI sync pattern: %08x, %08x\n", l, k); - return -ENODEV; + r = -ENODEV; + goto err; } if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) { dev_err(fbdev->dev, "can't get LCDC IRQ\n"); - return r; + r = -ENODEV; + goto err; } l = sossi_read_reg(SOSSI_ID_REG); /* Component code */ l = sossi_read_reg(SOSSI_ID_REG); - pr_info("omapfb: SoSSI version %d.%d initialized\n", + dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n", l >> 16, l & 0xffff); l = sossi_read_reg(SOSSI_INIT1_REG); @@ -505,21 +640,36 @@ static int sossi_init(struct omapfb_device *fbdev) l &= ~(1 << 31); /* REORDERING */ sossi_write_reg(SOSSI_INIT1_REG, l); + if ((r = request_irq(INT_SOSSI_MATCH, sossi_match_irq, IRQT_FALLING, + "sossi_match", sossi.fbdev->dev)) < 0) { + dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n"); + goto err; + } + + clk_disable(sossi.fck); return 0; + +err: + clk_disable(sossi.fck); + clk_put(sossi.fck); + return r; } static void sossi_cleanup(void) { omap_lcdc_free_dma_callback(); + clk_put(sossi.fck); } -const struct lcd_ctrl_extif omap1_ext_if = { +struct lcd_ctrl_extif sossi_extif = { .init = sossi_init, .cleanup = sossi_cleanup, .get_clk_info = sossi_get_clk_info, .convert_timings = sossi_convert_timings, .set_timings = sossi_set_timings, .set_bits_per_cycle = sossi_set_bits_per_cycle, + .setup_tearsync = sossi_setup_tearsync, + .enable_tearsync = sossi_enable_tearsync, .write_command = sossi_write_command, .read_data = sossi_read_data, .write_data = sossi_write_data, -- 2.41.1
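
For reference, the new OMAPFB_GET_CAPS ioctl added to omapfb_main.c above copies the capability word out with put_user(), so user space can test for OMAPFB_CAPS_TEARSYNC before asking for tear-synchronized updates. A minimal, hypothetical user-space sketch follows; the header path and the device node are assumptions for this tree, not part of the patch.

/*
 * Illustrative only -- not part of the patch.  Queries the caps word
 * that the OMAPFB_GET_CAPS handler above returns via put_user().
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/arch/omapfb.h>	/* assumed header location for this era */

int main(void)
{
	unsigned long caps;
	int fd = open("/dev/fb0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, OMAPFB_GET_CAPS, &caps) == 0)
		printf("omapfb caps: %#lx, tearsync %s\n", caps,
		       (caps & OMAPFB_CAPS_TEARSYNC) ? "available" : "not available");
	close(fd);
	return 0;
}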
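
Both external interfaces convert requested pulse widths from picoseconds into whole interface-clock cycles, rounding up so the programmed pulse is never shorter than requested: ps_to_sossi_ticks() above does this on top of HZ_TO_PS(), and ps_to_rfbi_ticks() follows the same idea. A small stand-alone sketch of that arithmetic, with made-up example numbers:

/*
 * Illustrative only -- not part of the patch.  Same rounding-up
 * conversion as ps_to_sossi_ticks(): ceil(ps / clk_period).
 */
#include <stdio.h>

static unsigned long ps_to_ticks(unsigned long ps, unsigned long clk_period_ps)
{
	/* Round up so the delay is never shorter than requested. */
	return (clk_period_ps + ps - 1) / clk_period_ps;
}

int main(void)
{
	/* e.g. a 96 MHz interface clock -> ~10.4 ns period */
	unsigned long period_ps = 1000000000UL / (96000000UL / 1000);

	/* a 25 ns pulse needs 3 whole clock cycles */
	printf("%lu ticks\n", ps_to_ticks(25000, period_ps));
	return 0;
}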
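
The tear-sync path in sossi_transfer_area() only arms the transfer (vsync_dma_pending++ under sossi.lock) and lets sossi_match_irq() start the LCD DMA when the TE signal actually arrives, since HW-synchronized DMA showed too much latency. A rough user-space analogue of that deferred-start handshake, with a pthread standing in for the match IRQ; all names and timings here are made up.

/*
 * Illustrative only -- not part of the patch.  Deferred-start
 * handshake: the producer arms a pending counter under a lock,
 * and the "sync" context performs the actual start.
 * Build: cc te_demo.c -lpthread   (file name is hypothetical)
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int transfer_pending;		/* plays the role of vsync_dma_pending */

static void start_dma(void)
{
	printf("DMA started at the sync point\n");
}

/* Plays the role of sossi_match_irq(). */
static void *te_irq(void *arg)
{
	for (;;) {
		usleep(16000);		/* pretend ~60 Hz tear signal */
		pthread_mutex_lock(&lock);
		if (transfer_pending) {
			transfer_pending--;
			start_dma();
		}
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, te_irq, NULL);

	/* Plays the role of sossi_transfer_area() with tearsync enabled:
	 * arm the transfer, do not start it here. */
	pthread_mutex_lock(&lock);
	transfer_pending++;
	pthread_mutex_unlock(&lock);

	sleep(1);
	return 0;
}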