// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Epson HWA742 LCD controller driver
 *
 * Copyright (C) 2004-2005 Nokia Corporation
 * Authors:     Juha Yrjölä   <juha.yrjola@nokia.com>
 *              Imre Deak     <imre.deak@nokia.com>
 * YUV support: Jussi Laako   <jussi.laako@nokia.com>
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include "omapfb.h"

#define HWA742_REV_CODE_REG       0x0
#define HWA742_CONFIG_REG         0x2
#define HWA742_PLL_DIV_REG        0x4
#define HWA742_PLL_0_REG          0x6
#define HWA742_PLL_1_REG          0x8
#define HWA742_PLL_2_REG          0xa
#define HWA742_PLL_3_REG          0xc
#define HWA742_PLL_4_REG          0xe
#define HWA742_CLK_SRC_REG        0x12
#define HWA742_PANEL_TYPE_REG     0x14
#define HWA742_H_DISP_REG         0x16
#define HWA742_H_NDP_REG          0x18
#define HWA742_V_DISP_1_REG       0x1a
#define HWA742_V_DISP_2_REG       0x1c
#define HWA742_V_NDP_REG          0x1e
#define HWA742_HS_W_REG           0x20
#define HWA742_HP_S_REG           0x22
#define HWA742_VS_W_REG           0x24
#define HWA742_VP_S_REG           0x26
#define HWA742_PCLK_POL_REG       0x28
#define HWA742_INPUT_MODE_REG     0x2a
#define HWA742_TRANSL_MODE_REG1   0x2e
#define HWA742_DISP_MODE_REG      0x34
#define HWA742_WINDOW_TYPE        0x36
#define HWA742_WINDOW_X_START_0   0x38
#define HWA742_WINDOW_X_START_1   0x3a
#define HWA742_WINDOW_Y_START_0   0x3c
#define HWA742_WINDOW_Y_START_1   0x3e
#define HWA742_WINDOW_X_END_0     0x40
#define HWA742_WINDOW_X_END_1     0x42
#define HWA742_WINDOW_Y_END_0     0x44
#define HWA742_WINDOW_Y_END_1     0x46
#define HWA742_MEMORY_WRITE_LSB   0x48
#define HWA742_MEMORY_WRITE_MSB   0x49
#define HWA742_MEMORY_READ_0      0x4a
#define HWA742_MEMORY_READ_1      0x4c
#define HWA742_MEMORY_READ_2      0x4e
#define HWA742_POWER_SAVE         0x56
#define HWA742_NDP_CTRL           0x58

#define HWA742_AUTO_UPDATE_TIME   (HZ / 20)

/* Reserve 4 request slots for requests in irq context */
#define REQ_POOL_SIZE             24
#define IRQ_REQ_POOL_SIZE         4

#define REQ_FROM_IRQ_POOL         0x01

#define REQ_COMPLETE              0
#define REQ_PENDING               1

struct update_param {
	int	x, y, width, height;
	int	color_mode;
	int	flags;
};

struct hwa742_request {
	struct list_head entry;
	unsigned int	 flags;

	int		 (*handler)(struct hwa742_request *req);
	void		 (*complete)(void *data);
	void		 *complete_data;

	union {
		struct update_param	update;
		struct completion	*sync;
	} par;
};

struct {
	enum omapfb_update_mode	update_mode;
	enum omapfb_update_mode	update_mode_before_suspend;

	struct timer_list	auto_update_timer;
	int			stop_auto_update;
	struct omapfb_update_window	auto_update_window;
	unsigned		te_connected:1;
	unsigned		vsync_only:1;

	struct hwa742_request	req_pool[REQ_POOL_SIZE];
	struct list_head	pending_req_list;
	struct list_head	free_req_list;

	/*
	 * @req_lock: protect request slots pool and its tracking lists
	 * @req_sema: counter; slot allocators from task contexts must
	 *	      push it down before acquiring a slot. This
	 *	      guarantees that atomic contexts will always have
	 *	      a minimum of IRQ_REQ_POOL_SIZE slots available.
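	 *
	 *	      For example (illustrative arithmetic only): with
	 *	      REQ_POOL_SIZE 24 and IRQ_REQ_POOL_SIZE 4, hwa742_init()
	 *	      initializes req_sema to 20, so sleeping callers can hold
	 *	      at most 20 slots at a time and at least 4 slots remain
	 *	      for alloc_req(false) callers in atomic context.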
	 */
	struct semaphore	req_sema;
	spinlock_t		req_lock;

	struct extif_timings	reg_timings, lut_timings;

	int			prev_color_mode;
	int			prev_flags;
	int			window_type;

	u32			max_transmit_size;
	u32			extif_clk_period;
	unsigned long		pix_tx_time;
	unsigned long		line_upd_time;

	struct omapfb_device	*fbdev;
	struct lcd_ctrl_extif	*extif;
	const struct lcd_ctrl	*int_ctrl;

	struct clk		*sys_ck;
} hwa742;

struct lcd_ctrl hwa742_ctrl;

static u8 hwa742_read_reg(u8 reg)
{
	u8 data;

	hwa742.extif->set_bits_per_cycle(8);
	hwa742.extif->write_command(&reg, 1);
	hwa742.extif->read_data(&data, 1);

	return data;
}

static void hwa742_write_reg(u8 reg, u8 data)
{
	hwa742.extif->set_bits_per_cycle(8);
	hwa742.extif->write_command(&reg, 1);
	hwa742.extif->write_data(&data, 1);
}

static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
{
	u8 tmp[8];
	u8 cmd;

	x_end--;
	y_end--;
	tmp[0] = x_start;
	tmp[1] = x_start >> 8;
	tmp[2] = y_start;
	tmp[3] = y_start >> 8;
	tmp[4] = x_end;
	tmp[5] = x_end >> 8;
	tmp[6] = y_end;
	tmp[7] = y_end >> 8;

	hwa742.extif->set_bits_per_cycle(8);
	cmd = HWA742_WINDOW_X_START_0;

	hwa742.extif->write_command(&cmd, 1);

	hwa742.extif->write_data(tmp, 8);
}

static void set_format_regs(int conv, int transl, int flags)
{
	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
		hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
#endif
	} else {
		hwa742.window_type = (hwa742.window_type & 0xfc);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
#endif
	}

	hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
	hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
	hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
}

static void enable_tearsync(int y, int width, int height, int screen_height,
			    int force_vsync)
{
	u8 b;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b |= 1 << 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	if (likely(hwa742.vsync_only || force_vsync)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	if ((width * hwa742.pix_tx_time / 1000) * height <
	    (y + height) * (hwa742.line_upd_time / 1000)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	hwa742.extif->enable_tearsync(1, y + 1);
}

static void disable_tearsync(void)
{
	u8 b;

	hwa742.extif->enable_tearsync(0, 0);

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b &= ~(1 << 2);
	hwa742_write_reg(HWA742_NDP_CTRL, b);
}

static inline struct hwa742_request *alloc_req(bool can_sleep)
{
	unsigned long flags;
	struct hwa742_request *req;
	int req_flags = 0;

	if (can_sleep)
		down(&hwa742.req_sema);
	else
		req_flags = REQ_FROM_IRQ_POOL;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	BUG_ON(list_empty(&hwa742.free_req_list));
	req = list_entry(hwa742.free_req_list.next,
			 struct hwa742_request, entry);
	list_del(&req->entry);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	INIT_LIST_HEAD(&req->entry);
	req->flags = req_flags;

	return req;
}

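/*
 * Request life cycle (summary of the surrounding code, for orientation):
 * a slot is taken with alloc_req(), filled in with a handler and an
 * optional completion callback, queued with submit_req_list() and run by
 * process_pending_requests(). A handler returning REQ_COMPLETE is freed
 * and completed on the spot; one returning REQ_PENDING (frame transfers)
 * is freed later from request_complete() once the external interface
 * signals that the transfer has finished.
 */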
static inline void free_req(struct hwa742_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&hwa742.req_lock, flags);

	list_move(&req->entry, &hwa742.free_req_list);
	if (!(req->flags & REQ_FROM_IRQ_POOL))
		up(&hwa742.req_sema);

	spin_unlock_irqrestore(&hwa742.req_lock, flags);
}

static void process_pending_requests(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hwa742.req_lock, flags);

	while (!list_empty(&hwa742.pending_req_list)) {
		struct hwa742_request *req;
		void (*complete)(void *);
		void *complete_data;

		req = list_entry(hwa742.pending_req_list.next,
				 struct hwa742_request, entry);
		spin_unlock_irqrestore(&hwa742.req_lock, flags);

		if (req->handler(req) == REQ_PENDING)
			return;

		complete = req->complete;
		complete_data = req->complete_data;
		free_req(req);

		if (complete)
			complete(complete_data);

		spin_lock_irqsave(&hwa742.req_lock, flags);
	}

	spin_unlock_irqrestore(&hwa742.req_lock, flags);
}

static void submit_req_list(struct list_head *head)
{
	unsigned long flags;
	int process = 1;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	if (likely(!list_empty(&hwa742.pending_req_list)))
		process = 0;
	list_splice_init(head, hwa742.pending_req_list.prev);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	if (process)
		process_pending_requests();
}

static void request_complete(void *data)
{
	struct hwa742_request *req = (struct hwa742_request *)data;
	void (*complete)(void *);
	void *complete_data;

	complete = req->complete;
	complete_data = req->complete_data;

	free_req(req);

	if (complete)
		complete(complete_data);

	process_pending_requests();
}

static int send_frame_handler(struct hwa742_request *req)
{
	struct update_param *par = &req->par.update;
	int x = par->x;
	int y = par->y;
	int w = par->width;
	int h = par->height;
	int bpp;
	int conv, transl;
	unsigned long offset;
	int color_mode = par->color_mode;
	int flags = par->flags;
	int scr_width = hwa742.fbdev->panel->x_res;
	int scr_height = hwa742.fbdev->panel->y_res;

#ifdef VERBOSE
	dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
		"color_mode %d flags %d\n",
		x, y, w, h, scr_width, color_mode, flags);
#endif

	switch (color_mode) {
	case OMAPFB_COLOR_YUV422:
		bpp = 16;
		conv = 0x08;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_YUV420:
		bpp = 12;
		conv = 0x09;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_RGB565:
		bpp = 16;
		conv = 0x01;
		transl = 0x05;
		break;
	default:
		return -EINVAL;
	}

	if (hwa742.prev_flags != flags ||
	    hwa742.prev_color_mode != color_mode) {
		set_format_regs(conv, transl, flags);
		hwa742.prev_color_mode = color_mode;
		hwa742.prev_flags = flags;
	}
	flags = req->par.update.flags;
	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
		enable_tearsync(y, scr_width, h, scr_height,
				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
	else
		disable_tearsync();

	set_window_regs(x, y, x + w, y + h);

	offset = (scr_width * y + x) * bpp / 8;

	hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
			OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
			color_mode);

	hwa742.extif->set_bits_per_cycle(16);

	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
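
	/*
	 * Hand the window off to the external interface. The transfer runs
	 * asynchronously and request_complete() is called back (typically
	 * from the interface driver's completion/irq path) to free this
	 * request and kick any queued ones, hence REQ_PENDING below.
	 */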
	hwa742.extif->transfer_area(w, h, request_complete, req);

	return REQ_PENDING;
}

static void send_frame_complete(void *data)
{
	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
}

#define ADD_PREQ(_x, _y, _w, _h, can_sleep) do {	\
	req = alloc_req(can_sleep);			\
	req->handler	= send_frame_handler;		\
	req->complete	= send_frame_complete;		\
	req->par.update.x = _x;				\
	req->par.update.y = _y;				\
	req->par.update.width  = _w;			\
	req->par.update.height = _h;			\
	req->par.update.color_mode = color_mode;	\
	req->par.update.flags = flags;			\
	list_add_tail(&req->entry, req_head);		\
} while(0)

static void create_req_list(struct omapfb_update_window *win,
			    struct list_head *req_head,
			    bool can_sleep)
{
	struct hwa742_request *req;
	int x = win->x;
	int y = win->y;
	int width = win->width;
	int height = win->height;
	int color_mode;
	int flags;

	flags = win->format & ~OMAPFB_FORMAT_MASK;
	color_mode = win->format & OMAPFB_FORMAT_MASK;

	if (x & 1) {
		ADD_PREQ(x, y, 1, height, can_sleep);
		width--;
		x++;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width & ~1) {
		unsigned int xspan = width & ~1;
		unsigned int ystart = y;
		unsigned int yspan = height;

		if (xspan * height * 2 > hwa742.max_transmit_size) {
			yspan = hwa742.max_transmit_size / (xspan * 2);
			ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
			ystart += yspan;
			yspan = height - yspan;
			flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
		}

		ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
		x += xspan;
		width -= xspan;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width)
		ADD_PREQ(x, y, 1, height, can_sleep);
}

static void auto_update_complete(void *data)
{
	if (!hwa742.stop_auto_update)
		mod_timer(&hwa742.auto_update_timer,
			  jiffies + HWA742_AUTO_UPDATE_TIME);
}

static void __hwa742_update_window_auto(bool can_sleep)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;

	create_req_list(&hwa742.auto_update_window, &req_list, can_sleep);
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete = auto_update_complete;
	last->complete_data = NULL;

	submit_req_list(&req_list);
}

static void hwa742_update_window_auto(struct timer_list *unused)
{
	__hwa742_update_window_auto(false);
}

int hwa742_update_window_async(struct fb_info *fbi,
			       struct omapfb_update_window *win,
			       void (*complete_callback)(void *arg),
			       void *complete_callback_data)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;
	int r = 0;

	if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
		dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
		r = -EINVAL;
		goto out;
	}
	if (unlikely(win->format &
	    ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
	    OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
		dev_dbg(hwa742.fbdev->dev, "invalid window flag\n");
		r = -EINVAL;
		goto out;
	}

	create_req_list(win, &req_list, true);
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete	    = complete_callback;
	last->complete_data = (void *)complete_callback_data;

	submit_req_list(&req_list);

out:
	return r;
}
EXPORT_SYMBOL(hwa742_update_window_async);

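/*
 * Note: the real plane configuration for an update is done per request in
 * send_frame_handler(); the hook below only validates that the GFX plane
 * targeting the LCD output is the one being set up.
 */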
static int hwa742_setup_plane(int plane, int channel_out,
			      unsigned long offset, int screen_width,
			      int pos_x, int pos_y, int width, int height,
			      int color_mode)
{
	if (plane != OMAPFB_PLANE_GFX ||
	    channel_out != OMAPFB_CHANNEL_OUT_LCD)
		return -EINVAL;

	return 0;
}

static int hwa742_enable_plane(int plane, int enable)
{
	if (plane != 0)
		return -EINVAL;

	hwa742.int_ctrl->enable_plane(plane, enable);

	return 0;
}

static int sync_handler(struct hwa742_request *req)
{
	complete(req->par.sync);
	return REQ_COMPLETE;
}

static void hwa742_sync(void)
{
	LIST_HEAD(req_list);
	struct hwa742_request *req;
	struct completion comp;

	req = alloc_req(true);

	req->handler = sync_handler;
	req->complete = NULL;
	init_completion(&comp);
	req->par.sync = &comp;

	list_add(&req->entry, &req_list);
	submit_req_list(&req_list);

	wait_for_completion(&comp);
}

static void hwa742_bind_client(struct omapfb_notifier_block *nb)
{
	dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
	if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
	}
}

static int hwa742_set_update_mode(enum omapfb_update_mode mode)
{
	if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
	    mode != OMAPFB_UPDATE_DISABLED)
		return -EINVAL;

	if (mode == hwa742.update_mode)
		return 0;

	dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
			mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
			(mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));

	switch (hwa742.update_mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
		break;
	case OMAPFB_AUTO_UPDATE:
		hwa742.stop_auto_update = 1;
		del_timer_sync(&hwa742.auto_update_timer);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	hwa742.update_mode = mode;
	hwa742_sync();
	hwa742.stop_auto_update = 0;

	switch (mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
		break;
	case OMAPFB_AUTO_UPDATE:
		__hwa742_update_window_auto(true);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	return 0;
}

static enum omapfb_update_mode hwa742_get_update_mode(void)
{
	return hwa742.update_mode;
}

static unsigned long round_to_extif_ticks(unsigned long ps, int div)
{
	int bus_tick = hwa742.extif_clk_period * div;
	return (ps + bus_tick - 1) / bus_tick * bus_tick;
}

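/*
 * Worked example (numbers purely illustrative): if the external interface
 * reports extif_clk_period = 7500 ps and div = 1, a requested delay of
 * 14200 ps is rounded up by round_to_extif_ticks() to 15000 ps, i.e. two
 * whole interface ticks. The timing calculations below all rely on this
 * rounding.
 */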
static int calc_reg_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 12.2 ns (regs),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 16 ns (regs),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 2*SYSCLK  (regs),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.reg_timings;
	memset(t, 0, sizeof(*t));
	t->clk_div = div;
	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}

static int calc_lut_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns
	 */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.lut_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}

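/*
 * calc_extif_timings() below tries clock dividers starting from 1 and keeps
 * the first one the external interface accepts. Note that only the
 * register-access divider is reported back through *extif_mem_div; the LUT
 * timings are validated as well, but their divider stays internal in
 * hwa742.lut_timings.clk_div.
 */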
static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
{
	int max_clk_div;
	int div;

	hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
	for (div = 1; div < max_clk_div; div++) {
		if (calc_reg_timing(sysclk, div) == 0)
			break;
	}
	if (div >= max_clk_div)
		goto err;

	*extif_mem_div = div;

	for (div = 1; div < max_clk_div; div++) {
		if (calc_lut_timing(sysclk, div) == 0)
			break;
	}

	if (div >= max_clk_div)
		goto err;

	return 0;

err:
	dev_err(hwa742.fbdev->dev, "can't setup timings\n");
	return -1;
}

static void calc_hwa742_clk_rates(unsigned long ext_clk,
				unsigned long *sys_clk, unsigned long *pix_clk)
{
	int pix_clk_src;
	int sys_div = 0, sys_mul = 0;
	int pix_div;

	pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
	if ((pix_clk_src & (0x3 << 1)) == 0) {
		/* Source is the PLL */
		sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
		sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
		*sys_clk = ext_clk * sys_mul / sys_div;
	} else	/* else source is ext clk, or oscillator */
		*sys_clk = ext_clk;

	*pix_clk = *sys_clk / pix_div;			/* HZ */
	dev_dbg(hwa742.fbdev->dev,
		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
	dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
		*sys_clk, *pix_clk);
}

static int setup_tearsync(unsigned long pix_clk, int extif_div)
{
	int hdisp, vdisp;
	int hndp, vndp;
	int hsw, vsw;
	int hs, vs;
	int hs_pol_inv, vs_pol_inv;
	int use_hsvs, use_ndp;
	u8  b;

	hsw = hwa742_read_reg(HWA742_HS_W_REG);
	vsw = hwa742_read_reg(HWA742_VS_W_REG);
	hs_pol_inv = !(hsw & 0x80);
	vs_pol_inv = !(vsw & 0x80);
	hsw = hsw & 0x7f;
	vsw = vsw & 0x3f;

	hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
	vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
		((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);

	hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
	vndp = hwa742_read_reg(HWA742_V_NDP_REG);

	/* time to transfer one pixel (16bpp) in ps */
	hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
	if (hwa742.extif->get_max_tx_rate != NULL) {
		/*
		 * The external interface might have a rate limitation,
		 * if so, we have to maximize our transfer rate.
		 */
		unsigned long min_tx_time;
		unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();

		dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
			max_tx_rate);
		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
		if (hwa742.pix_tx_time < min_tx_time)
			hwa742.pix_tx_time = min_tx_time;
	}

	/* time to update one line in ps */
	hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
	hwa742.line_upd_time *= 1000;
	if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
		/*
		 * transfer speed too low, we might have to use both
		 * HS and VS
		 */
		use_hsvs = 1;
	else
		/* decent transfer speed, we'll always use only VS */
		use_hsvs = 0;

	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
		/*
		 * HS or'ed with VS doesn't work, use the active high
		 * TE signal based on HNDP / VNDP
		 */
		use_ndp = 1;
		hs_pol_inv = 0;
		vs_pol_inv = 0;
		hs = hndp;
		vs = vndp;
	} else {
		/*
		 * Use HS or'ed with VS as a TE signal if both are needed
		 * or VNDP if only vsync is needed.
		 */
		use_ndp = 0;
		hs = hsw;
		vs = vsw;
		if (!use_hsvs) {
			hs_pol_inv = 0;
			vs_pol_inv = 0;
		}
	}

	hs = hs * 1000000 / (pix_clk / 1000);		/* ps */
	hs *= 1000;

	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
	vs *= 1000;

	if (vs <= hs)
		return -EDOM;
	/* set VS to 120% of HS to minimize VS detection time */
	vs = hs * 12 / 10;
	/* minimize HS too */
	hs = 10000;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b &= ~0x3;
	b |= use_hsvs ? 1 : 0;
	b |= (use_ndp && use_hsvs) ? 0 : 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	hwa742.vsync_only = !use_hsvs;

	dev_dbg(hwa742.fbdev->dev,
		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
		pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
	dev_dbg(hwa742.fbdev->dev,
		"hs %d ps vs %d ps mode %d vsync_only %d\n",
		hs, vs, (b & 0x3), !use_hsvs);

	return hwa742.extif->setup_tearsync(1, hs, vs,
					    hs_pol_inv, vs_pol_inv, extif_div);
}

static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
{
	hwa742.int_ctrl->get_caps(plane, caps);
	caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
		OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
	if (hwa742.te_connected)
		caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
	caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
			   (1 << OMAPFB_COLOR_YUV420);
}

static void hwa742_suspend(void)
{
	hwa742.update_mode_before_suspend = hwa742.update_mode;
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* Enable sleep mode */
	hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
	clk_disable(hwa742.sys_ck);
}

static void hwa742_resume(void)
{
	clk_enable(hwa742.sys_ck);

	/* Disable sleep mode */
	hwa742_write_reg(HWA742_POWER_SAVE, 0);
	while (1) {
		/* Loop until PLL output is stabilized */
		if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(5));
	}
	hwa742_set_update_mode(hwa742.update_mode_before_suspend);
}

static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
		       struct omapfb_mem_desc *req_vram)
{
	int r = 0, i;
	u8 rev, conf;
	unsigned long ext_clk;
	unsigned long sys_clk, pix_clk;
	int extif_mem_div;
	struct omapfb_platform_data *omapfb_conf;

	BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);

	hwa742.fbdev = fbdev;
	hwa742.extif = fbdev->ext_if;
	hwa742.int_ctrl = fbdev->int_ctrl;

	omapfb_conf = dev_get_platdata(fbdev->dev);

	hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");

	spin_lock_init(&hwa742.req_lock);

	if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
		goto err1;

	if ((r = hwa742.extif->init(fbdev)) < 0)
		goto err2;

	ext_clk = clk_get_rate(hwa742.sys_ck);
	if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
		goto err3;
	hwa742.extif->set_timings(&hwa742.reg_timings);
	clk_prepare_enable(hwa742.sys_ck);

	calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
	if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
		goto err4;
	hwa742.extif->set_timings(&hwa742.reg_timings);

	rev = hwa742_read_reg(HWA742_REV_CODE_REG);
	if ((rev & 0xfc) != 0x80) {
		dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
		r = -ENODEV;
		goto err4;
	}

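	/*
	 * The PLL is expected to have been programmed and locked by the
	 * bootloader; bit 7 of HWA742_PLL_DIV_REG serves as that indicator
	 * here (hwa742_resume() polls the same bit), so bail out if it is
	 * clear.
	 */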
	if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
		dev_err(fbdev->dev,
			"HWA742: controller not initialized by the bootloader\n");
		r = -ENODEV;
		goto err4;
	}

	if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
		dev_err(hwa742.fbdev->dev,
			"HWA742: can't setup tearing synchronization\n");
		goto err4;
	}
	hwa742.te_connected = 1;

	hwa742.max_transmit_size = hwa742.extif->max_transmit_size;

	hwa742.update_mode = OMAPFB_UPDATE_DISABLED;

	hwa742.auto_update_window.x = 0;
	hwa742.auto_update_window.y = 0;
	hwa742.auto_update_window.width = fbdev->panel->x_res;
	hwa742.auto_update_window.height = fbdev->panel->y_res;
	hwa742.auto_update_window.format = 0;

	timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);

	hwa742.prev_color_mode = -1;
	hwa742.prev_flags = 0;

	hwa742.fbdev = fbdev;

	INIT_LIST_HEAD(&hwa742.free_req_list);
	INIT_LIST_HEAD(&hwa742.pending_req_list);
	for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
		list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
	BUG_ON(i <= IRQ_REQ_POOL_SIZE);
	sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);

	conf = hwa742_read_reg(HWA742_CONFIG_REG);
	dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d "
			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);

	return 0;
err4:
	clk_disable_unprepare(hwa742.sys_ck);
err3:
	hwa742.extif->cleanup();
err2:
	hwa742.int_ctrl->cleanup();
err1:
	return r;
}

static void hwa742_cleanup(void)
{
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	hwa742.extif->cleanup();
	hwa742.int_ctrl->cleanup();
	clk_disable_unprepare(hwa742.sys_ck);
}

struct lcd_ctrl hwa742_ctrl = {
	.name			= "hwa742",
	.init			= hwa742_init,
	.cleanup		= hwa742_cleanup,
	.bind_client		= hwa742_bind_client,
	.get_caps		= hwa742_get_caps,
	.set_update_mode	= hwa742_set_update_mode,
	.get_update_mode	= hwa742_get_update_mode,
	.setup_plane		= hwa742_setup_plane,
	.enable_plane		= hwa742_enable_plane,
	.update_window		= hwa742_update_window_async,
	.sync			= hwa742_sync,
	.suspend		= hwa742_suspend,
	.resume			= hwa742_resume,
};