// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Epson HWA742 LCD controller driver
 *
 * Copyright (C) 2004-2005 Nokia Corporation
 * Authors:     Juha Yrjölä <juha.yrjola@nokia.com>
 *              Imre Deak <imre.deak@nokia.com>
 * YUV support: Jussi Laako <jussi.laako@nokia.com>
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include "omapfb.h"

#define HWA742_REV_CODE_REG       0x0
#define HWA742_CONFIG_REG         0x2
#define HWA742_PLL_DIV_REG        0x4
#define HWA742_PLL_0_REG          0x6
#define HWA742_PLL_1_REG          0x8
#define HWA742_PLL_2_REG          0xa
#define HWA742_PLL_3_REG          0xc
#define HWA742_PLL_4_REG          0xe
#define HWA742_CLK_SRC_REG        0x12
#define HWA742_PANEL_TYPE_REG     0x14
#define HWA742_H_DISP_REG         0x16
#define HWA742_H_NDP_REG          0x18
#define HWA742_V_DISP_1_REG       0x1a
#define HWA742_V_DISP_2_REG       0x1c
#define HWA742_V_NDP_REG          0x1e
#define HWA742_HS_W_REG           0x20
#define HWA742_HP_S_REG           0x22
#define HWA742_VS_W_REG           0x24
#define HWA742_VP_S_REG           0x26
#define HWA742_PCLK_POL_REG       0x28
#define HWA742_INPUT_MODE_REG     0x2a
#define HWA742_TRANSL_MODE_REG1   0x2e
#define HWA742_DISP_MODE_REG      0x34
#define HWA742_WINDOW_TYPE        0x36
#define HWA742_WINDOW_X_START_0   0x38
#define HWA742_WINDOW_X_START_1   0x3a
#define HWA742_WINDOW_Y_START_0   0x3c
#define HWA742_WINDOW_Y_START_1   0x3e
#define HWA742_WINDOW_X_END_0     0x40
#define HWA742_WINDOW_X_END_1     0x42
#define HWA742_WINDOW_Y_END_0     0x44
#define HWA742_WINDOW_Y_END_1     0x46
#define HWA742_MEMORY_WRITE_LSB   0x48
#define HWA742_MEMORY_WRITE_MSB   0x49
#define HWA742_MEMORY_READ_0      0x4a
#define HWA742_MEMORY_READ_1      0x4c
#define HWA742_MEMORY_READ_2      0x4e
#define HWA742_POWER_SAVE         0x56
#define HWA742_NDP_CTRL           0x58

#define HWA742_AUTO_UPDATE_TIME   (HZ / 20)

/* Reserve 4 request slots for requests in irq context */
#define REQ_POOL_SIZE             24
#define IRQ_REQ_POOL_SIZE         4

#define REQ_FROM_IRQ_POOL         0x01

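/*
 * Request handler return codes: REQ_COMPLETE means the handler finished the
 * request synchronously, REQ_PENDING means it completes later through an
 * asynchronous transfer callback (see request_complete()).
 */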
#define REQ_COMPLETE	0
#define REQ_PENDING	1

struct update_param {
	int	x, y, width, height;
	int	color_mode;
	int	flags;
};

struct hwa742_request {
	struct list_head entry;
	unsigned int	 flags;

	int		 (*handler)(struct hwa742_request *req);
	void		 (*complete)(void *data);
	void		 *complete_data;

	union {
		struct update_param	update;
		struct completion	*sync;
	} par;
};

struct {
	enum omapfb_update_mode	update_mode;
	enum omapfb_update_mode	update_mode_before_suspend;

	struct timer_list	auto_update_timer;
	int			stop_auto_update;
	struct omapfb_update_window	auto_update_window;
	unsigned		te_connected:1;
	unsigned		vsync_only:1;

	struct hwa742_request	req_pool[REQ_POOL_SIZE];
	struct list_head	pending_req_list;
	struct list_head	free_req_list;

	/*
	 * @req_lock: protect request slots pool and its tracking lists
	 * @req_sema: counter; slot allocators from task contexts must
	 *	      push it down before acquiring a slot. This
	 *	      guarantees that atomic contexts will always have
	 *	      a minimum of IRQ_REQ_POOL_SIZE slots available.
	 */
	struct semaphore	req_sema;
	spinlock_t		req_lock;

	struct extif_timings	reg_timings, lut_timings;

	int			prev_color_mode;
	int			prev_flags;
	int			window_type;

	u32			max_transmit_size;
	u32			extif_clk_period;
	unsigned long		pix_tx_time;
	unsigned long		line_upd_time;

	struct omapfb_device	*fbdev;
	struct lcd_ctrl_extif	*extif;
	const struct lcd_ctrl	*int_ctrl;

	struct clk		*sys_ck;
} hwa742;

struct lcd_ctrl hwa742_ctrl;

static u8 hwa742_read_reg(u8 reg)
{
	u8 data;

	hwa742.extif->set_bits_per_cycle(8);
	hwa742.extif->write_command(&reg, 1);
	hwa742.extif->read_data(&data, 1);

	return data;
}

static void hwa742_write_reg(u8 reg, u8 data)
{
	hwa742.extif->set_bits_per_cycle(8);
	hwa742.extif->write_command(&reg, 1);
	hwa742.extif->write_data(&data, 1);
}

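/*
 * Program the HWA742 update window. Callers pass exclusive end coordinates,
 * which are decremented here before being written; all eight coordinate
 * bytes go out in one burst starting at HWA742_WINDOW_X_START_0.
 */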
static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
{
	u8 tmp[8];
	u8 cmd;

	x_end--;
	y_end--;
	tmp[0] = x_start;
	tmp[1] = x_start >> 8;
	tmp[2] = y_start;
	tmp[3] = y_start >> 8;
	tmp[4] = x_end;
	tmp[5] = x_end >> 8;
	tmp[6] = y_end;
	tmp[7] = y_end >> 8;

	hwa742.extif->set_bits_per_cycle(8);
	cmd = HWA742_WINDOW_X_START_0;

	hwa742.extif->write_command(&cmd, 1);

	hwa742.extif->write_data(tmp, 8);
}

static void set_format_regs(int conv, int transl, int flags)
{
	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
		hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
#endif
	} else {
		hwa742.window_type = (hwa742.window_type & 0xfc);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
#endif
	}

	hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
	hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
	hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
}

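/*
 * Enable the tearing-effect output and pick the sync point for the coming
 * update: plain vsync when only vsync is usable or explicitly forced, when
 * each update line is transferred faster than the panel scans a line, or
 * when the whole transfer finishes before scanout reaches the end of the
 * updated area; otherwise synchronize to line y + 1.
 */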
static void enable_tearsync(int y, int width, int height, int screen_height,
			    int force_vsync)
{
	u8 b;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b |= 1 << 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	if (likely(hwa742.vsync_only || force_vsync)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	if ((width * hwa742.pix_tx_time / 1000) * height <
	    (y + height) * (hwa742.line_upd_time / 1000)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	hwa742.extif->enable_tearsync(1, y + 1);
}

static void disable_tearsync(void)
{
	u8 b;

	hwa742.extif->enable_tearsync(0, 0);

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b &= ~(1 << 2);
	hwa742_write_reg(HWA742_NDP_CTRL, b);
}

static inline struct hwa742_request *alloc_req(bool can_sleep)
{
	unsigned long flags;
	struct hwa742_request *req;
	int req_flags = 0;

	if (can_sleep)
		down(&hwa742.req_sema);
	else
		req_flags = REQ_FROM_IRQ_POOL;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	BUG_ON(list_empty(&hwa742.free_req_list));
	req = list_entry(hwa742.free_req_list.next,
			 struct hwa742_request, entry);
	list_del(&req->entry);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	INIT_LIST_HEAD(&req->entry);
	req->flags = req_flags;

	return req;
}

static inline void free_req(struct hwa742_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&hwa742.req_lock, flags);

	list_move(&req->entry, &hwa742.free_req_list);
	if (!(req->flags & REQ_FROM_IRQ_POOL))
		up(&hwa742.req_sema);

	spin_unlock_irqrestore(&hwa742.req_lock, flags);
}

static void process_pending_requests(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hwa742.req_lock, flags);

	while (!list_empty(&hwa742.pending_req_list)) {
		struct hwa742_request *req;
		void (*complete)(void *);
		void *complete_data;

		req = list_entry(hwa742.pending_req_list.next,
				 struct hwa742_request, entry);
		spin_unlock_irqrestore(&hwa742.req_lock, flags);

		if (req->handler(req) == REQ_PENDING)
			return;

		complete = req->complete;
		complete_data = req->complete_data;
		free_req(req);

		if (complete)
			complete(complete_data);

		spin_lock_irqsave(&hwa742.req_lock, flags);
	}

	spin_unlock_irqrestore(&hwa742.req_lock, flags);
}

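/*
 * Append a list of requests to the pending queue. Processing is started
 * only if the queue was empty; otherwise the completion of the request
 * currently in flight will pick up the new entries.
 */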
static void submit_req_list(struct list_head *head)
{
	unsigned long flags;
	int process = 1;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	if (likely(!list_empty(&hwa742.pending_req_list)))
		process = 0;
	list_splice_init(head, hwa742.pending_req_list.prev);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	if (process)
		process_pending_requests();
}

static void request_complete(void *data)
{
	struct hwa742_request *req = (struct hwa742_request *)data;
	void (*complete)(void *);
	void *complete_data;

	complete = req->complete;
	complete_data = req->complete_data;

	free_req(req);

	if (complete)
		complete(complete_data);

	process_pending_requests();
}

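/*
 * Handler for a frame update request: program the pixel format and update
 * window, arm tearing synchronization if requested, point the internal LCD
 * controller at the right part of the framebuffer and start the transfer.
 * Returns REQ_PENDING, since the transfer completes asynchronously through
 * request_complete().
 */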
static int send_frame_handler(struct hwa742_request *req)
{
	struct update_param *par = &req->par.update;
	int x = par->x;
	int y = par->y;
	int w = par->width;
	int h = par->height;
	int bpp;
	int conv, transl;
	unsigned long offset;
	int color_mode = par->color_mode;
	int flags = par->flags;
	int scr_width = hwa742.fbdev->panel->x_res;
	int scr_height = hwa742.fbdev->panel->y_res;

#ifdef VERBOSE
	dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
		"color_mode %d flags %d\n",
		x, y, w, h, scr_width, color_mode, flags);
#endif

	switch (color_mode) {
	case OMAPFB_COLOR_YUV422:
		bpp = 16;
		conv = 0x08;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_YUV420:
		bpp = 12;
		conv = 0x09;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_RGB565:
		bpp = 16;
		conv = 0x01;
		transl = 0x05;
		break;
	default:
		return -EINVAL;
	}

	if (hwa742.prev_flags != flags ||
	    hwa742.prev_color_mode != color_mode) {
		set_format_regs(conv, transl, flags);
		hwa742.prev_color_mode = color_mode;
		hwa742.prev_flags = flags;
	}
	flags = req->par.update.flags;
	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
		enable_tearsync(y, scr_width, h, scr_height,
				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
	else
		disable_tearsync();

	set_window_regs(x, y, x + w, y + h);

	offset = (scr_width * y + x) * bpp / 8;

	hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
			OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
			color_mode);

	hwa742.extif->set_bits_per_cycle(16);

	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
	hwa742.extif->transfer_area(w, h, request_complete, req);

	return REQ_PENDING;
}

static void send_frame_complete(void *data)
{
	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
}

#define ADD_PREQ(_x, _y, _w, _h, can_sleep) do {\
	req = alloc_req(can_sleep);		\
	req->handler = send_frame_handler;	\
	req->complete = send_frame_complete;	\
	req->par.update.x = _x;			\
	req->par.update.y = _y;			\
	req->par.update.width = _w;		\
	req->par.update.height = _h;		\
	req->par.update.color_mode = color_mode;\
	req->par.update.flags = flags;		\
	list_add_tail(&req->entry, req_head);	\
} while(0)

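/*
 * Split an update window into a list of frame requests. Odd-aligned single
 * pixel columns on the left and right edges are updated separately, and an
 * even-width span larger than the external interface's maximum transmit
 * size is split into two requests. Tearing sync is kept only for the first
 * request of the resulting list.
 */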
static void create_req_list(struct omapfb_update_window *win,
			    struct list_head *req_head,
			    bool can_sleep)
{
	struct hwa742_request *req;
	int x = win->x;
	int y = win->y;
	int width = win->width;
	int height = win->height;
	int color_mode;
	int flags;

	flags = win->format & ~OMAPFB_FORMAT_MASK;
	color_mode = win->format & OMAPFB_FORMAT_MASK;

	if (x & 1) {
		ADD_PREQ(x, y, 1, height, can_sleep);
		width--;
		x++;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width & ~1) {
		unsigned int xspan = width & ~1;
		unsigned int ystart = y;
		unsigned int yspan = height;

		if (xspan * height * 2 > hwa742.max_transmit_size) {
			yspan = hwa742.max_transmit_size / (xspan * 2);
			ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
			ystart += yspan;
			yspan = height - yspan;
			flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
		}

		ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
		x += xspan;
		width -= xspan;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width)
		ADD_PREQ(x, y, 1, height, can_sleep);
}

static void auto_update_complete(void *data)
{
	if (!hwa742.stop_auto_update)
		mod_timer(&hwa742.auto_update_timer,
			  jiffies + HWA742_AUTO_UPDATE_TIME);
}

static void __hwa742_update_window_auto(bool can_sleep)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;

	create_req_list(&hwa742.auto_update_window, &req_list, can_sleep);
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete = auto_update_complete;
	last->complete_data = NULL;

	submit_req_list(&req_list);
}

static void hwa742_update_window_auto(struct timer_list *unused)
{
	__hwa742_update_window_auto(false);
}

static int hwa742_update_window_async(struct fb_info *fbi,
				      struct omapfb_update_window *win,
				      void (*complete_callback)(void *arg),
				      void *complete_callback_data)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;
	int r = 0;

	if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
		dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
		r = -EINVAL;
		goto out;
	}
	if (unlikely(win->format &
	    ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
	    OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
		dev_dbg(hwa742.fbdev->dev, "invalid window flag\n");
		r = -EINVAL;
		goto out;
	}

	create_req_list(win, &req_list, true);
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete = complete_callback;
	last->complete_data = (void *)complete_callback_data;

	submit_req_list(&req_list);

out:
	return r;
}

static int hwa742_setup_plane(int plane, int channel_out,
			      unsigned long offset, int screen_width,
			      int pos_x, int pos_y, int width, int height,
			      int color_mode)
{
	if (plane != OMAPFB_PLANE_GFX ||
	    channel_out != OMAPFB_CHANNEL_OUT_LCD)
		return -EINVAL;

	return 0;
}

static int hwa742_enable_plane(int plane, int enable)
{
	if (plane != 0)
		return -EINVAL;

	hwa742.int_ctrl->enable_plane(plane, enable);

	return 0;
}

static int sync_handler(struct hwa742_request *req)
{
	complete(req->par.sync);
	return REQ_COMPLETE;
}

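/*
 * Wait until all requests queued so far have been processed: queue a sync
 * request behind them and sleep until its handler fires.
 */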
static void hwa742_sync(void)
{
	LIST_HEAD(req_list);
	struct hwa742_request *req;
	struct completion comp;

	req = alloc_req(true);

	req->handler = sync_handler;
	req->complete = NULL;
	init_completion(&comp);
	req->par.sync = &comp;

	list_add(&req->entry, &req_list);
	submit_req_list(&req_list);

	wait_for_completion(&comp);
}

static void hwa742_bind_client(struct omapfb_notifier_block *nb)
{
	dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
	if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
	}
}

static int hwa742_set_update_mode(enum omapfb_update_mode mode)
{
	if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
	    mode != OMAPFB_UPDATE_DISABLED)
		return -EINVAL;

	if (mode == hwa742.update_mode)
		return 0;

	dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
		 mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
		 (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));

	switch (hwa742.update_mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
		break;
	case OMAPFB_AUTO_UPDATE:
		hwa742.stop_auto_update = 1;
		del_timer_sync(&hwa742.auto_update_timer);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	hwa742.update_mode = mode;
	hwa742_sync();
	hwa742.stop_auto_update = 0;

	switch (mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
		break;
	case OMAPFB_AUTO_UPDATE:
		__hwa742_update_window_auto(true);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	return 0;
}

static enum omapfb_update_mode hwa742_get_update_mode(void)
{
	return hwa742.update_mode;
}

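/* Round a time in picoseconds up to a whole number of external interface
 * bus ticks at the given clock divider. */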
static unsigned long round_to_extif_ticks(unsigned long ps, int div)
{
	int bus_tick = hwa742.extif_clk_period * div;
	return (ps + bus_tick - 1) / bus_tick * bus_tick;
}

static int calc_reg_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 12.2 ns (regs),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 16 ns (regs),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 2*SYSCLK  (regs),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		"extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.reg_timings;
	memset(t, 0, sizeof(*t));
	t->clk_div = div;
	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
		t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
		t->we_on_time, t->we_off_time, t->re_cycle_time,
		t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
		t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}

static int calc_lut_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns
	 */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		"extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.lut_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
		t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
		t->we_on_time, t->we_off_time, t->re_cycle_time,
		t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
		t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}

static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
{
	int max_clk_div;
	int div;

	hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
	for (div = 1; div < max_clk_div; div++) {
		if (calc_reg_timing(sysclk, div) == 0)
			break;
	}
	if (div >= max_clk_div)
		goto err;

	*extif_mem_div = div;

	for (div = 1; div < max_clk_div; div++) {
		if (calc_lut_timing(sysclk, div) == 0)
			break;
	}

	if (div >= max_clk_div)
		goto err;

	return 0;

err:
	dev_err(hwa742.fbdev->dev, "can't setup timings\n");
	return -1;
}

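/*
 * Derive the HWA742 system and pixel clock rates from the external clock
 * and the controller's clock source, PLL and divider registers.
 */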
static void calc_hwa742_clk_rates(unsigned long ext_clk,
				  unsigned long *sys_clk, unsigned long *pix_clk)
{
	int pix_clk_src;
	int sys_div = 0, sys_mul = 0;
	int pix_div;

	pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
	if ((pix_clk_src & (0x3 << 1)) == 0) {
		/* Source is the PLL */
		sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
		sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
		*sys_clk = ext_clk * sys_mul / sys_div;
	} else	/* else source is ext clk, or oscillator */
		*sys_clk = ext_clk;

	*pix_clk = *sys_clk / pix_div;		/* HZ */
	dev_dbg(hwa742.fbdev->dev,
		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
	dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
		*sys_clk, *pix_clk);
}

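/*
 * Configure tearing-effect signalling based on the panel timings read back
 * from the controller. The per-pixel transfer time and per-line update time
 * computed here are later used by enable_tearsync() to choose between
 * vsync-only and line-based synchronization.
 */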
static int setup_tearsync(unsigned long pix_clk, int extif_div)
{
	int hdisp, vdisp;
	int hndp, vndp;
	int hsw, vsw;
	int hs, vs;
	int hs_pol_inv, vs_pol_inv;
	int use_hsvs, use_ndp;
	u8 b;

	hsw = hwa742_read_reg(HWA742_HS_W_REG);
	vsw = hwa742_read_reg(HWA742_VS_W_REG);
	hs_pol_inv = !(hsw & 0x80);
	vs_pol_inv = !(vsw & 0x80);
	hsw = hsw & 0x7f;
	vsw = vsw & 0x3f;

	hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
	vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
		((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);

	hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
	vndp = hwa742_read_reg(HWA742_V_NDP_REG);

	/* time to transfer one pixel (16bpp) in ps */
	hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
	if (hwa742.extif->get_max_tx_rate != NULL) {
		/*
		 * The external interface might have a rate limitation,
		 * if so, we have to maximize our transfer rate.
		 */
		unsigned long min_tx_time;
		unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();

		dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
			max_tx_rate);
		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
		if (hwa742.pix_tx_time < min_tx_time)
			hwa742.pix_tx_time = min_tx_time;
	}

	/* time to update one line in ps */
	hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
	hwa742.line_upd_time *= 1000;
	if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
		/*
		 * transfer speed too low, we might have to use both
		 * HS and VS
		 */
		use_hsvs = 1;
	else
		/* decent transfer speed, we'll always use only VS */
		use_hsvs = 0;

	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
		/*
		 * HS or'ed with VS doesn't work, use the active high
		 * TE signal based on HNDP / VNDP
		 */
		use_ndp = 1;
		hs_pol_inv = 0;
		vs_pol_inv = 0;
		hs = hndp;
		vs = vndp;
	} else {
		/*
		 * Use HS or'ed with VS as a TE signal if both are needed
		 * or VNDP if only vsync is needed.
		 */
		use_ndp = 0;
		hs = hsw;
		vs = vsw;
		if (!use_hsvs) {
			hs_pol_inv = 0;
			vs_pol_inv = 0;
		}
	}

	hs = hs * 1000000 / (pix_clk / 1000);			/* ps */
	hs *= 1000;

	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
	vs *= 1000;

	if (vs <= hs)
		return -EDOM;
	/* set VS to 120% of HS to minimize VS detection time */
	vs = hs * 12 / 10;
	/* minimize HS too */
	hs = 10000;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b &= ~0x3;
	b |= use_hsvs ? 1 : 0;
	b |= (use_ndp && use_hsvs) ? 0 : 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	hwa742.vsync_only = !use_hsvs;

	dev_dbg(hwa742.fbdev->dev,
		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
		pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
	dev_dbg(hwa742.fbdev->dev,
		"hs %d ps vs %d ps mode %d vsync_only %d\n",
		hs, vs, (b & 0x3), !use_hsvs);

	return hwa742.extif->setup_tearsync(1, hs, vs,
					    hs_pol_inv, vs_pol_inv, extif_div);
}

static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
{
	hwa742.int_ctrl->get_caps(plane, caps);
	caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
		OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
	if (hwa742.te_connected)
		caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
	caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
			   (1 << OMAPFB_COLOR_YUV420);
}

static void hwa742_suspend(void)
{
	hwa742.update_mode_before_suspend = hwa742.update_mode;
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* Enable sleep mode */
	hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
	clk_disable(hwa742.sys_ck);
}

static void hwa742_resume(void)
{
	clk_enable(hwa742.sys_ck);

	/* Disable sleep mode */
	hwa742_write_reg(HWA742_POWER_SAVE, 0);
	while (1) {
		/* Loop until PLL output is stabilized */
		if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(5));
	}
	hwa742_set_update_mode(hwa742.update_mode_before_suspend);
}

static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
		       struct omapfb_mem_desc *req_vram)
{
	int r = 0, i;
	u8 rev, conf;
	unsigned long ext_clk;
	unsigned long sys_clk, pix_clk;
	int extif_mem_div;
	struct omapfb_platform_data *omapfb_conf;

	BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);

	hwa742.fbdev = fbdev;
	hwa742.extif = fbdev->ext_if;
	hwa742.int_ctrl = fbdev->int_ctrl;

	omapfb_conf = dev_get_platdata(fbdev->dev);

	hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");

	spin_lock_init(&hwa742.req_lock);

	if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
		goto err1;

	if ((r = hwa742.extif->init(fbdev)) < 0)
		goto err2;

	ext_clk = clk_get_rate(hwa742.sys_ck);
	if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
		goto err3;
	hwa742.extif->set_timings(&hwa742.reg_timings);
	clk_prepare_enable(hwa742.sys_ck);

	calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
	if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
		goto err4;
	hwa742.extif->set_timings(&hwa742.reg_timings);

	rev = hwa742_read_reg(HWA742_REV_CODE_REG);
	if ((rev & 0xfc) != 0x80) {
		dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
		r = -ENODEV;
		goto err4;
	}

	if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
		dev_err(fbdev->dev,
			"HWA742: controller not initialized by the bootloader\n");
		r = -ENODEV;
		goto err4;
	}

	if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
		dev_err(hwa742.fbdev->dev,
			"HWA742: can't setup tearing synchronization\n");
		goto err4;
	}
	hwa742.te_connected = 1;

	hwa742.max_transmit_size = hwa742.extif->max_transmit_size;

	hwa742.update_mode = OMAPFB_UPDATE_DISABLED;

	hwa742.auto_update_window.x = 0;
	hwa742.auto_update_window.y = 0;
	hwa742.auto_update_window.width = fbdev->panel->x_res;
	hwa742.auto_update_window.height = fbdev->panel->y_res;
	hwa742.auto_update_window.format = 0;

	timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);

	hwa742.prev_color_mode = -1;
	hwa742.prev_flags = 0;

	hwa742.fbdev = fbdev;

	INIT_LIST_HEAD(&hwa742.free_req_list);
	INIT_LIST_HEAD(&hwa742.pending_req_list);
	for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
		list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
	BUG_ON(i <= IRQ_REQ_POOL_SIZE);
	sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);

	conf = hwa742_read_reg(HWA742_CONFIG_REG);
	dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d "
			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);

	return 0;
err4:
	clk_disable_unprepare(hwa742.sys_ck);
err3:
	hwa742.extif->cleanup();
err2:
	hwa742.int_ctrl->cleanup();
err1:
	return r;
}

static void hwa742_cleanup(void)
{
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	hwa742.extif->cleanup();
	hwa742.int_ctrl->cleanup();
	clk_disable_unprepare(hwa742.sys_ck);
}

struct lcd_ctrl hwa742_ctrl = {
	.name			= "hwa742",
	.init			= hwa742_init,
	.cleanup		= hwa742_cleanup,
	.bind_client		= hwa742_bind_client,
	.get_caps		= hwa742_get_caps,
	.set_update_mode	= hwa742_set_update_mode,
	.get_update_mode	= hwa742_get_update_mode,
	.setup_plane		= hwa742_setup_plane,
	.enable_plane		= hwa742_enable_plane,
	.update_window		= hwa742_update_window_async,
	.sync			= hwa742_sync,
	.suspend		= hwa742_suspend,
	.resume			= hwa742_resume,
};