// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: whether a vblank event must be sent once the
 *                        pending update has been applied
 * @event: pending vblank event, completed when the update is done
 * @planes: array of drm_plane structures, one for each layer exposed by the
 *          OVL components of this crtc
 * @layer_nr: number of entries in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous changes
 * @cmdq_client: mailbox client used to send command packets to the GCE
 * @cmdq_handle: command packet reused for every update
 * @cmdq_event: GCE event the command packet waits for before writing registers
 * @cmdq_vblank_cnt: vblank countdown used to detect a stuck command packet
 * @cb_blocking_queue: waitqueue used to wait for command packet completion
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @dma_dev: device used for DMA mapping of this crtc's framebuffers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 * @hw_lock: lock protecting display hardware accesses
 * @config_updating: whether an update is currently being pushed to hardware
 */
struct mtk_drm_crtc {
	struct drm_crtc base;
	bool enabled;

	bool pending_needs_vblank;
	struct drm_pending_vblank_event *event;

	struct drm_plane *planes;
	unsigned int layer_nr;
	bool pending_planes;
	bool pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client cmdq_client;
	struct cmdq_pkt cmdq_handle;
	u32 cmdq_event;
	u32 cmdq_vblank_cnt;
	wait_queue_head_t cb_blocking_queue;
#endif

	struct device *mmsys_dev;
	struct device *dma_dev;
	struct mtk_mutex *mutex;
	unsigned int ddp_comp_nr;
	struct mtk_ddp_comp **ddp_comp;

	/* lock for display hardware access */
	struct mutex hw_lock;
	bool config_updating;
};

struct mtk_crtc_state {
	struct drm_crtc_state base;

	bool pending_config;
	unsigned int pending_width;
	unsigned int pending_height;
	unsigned int pending_vrefresh;
};

static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	if (mtk_crtc->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
		drm_crtc_vblank_put(crtc);
		mtk_crtc->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}

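/*
 * When a GCE mailbox channel is available, register updates for a commit are
 * recorded into a single long-lived command packet and executed by the GCE
 * hardware once the configured display event fires, instead of being written
 * by the CPU in the vblank handler. The helpers below only manage the DMA
 * buffer backing that packet; the packet itself is built in
 * mtk_drm_crtc_update_config().
 */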
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
				   size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
#endif

static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);

	if (mtk_crtc->cmdq_client.chan) {
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}

static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;
	state->pending_config = false;

	return &state->base;
}

static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

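/*
 * Planes are numbered consecutively across the whole CRTC, while each OVL
 * component only knows about its own layers. This helper maps a plane back
 * to the component that owns it and to the layer index local to that
 * component.
 */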
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
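/*
 * Mailbox rx_callback, invoked once the GCE has executed the command packet
 * sent from mtk_drm_crtc_update_config(). It clears the pending flags that
 * the CPU path clears synchronously in mtk_crtc_ddp_config(), resets the
 * CMDQ timeout counter and wakes up anyone waiting in
 * mtk_drm_crtc_atomic_disable().
 */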
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif

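/*
 * Bring the display path up: power the domain, prepare the mutex and
 * component clocks, connect the components through mmsys, attach them to
 * the disp mutex, then configure and start every component. The output
 * bit depth is clamped to the lowest bpc advertised by any connector
 * driven by this CRTC.
 */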
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					  mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
					      mtk_crtc->ddp_comp[i]->id,
					      mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_add_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		/*
		 * The second component is assumed to blend on top of the
		 * output of the first one, so turn on its background colour
		 * input.
		 */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);

		/* Layers must not be enabled before the CRTC is enabled */
		plane_state->pending.enable = false;
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

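/*
 * Tear the display path down in roughly the reverse order of
 * mtk_crtc_ddp_hw_init(): stop the components, detach them from the disp
 * mutex, break the mmsys connections, release clocks and the power domain,
 * and complete any event still pending on an inactive CRTC.
 */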
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					     mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
						 mtk_crtc->ddp_comp[i]->id,
						 mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}

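/*
 * Single commit point for pending CRTC and plane state. With shadow
 * registers the update is written directly under the disp mutex; with a
 * CMDQ mailbox channel the update is recorded into the command packet and
 * handed to the GCE; otherwise the pending flags are left set and
 * mtk_crtc_ddp_irq() applies them at the next vblank.
 */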
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ command is expected to execute within the next
		 * three vblanks: occasionally one vblank fires before the
		 * message is sent, and one more fires after CMDQ is done,
		 * so if the packet has not completed after three vblank
		 * interrupts it is treated as a timeout.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}

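/*
 * Vblank callback registered on the first component of the path. Without
 * shadow registers or CMDQ the pending configuration is applied here by the
 * CPU; on the CMDQ path this only decrements the timeout counter armed in
 * mtk_drm_crtc_update_config() and reports a packet that failed to execute
 * in time.
 */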
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp);

	return 0;
}

static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_drm_crtc_update_config(mtk_crtc, false);
}

static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = pm_runtime_resume_and_get(comp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		pm_runtime_put(comp->dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i, ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	ret = pm_runtime_put(comp->dev);
	if (ret < 0)
		DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);

	mtk_crtc->enabled = false;
}

static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	unsigned long flags;

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		mtk_crtc->event = mtk_crtc_state->base.event;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = mtk_drm_crtc_destroy,
	.reset = mtk_drm_crtc_reset,
	.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state = mtk_drm_crtc_destroy_state,
	.enable_vblank = mtk_drm_crtc_enable_vblank,
	.disable_vblank = mtk_drm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup = mtk_drm_crtc_mode_fixup,
	.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
	.atomic_begin = mtk_drm_crtc_atomic_begin,
	.atomic_flush = mtk_drm_crtc_atomic_flush,
	.atomic_enable = mtk_drm_crtc_atomic_enable,
	.atomic_disable = mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

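/*
 * Only the components at the head of the path expose layers as DRM planes:
 * the first component contributes its layers, and a second one does so only
 * if it can take a background colour input (i.e. it is a cascaded blending
 * stage); components further down the path contribute none.
 */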
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				     &mtk_crtc->planes[mtk_crtc->layer_nr],
				     BIT(pipe),
				     mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							     num_planes),
				     mtk_ddp_comp_supported_rotations(comp),
				     mtk_ddp_comp_get_formats(comp),
				     mtk_ddp_comp_get_num_formats(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = NULL;

	if (!crtc)
		return NULL;

	mtk_crtc = to_mtk_crtc(crtc);
	if (!mtk_crtc)
		return NULL;

	return mtk_crtc->dma_dev;
}

int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const unsigned int *path, unsigned int path_len,
			int priv_data_index)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int ret;
	int i;
	bool has_ctm = false;
	uint gamma_lut_size = 0;
	struct drm_crtc *tmp;
	int crtc_i = 0;

	if (!path)
		return 0;

	priv = priv->all_drm_private[priv_data_index];

	drm_for_each_crtc(tmp, drm_dev)
		crtc_i++;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		/*
		 * Not every DRM component has a DT node: ovl_adaptor, for
		 * example, is a sub-driver brought up by the DRM driver
		 * itself, so a missing node is only a problem for the
		 * other components.
		 */
		if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 crtc_i, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		unsigned int comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    crtc_i);
		if (ret)
			return ret;
	}

	/*
	 * Default to the first component as the DMA device.
	 * For the ovl_adaptor sub-driver, the dma_dev_get callback is used
	 * to obtain a representative DMA device instead.
	 */
	mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	mutex_init(&mtk_crtc->hw_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	i = priv->mbox_index++;
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 i,
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif
	return 0;
}