xref: /openbmc/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c (revision 3213486f)
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

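/*
 * A block with enc_id == 0 is unreserved; RESERVED_BY_OTHER(blk, r)
 * evaluates true only when the block is already bound to a non-zero
 * encoder id different from the requesting id r.
 */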
#define RESERVED_BY_OTHER(h, r)  \
		((h)->enc_id && (h)->enc_id != r)

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 * @hw_res:    Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};

/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:	List head for list of all hardware blocks tracking items
 * @id:		Hardware ID number, within its own space, i.e. LM_X
 * @enc_id:	Encoder id to which this blk is bound
 * @hw:		Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
	struct list_head list;
	uint32_t id;
	uint32_t enc_id;
	struct dpu_hw_blk *hw;
};

void dpu_rm_init_hw_iter(
		struct dpu_rm_hw_iter *iter,
		uint32_t enc_id,
		enum dpu_hw_blk_type type)
{
	memset(iter, 0, sizeof(*iter));
	iter->enc_id = enc_id;
	iter->type = type;
}

static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
		DPU_ERROR("invalid rm or iterator\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	if (i->blk && (&i->blk->list == blk_list)) {
		DPU_DEBUG("attempt to resume iteration past last entry\n");
		return false;
	}

	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		if (i->enc_id == i->blk->enc_id) {
			i->hw = i->blk->hw;
			DPU_DEBUG("found type %d id %d for enc %d\n",
					i->type, i->blk->id, i->enc_id);
			return true;
		}
	}

	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}

bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _dpu_rm_get_hw_locked(rm, i);
	mutex_unlock(&rm->rm_lock);

	return ret;
}
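
/*
 * Usage sketch (illustrative only, not part of the driver): walk every
 * pingpong block currently bound to a hypothetical encoder id my_enc_id;
 * passing enc_id 0 instead iterates the unreserved blocks.
 *
 *	struct dpu_rm_hw_iter iter;
 *
 *	dpu_rm_init_hw_iter(&iter, my_enc_id, DPU_HW_BLK_PINGPONG);
 *	while (dpu_rm_get_hw(rm, &iter)) {
 *		struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(iter.hw);
 *
 *		... program pp ...
 *	}
 */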

static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
	switch (type) {
	case DPU_HW_BLK_LM:
		dpu_hw_lm_destroy(hw);
		break;
	case DPU_HW_BLK_CTL:
		dpu_hw_ctl_destroy(hw);
		break;
	case DPU_HW_BLK_PINGPONG:
		dpu_hw_pingpong_destroy(hw);
		break;
	case DPU_HW_BLK_INTF:
		dpu_hw_intf_destroy(hw);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		break;
	}
}

int dpu_rm_destroy(struct dpu_rm *rm)
{
	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
	enum dpu_hw_blk_type type;

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
				list) {
			list_del(&hw_cur->list);
			_dpu_rm_hw_destroy(type, hw_cur->hw);
			kfree(hw_cur);
		}
	}

	mutex_destroy(&rm->rm_lock);

	return 0;
}

static int _dpu_rm_hw_blk_create(
		struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		enum dpu_hw_blk_type type,
		uint32_t id,
		void *hw_catalog_info)
{
	struct dpu_rm_hw_blk *blk;
	void *hw;

	switch (type) {
	case DPU_HW_BLK_LM:
		hw = dpu_hw_lm_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_CTL:
		hw = dpu_hw_ctl_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_PINGPONG:
		hw = dpu_hw_pingpong_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_INTF:
		hw = dpu_hw_intf_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(hw)) {
		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
				type, PTR_ERR(hw));
		return -EFAULT;
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		_dpu_rm_hw_destroy(type, hw);
		return -ENOMEM;
	}

	blk->id = id;
	blk->hw = hw;
	blk->enc_id = 0;
	list_add_tail(&blk->list, &rm->hw_blks[type]);

	return 0;
}

int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;
	enum dpu_hw_blk_type type;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid rm, catalog or mmio\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	mutex_init(&rm->rm_lock);

	for (type = 0; type < DPU_HW_BLK_MAX; type++)
		INIT_LIST_HEAD(&rm->hw_blks[type]);

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
				cat->mixer[i].id, &cat->mixer[i]);
		if (rc) {
			DPU_ERROR("failed: lm hw not available\n");
			goto fail;
		}

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
				cat->pingpong[i].id, &cat->pingpong[i]);
		if (rc) {
			DPU_ERROR("failed: pp hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->intf_count; i++) {
		if (cat->intf[i].type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
				cat->intf[i].id, &cat->intf[i]);
		if (rc) {
			DPU_ERROR("failed: intf hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->ctl_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
				cat->ctl[i].id, &cat->ctl[i]);
		if (rc) {
			DPU_ERROR("failed: ctl hw not available\n");
			goto fail;
		}
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc;
}
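
/*
 * Usage sketch (illustrative; the dpu_kms field names are assumptions,
 * not taken from this file): callers pair dpu_rm_init() with
 * dpu_rm_destroy() over the device lifetime.
 *
 *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
 *	if (rc)
 *		return rc;
 *	...
 *	dpu_rm_destroy(&dpu_kms->rm);
 */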

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the proposed use case requirements, including hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @enc_id: encoder id requesting the allocation
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer; the function checks if the lm, and all other
 *      hardwired blocks connected to the lm (pp), are available and
 *      appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is compatible with
 *              primary_lm as well as satisfying all other requirements
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
		struct dpu_rm *rm,
		uint32_t enc_id,
		struct dpu_rm_requirements *reqs,
		struct dpu_rm_hw_blk *lm,
		struct dpu_rm_hw_blk **pp,
		struct dpu_rm_hw_blk *primary_lm)
{
	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
	struct dpu_rm_hw_iter iter;

	*pp = NULL;

	DPU_DEBUG("check lm %d pp %d\n",
			   lm_cfg->id, lm_cfg->pingpong);

	/* Check if this layer mixer is a peer of the proposed primary LM */
	if (primary_lm) {
		const struct dpu_lm_cfg *prim_lm_cfg =
				to_dpu_hw_mixer(primary_lm->hw)->cap;

		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
					prim_lm_cfg->id);
			return false;
		}
	}

	/* Already reserved? */
	if (RESERVED_BY_OTHER(lm, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
		return false;
	}

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id == lm_cfg->pingpong) {
			*pp = iter.blk;
			break;
		}
	}

	if (!*pp) {
		DPU_ERROR("failed to get pp %d on lm %d\n",
				lm_cfg->pingpong, lm_cfg->id);
		return false;
	}

	if (RESERVED_BY_OTHER(*pp, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
				(*pp)->id);
		return false;
	}

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i, rc = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
	while (lm_count != reqs->topology.num_lm &&
			_dpu_rm_get_hw_locked(rm, &iter_i)) {
		memset(&lm, 0, sizeof(lm));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		if (!_dpu_rm_check_lm_and_get_connected_blks(
				rm, enc_id, reqs, lm[lm_count],
				&pp[lm_count], NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

		while (lm_count != reqs->topology.num_lm &&
				_dpu_rm_get_hw_locked(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(
					rm, enc_id, reqs, iter_j.blk,
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->enc_id = enc_id;
		pp[i]->enc_id = enc_id;

		trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
	}

	return rc;
}
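
/*
 * Illustrative walk-through (assumes a catalog where LM_0 and LM_1 list
 * each other in lm_pair_mask): for a topology with num_lm = 2, the outer
 * loop proposes LM_0 as the primary mixer and resolves its hardwired
 * pingpong; the inner loop then accepts only LM_1, since test_bit() on
 * LM_0's lm_pair_mask holds for LM_1 and LM_1's pingpong is free. The
 * enc_id tags are written only once a complete LM/PP set has been found,
 * so a failed search leaves no partial reservation behind.
 */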

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter;
	int i = 0, num_ctls = 0;
	bool needs_split_display = false;

	memset(&ctls, 0, sizeof(ctls));

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
		unsigned long features = ctl->caps->features;
		bool has_split_display;

		if (RESERVED_BY_OTHER(iter.blk, enc_id))
			continue;

		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctls[i] = iter.blk;
		DPU_DEBUG("ctl %d match\n", iter.blk->id);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
		ctls[i]->enc_id = enc_id;
		trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
	}

	return 0;
}
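
/*
 * Example (illustrative): a split display such as a dual-DSI panel
 * reports num_intf = 2, so _dpu_rm_needs_split_display() is true and two
 * free CTLs advertising DPU_CTL_SPLIT_DISPLAY in their feature mask are
 * required; a single-interface display instead needs one CTL without
 * that feature, which is why the feature bit must match
 * needs_split_display exactly rather than being treated as optional.
 */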

static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		uint32_t enc_id,
		uint32_t id,
		enum dpu_hw_blk_type type)
{
	struct dpu_rm_hw_iter iter;
	int ret = 0;

	/* Find the block entry in the rm, and note the reservation */
	dpu_rm_init_hw_iter(&iter, 0, type);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id != id)
			continue;

		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
			DPU_ERROR("type %d id %d already reserved\n", type, id);
			return -ENAVAIL;
		}

		iter.blk->enc_id = enc_id;
		trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
		break;
	}

	/* Shouldn't happen since intfs are fixed at probe */
	if (!iter.hw) {
		DPU_ERROR("couldn't find type %d id %d\n", type, id);
		return -EINVAL;
	}

	return ret;
}

static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, enc_id, id,
				DPU_HW_BLK_INTF);
		if (ret)
			return ret;
	}

	return ret;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_intf_related_hw(rm, enc->base.id,
			&reqs->hw_res);
}

static int _dpu_rm_populate_requirements(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->enc_id == enc_id) {
				blk->enc_id = 0;
				DPU_DEBUG("rel enc %d %d %d\n", enc_id,
					  type, blk->id);
			}
		}
	}
}

void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
	mutex_lock(&rm->rm_lock);

	_dpu_rm_release_reservation(rm, enc->base.id);

	mutex_unlock(&rm->rm_lock);
}

int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology,
		bool test_only)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
		      enc->base.id, crtc_state->crtc->base.id, test_only);

	mutex_lock(&rm->rm_lock);

	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
					    topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		goto end;
	}

	ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
	if (ret) {
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
		_dpu_rm_release_reservation(rm, enc->base.id);
	} else if (test_only) {
		/* test_only: test the reservation and then undo */
		DPU_DEBUG("test_only: discard test [enc: %d]\n",
				enc->base.id);
		_dpu_rm_release_reservation(rm, enc->base.id);
	}

end:
	mutex_unlock(&rm->rm_lock);

	return ret;
}
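
/*
 * Usage sketch (illustrative; mirrors an encoder atomic_check/modeset
 * flow, with the dpu_kms name assumed, not taken from this file):
 *
 *	test during atomic check, reservation is discarded on success:
 *		ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
 *				     topology, true);
 *	commit at modeset, reservation held until dpu_rm_release():
 *		ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
 *				     topology, false);
 */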