/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

#define RESERVED_BY_OTHER(h, r) \
	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
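
/*
 * Example: with an incoming request tagged enc_id 31, a block whose
 * active reservation carries enc_id 32 is "reserved by other", while a
 * block with no active reservation (rsvp == NULL) is free to claim.
 */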

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:	selected topology for the display
 * @hw_res:	Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};

/**
 * struct dpu_rm_rsvp - Use Case Reservation tagging structure
 *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain.
 *	Tagging blocks, rather than keeping per-reservation lists of block
 *	pointers, avoids some list management, since we don't know in advance
 *	how many blocks of each type a given use case may require.
 * @list:	List head for list of all reservations
 * @seq:	Global RSVP sequence number for debugging, especially for
 *		differentiating different allocations for the same encoder.
 * @enc_id:	Reservations are tracked by Encoder DRM object ID.
 *		CRTCs may be connected to multiple Encoders.
 *		An encoder or connector id identifies the display path.
 */
struct dpu_rm_rsvp {
	struct list_head list;
	uint32_t seq;
	uint32_t enc_id;
};

/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:	List head for list of all hardware blocks tracking items
 * @rsvp:	Pointer to use case reservation if reserved by a client
 * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
 *		request. Will be swapped into rsvp if proposal is accepted
 * @type:	Type of hardware block this structure tracks
 * @id:		Hardware ID number, within its own space, e.g. LM_X
 * @hw:		Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
	struct list_head list;
	struct dpu_rm_rsvp *rsvp;
	struct dpu_rm_rsvp *rsvp_nxt;
	enum dpu_hw_blk_type type;
	uint32_t id;
	struct dpu_hw_blk *hw;
};

/**
 * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
 */
enum dpu_rm_dbg_rsvp_stage {
	DPU_RM_STAGE_BEGIN,
	DPU_RM_STAGE_AFTER_CLEAR,
	DPU_RM_STAGE_AFTER_RSVPNEXT,
	DPU_RM_STAGE_FINAL
};

static void _dpu_rm_print_rsvps(
		struct dpu_rm *rm,
		enum dpu_rm_dbg_rsvp_stage stage)
{
	struct dpu_rm_rsvp *rsvp;
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	DPU_DEBUG("%d\n", stage);

	list_for_each_entry(rsvp, &rm->rsvps, list) {
		DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
			      rsvp->enc_id);
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (!blk->rsvp && !blk->rsvp_nxt)
				continue;

			DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
				(blk->rsvp) ? blk->rsvp->seq : 0,
				(blk->rsvp) ? blk->rsvp->enc_id : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
				blk->type, blk->id);
		}
	}
}

struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
{
	return rm->hw_mdp;
}

void dpu_rm_init_hw_iter(
		struct dpu_rm_hw_iter *iter,
		uint32_t enc_id,
		enum dpu_hw_blk_type type)
{
	memset(iter, 0, sizeof(*iter));
	iter->enc_id = enc_id;
	iter->type = type;
}

static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
		DPU_ERROR("invalid rm\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	if (i->blk && (&i->blk->list == blk_list)) {
		DPU_DEBUG("attempt to resume iteration past last\n");
		return false;
	}

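	/*
	 * list_prepare_entry() lets the walk below resume from the last
	 * visited block on repeated calls, or start from the list head on
	 * the first call (when i->blk is still NULL).
	 */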
	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		struct dpu_rm_rsvp *rsvp = i->blk->rsvp;

		if (i->blk->type != i->type) {
			DPU_ERROR("found incorrect block type %d on %d list\n",
					i->blk->type, i->type);
			return false;
		}

		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
			i->hw = i->blk->hw;
			DPU_DEBUG("found type %d id %d for enc %d\n",
					i->type, i->blk->id, i->enc_id);
			return true;
		}
	}

	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}

bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _dpu_rm_get_hw_locked(rm, i);
	mutex_unlock(&rm->rm_lock);

	return ret;
}

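/*
 * Illustrative usage, a sketch rather than code from this file: a client
 * such as dpu_encoder can walk the CTL blocks reserved for its encoder
 * like this (the drm_enc variable name is an assumption for the example):
 *
 *	struct dpu_rm_hw_iter iter;
 *
 *	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_CTL);
 *	while (dpu_rm_get_hw(rm, &iter)) {
 *		struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.hw);
 *
 *		... program ctl ...
 *	}
 */
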
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
	switch (type) {
	case DPU_HW_BLK_LM:
		dpu_hw_lm_destroy(hw);
		break;
	case DPU_HW_BLK_CTL:
		dpu_hw_ctl_destroy(hw);
		break;
	case DPU_HW_BLK_PINGPONG:
		dpu_hw_pingpong_destroy(hw);
		break;
	case DPU_HW_BLK_INTF:
		dpu_hw_intf_destroy(hw);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		break;
	}
}

int dpu_rm_destroy(struct dpu_rm *rm)
{
	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
	enum dpu_hw_blk_type type;

	if (!rm) {
		DPU_ERROR("invalid rm\n");
		return -EINVAL;
	}

	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
		list_del(&rsvp_cur->list);
		kfree(rsvp_cur);
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
				list) {
			list_del(&hw_cur->list);
			_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
			kfree(hw_cur);
		}
	}

	dpu_hw_mdp_destroy(rm->hw_mdp);
	rm->hw_mdp = NULL;

	mutex_destroy(&rm->rm_lock);

	return 0;
}

static int _dpu_rm_hw_blk_create(
		struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		enum dpu_hw_blk_type type,
		uint32_t id,
		void *hw_catalog_info)
{
	struct dpu_rm_hw_blk *blk;
	void *hw;

	switch (type) {
	case DPU_HW_BLK_LM:
		hw = dpu_hw_lm_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_CTL:
		hw = dpu_hw_ctl_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_PINGPONG:
		hw = dpu_hw_pingpong_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_INTF:
		hw = dpu_hw_intf_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(hw)) {
		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
				type, PTR_ERR(hw));
		return -EFAULT;
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		_dpu_rm_hw_destroy(type, hw);
		return -ENOMEM;
	}

	blk->type = type;
	blk->id = id;
	blk->hw = hw;
	list_add_tail(&blk->list, &rm->hw_blks[type]);

	return 0;
}

int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		struct drm_device *dev)
{
	int rc, i;
	enum dpu_hw_blk_type type;

	if (!rm || !cat || !mmio || !dev) {
		DPU_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	mutex_init(&rm->rm_lock);

	INIT_LIST_HEAD(&rm->rsvps);
	for (type = 0; type < DPU_HW_BLK_MAX; type++)
		INIT_LIST_HEAD(&rm->hw_blks[type]);

	rm->dev = dev;

	/* Some of the sub-blocks require an mdptop to be created */
	rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
		/* PTR_ERR(NULL) is 0; don't report success on failure */
		rc = IS_ERR(rm->hw_mdp) ? PTR_ERR(rm->hw_mdp) : -EINVAL;
		rm->hw_mdp = NULL;
		DPU_ERROR("failed: mdp hw not available\n");
		goto fail;
	}

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
				cat->mixer[i].id, &cat->mixer[i]);
		if (rc) {
			DPU_ERROR("failed: lm hw not available\n");
			goto fail;
		}

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
				cat->pingpong[i].id, &cat->pingpong[i]);
		if (rc) {
			DPU_ERROR("failed: pp hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->intf_count; i++) {
		if (cat->intf[i].type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
				cat->intf[i].id, &cat->intf[i]);
		if (rc) {
			DPU_ERROR("failed: intf hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->ctl_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
				cat->ctl[i].id, &cat->ctl[i]);
		if (rc) {
			DPU_ERROR("failed: ctl hw not available\n");
			goto fail;
		}
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc;
}

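/*
 * Sketch of how a KMS driver might bring the RM up and tear it down; the
 * dpu_kms field names here are assumptions for illustration:
 *
 *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
 *			 dpu_kms->dev);
 *	if (rc)
 *		return rc;
 *	...
 *	dpu_rm_destroy(&dpu_kms->rm);
 */
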
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the proposed use case requirements, incl. hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @rsvp: reservation currently being created
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer; the function checks if lm, and all other
 *      hardwired blocks connected to the lm (pp), are available and
 *      appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is a compatible peer
 *              of primary_lm, as well as satisfying all other requirements
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs,
		struct dpu_rm_hw_blk *lm,
		struct dpu_rm_hw_blk **pp,
		struct dpu_rm_hw_blk *primary_lm)
{
	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
	struct dpu_rm_hw_iter iter;

	*pp = NULL;

	DPU_DEBUG("check lm %d pp %d\n",
			   lm_cfg->id, lm_cfg->pingpong);

	/* Check if this layer mixer is a peer of the proposed primary LM */
	if (primary_lm) {
		const struct dpu_lm_cfg *prim_lm_cfg =
				to_dpu_hw_mixer(primary_lm->hw)->cap;

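		/*
		 * Example: LM_0 and LM_1 can be ganged for a dual-mixer
		 * topology only if LM_0's lm_pair_mask has the LM_1 bit set.
		 */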
		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
					prim_lm_cfg->id);
			return false;
		}
	}

	/* Already reserved? */
	if (RESERVED_BY_OTHER(lm, rsvp)) {
		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
		return false;
	}

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id == lm_cfg->pingpong) {
			*pp = iter.blk;
			break;
		}
	}

	if (!*pp) {
		DPU_ERROR("failed to get pp %d on lm %d\n",
				lm_cfg->pingpong, lm_cfg->id);
		return false;
	}

	if (RESERVED_BY_OTHER(*pp, rsvp)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
				(*pp)->id);
		return false;
	}

	return true;
}

static int _dpu_rm_reserve_lms(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs)
{
	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i, rc = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
	while (lm_count != reqs->topology.num_lm &&
			_dpu_rm_get_hw_locked(rm, &iter_i)) {
		memset(&lm, 0, sizeof(lm));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		if (!_dpu_rm_check_lm_and_get_connected_blks(
				rm, rsvp, reqs, lm[lm_count],
				&pp[lm_count], NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

		while (lm_count != reqs->topology.num_lm &&
				_dpu_rm_get_hw_locked(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(
					rm, rsvp, reqs, iter_j.blk,
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->rsvp_nxt = rsvp;
		pp[i]->rsvp_nxt = rsvp;

		trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
					 pp[i]->id);
	}

	return rc;
}

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		const struct msm_display_topology *top)
{
	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter;
	int i = 0, num_ctls = 0;
	bool needs_split_display = false;

	memset(&ctls, 0, sizeof(ctls));

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
		unsigned long features = ctl->caps->features;
		bool has_split_display;

		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctls[i] = iter.blk;
		DPU_DEBUG("ctl %d match\n", iter.blk->id);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
		ctls[i]->rsvp_nxt = rsvp;
		trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
					  rsvp->enc_id);
	}

	return 0;
}

static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		uint32_t id,
		enum dpu_hw_blk_type type)
{
	struct dpu_rm_hw_iter iter;
	int ret = 0;

	/* Find the block entry in the rm, and note the reservation */
	dpu_rm_init_hw_iter(&iter, 0, type);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id != id)
			continue;

		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
			DPU_ERROR("type %d id %d already reserved\n", type, id);
			return -ENAVAIL;
		}

		iter.blk->rsvp_nxt = rsvp;
		trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
					  rsvp->enc_id);
		break;
	}

	/* Shouldn't happen since intfs are fixed at probe */
	if (!iter.hw) {
		DPU_ERROR("couldn't find type %d id %d\n", type, id);
		return -EINVAL;
	}

	return ret;
}

static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
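		/* index i in hw_res->intfs maps to hw block id INTF_0 + i */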
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, rsvp, id,
				DPU_HW_BLK_INTF);
		if (ret)
			return ret;
	}

	return ret;
}

static int _dpu_rm_make_next_rsvp(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	/* Create reservation info, tag reserved blocks with it as we go */
	rsvp->seq = ++rm->rsvp_next_seq;
	rsvp->enc_id = enc->base.id;
	list_add_tail(&rsvp->list, &rm->rsvps);

	ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
}

static int _dpu_rm_populate_requirements(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
		struct dpu_rm *rm,
		struct drm_encoder *enc)
{
	struct dpu_rm_rsvp *i;

	if (!rm || !enc) {
		DPU_ERROR("invalid params\n");
		return NULL;
	}

	if (list_empty(&rm->rsvps))
		return NULL;

	list_for_each_entry(i, &rm->rsvps, list)
		if (i->enc_id == enc->base.id)
			return i;

	return NULL;
}

/**
 * _dpu_rm_release_rsvp - release a reservation and untag its resources
 * @rm:		KMS handle
 * @rsvp:	RSVP to release, along with any blocks tagged with it
 */
static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
	struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	if (!rsvp)
		return;

	DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);

	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
		if (rsvp == rsvp_c) {
			list_del(&rsvp_c->list);
			break;
		}
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp == rsvp) {
				blk->rsvp = NULL;
				DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
						rsvp->seq, rsvp->enc_id,
						blk->type, blk->id);
			}
			if (blk->rsvp_nxt == rsvp) {
				blk->rsvp_nxt = NULL;
				DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
						rsvp->seq, rsvp->enc_id,
						blk->type, blk->id);
			}
		}
	}

	kfree(rsvp);
}

void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
	struct dpu_rm_rsvp *rsvp;

	if (!rm || !enc) {
		DPU_ERROR("invalid params\n");
		return;
	}

	mutex_lock(&rm->rm_lock);

	rsvp = _dpu_rm_get_rsvp(rm, enc);
	if (!rsvp) {
		DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
		goto end;
	}

	_dpu_rm_release_rsvp(rm, rsvp);
end:
	mutex_unlock(&rm->rm_lock);
}

static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	/* Swap next rsvp to be the active */
	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp_nxt) {
				blk->rsvp = blk->rsvp_nxt;
				blk->rsvp_nxt = NULL;
			}
		}
	}
}

int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology,
		bool test_only)
{
	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
		      enc->base.id, crtc_state->crtc->base.id, test_only);

	mutex_lock(&rm->rm_lock);

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);

	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
					    topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		goto end;
	}

	/*
	 * We only support one active reservation per-hw-block. But to implement
	 * transactional semantics for test-only, and for allowing failure while
	 * modifying your existing reservation, over the course of this
	 * function we can have two reservations:
	 * Current: Existing reservation
	 * Next: Proposed reservation. The proposed reservation may fail, or may
	 *       be discarded if in test-only mode.
	 * If reservation is successful, and we're not in test-only, then we
	 * replace the current with the next.
	 */
	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
	if (!rsvp_nxt) {
		ret = -ENOMEM;
		goto end;
	}

	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);

	/* Check the proposed reservation, store it in hw's "next" field */
	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);

	if (ret) {
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
		_dpu_rm_release_rsvp(rm, rsvp_nxt);
	} else if (test_only) {
		/*
		 * In test-only mode the proposed reservation is validated
		 * and then discarded, so the atomic_check phase leaves no
		 * reservation state behind.
		 */
		DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
				rsvp_nxt->seq, rsvp_nxt->enc_id);
		_dpu_rm_release_rsvp(rm, rsvp_nxt);
	} else {
		_dpu_rm_release_rsvp(rm, rsvp_cur);

		_dpu_rm_commit_rsvp(rm, rsvp_nxt);
	}

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);

end:
	mutex_unlock(&rm->rm_lock);

	return ret;
}
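
/*
 * Illustrative call sequence, a sketch assuming a dpu_encoder-style caller
 * (the drm_enc and topology names are assumptions for the example):
 *
 *	ret = dpu_rm_reserve(rm, drm_enc, crtc_state, topology, true);
 *		(atomic_check: validate only; the reservation is discarded)
 *
 *	ret = dpu_rm_reserve(rm, drm_enc, crtc_state, topology, false);
 *		(modeset/commit path: the reservation is kept and committed)
 */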
889