// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in the MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx		0x34000
#define MDP_INTF_1_OFF_REV_7xxx		0x35000
#define MDP_INTF_2_OFF_REV_7xxx		0x36000
#define MDP_INTF_3_OFF_REV_7xxx		0x37000
#define MDP_INTF_4_OFF_REV_7xxx		0x38000
#define MDP_INTF_5_OFF_REV_7xxx		0x39000

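/*
 * Example (illustrative): on a pre-7xxx chip, the INTR_STATUS register of
 * INTF_1 sits at MDP_INTF_1_OFF + INTF_INTR_STATUS relative to the MDP
 * base; DPU_REG_READ()/DPU_REG_WRITE() apply hw->blk_off (the MDP base
 * taken from the catalog) on top of the offsets listed here.
 */
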
/**
 * struct dpu_intr_reg - offsets for one DPU interrupt register set
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - list of DPU interrupt register sets
 *
 * When making changes be sure to sync with enum dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	{
		MDP_SSPP_TOP0_OFF + INTR_CLEAR,
		MDP_SSPP_TOP0_OFF + INTR_EN,
		MDP_SSPP_TOP0_OFF + INTR_STATUS
	},
	{
		MDP_SSPP_TOP0_OFF + INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF + INTR2_EN,
		MDP_SSPP_TOP0_OFF + INTR2_STATUS
	},
	{
		MDP_SSPP_TOP0_OFF + HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF + HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF + HIST_INTR_STATUS
	},
	{
		MDP_INTF_0_OFF + INTF_INTR_CLEAR,
		MDP_INTF_0_OFF + INTF_INTR_EN,
		MDP_INTF_0_OFF + INTF_INTR_STATUS
	},
	{
		MDP_INTF_1_OFF + INTF_INTR_CLEAR,
		MDP_INTF_1_OFF + INTF_INTR_EN,
		MDP_INTF_1_OFF + INTF_INTR_STATUS
	},
	{
		MDP_INTF_2_OFF + INTF_INTR_CLEAR,
		MDP_INTF_2_OFF + INTF_INTR_EN,
		MDP_INTF_2_OFF + INTF_INTR_STATUS
	},
	{
		MDP_INTF_3_OFF + INTF_INTR_CLEAR,
		MDP_INTF_3_OFF + INTF_INTR_EN,
		MDP_INTF_3_OFF + INTF_INTR_STATUS
	},
	{
		MDP_INTF_4_OFF + INTF_INTR_CLEAR,
		MDP_INTF_4_OFF + INTF_INTR_EN,
		MDP_INTF_4_OFF + INTF_INTR_STATUS
	},
	{
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_STATUS
	},
	{
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_STATUS
	},
	{
		MDP_INTF_2_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_2_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_2_OFF_REV_7xxx + INTF_INTR_STATUS
	},
	{
		MDP_INTF_3_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_3_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_3_OFF_REV_7xxx + INTF_INTR_STATUS
	},
	{
		MDP_INTF_4_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_4_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_4_OFF_REV_7xxx + INTF_INTR_STATUS
	},
	{
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_STATUS
	},
};

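/*
 * Each irq_idx encodes a (register set, bit) pair: DPU_IRQ_REG() selects
 * an entry in dpu_intr_set[] and DPU_IRQ_MASK() selects a bit within that
 * register. For example, irq_idx 33 maps to register set 1 (the INTR2
 * registers), bit 1. The DPU_IRQ_IDX() macro used in dpu_core_irq() is
 * the inverse mapping.
 */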
#define DPU_IRQ_REG(irq_idx)	((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT((irq_idx) % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU KMS structure
 * @irq_idx:		interrupt index
 *
 * Called from dpu_core_irq() with intr->irq_lock held.
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_irq_callback *cb;

	VERB("irq_idx=%d\n", irq_idx);

	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);

	atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);

	/* Perform registered function callback */
	list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
}

irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				      irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status. Note that ffs()
		 * returns a 1-based bit position, hence the "bit - 1".
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear irq_status with
			 * the matching mask. Once irq_status is all cleared,
			 * the search can be stopped.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

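/*
 * A minimal sketch of the expected caller pattern for the *_locked helpers
 * below (this is how dpu_core_irq_register_callback() uses them):
 *
 *	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 *	ret = dpu_hw_intr_enable_irq_locked(intr, irq_idx);
 *	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 */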
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	u32 cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	u32 cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ disabled:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

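/**
 * dpu_core_irq_read - read (and optionally clear) an interrupt status bit
 * @dpu_kms:	Pointer to DPU KMS structure
 * @irq_idx:	interrupt index
 * @clear:	true to also clear the status bit if it is pending
 *
 * Return: the raw status masked to the requested bit, or 0 on error.
 */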
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status && clear)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->base_off = addr;
	hw->blk_off = m->mdp[0].base;
	hw->hwversion = m->hwversion;
}

struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

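	/*
	 * Each register set contributes 32 irq_idx slots, so the total
	 * irq_idx space is 32 * ARRAY_SIZE(dpu_intr_set) (512 with the 16
	 * register sets above), whether or not a given chip enables every
	 * set via m->mdss_irqs.
	 */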
	intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
			GFP_KERNEL);
	if (!intr->cache_irq_mask) {
		kfree(intr);
		return ERR_PTR(-ENOMEM);
	}

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	if (intr) {
		kfree(intr->cache_irq_mask);

		kfree(intr->irq_cb_tbl);
		kfree(intr->irq_counts);

		kfree(intr);
	}
}

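/*
 * A hedged usage sketch for callback registration; my_irq_handler and
 * my_private_data are hypothetical, but the field names match how
 * struct dpu_irq_callback is used in this file. Note that cb.list must
 * be initialized before registering, since list_del_init() below
 * operates on it:
 *
 *	struct dpu_irq_callback cb = {
 *		.func = my_irq_handler,
 *		.arg = my_private_data,
 *	};
 *	INIT_LIST_HEAD(&cb.list);
 *	dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
 */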
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
	if (list_is_first(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
					irq_idx);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* if the callback list is now empty, disable the interrupt */
	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_disable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
					irq_idx);
		VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
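/*
 * Reading the "core_irq" debugfs file prints one line per irq_idx with a
 * nonzero dispatch count or at least one registered callback, e.g.
 * (hypothetical values):
 *
 *	idx:15 irq:120 cb:1
 */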
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, cb_count;

	if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
		return 0;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
		list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb_count)
			seq_printf(s, "idx:%d irq:%d cb:%d\n",
					i, irq_count, cb_count);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	/*
	 * Bail out on allocation failure rather than dereferencing NULL;
	 * dpu_hw_intr_destroy() can safely free whichever allocation
	 * succeeded.
	 */
	if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl ||
		    !dpu_kms->hw_intr->irq_counts))
		return;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
		atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
	}
}

void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}