// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf)				(0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)				(MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)			(MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)			(MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf)				(0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4)				(0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf)			(0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)			(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)		(0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

/**
 * struct dpu_intr_reg - offsets of one DPU interrupt register set
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - list of DPU interrupt register sets, indexed by enum dpu_hw_intr_reg
 *
 * When making changes be sure to keep this in sync with enum dpu_hw_intr_reg.
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
	[MDP_INTF0_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_7xxx_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_7xxx_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_7xxx_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

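/*
 * A single irq_idx packs the interrupt register index and the bit position
 * within that register: irq_idx = reg_idx * 32 + bit (see DPU_IRQ_IDX()).
 * The two macros below recover the register index and the bit mask.
 */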
#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	VERB("irq_idx=%d\n", irq_idx);

	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
		return;
	}

	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

	/*
	 * Perform the registered callback
	 */
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}

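/*
 * dpu_core_irq - top-level MDP interrupt handler. Walks every interrupt
 * register block present on this hardware, acks the asserted bits and
 * dispatches each enabled, pending interrupt to its registered callback.
 */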
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * bit with the matching mask. Once irq_status is
			 * fully cleared, the search can stop.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

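/*
 * dpu_hw_intr_enable_irq_locked - enable one interrupt bit: clear any stale
 * status for that bit, then write the updated enable mask for its register.
 * Must be called with intr->irq_lock held.
 */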
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

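/*
 * dpu_hw_intr_disable_irq_locked - disable one interrupt bit and clear any
 * status it may have left pending. Must be called with intr->irq_lock held.
 */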
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

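/*
 * dpu_clear_irqs - write all-ones to the CLEAR register of every interrupt
 * block supported by this hardware, acking anything that may be pending.
 */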
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

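/*
 * dpu_disable_all_irqs - write zero to the ENABLE register of every supported
 * interrupt block so that no MDP interrupt can fire.
 */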
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

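/*
 * dpu_core_irq_read - read and clear the current status of one interrupt.
 * Returns the masked status bit, or 0 if the interrupt is not asserted.
 */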
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(const struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->blk_addr = addr + m->mdp[0].base;
}

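/*
 * dpu_hw_intr_init - allocate and initialize the interrupt controller state:
 * one callback slot per possible interrupt (32 per register block), the MDP
 * register mapping and the mask of interrupt blocks present on this SoC.
 */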
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	int nirq = MDP_INTR_MAX * 32;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

	intr->total_irqs = nirq;

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}

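/*
 * dpu_core_irq_register_callback - register a callback for one interrupt and
 * enable it. Only a single callback per irq_idx is allowed; -EBUSY is
 * returned if a callback is already registered.
 */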
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		void (*irq_cb)(void *arg, int irq_idx),
		void *irq_arg)
{
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("invalid irq_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
					irq_idx);
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}

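/*
 * dpu_core_irq_unregister_callback - disable one interrupt and drop its
 * registered callback and argument.
 */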
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret;

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ for irq_idx:%d: %d\n",
					irq_idx, ret);

	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
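/*
 * dpu_debugfs_core_irq_show - for every interrupt that has fired or has a
 * callback registered, dump its index, fire count and callback pointer.
 */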
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
		cb = dpu_kms->hw_intr->irq_tbl[i].cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

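/*
 * dpu_core_irq_preinstall - bring the interrupt hardware to a known state:
 * clear and disable all interrupts and reset the per-interrupt counters.
 */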
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}

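/*
 * dpu_core_irq_uninstall - warn about callbacks still registered, then clear
 * and disable all interrupts.
 */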
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (dpu_kms->hw_intr->irq_tbl[i].cb)
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}