// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf) (0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf) (MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf) (MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf) (MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf) (0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf) (MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf) (MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf) (MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4) (0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf) (0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf) (0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

/**
 * struct dpu_intr_reg - register set for a single DPU interrupt block
 * @clr_off: offset to CLEAR reg
 * @en_off: offset to ENABLE reg
 * @status_off: offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

#define DPU_IRQ_MASK(irq_idx) (BIT(DPU_IRQ_BIT(irq_idx)))

static inline bool dpu_core_irq_is_valid(int irq_idx)
{
	return irq_idx >= 0 && irq_idx < DPU_NUM_IRQS;
}

static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
								int irq_idx)
{
	return &intr->irq_tbl[irq_idx];
}

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms: Pointer to DPU's KMS structure
 * @irq_idx: interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

	VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	if (!irq_entry->cb) {
		DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return;
	}

	atomic_inc(&irq_entry->count);

	/*
	 * Perform registered function callback
	 */
	irq_entry->cb(irq_entry->arg);
}

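/**
 * dpu_core_irq - top-level MDSS interrupt handler
 * @kms: MSM KMS handle
 *
 * Read the status of every enabled interrupt register, clear the raised bits
 * in hardware and dispatch the registered callback for each status bit that
 * is both raised and enabled.
 *
 * Return: IRQ_HANDLED once the registers have been serviced, or IRQ_NONE if
 * the interrupt block has not been initialized.
 */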
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				      irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the matching bit
			 * in irq_status. Once irq_status is all cleared, the
			 * search can stop.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

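/**
 * dpu_hw_intr_enable_irq_locked - enable one interrupt bit in hardware
 * @intr: DPU interrupt block
 * @irq_idx: combined register/bit interrupt index
 *
 * Clear any pending instance of the interrupt and set the corresponding bit
 * in the cached enable mask before writing it back to the ENABLE register.
 * Must be called with @intr->irq_lock held.
 *
 * Return: 0 on success, -EINVAL on an invalid index or unsupported register.
 */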
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that
	 * it is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	/* Is this interrupt register supported on the platform? */
	if (WARN_ON(!reg->en_off))
		return -EINVAL;

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

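/**
 * dpu_hw_intr_disable_irq_locked - disable one interrupt bit in hardware
 * @intr: DPU interrupt block
 * @irq_idx: combined register/bit interrupt index
 *
 * Clear the corresponding bit in the cached enable mask, write the mask back
 * to the ENABLE register and clear any interrupt left pending.
 * Must be called with @intr->irq_lock held.
 *
 * Return: 0 on success, -EINVAL on an invalid index.
 */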
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that
	 * it is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

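/*
 * dpu_clear_irqs - clear all pending interrupts in every interrupt register
 * listed in the irq_mask of the interrupt block.
 */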
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
				      intr->intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

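/*
 * dpu_disable_all_irqs - mask off every interrupt register listed in the
 * irq_mask of the interrupt block by writing an all-zero enable mask.
 */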
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
				      intr->intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

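/**
 * dpu_core_irq_read - read and clear the raw status of one interrupt
 * @dpu_kms: Pointer to DPU's KMS structure
 * @irq_idx: combined register/bit interrupt index
 *
 * Return: the masked status bit for @irq_idx, which is cleared in hardware
 * if it was raised, or 0 for an invalid index or uninitialized block.
 */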
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
				   intr->intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
			      intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

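/**
 * dpu_hw_intr_init - allocate and set up the DPU interrupt block
 * @addr: base of the mapped MDSS register space
 * @m: catalog describing the DPU hardware
 *
 * Select the register set for the detected DPU generation and build the mask
 * of interrupt registers that are present on this platform.
 *
 * Return: interrupt block handle, or an ERR_PTR() on failure.
 */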
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
				     const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	unsigned int i;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	if (m->mdss_ver->core_major_ver >= 7)
		intr->intr_set = dpu_intr_set_7xxx;
	else
		intr->intr_set = dpu_intr_set_legacy;

	intr->hw.blk_addr = addr + m->mdp[0].base;

	intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
			 BIT(MDP_SSPP_TOP0_INTR2) |
			 BIT(MDP_SSPP_TOP0_HIST_INTR);
	for (i = 0; i < m->intf_count; i++) {
		const struct dpu_intf_cfg *intf = &m->intf[i];

		if (intf->type == INTF_NONE)
			continue;

		intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

		if (intf->intr_tear_rd_ptr != -1)
			intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
	}

	spin_lock_init(&intr->irq_lock);

	return intr;
}

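/**
 * dpu_hw_intr_destroy - free an interrupt block allocated by dpu_hw_intr_init()
 * @intr: interrupt block to free, may be NULL
 */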
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}

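/**
 * dpu_core_irq_register_callback - attach a callback to an interrupt and
 * enable it
 * @dpu_kms: Pointer to DPU's KMS structure
 * @irq_idx: combined register/bit interrupt index
 * @irq_cb: callback invoked from the interrupt handler
 * @irq_arg: argument passed to @irq_cb
 *
 * Return: 0 on success, -EINVAL on invalid arguments, -EBUSY if a callback
 * is already registered for @irq_idx.
 */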
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
				   void (*irq_cb)(void *arg),
				   void *irq_arg)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	if (unlikely(WARN_ON(irq_entry->cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	irq_entry->arg = irq_arg;
	irq_entry->cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
			dpu_kms->hw_intr,
			irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}

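/**
 * dpu_core_irq_unregister_callback - disable an interrupt and detach its
 * callback
 * @dpu_kms: Pointer to DPU's KMS structure
 * @irq_idx: combined register/bit interrupt index
 *
 * Return: 0 on success, -EINVAL on an invalid index.
 */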
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	irq_entry->cb = NULL;
	irq_entry->arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 0; i < DPU_NUM_IRQS; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		irq_count = atomic_read(&irq_entry->count);
		cb = irq_entry->cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
				   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
			       struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
			    &dpu_debugfs_core_irq_fops);
}
#endif

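/**
 * dpu_core_irq_preinstall - reset the interrupt hardware to a known state
 * @kms: MSM KMS handle
 *
 * Clear and disable every interrupt register and zero the per-interrupt
 * statistics before the IRQ handler is installed.
 */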
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 0; i < DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		atomic_set(&irq_entry->count, 0);
	}
}

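/**
 * dpu_core_irq_uninstall - shut down the interrupt hardware
 * @kms: MSM KMS handle
 *
 * Warn about any callback that is still registered, then clear and disable
 * every interrupt register.
 */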
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		if (irq_entry->cb)
			DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
				  DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
	}

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}