// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx		0x34000
#define MDP_INTF_1_OFF_REV_7xxx		0x35000
#define MDP_INTF_5_OFF_REV_7xxx		0x39000

/**
 * struct dpu_intr_reg - offsets for one interrupt register set
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - list of DPU interrupt register sets
 *
 * When making changes be sure to sync with dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	{
		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR_EN,
		MDP_SSPP_TOP0_OFF+INTR_STATUS
	},
	{
		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR2_EN,
		MDP_SSPP_TOP0_OFF+INTR2_STATUS
	},
	{
		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
	},
	{
		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF+INTF_INTR_EN,
		MDP_INTF_0_OFF+INTF_INTR_STATUS
	},
	{
		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF+INTF_INTR_EN,
		MDP_INTF_1_OFF+INTF_INTR_STATUS
	},
	{
		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF+INTF_INTR_EN,
		MDP_INTF_2_OFF+INTF_INTR_STATUS
	},
	{
		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF+INTF_INTR_EN,
		MDP_INTF_3_OFF+INTF_INTR_STATUS
	},
	{
		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF+INTF_INTR_EN,
		MDP_INTF_4_OFF+INTF_INTR_STATUS
	},
	{
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	{
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	{
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
	},
};

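/*
 * A flat irq_idx addresses one bit in one register set above:
 * reg_idx = irq_idx / 32 and bit = irq_idx % 32, so e.g. irq_idx 33
 * maps to dpu_intr_set[1] (INTR2), bit 1. DPU_IRQ_IDX() from the
 * header is assumed to be the inverse composition, reg_idx * 32 + bit.
 */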
#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:	Pointer to DPU's KMS structure
 * @irq_idx:	interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_irq_callback *cb;

	VERB("irq_idx=%d\n", irq_idx);

	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);

	atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);

	/* Perform registered function callbacks */
	list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
}

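/**
 * dpu_core_irq - top-level DPU core interrupt handler
 * @dpu_kms:	Pointer to DPU's KMS structure
 *
 * For each register set enabled in irq_mask: read the raw status, ack
 * it in the CLEAR register, mask it against the ENABLE register, then
 * dispatch the registered callbacks for every bit still set.
 */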
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * bit with the matching mask. Once irq_status is
			 * all cleared, the search can be stopped.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupts */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

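/*
 * A minimal usage sketch for dpu_hw_intr_enable_irq_locked() and its
 * disable counterpart below. The real callers are the register/
 * unregister paths further down; any other (hypothetical) caller would
 * need the same locking:
 *
 *	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 *	dpu_hw_intr_enable_irq_locked(intr, irq_idx);
 *	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 */
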
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ disabled:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupts */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

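/*
 * Blanket helpers used around (pre)install/uninstall: dpu_clear_irqs()
 * acks every pending interrupt and dpu_disable_all_irqs() masks every
 * source, in each register set enabled in the catalog's irq_mask.
 */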
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

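/**
 * dpu_core_irq_read - read (and optionally clear) one interrupt status bit
 * @dpu_kms:	Pointer to DPU's KMS structure
 * @irq_idx:	interrupt index
 * @clear:	true to ack the bit in the CLEAR register when it is set
 *
 * Return: the masked STATUS bit for @irq_idx, or 0 on invalid input.
 */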
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status && clear)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->base_off = addr;
	hw->blk_off = m->mdp[0].base;
	hw->hwversion = m->hwversion;
}

struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

	intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
			GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		kfree(intr);
		return ERR_PTR(-ENOMEM);
	}

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

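/*
 * dpu_hw_intr_destroy() pairs with dpu_hw_intr_init(). A sketch of the
 * expected lifecycle (the mmio and catalog names are illustrative):
 *
 *	intr = dpu_hw_intr_init(mmio, catalog);
 *	if (IS_ERR(intr))
 *		return PTR_ERR(intr);
 *	...
 *	dpu_hw_intr_destroy(intr);
 */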
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	if (intr) {
		kfree(intr->cache_irq_mask);

		kfree(intr->irq_cb_tbl);
		kfree(intr->irq_counts);

		kfree(intr);
	}
}

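/**
 * dpu_core_irq_register_callback - attach a callback to an interrupt index
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback to add to the list for @irq_idx
 *
 * The first callback added for an irq_idx also enables the interrupt in
 * hardware. A hedged sketch of a caller (vblank_cb() and enc are made up
 * for illustration; the callback's list head must be initialized before
 * first use, since registration starts with list_del_init()):
 *
 *	static void vblank_cb(void *arg, int irq_idx)
 *	{
 *		...
 *	}
 *
 *	cb->func = vblank_cb;
 *	cb->arg = enc;
 *	INIT_LIST_HEAD(&cb->list);
 *	dpu_core_irq_register_callback(dpu_kms, irq_idx, cb);
 */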
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
	if (list_is_first(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
					irq_idx);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

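/**
 * dpu_core_irq_unregister_callback - detach a callback from an interrupt index
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback previously registered for @irq_idx
 *
 * Removing the last callback for an irq_idx also disables the interrupt
 * in hardware.
 */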
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* if the callback list is now empty, disable the interrupt */
	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_disable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Failed to disable IRQ for irq_idx:%d\n",
					irq_idx);
		VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, cb_count;

	if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
		return 0;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
		list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb_count)
			seq_printf(s, "idx:%d irq:%d cb:%d\n",
					i, irq_count, cb_count);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

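/*
 * Quiesce the hardware, then allocate one callback list head and one
 * counter per possible irq_idx so callbacks can be registered later.
 */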
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	/* don't dereference the tables if either allocation failed */
	if (!dpu_kms->hw_intr->irq_cb_tbl || !dpu_kms->hw_intr->irq_counts)
		return;
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
		atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
	}
}

void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	if (dpu_kms->hw_intr->irq_cb_tbl)
		for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
			if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
				DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}