// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/*
	 * Wait for the MMU status to indicate there is no active command, in
	 * case one is pending.
	 */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

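/*
 * Lock a region of the address space ahead of a flush or update command.
 * The region size appears to be encoded as a log2 "region width" in the
 * low bits of AS_LOCKADDR (inferred from the width computation below),
 * so only power-of-two-sized regions can be locked.
 */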
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;

	size = round_up(size, PAGE_SIZE);

	/*
	 * fls() returns 1..32, so 10 + fls(num_pages) yields a region_width
	 * in the range 11..42. Bump the width by one when the size is not
	 * already a power of two.
	 */
	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

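/*
 * Get a hardware address space (AS) for this context: reuse the existing
 * assignment when there is one, otherwise take a free AS or reclaim the
 * least-recently-used context that has no jobs in flight.
 */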
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		WARN_ON(en >= NUM_JOB_SLOTS);

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu,
		pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

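/*
 * Called after a GPU reset: all AS assignments are stale, so drop them and
 * let each context re-acquire an AS on next use, then re-arm the MMU
 * interrupts.
 */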
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

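/*
 * Pick the largest page size usable at this point of a mapping: a 2MB
 * block when the address is 2MB-aligned and at least 2MB remain, else a
 * 4K page.
 */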
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

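/*
 * Flush page-table updates for an IOVA range. A suspended GPU has no TLB
 * state to flush, so only issue the command when the device is already
 * active.
 */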
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}

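/*
 * Map every DMA segment of an sg_table at iova, using 2MB blocks wherever
 * alignment allows, then flush the whole mapped range.
 */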
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
			mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(bo->is_mapped))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
	bo->is_mapped = true;

	return 0;
}

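/*
 * Tear down the mapping for a BO. Heap BOs may be only partially backed,
 * so skip any holes (IOVAs with no physical backing) rather than
 * unmapping them.
 */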
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
	bo->is_mapped = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	/* TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X */
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
	.tlb_flush_leaf = mmu_tlb_flush_leaf,
};

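/*
 * Allocate per-FD page tables in the ARM_MALI_LPAE io-pgtable format. The
 * input/output address sizes are taken from the low two bytes of the
 * GPU's MMU_FEATURES register.
 */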
int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
	struct panfrost_mmu *mmu = &priv->mmu;
	struct panfrost_device *pfdev = priv->pfdev;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      priv);
	if (!mmu->pgtbl_ops)
		return -EINVAL;

	return 0;
}

void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
	struct panfrost_device *pfdev = priv->pfdev;
	struct panfrost_mmu *mmu = &priv->mmu;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
}

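/*
 * Find the BO backing a faulting GPU VA: look up the MMU context bound to
 * the faulting AS, then walk its drm_mm for a node containing the address.
 * Takes a reference on the returned BO, which the caller must drop.
 */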
static struct panfrost_gem_object *
addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_object *bo = NULL;
	struct panfrost_file_priv *priv;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:
	priv = container_of(mmu, struct panfrost_file_priv, mmu);

	spin_lock(&priv->mm_lock);

	drm_mm_for_each_node(node, &priv->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			bo = drm_mm_node_to_panfrost_bo(node);
			drm_gem_object_get(&bo->base.base);
			break;
		}
	}

	spin_unlock(&priv->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return bo;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

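/*
 * Grow a heap BO on demand: back the 2MB-aligned chunk containing the
 * faulting address with shmem pages and map it on the GPU side.
 */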
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bo = addr_to_drm_mm_node(pfdev, as, addr);
	if (!bo)
		return -ENOENT;

	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bo->node.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bo->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bo->node.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
					  sizeof(struct sg_table),
					  GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -EINVAL;
		goto err_map;
	}

	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bo->is_mapped = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	drm_gem_object_put_unlocked(&bo->base.base);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put_unlocked(&bo->base.base);
	return ret;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

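/*
 * Threaded handler. MMU_INT_RAWSTAT carries a pair of bits per address
 * space: bit i flags a page fault and bit (i + 16) a bus fault on AS i.
 * Page faults on heap BOs are serviced by mapping more memory; anything
 * else is terminal and only reported.
 */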
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Page fault only */
		if ((status & mask) == BIT(i)) {
			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);

			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
			if (!ret) {
				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
				status &= ~mask;
				continue;
			}
		}

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}


int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, "mmu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq\n");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}