// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
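
/*
 * Poll the AS_STATUS register of address space @as_nr until the AS_ACTIVE bit
 * clears, i.e. until any previously issued AS command has finished.  Returns
 * 0 on success or -ETIMEDOUT if the bit never clears within the poll window.
 */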
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending.
	 */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}
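
/*
 * Lock the VA range [iova, iova + size) in address space @as_nr before a
 * flush/update operation.  AS_LOCKADDR takes a single 64-bit value: the
 * page-aligned base address of the region, with the log2 width of the region
 * (size rounded up to the next power of two) packed into the low bits, which
 * is what the region_width computation below produces.
 */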
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
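
/*
 * Issue MMU command @op on address space @as_nr for the given VA range.  For
 * everything except UNLOCK the region is locked first so the hardware knows
 * which translations are affected.  The caller must hold pfdev->as_lock.
 */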
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}
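
/*
 * Program address space @mmu->as with this context's page table base
 * (TRANSTAB) and memory attributes (MEMATTR), flushing stale entries first,
 * then issue an UPDATE command so the new configuration takes effect.
 * Called with pfdev->as_lock held.
 */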
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
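
/*
 * Pick a hardware address space (AS) for @mmu and make it current: reuse the
 * AS already assigned to this context if there is one, otherwise grab a free
 * AS or reclaim the least-recently-used one whose reference count has dropped
 * to zero.  The returned AS stays pinned (as_count > 0) until a matching
 * panfrost_mmu_as_put().
 */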
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}
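
/*
 * Called after a GPU reset: forget all AS assignments (the hardware state is
 * gone), then clear and unmask the MMU interrupt sources.
 */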
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}
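
/*
 * Pick the largest page size usable for a mapping at @addr: 2MB blocks when
 * the address is 2MB aligned and at least 2MB remains, 4KB pages otherwise.
 */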
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}
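
/*
 * Flush the GPU's cached translations for the given VA range.  The flush is
 * only issued when the device is already powered; if it is runtime suspended
 * there is no live TLB state to invalidate.
 */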
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}
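
/*
 * Map every DMA-mapped segment of @sgt into @mmu's page table at @iova,
 * using 2MB block mappings whenever alignment allows, then flush the range
 * that was just mapped.
 */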
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}
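
/*
 * Make @mapping resident on the GPU: get the DMA-mapped backing pages of the
 * GEM object and install them in the owning context's page table at the
 * address reserved by the mapping's drm_mm node.
 */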
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}
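
/*
 * Tear down the GPU-side mapping for @mapping: walk its VA range, unmapping
 * whichever page size was used at each step (heap BOs may contain holes that
 * were never faulted in, hence the iova_to_phys() check), then flush the
 * range.
 */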
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
	.tlb_flush_leaf = mmu_tlb_flush_leaf,
};
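
/*
 * Set up the per-file-descriptor MMU context: no hardware AS assigned yet
 * (as = -1) and a fresh ARM_MALI_LPAE io-pgtable whose input/output address
 * sizes come from the probed mmu_features.
 */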
int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
	struct panfrost_mmu *mmu = &priv->mmu;
	struct panfrost_device *pfdev = priv->pfdev;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      priv);
	if (!mmu->pgtbl_ops)
		return -EINVAL;

	return 0;
}

void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
	struct panfrost_device *pfdev = priv->pfdev;
	struct panfrost_mmu *mmu = &priv->mmu;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
}
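
/*
 * Translate a faulting GPU VA back to the panfrost_gem_mapping that covers
 * it: find the context currently bound to address space @as, then search that
 * context's drm_mm for a node containing the address.  A reference is taken
 * on the returned mapping; the caller must drop it with
 * panfrost_gem_mapping_put().
 */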
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct panfrost_file_priv *priv;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:
	priv = container_of(mmu, struct panfrost_file_priv, mmu);

	spin_lock(&priv->mm_lock);

	drm_mm_for_each_node(node, &priv->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&priv->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
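
/*
 * Handle a translation fault on a heap BO by growing it: allocate and pin the
 * 2MB-aligned chunk of shmem pages around the faulting address, DMA-map them,
 * and map them into the faulting address space so the job can continue.
 */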
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else
		pages = bo->base.pages;

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -EINVAL;
		goto err_map;
	}

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put(&bo->base.base);
	return ret;
}

static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}
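
/*
 * MMU interrupt handling is split in two: the hard IRQ handler only masks the
 * interrupt and wakes the thread, while the threaded handler walks the
 * RAWSTAT bits, growing heap BOs on translation faults and logging anything
 * it cannot handle, before unmasking again.
 */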
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);

		if (ret)
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				i, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(pfdev, exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}