1f3ba9122SRob Herring // SPDX-License-Identifier: GPL-2.0
2f3ba9122SRob Herring /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
37fdc48ccSBoris Brezillon
47fdc48ccSBoris Brezillon #include <drm/panfrost_drm.h>
57fdc48ccSBoris Brezillon
67282f764SRob Herring #include <linux/atomic.h>
7f3ba9122SRob Herring #include <linux/bitfield.h>
8f3ba9122SRob Herring #include <linux/delay.h>
9187d2929SRob Herring #include <linux/dma-mapping.h>
10f3ba9122SRob Herring #include <linux/interrupt.h>
11f3ba9122SRob Herring #include <linux/io.h>
12f3ba9122SRob Herring #include <linux/iopoll.h>
13f3ba9122SRob Herring #include <linux/io-pgtable.h>
14f3ba9122SRob Herring #include <linux/iommu.h>
15f3ba9122SRob Herring #include <linux/platform_device.h>
16f3ba9122SRob Herring #include <linux/pm_runtime.h>
17187d2929SRob Herring #include <linux/shmem_fs.h>
18f3ba9122SRob Herring #include <linux/sizes.h>
19f3ba9122SRob Herring
20f3ba9122SRob Herring #include "panfrost_device.h"
21f3ba9122SRob Herring #include "panfrost_mmu.h"
22f3ba9122SRob Herring #include "panfrost_gem.h"
23f3ba9122SRob Herring #include "panfrost_features.h"
24f3ba9122SRob Herring #include "panfrost_regs.h"
25f3ba9122SRob Herring
26f3ba9122SRob Herring #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
27f3ba9122SRob Herring #define mmu_read(dev, reg) readl(dev->iomem + reg)
28f3ba9122SRob Herring
/*
 * Wait until address space @as_nr has no command in flight.
 * Returns 0 once idle; on timeout schedules a GPU reset and returns the
 * (non-zero) poll error.
 */
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	u32 status;
	int err;

	/* Poll AS_STATUS until any pending command has completed. */
	err = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
						status,
						!(status & AS_STATUS_AS_ACTIVE),
						10, 100000);
	if (!err)
		return 0;

	/* The GPU hung, let's trigger a reset */
	panfrost_device_schedule_reset(pfdev);
	dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return err;
}
47f3ba9122SRob Herring
/*
 * Issue MMU command @cmd on address space @as_nr, first waiting for the
 * previous command (if any) to drain. Returns 0 on success or the
 * wait_ready() error when the MMU never became ready.
 */
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int ret = wait_ready(pfdev, as_nr);

	if (ret)
		return ret;

	mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
	return 0;
}
59f3ba9122SRob Herring
/*
 * Program AS_LOCKADDR for @as_nr to cover [region_start, region_start + size)
 * and issue AS_COMMAND_LOCK. The hardware only accepts naturally aligned
 * power-of-two regions, so the locked region may be larger than requested.
 * A zero @size is a no-op.
 */
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region;
	u64 region_end = region_start + size;

	if (!size)
		return;

	/*
	 * The locked region is a naturally aligned power of 2 block encoded as
	 * log2 minus(1).
	 * Calculate the desired start/end and look for the highest bit which
	 * differs. The smallest naturally aligned block must include this bit
	 * change, the desired region starts with this bit (and subsequent bits)
	 * zeroed and ends with the bit (and subsequent bits) set to one.
	 */
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

	/*
	 * Mask off the low bits of region_start (which would be ignored by
	 * the hardware anyway)
	 */
	region_start &= GENMASK_ULL(63, region_width);

	/* The width encoding occupies the (now cleared) low bits of the
	 * lock address.
	 */
	region = region_width | region_start;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
94f3ba9122SRob Herring
95f3ba9122SRob Herring
/*
 * Issue MMU command @op covering [iova, iova + size) on address space
 * @as_nr. Callers hold pfdev->as_lock (see mmu_hw_do_operation()).
 * A negative @as_nr (context with no AS assigned) is a successful no-op.
 * Returns 0 on success or a timeout error from wait_ready().
 */
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

	/* Every command except UNLOCK operates on a locked region. */
	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}
111f3ba9122SRob Herring
/*
 * Locked wrapper around mmu_hw_do_operation_locked(): takes pfdev->as_lock
 * so the operation cannot race with AS assignment/reclaim.
 */
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}
123f3ba9122SRob Herring
/*
 * Point address space @mmu->as at this context's page tables and enable it:
 * flush the whole AS first so no stale translations survive, program
 * TRANSTAB/MEMATTR from the io-pgtable config, then issue UPDATE to latch
 * the new setup. Callers hold pfdev->as_lock.
 */
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	/* Flush the entire address range before re-programming the AS. */
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
144f3ba9122SRob Herring
/*
 * Disable address space @as_nr: flush it, zero TRANSTAB/MEMATTR so the
 * hardware no longer walks the old page tables, then latch with UPDATE.
 */
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
157f3ba9122SRob Herring
/*
 * Assign a hardware address space (AS) to @mmu and return its number.
 *
 * If the context already holds an AS, bump its usage count and refresh its
 * LRU position (re-enabling the MMU if an unhandled page fault previously
 * disabled it). Otherwise take a free AS, or reclaim the least-recently-used
 * context whose usage count has dropped to zero.
 */
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		/* Both MMU interrupt bits associated with this AS. */
		u32 mask = BIT(as) | BIT(16 + as);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		/* Mark this context most-recently used. */
		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		/* None free: reclaim the LRU context with no active users. */
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}
2237282f764SRob Herring
panfrost_mmu_as_put(struct panfrost_device * pfdev,struct panfrost_mmu * mmu)2247282f764SRob Herring void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
2257282f764SRob Herring {
2267282f764SRob Herring atomic_dec(&mmu->as_count);
2277282f764SRob Herring WARN_ON(atomic_read(&mmu->as_count) < 0);
2287282f764SRob Herring }
2297282f764SRob Herring
/*
 * Reset MMU software state: detach every context from its AS, clear the
 * allocation and fault masks, then clear and unmask all MMU interrupts.
 */
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	/* Every context on the LRU list loses its AS and usage count. */
	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}
25073e467f6SRob Herring
/*
 * Pick the page size (4K or 2M) and page count for the next map/unmap step
 * at @addr with @size bytes remaining, never crossing a table boundary.
 *
 * io-pgtable only operates on multiple pages within a single table
 * entry, so we need to split at boundaries of the table size, i.e.
 * the next block size up. The distance from address A to the next
 * boundary of block size B is logically B - A % B, but in unsigned
 * two's complement where B is a power of two we get the equivalence
 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
 */
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	size_t to_2m_boundary = -addr % SZ_2M;

	if (!to_2m_boundary && size >= SZ_2M) {
		/* 2M-aligned with at least a full block: use 2M pages up to
		 * the next 1G table boundary.
		 */
		size_t to_1g_boundary = -addr % SZ_1G ?: SZ_1G;

		*count = min(to_1g_boundary, size) / SZ_2M;
		return SZ_2M;
	}

	/* Otherwise fill with 4K pages up to the next 2M boundary (or
	 * the remaining size, whichever comes first).
	 */
	*count = min_not_zero(to_2m_boundary, size) / SZ_4K;
	return SZ_4K;
}
2714dff47c7SRob Herring
/*
 * Flush the page-table entries covering [iova, iova + size) for @mmu.
 * No-op when the context holds no AS, or when the device is suspended —
 * panfrost_mmu_enable() flushes the whole AS when it is brought back up.
 */
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	/* Hold a runtime-PM reference without waking the device. */
	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}
287ec7eba47SRob Herring
/*
 * Map a DMA-mapped scatterlist into @mmu's page tables starting at @iova,
 * then flush the affected range. Each segment is mapped in steps using the
 * largest page size its alignment allows (see get_pgsize()).
 * Always returns 0.
 */
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
				       GFP_KERNEL, &mapped);
			/* Don't get stuck if things have gone wrong */
			mapped = max(mapped, pgsize);
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	/* One flush for the whole range we just populated. */
	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}
320f3ba9122SRob Herring
panfrost_mmu_map(struct panfrost_gem_mapping * mapping)321bdefca2dSBoris Brezillon int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
322eecbb3cdSRob Herring {
323bdefca2dSBoris Brezillon struct panfrost_gem_object *bo = mapping->obj;
324a193f3b4SThomas Zimmermann struct drm_gem_shmem_object *shmem = &bo->base;
325a193f3b4SThomas Zimmermann struct drm_gem_object *obj = &shmem->base;
326eecbb3cdSRob Herring struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
327eecbb3cdSRob Herring struct sg_table *sgt;
328eecbb3cdSRob Herring int prot = IOMMU_READ | IOMMU_WRITE;
329eecbb3cdSRob Herring
330bdefca2dSBoris Brezillon if (WARN_ON(mapping->active))
331eecbb3cdSRob Herring return 0;
332eecbb3cdSRob Herring
333203270c0SRob Herring if (bo->noexec)
334203270c0SRob Herring prot |= IOMMU_NOEXEC;
335203270c0SRob Herring
336a193f3b4SThomas Zimmermann sgt = drm_gem_shmem_get_pages_sgt(shmem);
337eecbb3cdSRob Herring if (WARN_ON(IS_ERR(sgt)))
338eecbb3cdSRob Herring return PTR_ERR(sgt);
339eecbb3cdSRob Herring
340bdefca2dSBoris Brezillon mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
341bdefca2dSBoris Brezillon prot, sgt);
342bdefca2dSBoris Brezillon mapping->active = true;
343f3ba9122SRob Herring
344f3ba9122SRob Herring return 0;
345f3ba9122SRob Herring }
346f3ba9122SRob Herring
/*
 * Tear down the GPU-VA range described by @mapping and flush it.
 * Heap BOs are sparsely populated by the fault handler, so for them we walk
 * one step at a time and probe iova_to_phys() to skip holes.
 */
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		/* Heap BOs: one page-size unit per step so each hole can
		 * be detected and skipped individually.
		 */
		if (bo->is_heap)
			pgcount = 1;
		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
			WARN_ON(unmapped_page != pgsize * pgcount);
		}
		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}
381f3ba9122SRob Herring
/* io-pgtable "flush all" hook: intentionally empty — this driver issues TLB
 * maintenance through AS commands instead (see panfrost_mmu_flush_range()).
 */
static void mmu_tlb_inv_context_s1(void *cookie)
{}
384f3ba9122SRob Herring
/* Sync hook: currently nothing to wait on; kept as a placeholder. */
static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}
390f3ba9122SRob Herring
/* io-pgtable walk-cache flush hook: only performs the (currently empty)
 * sync; actual flushing happens via AS commands elsewhere.
 */
static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}
39605aed941SWill Deacon
/* Flush callbacks handed to io-pgtable; effectively no-ops because the
 * driver flushes through AS commands (see panfrost_mmu_flush_range()).
 */
static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all = mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};
401f3ba9122SRob Herring
/*
 * Find the GEM mapping containing GPU address @addr in address space @as.
 * Takes a reference on the returned mapping (caller must drop it with
 * panfrost_gem_mapping_put()); returns NULL when the AS is not assigned to
 * any context or the address falls outside every allocation.
 */
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	/* Resolve the AS number to its current MMU context. */
	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	/* Linear scan of this context's VA allocations. */
	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			/* Keep the mapping alive past the unlock below. */
			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}
436187d2929SRob Herring
437187d2929SRob Herring #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
438187d2929SRob Herring
/*
 * Grow-on-demand page-fault handler for heap BOs: fault in the 2MB-aligned
 * section containing @addr by allocating shmem pages, DMA-mapping them and
 * installing the GPU-side mapping.
 *
 * Returns 0 on success (including when a racing fault already populated the
 * section), -ENOENT when @addr belongs to no mapping in @as, -EINVAL for a
 * non-heap BO, or a negative errno on allocation/mapping failure.
 */
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	struct drm_gem_object *obj;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		/* Only heap BOs may fault pages in lazily. */
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	obj = &bo->base.base;

	/* Serializes against concurrent faults on the same BO. */
	dma_resv_lock(obj->resv, NULL);

	if (!bo->base.pages) {
		/* First fault on this BO: allocate the sgt and page arrays
		 * covering the whole (virtual) BO size.
		 */
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			ret = -ENOMEM;
			goto err_unlock;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		/* Can happen if the last fault only partially filled this
		 * section of the pages array before failing. In that case
		 * we skip already filled pages.
		 */
		if (pages[i])
			continue;

		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto err_unlock;
		}
	}

	/* One sg_table per 2M section of the BO. */
	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	dma_resv_unlock(obj->resv);

	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_unlock:
	dma_resv_unlock(obj->resv);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}
550187d2929SRob Herring
/*
 * kref release callback: free an MMU context once its last reference drops.
 * If the context still owns a hardware AS, disable it (only when the device
 * is awake) and return the AS to the free pool before tearing down the VA
 * allocator and page tables.
 */
static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		/* Only touch the hardware if it is powered. */
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}
5747fdc48ccSBoris Brezillon
/* Drop a reference on @mmu; the context is freed when it reaches zero. */
void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}
5797fdc48ccSBoris Brezillon
/* Take a reference on @mmu and return it, for convenient assignment. */
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}
5867fdc48ccSBoris Brezillon
5877fdc48ccSBoris Brezillon #define PFN_4G (SZ_4G >> PAGE_SHIFT)
5887fdc48ccSBoris Brezillon #define PFN_4G_MASK (PFN_4G - 1)
5897fdc48ccSBoris Brezillon #define PFN_16M (SZ_16M >> PAGE_SHIFT)
5907fdc48ccSBoris Brezillon
/*
 * drm_mm color_adjust callback: shrink the candidate [start, end] hole (in
 * PFN units) so executable buffers never start, end on, or cross a 4GB
 * boundary. Non-executable buffers are left untouched.
 */
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	u64 next_seg;

	/* No placement restrictions for non-executable buffers. */
	if (color & PANFROST_BO_NOEXEC)
		return;

	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(*start & PFN_4G_MASK))
		(*start)++;
	if (!(*end & PFN_4G_MASK))
		(*end)--;

	/* Too little room left before the next 4G segment: start just
	 * past that boundary instead.
	 */
	next_seg = ALIGN(*start, PFN_4G);
	if (next_seg - *start <= PFN_16M)
		*start = next_seg + 1;

	/* Never let the range cross into the following 4G segment. */
	*end = min(*end, ALIGN(*start, PFN_4G) - 1);
}
6127fdc48ccSBoris Brezillon
/*
 * Allocate and initialize a new per-FD MMU context: the drm_mm VA allocator,
 * ARM Mali LPAE page tables, and an initial kref. No hardware AS is assigned
 * yet (mmu->as == -1); that happens lazily in panfrost_mmu_as_get().
 * Returns an ERR_PTR on failure.
 */
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	/* Input/output address sizes come from the GPU's MMU_FEATURES. */
	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K | SZ_2M,
		.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk = pfdev->coherent,
		.tlb = &mmu_tlb_ops,
		.iommu_dev = pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}
6517fdc48ccSBoris Brezillon
/* Translate the access-type field of an AS fault status into a printable name. */
static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	u32 access = fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK;

	switch (access) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		/* This encoding only means ATOMIC on AARCH64-MMU hardware. */
		return panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU) ?
			"ATOMIC" : "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}
672f3ba9122SRob Herring
panfrost_mmu_irq_handler(int irq,void * data)673f3ba9122SRob Herring static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
674f3ba9122SRob Herring {
675f3ba9122SRob Herring struct panfrost_device *pfdev = data;
676f3ba9122SRob Herring
677b31bdd13SRob Herring if (!mmu_read(pfdev, MMU_INT_STAT))
678f3ba9122SRob Herring return IRQ_NONE;
679f3ba9122SRob Herring
680b31bdd13SRob Herring mmu_write(pfdev, MMU_INT_MASK, 0);
681b31bdd13SRob Herring return IRQ_WAKE_THREAD;
682b31bdd13SRob Herring }
683b31bdd13SRob Herring
panfrost_mmu_irq_handler_thread(int irq,void * data)684b31bdd13SRob Herring static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
685b31bdd13SRob Herring {
686b31bdd13SRob Herring struct panfrost_device *pfdev = data;
687b31bdd13SRob Herring u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
6880eae01beSBoris Brezillon int ret;
689f3ba9122SRob Herring
6900eae01beSBoris Brezillon while (status) {
6910eae01beSBoris Brezillon u32 as = ffs(status | (status >> 16)) - 1;
6920eae01beSBoris Brezillon u32 mask = BIT(as) | BIT(as + 16);
693f3ba9122SRob Herring u64 addr;
694f3ba9122SRob Herring u32 fault_status;
695f3ba9122SRob Herring u32 exception_type;
696f3ba9122SRob Herring u32 access_type;
697f3ba9122SRob Herring u32 source_id;
698f3ba9122SRob Herring
6990eae01beSBoris Brezillon fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
7000eae01beSBoris Brezillon addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
7010eae01beSBoris Brezillon addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
702f3ba9122SRob Herring
703f3ba9122SRob Herring /* decode the fault status */
704f3ba9122SRob Herring exception_type = fault_status & 0xFF;
705f3ba9122SRob Herring access_type = (fault_status >> 8) & 0x3;
706f3ba9122SRob Herring source_id = (fault_status >> 16);
707f3ba9122SRob Herring
7083aa0a80fSBoris Brezillon mmu_write(pfdev, MMU_INT_CLEAR, mask);
7093aa0a80fSBoris Brezillon
710187d2929SRob Herring /* Page fault only */
711eb9d8ddbSTomeu Vizoso ret = -1;
7120eae01beSBoris Brezillon if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
7130eae01beSBoris Brezillon ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
714187d2929SRob Herring
715ed7a34c5SBoris Brezillon if (ret) {
716f3ba9122SRob Herring /* terminal fault, print info about the fault */
717f3ba9122SRob Herring dev_err(pfdev->dev,
718f3ba9122SRob Herring "Unhandled Page fault in AS%d at VA 0x%016llX\n"
719f3ba9122SRob Herring "Reason: %s\n"
720f3ba9122SRob Herring "raw fault status: 0x%X\n"
721f3ba9122SRob Herring "decoded fault status: %s\n"
722f3ba9122SRob Herring "exception type 0x%X: %s\n"
723f3ba9122SRob Herring "access type 0x%X: %s\n"
724f3ba9122SRob Herring "source id 0x%X\n",
7250eae01beSBoris Brezillon as, addr,
726f3ba9122SRob Herring "TODO",
727f3ba9122SRob Herring fault_status,
728f3ba9122SRob Herring (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
7296ef2f37fSBoris Brezillon exception_type, panfrost_exception_name(exception_type),
730f3ba9122SRob Herring access_type, access_type_name(pfdev, fault_status),
731f3ba9122SRob Herring source_id);
732f3ba9122SRob Herring
733ed7a34c5SBoris Brezillon spin_lock(&pfdev->as_lock);
734ed7a34c5SBoris Brezillon /* Ignore MMU interrupts on this AS until it's been
735ed7a34c5SBoris Brezillon * re-enabled.
736ed7a34c5SBoris Brezillon */
737ed7a34c5SBoris Brezillon pfdev->as_faulty_mask |= mask;
738ed7a34c5SBoris Brezillon
739ed7a34c5SBoris Brezillon /* Disable the MMU to kill jobs on this AS. */
740ed7a34c5SBoris Brezillon panfrost_mmu_disable(pfdev, as);
741ed7a34c5SBoris Brezillon spin_unlock(&pfdev->as_lock);
742ed7a34c5SBoris Brezillon }
743ed7a34c5SBoris Brezillon
744f3ba9122SRob Herring status &= ~mask;
7450eae01beSBoris Brezillon
7460eae01beSBoris Brezillon /* If we received new MMU interrupts, process them before returning. */
7470eae01beSBoris Brezillon if (!status)
748ed7a34c5SBoris Brezillon status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
749f3ba9122SRob Herring }
750f3ba9122SRob Herring
751ed7a34c5SBoris Brezillon spin_lock(&pfdev->as_lock);
752ed7a34c5SBoris Brezillon mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
753ed7a34c5SBoris Brezillon spin_unlock(&pfdev->as_lock);
754ed7a34c5SBoris Brezillon
755f3ba9122SRob Herring return IRQ_HANDLED;
756f3ba9122SRob Herring };
757f3ba9122SRob Herring
panfrost_mmu_init(struct panfrost_device * pfdev)758f3ba9122SRob Herring int panfrost_mmu_init(struct panfrost_device *pfdev)
759f3ba9122SRob Herring {
760f3ba9122SRob Herring int err, irq;
761f3ba9122SRob Herring
762f3ba9122SRob Herring irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
763f3ba9122SRob Herring if (irq <= 0)
764f3ba9122SRob Herring return -ENODEV;
765f3ba9122SRob Herring
76673896f60SEzequiel Garcia err = devm_request_threaded_irq(pfdev->dev, irq,
76773896f60SEzequiel Garcia panfrost_mmu_irq_handler,
768b31bdd13SRob Herring panfrost_mmu_irq_handler_thread,
76973896f60SEzequiel Garcia IRQF_SHARED, KBUILD_MODNAME "-mmu",
77073896f60SEzequiel Garcia pfdev);
771f3ba9122SRob Herring
772f3ba9122SRob Herring if (err) {
773f3ba9122SRob Herring dev_err(pfdev->dev, "failed to request mmu irq");
774f3ba9122SRob Herring return err;
775f3ba9122SRob Herring }
776f3ba9122SRob Herring
777f3ba9122SRob Herring return 0;
778f3ba9122SRob Herring }
779f3ba9122SRob Herring
/* Quiesce the MMU block on teardown by masking all its interrupts; the IRQ
 * line itself is devm-managed and released automatically.
 */
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}
784