// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

struct panfrost_mmu {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	struct mutex lock;
};

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

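/*
 * Lock the VA range [iova, iova + size) in address space @as_nr before its
 * page tables are modified. The region is encoded as the page-aligned base
 * address with a power-of-two size field packed into the low bits of
 * AS_LOCKADDR.
 */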
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

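/*
 * Issue an MMU command for address space @as_nr and wait for it to
 * complete. Every command except UNLOCK operates on a locked region, so
 * the region is locked first.
 */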
static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
		u64 iova, size_t size, u32 op)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	ret = wait_ready(pfdev, as_nr);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

	return ret;
}

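/*
 * Program the translation table base and memory attributes for address
 * space @as_nr from the io-pgtable configuration, clear and unmask the MMU
 * interrupts, then issue an UPDATE command to make the new setup live.
 */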
void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
{
	struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

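/*
 * Disable translation for address space @as_nr by clearing its translation
 * table base and memory attributes, then issuing an UPDATE command.
 */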
static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

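/*
 * Pick the largest page size usable for this mapping: 2MB when the address
 * is 2MB aligned and at least 2MB remains, 4KB otherwise.
 */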
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

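/*
 * Map a GEM object into the GPU address space at the VA reserved in
 * bo->node: walk its sg_table, map each contiguous chunk with the largest
 * possible page size, then flush the page table caches for the range.
 */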
int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	unsigned int count;
	struct scatterlist *sgl;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(bo->is_mapped))
		return 0;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&pfdev->mmu->lock);

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx\n", iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = true;

	return 0;
}

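/*
 * Tear down the GPU mapping of a GEM object and flush the page table
 * caches for the affected range.
 */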
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;
	int ret;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx\n", iova, len);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	mutex_lock(&pfdev->mmu->lock);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		unmapped_page = ops->unmap(ops, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped_len += unmapped_page;
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = false;
}

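/* io-pgtable TLB callback: invalidate the whole address space with FLUSH_MEM. */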
static void mmu_tlb_inv_context_s1(void *cookie)
{
	struct panfrost_device *pfdev = cookie;

	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

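/*
 * Range invalidation is a no-op here; the driver issues an explicit
 * FLUSH_PT after map/unmap instead.
 */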
static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
				     size_t granule, bool leaf, void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static const struct iommu_gather_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
	.tlb_sync	= mmu_tlb_sync_context,
};

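/*
 * Decode the access type field of AS_FAULTSTATUS into a human-readable
 * string. The ATOMIC encoding is only defined on GPUs with the AArch64
 * MMU feature and reads as UNKNOWN otherwise.
 */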
static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

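/*
 * MMU fault interrupt handler: for each address space that raised an
 * interrupt, decode and log the fault status and fault address, then
 * acknowledge the interrupt. Faults are only reported, not recovered from.
 */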
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_STAT);
	int i;

	if (!status)
		return IRQ_NONE;

	dev_err(pfdev->dev, "mmu irq status=%x\n", status);

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

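/*
 * Set up the MMU: request the "mmu" interrupt, allocate the ARM_MALI_LPAE
 * io-pgtable with input/output address sizes taken from the GPU's reported
 * MMU features, and enable address space 0.
 */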
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	struct io_pgtable_ops *pgtbl_ops;
	int err, irq;

	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
	if (!pfdev->mmu)
		return -ENOMEM;

	mutex_init(&pfdev->mmu->lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
			       IRQF_SHARED, "mmu", pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq\n");
		return err;
	}
	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
					 pfdev);
	if (!pgtbl_ops)
		return -ENOMEM;

	pfdev->mmu->pgtbl_ops = pgtbl_ops;

	panfrost_mmu_enable(pfdev, 0);

	return 0;
}

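/* Mask MMU interrupts, disable address space 0 and free the page tables. */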
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
	mmu_disable(pfdev, 0);

	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}