// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

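/*
 * Per-device MMU state: the io-pgtable configuration and ops used to build
 * the GPU page tables, and a mutex serializing map/unmap operations on
 * those tables.
 */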
struct panfrost_mmu {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	struct mutex lock;
};

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

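/*
 * Lock a range of GPU virtual address space ahead of an MMU operation.
 * AS_LOCKADDR takes the page-aligned base address in its upper bits and a
 * size field in its low bits; the field is computed as 10 + fls(num_pages),
 * bumped by one when the page count is not a power of two.  Illustrative
 * arithmetic only: a 16KB (4-page) range gives fls(4) = 3 -> width 13,
 * while a 12KB (3-page) range gives fls(3) = 2 -> 12, then +1 for the
 * round up -> 13.
 */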
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

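/*
 * Issue one MMU command on an address space: lock the target range (unless
 * the command is an UNLOCK), write the command, then wait for the AS to go
 * idle again.  The whole sequence runs under hwaccess_lock so commands from
 * different paths cannot interleave.
 */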
static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
		u64 iova, size_t size, u32 op)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	ret = wait_ready(pfdev, as_nr);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

	return ret;
}

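/*
 * Program an address space with the translation table base and memory
 * attributes produced by the ARM Mali LPAE io-pgtable backend, then issue
 * an UPDATE command so the hardware picks up the new configuration.
 */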
void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
{
	struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

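/*
 * Pick the largest page size usable for the next chunk: 2MB blocks when the
 * address is 2MB aligned and at least 2MB remains, 4KB pages otherwise.
 */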
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

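/*
 * Map a GEM object into the GPU address space.  The shmem backing pages are
 * obtained as an sg table, each DMA segment is mapped at the object's
 * drm_mm node offset using the largest page size that fits, and a FLUSH_PT
 * command is issued so the GPU observes the new page table entries.
 */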
int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	unsigned int count;
	struct scatterlist *sgl;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(bo->is_mapped))
		return 0;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&pfdev->mmu->lock);

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = true;

	return 0;
}

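/*
 * Tear down the GPU mappings for a GEM object: walk the object's VA range,
 * unmapping 2MB or 4KB at a time as appropriate, then issue FLUSH_PT so the
 * GPU drops any cached translations for the range.
 */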
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;
	int ret;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	mutex_lock(&pfdev->mmu->lock);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped_len += unmapped_page;
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = false;
}

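/*
 * TLB maintenance callbacks handed to the io-pgtable code.  A full
 * invalidation is implemented as a FLUSH_MEM command on AS 0; the sync hook
 * is currently a stub (see the HW_ISSUE_6367 TODO below).
 */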
static void mmu_tlb_inv_context_s1(void *cookie)
{
	struct panfrost_device *pfdev = cookie;

	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
	.tlb_flush_leaf = mmu_tlb_flush_leaf,
};

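/*
 * Decode the access-type field of AS_FAULTSTATUS for the fault message.
 * The ATOMIC encoding is only meaningful on GPUs with the AArch64 MMU
 * feature; on older MMUs that value is reported as UNKNOWN.
 */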
static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

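/*
 * MMU fault interrupt handler.  MMU_INT_STAT carries a pair of bits per
 * address space (bit i and bit i + 16); for each AS that faulted, read the
 * fault address and status, decode the exception type (bits [7:0]), access
 * type (bits [9:8]) and source id (bits [31:16]), log the fault and clear
 * the interrupt.  Faults are only reported here, not recovered.
 */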
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_STAT);
	int i;

	if (!status)
		return IRQ_NONE;

	dev_err(pfdev->dev, "mmu irq status=%x\n", status);

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

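/*
 * One-time MMU setup: allocate the MMU state, request the "mmu" interrupt,
 * unmask the fault interrupts, build an ARM_MALI_LPAE io-pgtable with 4KB
 * and 2MB page sizes and the input/output address sizes advertised in
 * mmu_features, and enable address space 0 with the resulting tables.
 */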
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	struct io_pgtable_ops *pgtbl_ops;
	int err, irq;

	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
	if (!pfdev->mmu)
		return -ENOMEM;

	mutex_init(&pfdev->mmu->lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
			       IRQF_SHARED, "mmu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq\n");
		return err;
	}

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
					 pfdev);
	if (!pgtbl_ops)
		return -ENOMEM;

	pfdev->mmu->pgtbl_ops = pgtbl_ops;

	panfrost_mmu_enable(pfdev, 0);

	return 0;
}

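/*
 * Undo panfrost_mmu_init(): mask the MMU interrupts, disable address space 0
 * and free the io-pgtable ops.
 */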
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
	mmu_disable(pfdev, 0);

	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}