// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

struct panfrost_mmu {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	struct mutex lock;
};

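/*
 * Poll the address space STATUS register until the ACTIVE bit clears, i.e.
 * until the MMU has finished processing any previously issued command for
 * this AS. Gives up after roughly 1ms.
 */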
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending.
	 */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

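/*
 * Lock down a virtual address range in an AS before issuing an MMU command.
 * The LOCKADDR register takes the page-aligned base address with a width
 * field in the low bits, derived from the (power-of-two rounded) number of
 * pages covered; see the fls() note below.
 */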
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

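/*
 * Lock the target range (except for UNLOCK commands), issue the requested
 * AS command and wait for it to complete. Serialized against other register
 * access with the hwaccess_lock.
 */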
static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
		u64 iova, size_t size, u32 op)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	ret = wait_ready(pfdev, as_nr);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

	return ret;
}

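/*
 * Program an address space with the translation table base and memory
 * attributes produced by io-pgtable for the ARM_MALI_LPAE format, clear and
 * unmask the MMU interrupts, then issue an UPDATE command to make the new
 * configuration live.
 */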
void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
{
	struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

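/*
 * Clear the translation table base and memory attributes for an AS and issue
 * an UPDATE, leaving the address space with no valid translations.
 */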
static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

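/*
 * Pick the largest page size usable for the next mapping step: 2MiB when the
 * address is 2MiB aligned and at least 2MiB remains, 4KiB otherwise. For
 * example, a 2MiB-aligned 3MiB range maps as one 2MiB entry followed by
 * 256 4KiB entries.
 */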
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

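/*
 * Map a GEM object's backing pages into the GPU address space at the IOVA
 * reserved in the object's drm_mm node, using the largest page size each
 * chunk allows, then flush the page table caches for the mapped range.
 */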
int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	unsigned int count;
	struct scatterlist *sgl;
	struct sg_table *sgt;
	int ret;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&pfdev->mmu->lock);

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		dma_addr_t paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%pad, len=%zx\n",
			iova, &paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);

	return 0;
}

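/*
 * Tear down a GEM object's GPU mapping, walking the range with the largest
 * page size available at each step, then flush the page table caches for the
 * range that was unmapped.
 */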
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;
	int ret;

	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx\n", iova, len);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	mutex_lock(&pfdev->mmu->lock);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		unmapped_page = ops->unmap(ops, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped_len += unmapped_page;
	}

	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&pfdev->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
}

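/*
 * TLB maintenance callbacks handed to io-pgtable. A full flush maps to an
 * AS_COMMAND_FLUSH_MEM over the whole address space; range invalidation and
 * sync are left as no-ops here, as panfrost_mmu_map()/unmap() issue their
 * own explicit flushes.
 */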
static void mmu_tlb_inv_context_s1(void *cookie)
{
	struct panfrost_device *pfdev = cookie;

	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
				     size_t granule, bool leaf, void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	/* struct panfrost_device *pfdev = cookie; */
	/* TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X */
}

static const struct iommu_gather_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
	.tlb_sync	= mmu_tlb_sync_context,
};

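/*
 * Decode the access type field of AS_FAULTSTATUS into a human readable
 * string for the fault report below.
 */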
static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

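/*
 * MMU interrupt handler. Each address space has a fault status bit at
 * position i and a second one at i + 16; faults are decoded from
 * AS_FAULTSTATUS/AS_FAULTADDRESS, reported, and then acknowledged by
 * clearing the corresponding interrupt bits.
 */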
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_STAT);
	int i;

	if (!status)
		return IRQ_NONE;

	dev_err(pfdev->dev, "mmu irq status=%x\n", status);

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

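/*
 * One-time MMU setup: allocate the panfrost_mmu state, request the "mmu"
 * interrupt, build the ARM_MALI_LPAE io-pgtable configuration from the
 * probed mmu_features (input/output address widths), and enable address
 * space 0.
 */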
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	struct io_pgtable_ops *pgtbl_ops;
	int err, irq;

	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
	if (!pfdev->mmu)
		return -ENOMEM;

	mutex_init(&pfdev->mmu->lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
			       IRQF_SHARED, "mmu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq\n");
		return err;
	}

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);

	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
					 pfdev);
	if (!pgtbl_ops)
		return -ENOMEM;

	pfdev->mmu->pgtbl_ops = pgtbl_ops;

	panfrost_mmu_enable(pfdev, 0);

	return 0;
}

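/*
 * Teardown counterpart of panfrost_mmu_init(): mask all MMU interrupts,
 * disable address space 0 and release the io-pgtable.
 */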
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
	mmu_disable(pfdev, 0);

	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}