// SPDX-License-Identifier: GPL-2.0
/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

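/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA the driver uses the ARM DMA
 * mapping API directly; in all other configurations the stubs below
 * compile those calls away.
 */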
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 8

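/*
 * Per-SoC feature description. On R-Car Gen3 the IPMMU is split into a
 * root IPMMU-MM instance that owns the translation contexts and cache
 * instances in front of the bus masters (has_cache_leaf_nodes); R-Car
 * Gen2 uses a single self-contained instance.
 */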
struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_utlbs;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMTTBCR_SL0_TWOBIT_LVL_3	(0 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_2	(1 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

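/*
 * The uTLB registers are split across two banks: uTLBs 0-31 live at
 * 0x0300 and uTLBs 32 and up at 0x0600.
 */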
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_read(domain->mmu->root,
			  domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

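/*
 * Write a context register on both the leaf instance the domain is
 * attached to and the root instance that owns the context.
 */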
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_write(domain->mmu,
			    domain->context_id * IM_CTX_SIZE + reg, data);

	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete. */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

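/*
 * Selective flush is not supported, so tlb_add_flush() is a no-op and
 * the tlb_sync callback performs a full TLB flush instead.
 */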
static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

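/*
 * Allocate an unused context index on the root IPMMU and bind the domain
 * to it. Returns the context index on success or -EBUSY when all
 * num_ctx contexts are in use.
 */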
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else {
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if either the NStable or the NS bit is cleared when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/* Find an unused context. */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is a TLB flush really needed?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

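/* Called from ipmmu_irq() with mmu->lock held. */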
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before
	 * the status register is cleared, otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/* Check interrupts for all active contexts. */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

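/*
 * The hardware context is set up lazily, on the first attach;
 * allocation only initializes the software state.
 */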
static struct iommu_domain *__ipmmu_domain_alloc(unsigned int type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned int type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/*
			 * io_domain is not the first member of struct
			 * ipmmu_vmsa_domain, so free the containing
			 * structure, not the interior pointer.
			 */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
	}

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
	return 0;
}

static bool ipmmu_slave_whitelist(struct device *dev)
{
	/* By default, do not allow use of IPMMU. */
	return false;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	/* For R-Car Gen3, use a whitelist to opt slave devices in. */
	if (soc_device_match(soc_rcar_gen3) && !ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times. */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/* Only let through devices that have been verified in xlate(). */
	if (!to_ipmmu(dev))
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		return ipmmu_init_arm_mapping(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

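/* All slave devices behind a given IPMMU instance share a single group. */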
static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
};

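/*
 * A hypothetical device tree fragment for an R-Car Gen3 cache IPMMU that
 * points at its root IPMMU-MM instance (node name, unit address and the
 * interrupt bit number in renesas,ipmmu-main are illustrative only):
 *
 *	ipmmu_vc0: mmu@fe6b0000 {
 *		compatible = "renesas,ipmmu-r8a7795";
 *		reg = <0 0xfe6b0000 0 0x1000>;
 *		renesas,ipmmu-main = <&ipmmu_mm 8>;
 *		#iommu-cells = <1>;
 *	};
 */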
static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

MODULE_DEVICE_TABLE(of, ipmmu_of_ids);

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 48;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
			     mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of the has_cache_leaf_nodes feature flag or of a
	 * renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/* Defer probing until the root device has been registered. */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs. */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}

static void __exit ipmmu_exit(void)
{
	platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");