1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * IOMMU API for ARM architected SMMUv3 implementations.
4  *
5  * Copyright (C) 2015 ARM Limited
6  *
7  * Author: Will Deacon <will.deacon@arm.com>
8  *
9  * This driver is powered by bad coffee and bombay mix.
10  */
11 
12 #include <linux/acpi.h>
13 #include <linux/acpi_iort.h>
14 #include <linux/bitops.h>
15 #include <linux/crash_dump.h>
16 #include <linux/delay.h>
17 #include <linux/err.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-pgtable.h>
20 #include <linux/iopoll.h>
21 #include <linux/module.h>
22 #include <linux/msi.h>
23 #include <linux/of.h>
24 #include <linux/of_address.h>
25 #include <linux/of_platform.h>
26 #include <linux/pci.h>
27 #include <linux/pci-ats.h>
28 #include <linux/platform_device.h>
29 
30 #include "arm-smmu-v3.h"
31 #include "../../dma-iommu.h"
32 #include "../../iommu-sva.h"
33 
34 static bool disable_bypass = true;
35 module_param(disable_bypass, bool, 0444);
36 MODULE_PARM_DESC(disable_bypass,
37 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
38 
39 static bool disable_msipolling;
40 module_param(disable_msipolling, bool, 0444);
41 MODULE_PARM_DESC(disable_msipolling,
42 	"Disable MSI-based polling for CMD_SYNC completion.");
43 
44 enum arm_smmu_msi_index {
45 	EVTQ_MSI_INDEX,
46 	GERROR_MSI_INDEX,
47 	PRIQ_MSI_INDEX,
48 	ARM_SMMU_MAX_MSIS,
49 };
50 
51 static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
52 	[EVTQ_MSI_INDEX] = {
53 		ARM_SMMU_EVTQ_IRQ_CFG0,
54 		ARM_SMMU_EVTQ_IRQ_CFG1,
55 		ARM_SMMU_EVTQ_IRQ_CFG2,
56 	},
57 	[GERROR_MSI_INDEX] = {
58 		ARM_SMMU_GERROR_IRQ_CFG0,
59 		ARM_SMMU_GERROR_IRQ_CFG1,
60 		ARM_SMMU_GERROR_IRQ_CFG2,
61 	},
62 	[PRIQ_MSI_INDEX] = {
63 		ARM_SMMU_PRIQ_IRQ_CFG0,
64 		ARM_SMMU_PRIQ_IRQ_CFG1,
65 		ARM_SMMU_PRIQ_IRQ_CFG2,
66 	},
67 };
68 
69 struct arm_smmu_option_prop {
70 	u32 opt;
71 	const char *prop;
72 };
73 
74 DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
75 DEFINE_MUTEX(arm_smmu_asid_lock);
76 
77 /*
78  * Special value used by SVA when a process dies, to quiesce a CD without
79  * disabling it.
80  */
81 struct arm_smmu_ctx_desc quiet_cd = { 0 };
82 
83 static struct arm_smmu_option_prop arm_smmu_options[] = {
84 	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
85 	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
86 	{ 0, NULL},
87 };
88 
89 static void parse_driver_options(struct arm_smmu_device *smmu)
90 {
91 	int i = 0;
92 
93 	do {
94 		if (of_property_read_bool(smmu->dev->of_node,
95 						arm_smmu_options[i].prop)) {
96 			smmu->options |= arm_smmu_options[i].opt;
97 			dev_notice(smmu->dev, "option %s\n",
98 				arm_smmu_options[i].prop);
99 		}
100 	} while (arm_smmu_options[++i].opt);
101 }
102 
103 /* Low-level queue manipulation functions */
104 static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
105 {
106 	u32 space, prod, cons;
107 
108 	prod = Q_IDX(q, q->prod);
109 	cons = Q_IDX(q, q->cons);
110 
111 	if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
112 		space = (1 << q->max_n_shift) - (prod - cons);
113 	else
114 		space = cons - prod;
115 
116 	return space >= n;
117 }
118 
119 static bool queue_full(struct arm_smmu_ll_queue *q)
120 {
121 	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
122 	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
123 }
124 
125 static bool queue_empty(struct arm_smmu_ll_queue *q)
126 {
127 	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
128 	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
129 }
130 
131 static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
132 {
133 	return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
134 		(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
135 	       ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
136 		(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
137 }
138 
139 static void queue_sync_cons_out(struct arm_smmu_queue *q)
140 {
141 	/*
142 	 * Ensure that all CPU accesses (reads and writes) to the queue
143 	 * are complete before we update the cons pointer.
144 	 */
145 	__iomb();
146 	writel_relaxed(q->llq.cons, q->cons_reg);
147 }
148 
149 static void queue_inc_cons(struct arm_smmu_ll_queue *q)
150 {
151 	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
152 	q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
153 }
154 
155 static int queue_sync_prod_in(struct arm_smmu_queue *q)
156 {
157 	u32 prod;
158 	int ret = 0;
159 
160 	/*
161 	 * We can't use the _relaxed() variant here, as we must prevent
162 	 * speculative reads of the queue before we have determined that
163 	 * prod has indeed moved.
164 	 */
165 	prod = readl(q->prod_reg);
166 
167 	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
168 		ret = -EOVERFLOW;
169 
170 	q->llq.prod = prod;
171 	return ret;
172 }
173 
174 static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
175 {
176 	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
177 	return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
178 }
179 
180 static void queue_poll_init(struct arm_smmu_device *smmu,
181 			    struct arm_smmu_queue_poll *qp)
182 {
183 	qp->delay = 1;
184 	qp->spin_cnt = 0;
185 	qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
186 	qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
187 }
188 
189 static int queue_poll(struct arm_smmu_queue_poll *qp)
190 {
191 	if (ktime_compare(ktime_get(), qp->timeout) > 0)
192 		return -ETIMEDOUT;
193 
194 	if (qp->wfe) {
195 		wfe();
196 	} else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
197 		cpu_relax();
198 	} else {
199 		udelay(qp->delay);
200 		qp->delay *= 2;
201 		qp->spin_cnt = 0;
202 	}
203 
204 	return 0;
205 }
206 
207 static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
208 {
209 	int i;
210 
211 	for (i = 0; i < n_dwords; ++i)
212 		*dst++ = cpu_to_le64(*src++);
213 }
214 
215 static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
216 {
217 	int i;
218 
219 	for (i = 0; i < n_dwords; ++i)
220 		*dst++ = le64_to_cpu(*src++);
221 }
222 
223 static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
224 {
225 	if (queue_empty(&q->llq))
226 		return -EAGAIN;
227 
228 	queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
229 	queue_inc_cons(&q->llq);
230 	queue_sync_cons_out(q);
231 	return 0;
232 }
233 
234 /* High-level queue accessors */
235 static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
236 {
237 	memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
238 	cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
239 
240 	switch (ent->opcode) {
241 	case CMDQ_OP_TLBI_EL2_ALL:
242 	case CMDQ_OP_TLBI_NSNH_ALL:
243 		break;
244 	case CMDQ_OP_PREFETCH_CFG:
245 		cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
246 		break;
247 	case CMDQ_OP_CFGI_CD:
248 		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
249 		fallthrough;
250 	case CMDQ_OP_CFGI_STE:
251 		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
252 		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
253 		break;
254 	case CMDQ_OP_CFGI_CD_ALL:
255 		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
256 		break;
257 	case CMDQ_OP_CFGI_ALL:
258 		/* Cover the entire SID range */
259 		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
260 		break;
261 	case CMDQ_OP_TLBI_NH_VA:
262 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
263 		fallthrough;
264 	case CMDQ_OP_TLBI_EL2_VA:
265 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
266 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
267 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
268 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
269 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
270 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
271 		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
272 		break;
273 	case CMDQ_OP_TLBI_S2_IPA:
274 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
275 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
276 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
277 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
278 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
279 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
280 		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
281 		break;
282 	case CMDQ_OP_TLBI_NH_ASID:
283 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
284 		fallthrough;
285 	case CMDQ_OP_TLBI_S12_VMALL:
286 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
287 		break;
288 	case CMDQ_OP_TLBI_EL2_ASID:
289 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
290 		break;
291 	case CMDQ_OP_ATC_INV:
292 		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
293 		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
294 		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
295 		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
296 		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
297 		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
298 		break;
299 	case CMDQ_OP_PRI_RESP:
300 		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
301 		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
302 		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
303 		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
304 		switch (ent->pri.resp) {
305 		case PRI_RESP_DENY:
306 		case PRI_RESP_FAIL:
307 		case PRI_RESP_SUCC:
308 			break;
309 		default:
310 			return -EINVAL;
311 		}
312 		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
313 		break;
314 	case CMDQ_OP_RESUME:
315 		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid);
316 		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp);
317 		cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag);
318 		break;
319 	case CMDQ_OP_CMD_SYNC:
320 		if (ent->sync.msiaddr) {
321 			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
322 			cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
323 		} else {
324 			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
325 		}
326 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
327 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
328 		break;
329 	default:
330 		return -ENOENT;
331 	}
332 
333 	return 0;
334 }
335 
336 static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
337 {
338 	return &smmu->cmdq;
339 }
340 
341 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
342 					 struct arm_smmu_queue *q, u32 prod)
343 {
344 	struct arm_smmu_cmdq_ent ent = {
345 		.opcode = CMDQ_OP_CMD_SYNC,
346 	};
347 
348 	/*
349 	 * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
350 	 * payload, so the write will zero the entire command on that platform.
351 	 */
352 	if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
353 		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
354 				   q->ent_dwords * 8;
355 	}
356 
357 	arm_smmu_cmdq_build_cmd(cmd, &ent);
358 }
359 
360 static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
361 				     struct arm_smmu_queue *q)
362 {
363 	static const char * const cerror_str[] = {
364 		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
365 		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
366 		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
367 		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
368 	};
369 
370 	int i;
371 	u64 cmd[CMDQ_ENT_DWORDS];
372 	u32 cons = readl_relaxed(q->cons_reg);
373 	u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
374 	struct arm_smmu_cmdq_ent cmd_sync = {
375 		.opcode = CMDQ_OP_CMD_SYNC,
376 	};
377 
378 	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
379 		idx < ARRAY_SIZE(cerror_str) ?  cerror_str[idx] : "Unknown");
380 
381 	switch (idx) {
382 	case CMDQ_ERR_CERROR_ABT_IDX:
383 		dev_err(smmu->dev, "retrying command fetch\n");
384 		return;
385 	case CMDQ_ERR_CERROR_NONE_IDX:
386 		return;
387 	case CMDQ_ERR_CERROR_ATC_INV_IDX:
388 		/*
389 		 * ATC Invalidation Completion timeout. CONS is still pointing
390 		 * at the CMD_SYNC. Attempt to complete other pending commands
391 		 * by repeating the CMD_SYNC, though we might well end up back
392 		 * here since the ATC invalidation may still be pending.
393 		 */
394 		return;
395 	case CMDQ_ERR_CERROR_ILL_IDX:
396 	default:
397 		break;
398 	}
399 
400 	/*
401 	 * We may have concurrent producers, so we need to be careful
402 	 * not to touch any of the shadow cmdq state.
403 	 */
404 	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
405 	dev_err(smmu->dev, "skipping command in error state:\n");
406 	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
407 		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
408 
409 	/* Convert the erroneous command into a CMD_SYNC */
410 	arm_smmu_cmdq_build_cmd(cmd, &cmd_sync);
411 
412 	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
413 }
414 
415 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
416 {
417 	__arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
418 }
419 
420 /*
421  * Command queue locking.
422  * This is a form of bastardised rwlock with the following major changes:
423  *
424  * - The only LOCK routines are exclusive_trylock() and shared_lock().
425  *   Neither have barrier semantics, and instead provide only a control
426  *   dependency.
427  *
428  * - The UNLOCK routines are supplemented with shared_tryunlock(), which
429  *   fails if the caller appears to be the last lock holder (yes, this is
430  *   racy). All successful UNLOCK routines have RELEASE semantics.
431  */
432 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
433 {
434 	int val;
435 
436 	/*
437 	 * We can try to avoid the cmpxchg() loop by simply incrementing the
438 	 * lock counter. When held in exclusive state, the lock counter is set
439 	 * to INT_MIN so these increments won't hurt as the value will remain
440 	 * negative.
441 	 */
442 	if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
443 		return;
444 
445 	do {
446 		val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
447 	} while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
448 }
449 
450 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
451 {
452 	(void)atomic_dec_return_release(&cmdq->lock);
453 }
454 
455 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
456 {
457 	if (atomic_read(&cmdq->lock) == 1)
458 		return false;
459 
460 	arm_smmu_cmdq_shared_unlock(cmdq);
461 	return true;
462 }
463 
464 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)		\
465 ({									\
466 	bool __ret;							\
467 	local_irq_save(flags);						\
468 	__ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN);	\
469 	if (!__ret)							\
470 		local_irq_restore(flags);				\
471 	__ret;								\
472 })
473 
474 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags)		\
475 ({									\
476 	atomic_set_release(&cmdq->lock, 0);				\
477 	local_irq_restore(flags);					\
478 })
479 
480 
481 /*
482  * Command queue insertion.
483  * This is made fiddly by our attempts to achieve some sort of scalability
484  * since there is one queue shared amongst all of the CPUs in the system.  If
485  * you like mixed-size concurrency, dependency ordering and relaxed atomics,
486  * then you'll *love* this monstrosity.
487  *
488  * The basic idea is to split the queue up into ranges of commands that are
489  * owned by a given CPU; the owner may not have written all of the commands
490  * itself, but is responsible for advancing the hardware prod pointer when
491  * the time comes. The algorithm is roughly:
492  *
493  * 	1. Allocate some space in the queue. At this point we also discover
494  *	   whether the head of the queue is currently owned by another CPU,
495  *	   or whether we are the owner.
496  *
497  *	2. Write our commands into our allocated slots in the queue.
498  *
499  *	3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
500  *
501  *	4. If we are an owner:
502  *		a. Wait for the previous owner to finish.
503  *		b. Mark the queue head as unowned, which tells us the range
504  *		   that we are responsible for publishing.
505  *		c. Wait for all commands in our owned range to become valid.
506  *		d. Advance the hardware prod pointer.
507  *		e. Tell the next owner we've finished.
508  *
509  *	5. If we are inserting a CMD_SYNC (we may or may not have been an
510  *	   owner), then we need to stick around until it has completed:
511  *		a. If we have MSIs, the SMMU can write back into the CMD_SYNC
512  *		   to clear the first 4 bytes.
513  *		b. Otherwise, we spin waiting for the hardware cons pointer to
514  *		   advance past our command.
515  *
516  * The devil is in the details, particularly the use of locking for handling
517  * SYNC completion and freeing up space in the queue before we think that it is
518  * full.
519  */
520 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
521 					       u32 sprod, u32 eprod, bool set)
522 {
523 	u32 swidx, sbidx, ewidx, ebidx;
524 	struct arm_smmu_ll_queue llq = {
525 		.max_n_shift	= cmdq->q.llq.max_n_shift,
526 		.prod		= sprod,
527 	};
528 
529 	ewidx = BIT_WORD(Q_IDX(&llq, eprod));
530 	ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
531 
532 	while (llq.prod != eprod) {
533 		unsigned long mask;
534 		atomic_long_t *ptr;
535 		u32 limit = BITS_PER_LONG;
536 
537 		swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
538 		sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
539 
540 		ptr = &cmdq->valid_map[swidx];
541 
542 		if ((swidx == ewidx) && (sbidx < ebidx))
543 			limit = ebidx;
544 
545 		mask = GENMASK(limit - 1, sbidx);
546 
547 		/*
548 		 * The valid bit is the inverse of the wrap bit. This means
549 		 * that a zero-initialised queue is invalid and, after marking
550 		 * all entries as valid, they become invalid again when we
551 		 * wrap.
552 		 */
553 		if (set) {
554 			atomic_long_xor(mask, ptr);
555 		} else { /* Poll */
556 			unsigned long valid;
557 
558 			valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
559 			atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
560 		}
561 
562 		llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
563 	}
564 }
565 
566 /* Mark all entries in the range [sprod, eprod) as valid */
567 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
568 					u32 sprod, u32 eprod)
569 {
570 	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
571 }
572 
573 /* Wait for all entries in the range [sprod, eprod) to become valid */
574 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
575 					 u32 sprod, u32 eprod)
576 {
577 	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
578 }
579 
580 /* Wait for the command queue to become non-full */
581 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
582 					     struct arm_smmu_ll_queue *llq)
583 {
584 	unsigned long flags;
585 	struct arm_smmu_queue_poll qp;
586 	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
587 	int ret = 0;
588 
589 	/*
590 	 * Try to update our copy of cons by grabbing exclusive cmdq access. If
591 	 * that fails, spin until somebody else updates it for us.
592 	 */
593 	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
594 		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
595 		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
596 		llq->val = READ_ONCE(cmdq->q.llq.val);
597 		return 0;
598 	}
599 
600 	queue_poll_init(smmu, &qp);
601 	do {
602 		llq->val = READ_ONCE(cmdq->q.llq.val);
603 		if (!queue_full(llq))
604 			break;
605 
606 		ret = queue_poll(&qp);
607 	} while (!ret);
608 
609 	return ret;
610 }
611 
612 /*
613  * Wait until the SMMU signals a CMD_SYNC completion MSI.
614  * Must be called with the cmdq lock held in some capacity.
615  */
616 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
617 					  struct arm_smmu_ll_queue *llq)
618 {
619 	int ret = 0;
620 	struct arm_smmu_queue_poll qp;
621 	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
622 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
623 
624 	queue_poll_init(smmu, &qp);
625 
626 	/*
627 	 * The MSI won't generate an event, since it's being written back
628 	 * into the command queue.
629 	 */
630 	qp.wfe = false;
631 	smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
632 	llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
633 	return ret;
634 }
635 
636 /*
637  * Wait until the SMMU cons index passes llq->prod.
638  * Must be called with the cmdq lock held in some capacity.
639  */
640 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
641 					       struct arm_smmu_ll_queue *llq)
642 {
643 	struct arm_smmu_queue_poll qp;
644 	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
645 	u32 prod = llq->prod;
646 	int ret = 0;
647 
648 	queue_poll_init(smmu, &qp);
649 	llq->val = READ_ONCE(cmdq->q.llq.val);
650 	do {
651 		if (queue_consumed(llq, prod))
652 			break;
653 
654 		ret = queue_poll(&qp);
655 
656 		/*
657 		 * This needs to be a readl() so that our subsequent call
658 		 * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
659 		 *
660 		 * Specifically, we need to ensure that we observe all
661 		 * shared_lock()s by other CMD_SYNCs that share our owner,
662 		 * so that a failing call to tryunlock() means that we're
663 		 * the last one out and therefore we can safely advance
664 		 * cmdq->q.llq.cons. Roughly speaking:
665 		 *
666 		 * CPU 0		CPU1			CPU2 (us)
667 		 *
668 		 * if (sync)
669 		 * 	shared_lock();
670 		 *
671 		 * dma_wmb();
672 		 * set_valid_map();
673 		 *
674 		 * 			if (owner) {
675 		 *				poll_valid_map();
676 		 *				<control dependency>
677 		 *				writel(prod_reg);
678 		 *
679 		 *						readl(cons_reg);
680 		 *						tryunlock();
681 		 *
682 		 * Requires us to see CPU 0's shared_lock() acquisition.
683 		 */
684 		llq->cons = readl(cmdq->q.cons_reg);
685 	} while (!ret);
686 
687 	return ret;
688 }
689 
690 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
691 					 struct arm_smmu_ll_queue *llq)
692 {
693 	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
694 		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
695 
696 	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
697 }
698 
699 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
700 					u32 prod, int n)
701 {
702 	int i;
703 	struct arm_smmu_ll_queue llq = {
704 		.max_n_shift	= cmdq->q.llq.max_n_shift,
705 		.prod		= prod,
706 	};
707 
708 	for (i = 0; i < n; ++i) {
709 		u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];
710 
711 		prod = queue_inc_prod_n(&llq, i);
712 		queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
713 	}
714 }
715 
716 /*
717  * This is the actual insertion function, and provides the following
718  * ordering guarantees to callers:
719  *
720  * - There is a dma_wmb() before publishing any commands to the queue.
721  *   This can be relied upon to order prior writes to data structures
722  *   in memory (such as a CD or an STE) before the command.
723  *
724  * - On completion of a CMD_SYNC, there is a control dependency.
725  *   This can be relied upon to order subsequent writes to memory (e.g.
726  *   freeing an IOVA) after completion of the CMD_SYNC.
727  *
728  * - Command insertion is totally ordered, so if two CPUs each race to
729  *   insert their own list of commands then all of the commands from one
730  *   CPU will appear before any of the commands from the other CPU.
731  */
732 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
733 				       u64 *cmds, int n, bool sync)
734 {
735 	u64 cmd_sync[CMDQ_ENT_DWORDS];
736 	u32 prod;
737 	unsigned long flags;
738 	bool owner;
739 	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
740 	struct arm_smmu_ll_queue llq, head;
741 	int ret = 0;
742 
743 	llq.max_n_shift = cmdq->q.llq.max_n_shift;
744 
745 	/* 1. Allocate some space in the queue */
746 	local_irq_save(flags);
747 	llq.val = READ_ONCE(cmdq->q.llq.val);
748 	do {
749 		u64 old;
750 
751 		while (!queue_has_space(&llq, n + sync)) {
752 			local_irq_restore(flags);
753 			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
754 				dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
755 			local_irq_save(flags);
756 		}
757 
758 		head.cons = llq.cons;
759 		head.prod = queue_inc_prod_n(&llq, n + sync) |
760 					     CMDQ_PROD_OWNED_FLAG;
761 
762 		old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
763 		if (old == llq.val)
764 			break;
765 
766 		llq.val = old;
767 	} while (1);
768 	owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
769 	head.prod &= ~CMDQ_PROD_OWNED_FLAG;
770 	llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
771 
772 	/*
773 	 * 2. Write our commands into the queue
774 	 * Dependency ordering from the cmpxchg() loop above.
775 	 */
776 	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
777 	if (sync) {
778 		prod = queue_inc_prod_n(&llq, n);
779 		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
780 		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
781 
782 		/*
783 		 * In order to determine completion of our CMD_SYNC, we must
784 		 * ensure that the queue can't wrap twice without us noticing.
785 		 * We achieve that by taking the cmdq lock as shared before
786 		 * marking our slot as valid.
787 		 */
788 		arm_smmu_cmdq_shared_lock(cmdq);
789 	}
790 
791 	/* 3. Mark our slots as valid, ensuring commands are visible first */
792 	dma_wmb();
793 	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
794 
795 	/* 4. If we are the owner, take control of the SMMU hardware */
796 	if (owner) {
797 		/* a. Wait for previous owner to finish */
798 		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
799 
800 		/* b. Stop gathering work by clearing the owned flag */
801 		prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
802 						   &cmdq->q.llq.atomic.prod);
803 		prod &= ~CMDQ_PROD_OWNED_FLAG;
804 
805 		/*
806 		 * c. Wait for any gathered work to be written to the queue.
807 		 * Note that we read our own entries so that we have the control
808 		 * dependency required by (d).
809 		 */
810 		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
811 
812 		/*
813 		 * d. Advance the hardware prod pointer
814 		 * Control dependency ordering from the entries becoming valid.
815 		 */
816 		writel_relaxed(prod, cmdq->q.prod_reg);
817 
818 		/*
819 		 * e. Tell the next owner we're done
820 		 * Make sure we've updated the hardware first, so that we don't
821 		 * race to update prod and potentially move it backwards.
822 		 */
823 		atomic_set_release(&cmdq->owner_prod, prod);
824 	}
825 
826 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
827 	if (sync) {
828 		llq.prod = queue_inc_prod_n(&llq, n);
829 		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
830 		if (ret) {
831 			dev_err_ratelimited(smmu->dev,
832 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
833 					    llq.prod,
834 					    readl_relaxed(cmdq->q.prod_reg),
835 					    readl_relaxed(cmdq->q.cons_reg));
836 		}
837 
838 		/*
839 		 * Try to unlock the cmdq lock. This will fail if we're the last
840 		 * reader, in which case we can safely update cmdq->q.llq.cons
841 		 */
842 		if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
843 			WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
844 			arm_smmu_cmdq_shared_unlock(cmdq);
845 		}
846 	}
847 
848 	local_irq_restore(flags);
849 	return ret;
850 }
851 
852 static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
853 				     struct arm_smmu_cmdq_ent *ent,
854 				     bool sync)
855 {
856 	u64 cmd[CMDQ_ENT_DWORDS];
857 
858 	if (unlikely(arm_smmu_cmdq_build_cmd(cmd, ent))) {
859 		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
860 			 ent->opcode);
861 		return -EINVAL;
862 	}
863 
864 	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
865 }
866 
867 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
868 				   struct arm_smmu_cmdq_ent *ent)
869 {
870 	return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
871 }
872 
873 static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
874 					     struct arm_smmu_cmdq_ent *ent)
875 {
876 	return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
877 }
878 
879 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
880 				    struct arm_smmu_cmdq_batch *cmds,
881 				    struct arm_smmu_cmdq_ent *cmd)
882 {
883 	int index;
884 
885 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
886 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
887 		cmds->num = 0;
888 	}
889 
890 	index = cmds->num * CMDQ_ENT_DWORDS;
891 	if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) {
892 		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
893 			 cmd->opcode);
894 		return;
895 	}
896 
897 	cmds->num++;
898 }
899 
900 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
901 				      struct arm_smmu_cmdq_batch *cmds)
902 {
903 	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
904 }
905 
906 static int arm_smmu_page_response(struct device *dev,
907 				  struct iommu_fault_event *unused,
908 				  struct iommu_page_response *resp)
909 {
910 	struct arm_smmu_cmdq_ent cmd = {0};
911 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
912 	int sid = master->streams[0].id;
913 
914 	if (master->stall_enabled) {
915 		cmd.opcode		= CMDQ_OP_RESUME;
916 		cmd.resume.sid		= sid;
917 		cmd.resume.stag		= resp->grpid;
918 		switch (resp->code) {
919 		case IOMMU_PAGE_RESP_INVALID:
920 		case IOMMU_PAGE_RESP_FAILURE:
921 			cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
922 			break;
923 		case IOMMU_PAGE_RESP_SUCCESS:
924 			cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
925 			break;
926 		default:
927 			return -EINVAL;
928 		}
929 	} else {
930 		return -ENODEV;
931 	}
932 
933 	arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
934 	/*
935 	 * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
936 	 * RESUME consumption guarantees that the stalled transaction will be
937 	 * terminated... at some point in the future. PRI_RESP is fire and
938 	 * forget.
939 	 */
940 
941 	return 0;
942 }
943 
944 /* Context descriptor manipulation functions */
945 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
946 {
947 	struct arm_smmu_cmdq_ent cmd = {
948 		.opcode	= smmu->features & ARM_SMMU_FEAT_E2H ?
949 			CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
950 		.tlbi.asid = asid,
951 	};
952 
953 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
954 }
955 
956 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
957 			     int ssid, bool leaf)
958 {
959 	size_t i;
960 	unsigned long flags;
961 	struct arm_smmu_master *master;
962 	struct arm_smmu_cmdq_batch cmds;
963 	struct arm_smmu_device *smmu = smmu_domain->smmu;
964 	struct arm_smmu_cmdq_ent cmd = {
965 		.opcode	= CMDQ_OP_CFGI_CD,
966 		.cfgi	= {
967 			.ssid	= ssid,
968 			.leaf	= leaf,
969 		},
970 	};
971 
972 	cmds.num = 0;
973 
974 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
975 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
976 		for (i = 0; i < master->num_streams; i++) {
977 			cmd.cfgi.sid = master->streams[i].id;
978 			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
979 		}
980 	}
981 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
982 
983 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
984 }
985 
986 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
987 					struct arm_smmu_l1_ctx_desc *l1_desc)
988 {
989 	size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
990 
991 	l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
992 					     &l1_desc->l2ptr_dma, GFP_KERNEL);
993 	if (!l1_desc->l2ptr) {
994 		dev_warn(smmu->dev,
995 			 "failed to allocate context descriptor table\n");
996 		return -ENOMEM;
997 	}
998 	return 0;
999 }
1000 
1001 static void arm_smmu_write_cd_l1_desc(__le64 *dst,
1002 				      struct arm_smmu_l1_ctx_desc *l1_desc)
1003 {
1004 	u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
1005 		  CTXDESC_L1_DESC_V;
1006 
1007 	/* See comment in arm_smmu_write_ctx_desc() */
1008 	WRITE_ONCE(*dst, cpu_to_le64(val));
1009 }
1010 
1011 static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
1012 				   u32 ssid)
1013 {
1014 	__le64 *l1ptr;
1015 	unsigned int idx;
1016 	struct arm_smmu_l1_ctx_desc *l1_desc;
1017 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1018 	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
1019 
1020 	if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
1021 		return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
1022 
1023 	idx = ssid >> CTXDESC_SPLIT;
1024 	l1_desc = &cdcfg->l1_desc[idx];
1025 	if (!l1_desc->l2ptr) {
1026 		if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
1027 			return NULL;
1028 
1029 		l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
1030 		arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
1031 		/* An invalid L1CD can be cached */
1032 		arm_smmu_sync_cd(smmu_domain, ssid, false);
1033 	}
1034 	idx = ssid & (CTXDESC_L2_ENTRIES - 1);
1035 	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
1036 }
1037 
1038 int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
1039 			    struct arm_smmu_ctx_desc *cd)
1040 {
1041 	/*
1042 	 * This function handles the following cases:
1043 	 *
1044 	 * (1) Install primary CD, for normal DMA traffic (SSID = 0).
1045 	 * (2) Install a secondary CD, for SID+SSID traffic.
1046 	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
1047 	 *     CD, then invalidate the old entry and mappings.
1048 	 * (4) Quiesce the context without clearing the valid bit. Disable
1049 	 *     translation, and ignore any translation fault.
1050 	 * (5) Remove a secondary CD.
1051 	 */
1052 	u64 val;
1053 	bool cd_live;
1054 	__le64 *cdptr;
1055 
1056 	if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
1057 		return -E2BIG;
1058 
1059 	cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
1060 	if (!cdptr)
1061 		return -ENOMEM;
1062 
1063 	val = le64_to_cpu(cdptr[0]);
1064 	cd_live = !!(val & CTXDESC_CD_0_V);
1065 
1066 	if (!cd) { /* (5) */
1067 		val = 0;
1068 	} else if (cd == &quiet_cd) { /* (4) */
1069 		val |= CTXDESC_CD_0_TCR_EPD0;
1070 	} else if (cd_live) { /* (3) */
1071 		val &= ~CTXDESC_CD_0_ASID;
1072 		val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
1073 		/*
1074 		 * Until CD+TLB invalidation, both ASIDs may be used for tagging
1075 		 * this substream's traffic
1076 		 */
1077 	} else { /* (1) and (2) */
1078 		cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
1079 		cdptr[2] = 0;
1080 		cdptr[3] = cpu_to_le64(cd->mair);
1081 
1082 		/*
1083 		 * STE is live, and the SMMU might read dwords of this CD in any
1084 		 * order. Ensure that it observes valid values before reading
1085 		 * V=1.
1086 		 */
1087 		arm_smmu_sync_cd(smmu_domain, ssid, true);
1088 
1089 		val = cd->tcr |
1090 #ifdef __BIG_ENDIAN
1091 			CTXDESC_CD_0_ENDI |
1092 #endif
1093 			CTXDESC_CD_0_R | CTXDESC_CD_0_A |
1094 			(cd->mm ? 0 : CTXDESC_CD_0_ASET) |
1095 			CTXDESC_CD_0_AA64 |
1096 			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
1097 			CTXDESC_CD_0_V;
1098 
1099 		if (smmu_domain->stall_enabled)
1100 			val |= CTXDESC_CD_0_S;
1101 	}
1102 
1103 	/*
1104 	 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
1105 	 * "Configuration structures and configuration invalidation completion"
1106 	 *
1107 	 *   The size of single-copy atomic reads made by the SMMU is
1108 	 *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
1109 	 *   field within an aligned 64-bit span of a structure can be altered
1110 	 *   without first making the structure invalid.
1111 	 */
1112 	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
1113 	arm_smmu_sync_cd(smmu_domain, ssid, true);
1114 	return 0;
1115 }
1116 
1117 static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
1118 {
1119 	int ret;
1120 	size_t l1size;
1121 	size_t max_contexts;
1122 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1123 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1124 	struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
1125 
1126 	max_contexts = 1 << cfg->s1cdmax;
1127 
1128 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
1129 	    max_contexts <= CTXDESC_L2_ENTRIES) {
1130 		cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
1131 		cdcfg->num_l1_ents = max_contexts;
1132 
1133 		l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
1134 	} else {
1135 		cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
1136 		cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
1137 						  CTXDESC_L2_ENTRIES);
1138 
1139 		cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
1140 					      sizeof(*cdcfg->l1_desc),
1141 					      GFP_KERNEL);
1142 		if (!cdcfg->l1_desc)
1143 			return -ENOMEM;
1144 
1145 		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
1146 	}
1147 
1148 	cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
1149 					   GFP_KERNEL);
1150 	if (!cdcfg->cdtab) {
1151 		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1152 		ret = -ENOMEM;
1153 		goto err_free_l1;
1154 	}
1155 
1156 	return 0;
1157 
1158 err_free_l1:
1159 	if (cdcfg->l1_desc) {
1160 		devm_kfree(smmu->dev, cdcfg->l1_desc);
1161 		cdcfg->l1_desc = NULL;
1162 	}
1163 	return ret;
1164 }
1165 
1166 static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
1167 {
1168 	int i;
1169 	size_t size, l1size;
1170 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1171 	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
1172 
1173 	if (cdcfg->l1_desc) {
1174 		size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
1175 
1176 		for (i = 0; i < cdcfg->num_l1_ents; i++) {
1177 			if (!cdcfg->l1_desc[i].l2ptr)
1178 				continue;
1179 
1180 			dmam_free_coherent(smmu->dev, size,
1181 					   cdcfg->l1_desc[i].l2ptr,
1182 					   cdcfg->l1_desc[i].l2ptr_dma);
1183 		}
1184 		devm_kfree(smmu->dev, cdcfg->l1_desc);
1185 		cdcfg->l1_desc = NULL;
1186 
1187 		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
1188 	} else {
1189 		l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
1190 	}
1191 
1192 	dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
1193 	cdcfg->cdtab_dma = 0;
1194 	cdcfg->cdtab = NULL;
1195 }
1196 
1197 bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
1198 {
1199 	bool free;
1200 	struct arm_smmu_ctx_desc *old_cd;
1201 
1202 	if (!cd->asid)
1203 		return false;
1204 
1205 	free = refcount_dec_and_test(&cd->refs);
1206 	if (free) {
1207 		old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid);
1208 		WARN_ON(old_cd != cd);
1209 	}
1210 	return free;
1211 }
1212 
1213 /* Stream table manipulation functions */
1214 static void
1215 arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
1216 {
1217 	u64 val = 0;
1218 
1219 	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
1220 	val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
1221 
1222 	/* See comment in arm_smmu_write_ctx_desc() */
1223 	WRITE_ONCE(*dst, cpu_to_le64(val));
1224 }
1225 
1226 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
1227 {
1228 	struct arm_smmu_cmdq_ent cmd = {
1229 		.opcode	= CMDQ_OP_CFGI_STE,
1230 		.cfgi	= {
1231 			.sid	= sid,
1232 			.leaf	= true,
1233 		},
1234 	};
1235 
1236 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
1237 }
1238 
1239 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
1240 				      __le64 *dst)
1241 {
1242 	/*
1243 	 * This is hideously complicated, but we only really care about
1244 	 * three cases at the moment:
1245 	 *
1246 	 * 1. Invalid (all zero) -> bypass/fault (init)
1247 	 * 2. Bypass/fault -> translation/bypass (attach)
1248 	 * 3. Translation/bypass -> bypass/fault (detach)
1249 	 *
1250 	 * Given that we can't update the STE atomically and the SMMU
1251 	 * doesn't read the thing in a defined order, that leaves us
1252 	 * with the following maintenance requirements:
1253 	 *
1254 	 * 1. Update Config, return (init time STEs aren't live)
1255 	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
1256 	 * 3. Update Config, sync
1257 	 */
1258 	u64 val = le64_to_cpu(dst[0]);
1259 	bool ste_live = false;
1260 	struct arm_smmu_device *smmu = NULL;
1261 	struct arm_smmu_s1_cfg *s1_cfg = NULL;
1262 	struct arm_smmu_s2_cfg *s2_cfg = NULL;
1263 	struct arm_smmu_domain *smmu_domain = NULL;
1264 	struct arm_smmu_cmdq_ent prefetch_cmd = {
1265 		.opcode		= CMDQ_OP_PREFETCH_CFG,
1266 		.prefetch	= {
1267 			.sid	= sid,
1268 		},
1269 	};
1270 
1271 	if (master) {
1272 		smmu_domain = master->domain;
1273 		smmu = master->smmu;
1274 	}
1275 
1276 	if (smmu_domain) {
1277 		switch (smmu_domain->stage) {
1278 		case ARM_SMMU_DOMAIN_S1:
1279 			s1_cfg = &smmu_domain->s1_cfg;
1280 			break;
1281 		case ARM_SMMU_DOMAIN_S2:
1282 		case ARM_SMMU_DOMAIN_NESTED:
1283 			s2_cfg = &smmu_domain->s2_cfg;
1284 			break;
1285 		default:
1286 			break;
1287 		}
1288 	}
1289 
1290 	if (val & STRTAB_STE_0_V) {
1291 		switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
1292 		case STRTAB_STE_0_CFG_BYPASS:
1293 			break;
1294 		case STRTAB_STE_0_CFG_S1_TRANS:
1295 		case STRTAB_STE_0_CFG_S2_TRANS:
1296 			ste_live = true;
1297 			break;
1298 		case STRTAB_STE_0_CFG_ABORT:
1299 			BUG_ON(!disable_bypass);
1300 			break;
1301 		default:
1302 			BUG(); /* STE corruption */
1303 		}
1304 	}
1305 
1306 	/* Nuke the existing STE_0 value, as we're going to rewrite it */
1307 	val = STRTAB_STE_0_V;
1308 
1309 	/* Bypass/fault */
1310 	if (!smmu_domain || !(s1_cfg || s2_cfg)) {
1311 		if (!smmu_domain && disable_bypass)
1312 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
1313 		else
1314 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
1315 
1316 		dst[0] = cpu_to_le64(val);
1317 		dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
1318 						STRTAB_STE_1_SHCFG_INCOMING));
1319 		dst[2] = 0; /* Nuke the VMID */
1320 		/*
1321 		 * The SMMU can perform negative caching, so we must sync
1322 		 * the STE regardless of whether the old value was live.
1323 		 */
1324 		if (smmu)
1325 			arm_smmu_sync_ste_for_sid(smmu, sid);
1326 		return;
1327 	}
1328 
1329 	if (s1_cfg) {
1330 		u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
1331 			STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
1332 
1333 		BUG_ON(ste_live);
1334 		dst[1] = cpu_to_le64(
1335 			 FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
1336 			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
1337 			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
1338 			 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
1339 			 FIELD_PREP(STRTAB_STE_1_STRW, strw));
1340 
1341 		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
1342 		    !master->stall_enabled)
1343 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
1344 
1345 		val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
1346 			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
1347 			FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
1348 			FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
1349 	}
1350 
1351 	if (s2_cfg) {
1352 		BUG_ON(ste_live);
1353 		dst[2] = cpu_to_le64(
1354 			 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
1355 			 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
1356 #ifdef __BIG_ENDIAN
1357 			 STRTAB_STE_2_S2ENDI |
1358 #endif
1359 			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
1360 			 STRTAB_STE_2_S2R);
1361 
1362 		dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
1363 
1364 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
1365 	}
1366 
1367 	if (master->ats_enabled)
1368 		dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
1369 						 STRTAB_STE_1_EATS_TRANS));
1370 
1371 	arm_smmu_sync_ste_for_sid(smmu, sid);
1372 	/* See comment in arm_smmu_write_ctx_desc() */
1373 	WRITE_ONCE(dst[0], cpu_to_le64(val));
1374 	arm_smmu_sync_ste_for_sid(smmu, sid);
1375 
1376 	/* It's likely that we'll want to use the new STE soon */
1377 	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1378 		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1379 }
1380 
1381 static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
1382 {
1383 	unsigned int i;
1384 	u64 val = STRTAB_STE_0_V;
1385 
1386 	if (disable_bypass && !force)
1387 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
1388 	else
1389 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
1390 
1391 	for (i = 0; i < nent; ++i) {
1392 		strtab[0] = cpu_to_le64(val);
1393 		strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
1394 						   STRTAB_STE_1_SHCFG_INCOMING));
1395 		strtab[2] = 0;
1396 		strtab += STRTAB_STE_DWORDS;
1397 	}
1398 }
1399 
1400 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1401 {
1402 	size_t size;
1403 	void *strtab;
1404 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1405 	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
1406 
1407 	if (desc->l2ptr)
1408 		return 0;
1409 
1410 	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1411 	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1412 
1413 	desc->span = STRTAB_SPLIT + 1;
1414 	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1415 					  GFP_KERNEL);
1416 	if (!desc->l2ptr) {
1417 		dev_err(smmu->dev,
1418 			"failed to allocate l2 stream table for SID %u\n",
1419 			sid);
1420 		return -ENOMEM;
1421 	}
1422 
1423 	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
1424 	arm_smmu_write_strtab_l1_desc(strtab, desc);
1425 	return 0;
1426 }
1427 
1428 static struct arm_smmu_master *
1429 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
1430 {
1431 	struct rb_node *node;
1432 	struct arm_smmu_stream *stream;
1433 
1434 	lockdep_assert_held(&smmu->streams_mutex);
1435 
1436 	node = smmu->streams.rb_node;
1437 	while (node) {
1438 		stream = rb_entry(node, struct arm_smmu_stream, node);
1439 		if (stream->id < sid)
1440 			node = node->rb_right;
1441 		else if (stream->id > sid)
1442 			node = node->rb_left;
1443 		else
1444 			return stream->master;
1445 	}
1446 
1447 	return NULL;
1448 }
1449 
1450 /* IRQ and event handlers */
1451 static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
1452 {
1453 	int ret;
1454 	u32 reason;
1455 	u32 perm = 0;
1456 	struct arm_smmu_master *master;
1457 	bool ssid_valid = evt[0] & EVTQ_0_SSV;
1458 	u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
1459 	struct iommu_fault_event fault_evt = { };
1460 	struct iommu_fault *flt = &fault_evt.fault;
1461 
1462 	switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
1463 	case EVT_ID_TRANSLATION_FAULT:
1464 		reason = IOMMU_FAULT_REASON_PTE_FETCH;
1465 		break;
1466 	case EVT_ID_ADDR_SIZE_FAULT:
1467 		reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
1468 		break;
1469 	case EVT_ID_ACCESS_FAULT:
1470 		reason = IOMMU_FAULT_REASON_ACCESS;
1471 		break;
1472 	case EVT_ID_PERMISSION_FAULT:
1473 		reason = IOMMU_FAULT_REASON_PERMISSION;
1474 		break;
1475 	default:
1476 		return -EOPNOTSUPP;
1477 	}
1478 
1479 	/* Stage-2 is always pinned at the moment */
1480 	if (evt[1] & EVTQ_1_S2)
1481 		return -EFAULT;
1482 
1483 	if (evt[1] & EVTQ_1_RnW)
1484 		perm |= IOMMU_FAULT_PERM_READ;
1485 	else
1486 		perm |= IOMMU_FAULT_PERM_WRITE;
1487 
1488 	if (evt[1] & EVTQ_1_InD)
1489 		perm |= IOMMU_FAULT_PERM_EXEC;
1490 
1491 	if (evt[1] & EVTQ_1_PnU)
1492 		perm |= IOMMU_FAULT_PERM_PRIV;
1493 
1494 	if (evt[1] & EVTQ_1_STALL) {
1495 		flt->type = IOMMU_FAULT_PAGE_REQ;
1496 		flt->prm = (struct iommu_fault_page_request) {
1497 			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
1498 			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
1499 			.perm = perm,
1500 			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
1501 		};
1502 
1503 		if (ssid_valid) {
1504 			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1505 			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
1506 		}
1507 	} else {
1508 		flt->type = IOMMU_FAULT_DMA_UNRECOV;
1509 		flt->event = (struct iommu_fault_unrecoverable) {
1510 			.reason = reason,
1511 			.flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
1512 			.perm = perm,
1513 			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
1514 		};
1515 
1516 		if (ssid_valid) {
1517 			flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
1518 			flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
1519 		}
1520 	}
1521 
1522 	mutex_lock(&smmu->streams_mutex);
1523 	master = arm_smmu_find_master(smmu, sid);
1524 	if (!master) {
1525 		ret = -EINVAL;
1526 		goto out_unlock;
1527 	}
1528 
1529 	ret = iommu_report_device_fault(master->dev, &fault_evt);
1530 	if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
1531 		/* Nobody cared, abort the access */
1532 		struct iommu_page_response resp = {
1533 			.pasid		= flt->prm.pasid,
1534 			.grpid		= flt->prm.grpid,
1535 			.code		= IOMMU_PAGE_RESP_FAILURE,
1536 		};
1537 		arm_smmu_page_response(master->dev, &fault_evt, &resp);
1538 	}
1539 
1540 out_unlock:
1541 	mutex_unlock(&smmu->streams_mutex);
1542 	return ret;
1543 }
1544 
1545 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1546 {
1547 	int i, ret;
1548 	struct arm_smmu_device *smmu = dev;
1549 	struct arm_smmu_queue *q = &smmu->evtq.q;
1550 	struct arm_smmu_ll_queue *llq = &q->llq;
1551 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1552 				      DEFAULT_RATELIMIT_BURST);
1553 	u64 evt[EVTQ_ENT_DWORDS];
1554 
1555 	do {
1556 		while (!queue_remove_raw(q, evt)) {
1557 			u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
1558 
1559 			ret = arm_smmu_handle_evt(smmu, evt);
1560 			if (!ret || !__ratelimit(&rs))
1561 				continue;
1562 
1563 			dev_info(smmu->dev, "event 0x%02x received:\n", id);
1564 			for (i = 0; i < ARRAY_SIZE(evt); ++i)
1565 				dev_info(smmu->dev, "\t0x%016llx\n",
1566 					 (unsigned long long)evt[i]);
1567 
1568 			cond_resched();
1569 		}
1570 
1571 		/*
1572 		 * Not much we can do on overflow, so scream and pretend we're
1573 		 * trying harder.
1574 		 */
1575 		if (queue_sync_prod_in(q) == -EOVERFLOW)
1576 			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1577 	} while (!queue_empty(llq));
1578 
1579 	/* Sync our overflow flag, as we believe we're up to speed */
1580 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
1581 		    Q_IDX(llq, llq->cons);
1582 	return IRQ_HANDLED;
1583 }
1584 
1585 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1586 {
1587 	u32 sid, ssid;
1588 	u16 grpid;
1589 	bool ssv, last;
1590 
1591 	sid = FIELD_GET(PRIQ_0_SID, evt[0]);
1592 	ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
1593 	ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
1594 	last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
1595 	grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
1596 
1597 	dev_info(smmu->dev, "unexpected PRI request received:\n");
1598 	dev_info(smmu->dev,
1599 		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1600 		 sid, ssid, grpid, last ? "L" : "",
1601 		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1602 		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1603 		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1604 		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1605 		 evt[1] & PRIQ_1_ADDR_MASK);
1606 
1607 	if (last) {
1608 		struct arm_smmu_cmdq_ent cmd = {
1609 			.opcode			= CMDQ_OP_PRI_RESP,
1610 			.substream_valid	= ssv,
1611 			.pri			= {
1612 				.sid	= sid,
1613 				.ssid	= ssid,
1614 				.grpid	= grpid,
1615 				.resp	= PRI_RESP_DENY,
1616 			},
1617 		};
1618 
1619 		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1620 	}
1621 }
1622 
1623 static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1624 {
1625 	struct arm_smmu_device *smmu = dev;
1626 	struct arm_smmu_queue *q = &smmu->priq.q;
1627 	struct arm_smmu_ll_queue *llq = &q->llq;
1628 	u64 evt[PRIQ_ENT_DWORDS];
1629 
1630 	do {
1631 		while (!queue_remove_raw(q, evt))
1632 			arm_smmu_handle_ppr(smmu, evt);
1633 
1634 		if (queue_sync_prod_in(q) == -EOVERFLOW)
1635 			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1636 	} while (!queue_empty(llq));
1637 
1638 	/* Sync our overflow flag, as we believe we're up to speed */
1639 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
1640 		      Q_IDX(llq, llq->cons);
1641 	queue_sync_cons_out(q);
1642 	return IRQ_HANDLED;
1643 }
1644 
1645 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1646 
1647 static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1648 {
1649 	u32 gerror, gerrorn, active;
1650 	struct arm_smmu_device *smmu = dev;
1651 
1652 	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1653 	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1654 
1655 	active = gerror ^ gerrorn;
1656 	if (!(active & GERROR_ERR_MASK))
1657 		return IRQ_NONE; /* No errors pending */
1658 
1659 	dev_warn(smmu->dev,
1660 		 "unexpected global error reported (0x%08x), this could be serious\n",
1661 		 active);
1662 
1663 	if (active & GERROR_SFM_ERR) {
1664 		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1665 		arm_smmu_device_disable(smmu);
1666 	}
1667 
1668 	if (active & GERROR_MSI_GERROR_ABT_ERR)
1669 		dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1670 
1671 	if (active & GERROR_MSI_PRIQ_ABT_ERR)
1672 		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1673 
1674 	if (active & GERROR_MSI_EVTQ_ABT_ERR)
1675 		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1676 
1677 	if (active & GERROR_MSI_CMDQ_ABT_ERR)
1678 		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1679 
1680 	if (active & GERROR_PRIQ_ABT_ERR)
1681 		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1682 
1683 	if (active & GERROR_EVTQ_ABT_ERR)
1684 		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1685 
1686 	if (active & GERROR_CMDQ_ERR)
1687 		arm_smmu_cmdq_skip_err(smmu);
1688 
1689 	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
1690 	return IRQ_HANDLED;
1691 }
1692 
1693 static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
1694 {
1695 	struct arm_smmu_device *smmu = dev;
1696 
1697 	arm_smmu_evtq_thread(irq, dev);
1698 	if (smmu->features & ARM_SMMU_FEAT_PRI)
1699 		arm_smmu_priq_thread(irq, dev);
1700 
1701 	return IRQ_HANDLED;
1702 }
1703 
1704 static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
1705 {
1706 	arm_smmu_gerror_handler(irq, dev);
1707 	return IRQ_WAKE_THREAD;
1708 }
1709 
1710 static void
1711 arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
1712 			struct arm_smmu_cmdq_ent *cmd)
1713 {
1714 	size_t log2_span;
1715 	size_t span_mask;
1716 	/* ATC invalidates are always on 4096-bytes pages */
1717 	size_t inval_grain_shift = 12;
1718 	unsigned long page_start, page_end;
1719 
1720 	/*
1721 	 * ATS and PASID:
1722 	 *
1723 	 * If substream_valid is clear, the PCIe TLP is sent without a PASID
1724 	 * prefix. In that case all ATC entries within the address range are
1725 	 * invalidated, including those that were requested with a PASID! There
1726 	 * is no way to invalidate only entries without PASID.
1727 	 *
1728 	 * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID
1729 	 * traffic), translation requests without PASID create ATC entries
1730 	 * without PASID, which must be invalidated with substream_valid clear.
1731 	 * This has the unpleasant side-effect of invalidating all PASID-tagged
1732 	 * ATC entries within the address range.
1733 	 */
1734 	*cmd = (struct arm_smmu_cmdq_ent) {
1735 		.opcode			= CMDQ_OP_ATC_INV,
1736 		.substream_valid	= !!ssid,
1737 		.atc.ssid		= ssid,
1738 	};
1739 
1740 	if (!size) {
1741 		cmd->atc.size = ATC_INV_SIZE_ALL;
1742 		return;
1743 	}
1744 
1745 	page_start	= iova >> inval_grain_shift;
1746 	page_end	= (iova + size - 1) >> inval_grain_shift;
1747 
1748 	/*
1749 	 * In an ATS Invalidate Request, the address must be aligned on the
1750 	 * range size, which must be a power of two number of page sizes. We
1751 	 * thus have to choose between grossly over-invalidating the region, or
1752 	 * splitting the invalidation into multiple commands. For simplicity
1753 	 * we'll go with the first solution, but should refine it in the future
1754 	 * if multiple commands are shown to be more efficient.
1755 	 *
1756 	 * Find the smallest power of two that covers the range. The most
1757 	 * significant differing bit between the start and end addresses,
1758 	 * fls(start ^ end), indicates the required span. For example:
1759 	 *
1760 	 * We want to invalidate pages [8; 11]. This is already the ideal range:
1761 	 *		x = 0b1000 ^ 0b1011 = 0b11
1762 	 *		span = 1 << fls(x) = 4
1763 	 *
1764 	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
1765 	 *		x = 0b0111 ^ 0b1010 = 0b1101
1766 	 *		span = 1 << fls(x) = 16
1767 	 */
1768 	log2_span	= fls_long(page_start ^ page_end);
1769 	span_mask	= (1ULL << log2_span) - 1;
1770 
1771 	page_start	&= ~span_mask;
1772 
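	/*
	 * Continuing the [7; 10] example above: log2_span is 4, so page_start
	 * is rounded down to 0 and the resulting command describes a 16-page
	 * span starting at address 0.
	 */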
1773 	cmd->atc.addr	= page_start << inval_grain_shift;
1774 	cmd->atc.size	= log2_span;
1775 }
1776 
1777 static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
1778 {
1779 	int i;
1780 	struct arm_smmu_cmdq_ent cmd;
1781 	struct arm_smmu_cmdq_batch cmds;
1782 
1783 	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
1784 
1785 	cmds.num = 0;
1786 	for (i = 0; i < master->num_streams; i++) {
1787 		cmd.atc.sid = master->streams[i].id;
1788 		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
1789 	}
1790 
1791 	return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
1792 }
1793 
1794 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
1795 			    unsigned long iova, size_t size)
1796 {
1797 	int i;
1798 	unsigned long flags;
1799 	struct arm_smmu_cmdq_ent cmd;
1800 	struct arm_smmu_master *master;
1801 	struct arm_smmu_cmdq_batch cmds;
1802 
1803 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1804 		return 0;
1805 
1806 	/*
1807 	 * Ensure that we've completed prior invalidation of the main TLBs
1808 	 * before we read 'nr_ats_masters' in case of a concurrent call to
1809 	 * arm_smmu_enable_ats():
1810 	 *
1811 	 *	// unmap()			// arm_smmu_enable_ats()
1812 	 *	TLBI+SYNC			atomic_inc(&nr_ats_masters);
1813 	 *	smp_mb();			[...]
1814 	 *	atomic_read(&nr_ats_masters);	pci_enable_ats() // writel()
1815 	 *
1816 	 * Ensures that we always see the incremented 'nr_ats_masters' count if
1817 	 * ATS was enabled at the PCI device before completion of the TLBI.
1818 	 */
1819 	smp_mb();
1820 	if (!atomic_read(&smmu_domain->nr_ats_masters))
1821 		return 0;
1822 
1823 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
1824 
1825 	cmds.num = 0;
1826 
1827 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1828 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
1829 		if (!master->ats_enabled)
1830 			continue;
1831 
1832 		for (i = 0; i < master->num_streams; i++) {
1833 			cmd.atc.sid = master->streams[i].id;
1834 			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
1835 		}
1836 	}
1837 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1838 
1839 	return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
1840 }
1841 
1842 /* IO_PGTABLE API */
1843 static void arm_smmu_tlb_inv_context(void *cookie)
1844 {
1845 	struct arm_smmu_domain *smmu_domain = cookie;
1846 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1847 	struct arm_smmu_cmdq_ent cmd;
1848 
1849 	/*
1850 	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
1851 	 * PTEs previously cleared by unmaps on the current CPU not yet visible
1852 	 * to the SMMU. We are relying on the dma_wmb() implicit during cmd
1853 	 * insertion to guarantee those are observed before the TLBI. Do be
1854 	 * careful, 007.
1855 	 */
1856 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1857 		arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
1858 	} else {
1859 		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
1860 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
1861 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
1862 	}
1863 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
1864 }
1865 
1866 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
1867 				     unsigned long iova, size_t size,
1868 				     size_t granule,
1869 				     struct arm_smmu_domain *smmu_domain)
1870 {
1871 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1872 	unsigned long end = iova + size, num_pages = 0, tg = 0;
1873 	size_t inv_range = granule;
1874 	struct arm_smmu_cmdq_batch cmds;
1875 
1876 	if (!size)
1877 		return;
1878 
1879 	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1880 		/* Get the leaf page size */
1881 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
1882 
1883 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
1884 		cmd->tlbi.tg = (tg - 10) / 2;
1885 
1886 		/* Determine what level the granule is at */
1887 		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
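		/*
		 * For example, with 4K leaf pages (tg == 12) and a 2MB
		 * granule (ilog2(granule) == 21) this works out as
		 * ttl == 4 - (18 / 9) == 2, i.e. a level-2 block.
		 */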
1888 
1889 		num_pages = size >> tg;
1890 	}
1891 
1892 	cmds.num = 0;
1893 
1894 	while (iova < end) {
1895 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1896 			/*
1897 			 * On each iteration of the loop, the range is 5 bits
1898 			 * worth of the aligned size remaining.
1899 			 * The range in pages is:
1900 			 *
1901 			 * range = (num_pages & (0x1f << __ffs(num_pages)))
1902 			 */
1903 			unsigned long scale, num;
1904 
1905 			/* Determine the power of 2 multiple number of pages */
1906 			scale = __ffs(num_pages);
1907 			cmd->tlbi.scale = scale;
1908 
1909 			/* Determine how many chunks of 2^scale size we have */
1910 			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
1911 			cmd->tlbi.num = num - 1;
1912 
1913 			/* range is num * 2^scale * pgsize */
1914 			inv_range = num << (scale + tg);
1915 
1916 			/* Clear out the lower order bits for the next iteration */
1917 			num_pages -= num << scale;
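
			/*
			 * Worked example (4K pages, so tg == 12): with
			 * num_pages == 48, scale == 4 and num == 3, so this
			 * command covers 3 << 16 bytes (192KB, i.e. all 48
			 * pages) and num_pages drops to zero.
			 */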
1918 		}
1919 
1920 		cmd->tlbi.addr = iova;
1921 		arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
1922 		iova += inv_range;
1923 	}
1924 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
1925 }
1926 
1927 static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
1928 					  size_t granule, bool leaf,
1929 					  struct arm_smmu_domain *smmu_domain)
1930 {
1931 	struct arm_smmu_cmdq_ent cmd = {
1932 		.tlbi = {
1933 			.leaf	= leaf,
1934 		},
1935 	};
1936 
1937 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1938 		cmd.opcode	= smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1939 				  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
1940 		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
1941 	} else {
1942 		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
1943 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
1944 	}
1945 	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
1946 
1947 	/*
1948 	 * Unfortunately, this can't be leaf-only since we may have
1949 	 * zapped an entire table.
1950 	 */
1951 	arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
1952 }
1953 
1954 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
1955 				 size_t granule, bool leaf,
1956 				 struct arm_smmu_domain *smmu_domain)
1957 {
1958 	struct arm_smmu_cmdq_ent cmd = {
1959 		.opcode	= smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1960 			  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA,
1961 		.tlbi = {
1962 			.asid	= asid,
1963 			.leaf	= leaf,
1964 		},
1965 	};
1966 
1967 	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
1968 }
1969 
1970 static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
1971 					 unsigned long iova, size_t granule,
1972 					 void *cookie)
1973 {
1974 	struct arm_smmu_domain *smmu_domain = cookie;
1975 	struct iommu_domain *domain = &smmu_domain->domain;
1976 
1977 	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
1978 }
1979 
1980 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
1981 				  size_t granule, void *cookie)
1982 {
1983 	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
1984 }
1985 
1986 static const struct iommu_flush_ops arm_smmu_flush_ops = {
1987 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
1988 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
1989 	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
1990 };
1991 
1992 /* IOMMU API */
1993 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
1994 {
1995 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
1996 
1997 	switch (cap) {
1998 	case IOMMU_CAP_CACHE_COHERENCY:
1999 		/* Assume that a coherent TCU implies coherent TBUs */
2000 		return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
2001 	case IOMMU_CAP_NOEXEC:
2002 		return true;
2003 	default:
2004 		return false;
2005 	}
2006 }
2007 
2008 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
2009 {
2010 	struct arm_smmu_domain *smmu_domain;
2011 
2012 	if (type == IOMMU_DOMAIN_SVA)
2013 		return arm_smmu_sva_domain_alloc();
2014 
2015 	if (type != IOMMU_DOMAIN_UNMANAGED &&
2016 	    type != IOMMU_DOMAIN_DMA &&
2017 	    type != IOMMU_DOMAIN_DMA_FQ &&
2018 	    type != IOMMU_DOMAIN_IDENTITY)
2019 		return NULL;
2020 
2021 	/*
2022 	 * Allocate the domain and initialise some of its data structures.
2023 	 * We can't really do anything meaningful until we've added a
2024 	 * master.
2025 	 */
2026 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2027 	if (!smmu_domain)
2028 		return NULL;
2029 
2030 	mutex_init(&smmu_domain->init_mutex);
2031 	INIT_LIST_HEAD(&smmu_domain->devices);
2032 	spin_lock_init(&smmu_domain->devices_lock);
2033 	INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
2034 
2035 	return &smmu_domain->domain;
2036 }
2037 
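/*
 * Claim a free index from a bitmap of 2^span bits. test_and_set_bit() makes
 * the claim atomic, so if another CPU races us to the same bit we simply go
 * round again and look for the next free one.
 */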
2038 static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
2039 {
2040 	int idx, size = 1 << span;
2041 
2042 	do {
2043 		idx = find_first_zero_bit(map, size);
2044 		if (idx == size)
2045 			return -ENOSPC;
2046 	} while (test_and_set_bit(idx, map));
2047 
2048 	return idx;
2049 }
2050 
2051 static void arm_smmu_bitmap_free(unsigned long *map, int idx)
2052 {
2053 	clear_bit(idx, map);
2054 }
2055 
2056 static void arm_smmu_domain_free(struct iommu_domain *domain)
2057 {
2058 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2059 	struct arm_smmu_device *smmu = smmu_domain->smmu;
2060 
2061 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
2062 
2063 	/* Free the CD and ASID, if we allocated them */
2064 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
2065 		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
2066 
2067 		/* Prevent SVA from touching the CD while we're freeing it */
2068 		mutex_lock(&arm_smmu_asid_lock);
2069 		if (cfg->cdcfg.cdtab)
2070 			arm_smmu_free_cd_tables(smmu_domain);
2071 		arm_smmu_free_asid(&cfg->cd);
2072 		mutex_unlock(&arm_smmu_asid_lock);
2073 	} else {
2074 		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
2075 		if (cfg->vmid)
2076 			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
2077 	}
2078 
2079 	kfree(smmu_domain);
2080 }
2081 
2082 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
2083 				       struct arm_smmu_master *master,
2084 				       struct io_pgtable_cfg *pgtbl_cfg)
2085 {
2086 	int ret;
2087 	u32 asid;
2088 	struct arm_smmu_device *smmu = smmu_domain->smmu;
2089 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
2090 	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
2091 
2092 	refcount_set(&cfg->cd.refs, 1);
2093 
2094 	/* Prevent SVA from modifying the ASID until it is written to the CD */
2095 	mutex_lock(&arm_smmu_asid_lock);
2096 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
2097 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
2098 	if (ret)
2099 		goto out_unlock;
2100 
2101 	cfg->s1cdmax = master->ssid_bits;
2102 
2103 	smmu_domain->stall_enabled = master->stall_enabled;
2104 
2105 	ret = arm_smmu_alloc_cd_tables(smmu_domain);
2106 	if (ret)
2107 		goto out_free_asid;
2108 
2109 	cfg->cd.asid	= (u16)asid;
2110 	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
2111 	cfg->cd.tcr	= FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
2112 			  FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
2113 			  FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
2114 			  FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
2115 			  FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
2116 			  FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
2117 			  CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
2118 	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair;
2119 
2120 	/*
2121 	 * Note that this will end up calling arm_smmu_sync_cd() before
2122 	 * the master has been added to the devices list for this domain.
2123 	 * This isn't an issue because the STE hasn't been installed yet.
2124 	 */
2125 	ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
2126 	if (ret)
2127 		goto out_free_cd_tables;
2128 
2129 	mutex_unlock(&arm_smmu_asid_lock);
2130 	return 0;
2131 
2132 out_free_cd_tables:
2133 	arm_smmu_free_cd_tables(smmu_domain);
2134 out_free_asid:
2135 	arm_smmu_free_asid(&cfg->cd);
2136 out_unlock:
2137 	mutex_unlock(&arm_smmu_asid_lock);
2138 	return ret;
2139 }
2140 
2141 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
2142 				       struct arm_smmu_master *master,
2143 				       struct io_pgtable_cfg *pgtbl_cfg)
2144 {
2145 	int vmid;
2146 	struct arm_smmu_device *smmu = smmu_domain->smmu;
2147 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
2148 	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
2149 
2150 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
2151 	if (vmid < 0)
2152 		return vmid;
2153 
2154 	vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
2155 	cfg->vmid	= (u16)vmid;
2156 	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
2157 	cfg->vtcr	= FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
2158 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
2159 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
2160 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
2161 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
2162 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
2163 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
2164 	return 0;
2165 }
2166 
2167 static int arm_smmu_domain_finalise(struct iommu_domain *domain,
2168 				    struct arm_smmu_master *master)
2169 {
2170 	int ret;
2171 	unsigned long ias, oas;
2172 	enum io_pgtable_fmt fmt;
2173 	struct io_pgtable_cfg pgtbl_cfg;
2174 	struct io_pgtable_ops *pgtbl_ops;
2175 	int (*finalise_stage_fn)(struct arm_smmu_domain *,
2176 				 struct arm_smmu_master *,
2177 				 struct io_pgtable_cfg *);
2178 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2179 	struct arm_smmu_device *smmu = smmu_domain->smmu;
2180 
2181 	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
2182 		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
2183 		return 0;
2184 	}
2185 
2186 	/* Restrict the stage to what we can actually support */
2187 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
2188 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
2189 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
2190 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2191 
2192 	switch (smmu_domain->stage) {
2193 	case ARM_SMMU_DOMAIN_S1:
2194 		ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
2195 		ias = min_t(unsigned long, ias, VA_BITS);
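		/*
		 * In effect, the stage-1 output (an IPA or PA) must fit
		 * within what the SMMU itself can consume as an input
		 * address, which is why the output size below is taken from
		 * smmu->ias rather than smmu->oas.
		 */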
2196 		oas = smmu->ias;
2197 		fmt = ARM_64_LPAE_S1;
2198 		finalise_stage_fn = arm_smmu_domain_finalise_s1;
2199 		break;
2200 	case ARM_SMMU_DOMAIN_NESTED:
2201 	case ARM_SMMU_DOMAIN_S2:
2202 		ias = smmu->ias;
2203 		oas = smmu->oas;
2204 		fmt = ARM_64_LPAE_S2;
2205 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
2206 		break;
2207 	default:
2208 		return -EINVAL;
2209 	}
2210 
2211 	pgtbl_cfg = (struct io_pgtable_cfg) {
2212 		.pgsize_bitmap	= smmu->pgsize_bitmap,
2213 		.ias		= ias,
2214 		.oas		= oas,
2215 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
2216 		.tlb		= &arm_smmu_flush_ops,
2217 		.iommu_dev	= smmu->dev,
2218 	};
2219 
2220 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
2221 	if (!pgtbl_ops)
2222 		return -ENOMEM;
2223 
2224 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
2225 	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
2226 	domain->geometry.force_aperture = true;
2227 
2228 	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
2229 	if (ret < 0) {
2230 		free_io_pgtable_ops(pgtbl_ops);
2231 		return ret;
2232 	}
2233 
2234 	smmu_domain->pgtbl_ops = pgtbl_ops;
2235 	return 0;
2236 }
2237 
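/*
 * Illustrative note, assuming the driver's usual STRTAB_SPLIT of 8: for
 * SID 0x1234 in a two-level table, the upper bits (0x12) select the L1
 * descriptor and the low 8 bits (0x34) index into that descriptor's L2
 * array of STEs. STRTAB_L1_DESC_DWORDS is currently 1, so the
 * multiplication below leaves the L1 index unchanged.
 */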
2238 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
2239 {
2240 	__le64 *step;
2241 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2242 
2243 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2244 		struct arm_smmu_strtab_l1_desc *l1_desc;
2245 		int idx;
2246 
2247 		/* Two-level walk */
2248 		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
2249 		l1_desc = &cfg->l1_desc[idx];
2250 		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
2251 		step = &l1_desc->l2ptr[idx];
2252 	} else {
2253 		/* Simple linear lookup */
2254 		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
2255 	}
2256 
2257 	return step;
2258 }
2259 
2260 static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
2261 {
2262 	int i, j;
2263 	struct arm_smmu_device *smmu = master->smmu;
2264 
2265 	for (i = 0; i < master->num_streams; ++i) {
2266 		u32 sid = master->streams[i].id;
2267 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
2268 
2269 		/* Bridged PCI devices may end up with duplicated IDs */
2270 		for (j = 0; j < i; j++)
2271 			if (master->streams[j].id == sid)
2272 				break;
2273 		if (j < i)
2274 			continue;
2275 
2276 		arm_smmu_write_strtab_ent(master, sid, step);
2277 	}
2278 }
2279 
2280 static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
2281 {
2282 	struct device *dev = master->dev;
2283 	struct arm_smmu_device *smmu = master->smmu;
2284 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2285 
2286 	if (!(smmu->features & ARM_SMMU_FEAT_ATS))
2287 		return false;
2288 
2289 	if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
2290 		return false;
2291 
2292 	return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
2293 }
2294 
2295 static void arm_smmu_enable_ats(struct arm_smmu_master *master)
2296 {
2297 	size_t stu;
2298 	struct pci_dev *pdev;
2299 	struct arm_smmu_device *smmu = master->smmu;
2300 	struct arm_smmu_domain *smmu_domain = master->domain;
2301 
2302 	/* Don't enable ATS at the endpoint if it's not enabled in the STE */
2303 	if (!master->ats_enabled)
2304 		return;
2305 
2306 	/* Smallest Translation Unit: log2 of the smallest supported granule */
2307 	stu = __ffs(smmu->pgsize_bitmap);
2308 	pdev = to_pci_dev(master->dev);
2309 
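	/*
	 * This ordering pairs with the smp_mb() in arm_smmu_atc_inv_domain():
	 * bump the counter and clean the ATC before ATS can be enabled at the
	 * endpoint, so a concurrent unmap either sees nr_ats_masters or runs
	 * before any translation could have been cached.
	 */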
2310 	atomic_inc(&smmu_domain->nr_ats_masters);
2311 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
2312 	if (pci_enable_ats(pdev, stu))
2313 		dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
2314 }
2315 
2316 static void arm_smmu_disable_ats(struct arm_smmu_master *master)
2317 {
2318 	struct arm_smmu_domain *smmu_domain = master->domain;
2319 
2320 	if (!master->ats_enabled)
2321 		return;
2322 
2323 	pci_disable_ats(to_pci_dev(master->dev));
2324 	/*
2325 	 * Ensure ATS is disabled at the endpoint before we issue the
2326 	 * ATC invalidation via the SMMU.
2327 	 */
2328 	wmb();
2329 	arm_smmu_atc_inv_master(master);
2330 	atomic_dec(&smmu_domain->nr_ats_masters);
2331 }
2332 
2333 static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
2334 {
2335 	int ret;
2336 	int features;
2337 	int num_pasids;
2338 	struct pci_dev *pdev;
2339 
2340 	if (!dev_is_pci(master->dev))
2341 		return -ENODEV;
2342 
2343 	pdev = to_pci_dev(master->dev);
2344 
2345 	features = pci_pasid_features(pdev);
2346 	if (features < 0)
2347 		return features;
2348 
2349 	num_pasids = pci_max_pasids(pdev);
2350 	if (num_pasids <= 0)
2351 		return num_pasids;
2352 
2353 	ret = pci_enable_pasid(pdev, features);
2354 	if (ret) {
2355 		dev_err(&pdev->dev, "Failed to enable PASID\n");
2356 		return ret;
2357 	}
2358 
2359 	master->ssid_bits = min_t(u8, ilog2(num_pasids),
2360 				  master->smmu->ssid_bits);
2361 	return 0;
2362 }
2363 
2364 static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
2365 {
2366 	struct pci_dev *pdev;
2367 
2368 	if (!dev_is_pci(master->dev))
2369 		return;
2370 
2371 	pdev = to_pci_dev(master->dev);
2372 
2373 	if (!pdev->pasid_enabled)
2374 		return;
2375 
2376 	master->ssid_bits = 0;
2377 	pci_disable_pasid(pdev);
2378 }
2379 
2380 static void arm_smmu_detach_dev(struct arm_smmu_master *master)
2381 {
2382 	unsigned long flags;
2383 	struct arm_smmu_domain *smmu_domain = master->domain;
2384 
2385 	if (!smmu_domain)
2386 		return;
2387 
2388 	arm_smmu_disable_ats(master);
2389 
2390 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2391 	list_del(&master->domain_head);
2392 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
2393 
2394 	master->domain = NULL;
2395 	master->ats_enabled = false;
2396 	arm_smmu_install_ste_for_dev(master);
2397 }
2398 
2399 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2400 {
2401 	int ret = 0;
2402 	unsigned long flags;
2403 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2404 	struct arm_smmu_device *smmu;
2405 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2406 	struct arm_smmu_master *master;
2407 
2408 	if (!fwspec)
2409 		return -ENOENT;
2410 
2411 	master = dev_iommu_priv_get(dev);
2412 	smmu = master->smmu;
2413 
2414 	/*
2415 	 * Checking that SVA is disabled ensures that this device isn't bound to
2416 	 * any mm, and can be safely detached from its old domain. Bonds cannot
2417 	 * be removed concurrently since we're holding the group mutex.
2418 	 */
2419 	if (arm_smmu_master_sva_enabled(master)) {
2420 		dev_err(dev, "cannot attach - SVA enabled\n");
2421 		return -EBUSY;
2422 	}
2423 
2424 	arm_smmu_detach_dev(master);
2425 
2426 	mutex_lock(&smmu_domain->init_mutex);
2427 
2428 	if (!smmu_domain->smmu) {
2429 		smmu_domain->smmu = smmu;
2430 		ret = arm_smmu_domain_finalise(domain, master);
2431 		if (ret) {
2432 			smmu_domain->smmu = NULL;
2433 			goto out_unlock;
2434 		}
2435 	} else if (smmu_domain->smmu != smmu) {
2436 		ret = -EINVAL;
2437 		goto out_unlock;
2438 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
2439 		   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
2440 		ret = -EINVAL;
2441 		goto out_unlock;
2442 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
2443 		   smmu_domain->stall_enabled != master->stall_enabled) {
2444 		ret = -EINVAL;
2445 		goto out_unlock;
2446 	}
2447 
2448 	master->domain = smmu_domain;
2449 
2450 	if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
2451 		master->ats_enabled = arm_smmu_ats_supported(master);
2452 
2453 	arm_smmu_install_ste_for_dev(master);
2454 
2455 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2456 	list_add(&master->domain_head, &smmu_domain->devices);
2457 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
2458 
2459 	arm_smmu_enable_ats(master);
2460 
2461 out_unlock:
2462 	mutex_unlock(&smmu_domain->init_mutex);
2463 	return ret;
2464 }
2465 
2466 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
2467 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
2468 			      int prot, gfp_t gfp, size_t *mapped)
2469 {
2470 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
2471 
2472 	if (!ops)
2473 		return -ENODEV;
2474 
2475 	return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
2476 }
2477 
2478 static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
2479 				   size_t pgsize, size_t pgcount,
2480 				   struct iommu_iotlb_gather *gather)
2481 {
2482 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2483 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2484 
2485 	if (!ops)
2486 		return 0;
2487 
2488 	return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
2489 }
2490 
2491 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
2492 {
2493 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2494 
2495 	if (smmu_domain->smmu)
2496 		arm_smmu_tlb_inv_context(smmu_domain);
2497 }
2498 
2499 static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
2500 				struct iommu_iotlb_gather *gather)
2501 {
2502 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2503 
2504 	if (!gather->pgsize)
2505 		return;
2506 
2507 	arm_smmu_tlb_inv_range_domain(gather->start,
2508 				      gather->end - gather->start + 1,
2509 				      gather->pgsize, true, smmu_domain);
2510 }
2511 
2512 static phys_addr_t
2513 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2514 {
2515 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
2516 
2517 	if (!ops)
2518 		return 0;
2519 
2520 	return ops->iova_to_phys(ops, iova);
2521 }
2522 
2523 static struct platform_driver arm_smmu_driver;
2524 
2525 static
2526 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
2527 {
2528 	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
2529 							  fwnode);
2530 	put_device(dev);
2531 	return dev ? dev_get_drvdata(dev) : NULL;
2532 }
2533 
2534 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
2535 {
2536 	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
2537 
2538 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2539 		limit *= 1UL << STRTAB_SPLIT;
2540 
2541 	return sid < limit;
2542 }
2543 
2544 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
2545 {
2546 	/* Check the SIDs are in range of the SMMU and our stream table */
2547 	if (!arm_smmu_sid_in_range(smmu, sid))
2548 		return -ERANGE;
2549 
2550 	/* Ensure l2 strtab is initialised */
2551 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2552 		return arm_smmu_init_l2_strtab(smmu, sid);
2553 
2554 	return 0;
2555 }
2556 
2557 static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
2558 				  struct arm_smmu_master *master)
2559 {
2560 	int i;
2561 	int ret = 0;
2562 	struct arm_smmu_stream *new_stream, *cur_stream;
2563 	struct rb_node **new_node, *parent_node = NULL;
2564 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
2565 
2566 	master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
2567 				  GFP_KERNEL);
2568 	if (!master->streams)
2569 		return -ENOMEM;
2570 	master->num_streams = fwspec->num_ids;
2571 
2572 	mutex_lock(&smmu->streams_mutex);
2573 	for (i = 0; i < fwspec->num_ids; i++) {
2574 		u32 sid = fwspec->ids[i];
2575 
2576 		new_stream = &master->streams[i];
2577 		new_stream->id = sid;
2578 		new_stream->master = master;
2579 
2580 		ret = arm_smmu_init_sid_strtab(smmu, sid);
2581 		if (ret)
2582 			break;
2583 
2584 		/* Insert into SID tree */
2585 		new_node = &(smmu->streams.rb_node);
2586 		while (*new_node) {
2587 			cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
2588 					      node);
2589 			parent_node = *new_node;
2590 			if (cur_stream->id > new_stream->id) {
2591 				new_node = &((*new_node)->rb_left);
2592 			} else if (cur_stream->id < new_stream->id) {
2593 				new_node = &((*new_node)->rb_right);
2594 			} else {
2595 				dev_warn(master->dev,
2596 					 "stream %u already in tree\n",
2597 					 cur_stream->id);
2598 				ret = -EINVAL;
2599 				break;
2600 			}
2601 		}
2602 		if (ret)
2603 			break;
2604 
2605 		rb_link_node(&new_stream->node, parent_node, new_node);
2606 		rb_insert_color(&new_stream->node, &smmu->streams);
2607 	}
2608 
2609 	if (ret) {
2610 		for (i--; i >= 0; i--)
2611 			rb_erase(&master->streams[i].node, &smmu->streams);
2612 		kfree(master->streams);
2613 	}
2614 	mutex_unlock(&smmu->streams_mutex);
2615 
2616 	return ret;
2617 }
2618 
2619 static void arm_smmu_remove_master(struct arm_smmu_master *master)
2620 {
2621 	int i;
2622 	struct arm_smmu_device *smmu = master->smmu;
2623 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
2624 
2625 	if (!smmu || !master->streams)
2626 		return;
2627 
2628 	mutex_lock(&smmu->streams_mutex);
2629 	for (i = 0; i < fwspec->num_ids; i++)
2630 		rb_erase(&master->streams[i].node, &smmu->streams);
2631 	mutex_unlock(&smmu->streams_mutex);
2632 
2633 	kfree(master->streams);
2634 }
2635 
2636 static struct iommu_ops arm_smmu_ops;
2637 
2638 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
2639 {
2640 	int ret;
2641 	struct arm_smmu_device *smmu;
2642 	struct arm_smmu_master *master;
2643 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2644 
2645 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
2646 		return ERR_PTR(-ENODEV);
2647 
2648 	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
2649 		return ERR_PTR(-EBUSY);
2650 
2651 	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
2652 	if (!smmu)
2653 		return ERR_PTR(-ENODEV);
2654 
2655 	master = kzalloc(sizeof(*master), GFP_KERNEL);
2656 	if (!master)
2657 		return ERR_PTR(-ENOMEM);
2658 
2659 	master->dev = dev;
2660 	master->smmu = smmu;
2661 	INIT_LIST_HEAD(&master->bonds);
2662 	dev_iommu_priv_set(dev, master);
2663 
2664 	ret = arm_smmu_insert_master(smmu, master);
2665 	if (ret)
2666 		goto err_free_master;
2667 
2668 	device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
2669 	master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
2670 
2671 	/*
2672 	 * Note that PASID must be enabled before, and disabled after ATS:
2673 	 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
2674 	 *
2675 	 *   Behavior is undefined if this bit is Set and the value of the PASID
2676 	 *   Enable, Execute Requested Enable, or Privileged Mode Requested bits
2677 	 *   are changed.
2678 	 */
2679 	arm_smmu_enable_pasid(master);
2680 
2681 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
2682 		master->ssid_bits = min_t(u8, master->ssid_bits,
2683 					  CTXDESC_LINEAR_CDMAX);
2684 
2685 	if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
2686 	     device_property_read_bool(dev, "dma-can-stall")) ||
2687 	    smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
2688 		master->stall_enabled = true;
2689 
2690 	return &smmu->iommu;
2691 
2692 err_free_master:
2693 	kfree(master);
2694 	dev_iommu_priv_set(dev, NULL);
2695 	return ERR_PTR(ret);
2696 }
2697 
2698 static void arm_smmu_release_device(struct device *dev)
2699 {
2700 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2701 
2702 	if (WARN_ON(arm_smmu_master_sva_enabled(master)))
2703 		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
2704 	arm_smmu_detach_dev(master);
2705 	arm_smmu_disable_pasid(master);
2706 	arm_smmu_remove_master(master);
2707 	kfree(master);
2708 }
2709 
2710 static struct iommu_group *arm_smmu_device_group(struct device *dev)
2711 {
2712 	struct iommu_group *group;
2713 
2714 	/*
2715 	 * We don't support devices sharing stream IDs other than PCI RID
2716 	 * aliases, since the necessary ID-to-device lookup becomes rather
2717 	 * impractical given a potential sparse 32-bit stream ID space.
2718 	 */
2719 	if (dev_is_pci(dev))
2720 		group = pci_device_group(dev);
2721 	else
2722 		group = generic_device_group(dev);
2723 
2724 	return group;
2725 }
2726 
2727 static int arm_smmu_enable_nesting(struct iommu_domain *domain)
2728 {
2729 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2730 	int ret = 0;
2731 
2732 	mutex_lock(&smmu_domain->init_mutex);
2733 	if (smmu_domain->smmu)
2734 		ret = -EPERM;
2735 	else
2736 		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2737 	mutex_unlock(&smmu_domain->init_mutex);
2738 
2739 	return ret;
2740 }
2741 
2742 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2743 {
2744 	return iommu_fwspec_add_ids(dev, args->args, 1);
2745 }
2746 
2747 static void arm_smmu_get_resv_regions(struct device *dev,
2748 				      struct list_head *head)
2749 {
2750 	struct iommu_resv_region *region;
2751 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
2752 
2753 	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
2754 					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
2755 	if (!region)
2756 		return;
2757 
2758 	list_add_tail(&region->list, head);
2759 
2760 	iommu_dma_get_resv_regions(dev, head);
2761 }
2762 
2763 static int arm_smmu_dev_enable_feature(struct device *dev,
2764 				       enum iommu_dev_features feat)
2765 {
2766 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2767 
2768 	if (!master)
2769 		return -ENODEV;
2770 
2771 	switch (feat) {
2772 	case IOMMU_DEV_FEAT_IOPF:
2773 		if (!arm_smmu_master_iopf_supported(master))
2774 			return -EINVAL;
2775 		if (master->iopf_enabled)
2776 			return -EBUSY;
2777 		master->iopf_enabled = true;
2778 		return 0;
2779 	case IOMMU_DEV_FEAT_SVA:
2780 		if (!arm_smmu_master_sva_supported(master))
2781 			return -EINVAL;
2782 		if (arm_smmu_master_sva_enabled(master))
2783 			return -EBUSY;
2784 		return arm_smmu_master_enable_sva(master);
2785 	default:
2786 		return -EINVAL;
2787 	}
2788 }
2789 
2790 static int arm_smmu_dev_disable_feature(struct device *dev,
2791 					enum iommu_dev_features feat)
2792 {
2793 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2794 
2795 	if (!master)
2796 		return -EINVAL;
2797 
2798 	switch (feat) {
2799 	case IOMMU_DEV_FEAT_IOPF:
2800 		if (!master->iopf_enabled)
2801 			return -EINVAL;
2802 		if (master->sva_enabled)
2803 			return -EBUSY;
2804 		master->iopf_enabled = false;
2805 		return 0;
2806 	case IOMMU_DEV_FEAT_SVA:
2807 		if (!arm_smmu_master_sva_enabled(master))
2808 			return -EINVAL;
2809 		return arm_smmu_master_disable_sva(master);
2810 	default:
2811 		return -EINVAL;
2812 	}
2813 }
2814 
2815 /*
2816  * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
2817  * PCIe link and save the data to memory by DMA. The hardware is restricted to
2818  * use identity mapping only.
2819  */
2820 #define IS_HISI_PTT_DEVICE(pdev)	((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
2821 					 (pdev)->device == 0xa12e)
2822 
2823 static int arm_smmu_def_domain_type(struct device *dev)
2824 {
2825 	if (dev_is_pci(dev)) {
2826 		struct pci_dev *pdev = to_pci_dev(dev);
2827 
2828 		if (IS_HISI_PTT_DEVICE(pdev))
2829 			return IOMMU_DOMAIN_IDENTITY;
2830 	}
2831 
2832 	return 0;
2833 }
2834 
2835 static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
2836 {
2837 	struct iommu_domain *domain;
2838 
2839 	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
2840 	if (WARN_ON(IS_ERR(domain)) || !domain)
2841 		return;
2842 
2843 	arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
2844 }
2845 
2846 static struct iommu_ops arm_smmu_ops = {
2847 	.capable		= arm_smmu_capable,
2848 	.domain_alloc		= arm_smmu_domain_alloc,
2849 	.probe_device		= arm_smmu_probe_device,
2850 	.release_device		= arm_smmu_release_device,
2851 	.device_group		= arm_smmu_device_group,
2852 	.of_xlate		= arm_smmu_of_xlate,
2853 	.get_resv_regions	= arm_smmu_get_resv_regions,
2854 	.remove_dev_pasid	= arm_smmu_remove_dev_pasid,
2855 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
2856 	.dev_disable_feat	= arm_smmu_dev_disable_feature,
2857 	.page_response		= arm_smmu_page_response,
2858 	.def_domain_type	= arm_smmu_def_domain_type,
2859 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
2860 	.owner			= THIS_MODULE,
2861 	.default_domain_ops = &(const struct iommu_domain_ops) {
2862 		.attach_dev		= arm_smmu_attach_dev,
2863 		.map_pages		= arm_smmu_map_pages,
2864 		.unmap_pages		= arm_smmu_unmap_pages,
2865 		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
2866 		.iotlb_sync		= arm_smmu_iotlb_sync,
2867 		.iova_to_phys		= arm_smmu_iova_to_phys,
2868 		.enable_nesting		= arm_smmu_enable_nesting,
2869 		.free			= arm_smmu_domain_free,
2870 	}
2871 };
2872 
2873 /* Probing and initialisation functions */
2874 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
2875 				   struct arm_smmu_queue *q,
2876 				   void __iomem *page,
2877 				   unsigned long prod_off,
2878 				   unsigned long cons_off,
2879 				   size_t dwords, const char *name)
2880 {
2881 	size_t qsz;
2882 
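	/*
	 * Try the full-size allocation first; if that fails, keep halving the
	 * number of entries and retrying, giving up only once an allocation
	 * smaller than a page also fails. As a rough illustration, a
	 * 2^19-entry queue of 2-dword entries would ask for 8MB of coherent
	 * memory on the first attempt.
	 */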
2883 	do {
2884 		qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
2885 		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
2886 					      GFP_KERNEL);
2887 		if (q->base || qsz < PAGE_SIZE)
2888 			break;
2889 
2890 		q->llq.max_n_shift--;
2891 	} while (1);
2892 
2893 	if (!q->base) {
2894 		dev_err(smmu->dev,
2895 			"failed to allocate queue (0x%zx bytes) for %s\n",
2896 			qsz, name);
2897 		return -ENOMEM;
2898 	}
2899 
2900 	if (!WARN_ON(q->base_dma & (qsz - 1))) {
2901 		dev_info(smmu->dev, "allocated %u entries for %s\n",
2902 			 1 << q->llq.max_n_shift, name);
2903 	}
2904 
2905 	q->prod_reg	= page + prod_off;
2906 	q->cons_reg	= page + cons_off;
2907 	q->ent_dwords	= dwords;
2908 
2909 	q->q_base  = Q_BASE_RWA;
2910 	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
2911 	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
2912 
2913 	q->llq.prod = q->llq.cons = 0;
2914 	return 0;
2915 }
2916 
2917 static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
2918 {
2919 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
2920 	unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
2921 
2922 	atomic_set(&cmdq->owner_prod, 0);
2923 	atomic_set(&cmdq->lock, 0);
2924 
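	/*
	 * valid_map has one bit per command queue entry and tracks which
	 * slots hold fully-written commands; the expected polarity of each
	 * bit flips every time the producer index wraps.
	 */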
2925 	cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
2926 							      GFP_KERNEL);
2927 	if (!cmdq->valid_map)
2928 		return -ENOMEM;
2929 
2930 	return 0;
2931 }
2932 
2933 static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
2934 {
2935 	int ret;
2936 
2937 	/* cmdq */
2938 	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
2939 				      ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS,
2940 				      CMDQ_ENT_DWORDS, "cmdq");
2941 	if (ret)
2942 		return ret;
2943 
2944 	ret = arm_smmu_cmdq_init(smmu);
2945 	if (ret)
2946 		return ret;
2947 
2948 	/* evtq */
2949 	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
2950 				      ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS,
2951 				      EVTQ_ENT_DWORDS, "evtq");
2952 	if (ret)
2953 		return ret;
2954 
2955 	if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
2956 	    (smmu->features & ARM_SMMU_FEAT_STALLS)) {
2957 		smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
2958 		if (!smmu->evtq.iopf)
2959 			return -ENOMEM;
2960 	}
2961 
2962 	/* priq */
2963 	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
2964 		return 0;
2965 
2966 	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
2967 				       ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
2968 				       PRIQ_ENT_DWORDS, "priq");
2969 }
2970 
2971 static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2972 {
2973 	unsigned int i;
2974 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2975 	void *strtab = smmu->strtab_cfg.strtab;
2976 
2977 	cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
2978 				    sizeof(*cfg->l1_desc), GFP_KERNEL);
2979 	if (!cfg->l1_desc)
2980 		return -ENOMEM;
2981 
2982 	for (i = 0; i < cfg->num_l1_ents; ++i) {
2983 		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
2984 		strtab += STRTAB_L1_DESC_DWORDS << 3;
2985 	}
2986 
2987 	return 0;
2988 }
2989 
2990 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2991 {
2992 	void *strtab;
2993 	u64 reg;
2994 	u32 size, l1size;
2995 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2996 
2997 	/* Calculate the L1 size, capped to the SIDSIZE. */
2998 	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2999 	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
3000 	cfg->num_l1_ents = 1 << size;
3001 
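	/*
	 * As a rough example, with the driver's usual constants (a 1MB L1
	 * ceiling and 8-byte L1 descriptors) the table tops out at 128K L1
	 * entries; with STRTAB_SPLIT == 8 and a 32-bit SIDSIZE that covers
	 * only 25 of the 32 SID bits, hence the warning below.
	 */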
3002 	size += STRTAB_SPLIT;
3003 	if (size < smmu->sid_bits)
3004 		dev_warn(smmu->dev,
3005 			 "2-level strtab only covers %u/%u bits of SID\n",
3006 			 size, smmu->sid_bits);
3007 
3008 	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
3009 	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
3010 				     GFP_KERNEL);
3011 	if (!strtab) {
3012 		dev_err(smmu->dev,
3013 			"failed to allocate l1 stream table (%u bytes)\n",
3014 			l1size);
3015 		return -ENOMEM;
3016 	}
3017 	cfg->strtab = strtab;
3018 
3019 	/* Configure strtab_base_cfg for 2 levels */
3020 	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
3021 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
3022 	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
3023 	cfg->strtab_base_cfg = reg;
3024 
3025 	return arm_smmu_init_l1_strtab(smmu);
3026 }
3027 
3028 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
3029 {
3030 	void *strtab;
3031 	u64 reg;
3032 	u32 size;
3033 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
3034 
3035 	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
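	/*
	 * Each STE is STRTAB_STE_DWORDS (8) dwords, i.e. 64 bytes, so e.g. a
	 * 16-bit SIDSIZE needs a 4MB physically contiguous table here, which
	 * is one reason the two-level format is preferred when the hardware
	 * supports it.
	 */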
3036 	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
3037 				     GFP_KERNEL);
3038 	if (!strtab) {
3039 		dev_err(smmu->dev,
3040 			"failed to allocate linear stream table (%u bytes)\n",
3041 			size);
3042 		return -ENOMEM;
3043 	}
3044 	cfg->strtab = strtab;
3045 	cfg->num_l1_ents = 1 << smmu->sid_bits;
3046 
3047 	/* Configure strtab_base_cfg for a linear table covering all SIDs */
3048 	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
3049 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
3050 	cfg->strtab_base_cfg = reg;
3051 
3052 	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
3053 	return 0;
3054 }
3055 
3056 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
3057 {
3058 	u64 reg;
3059 	int ret;
3060 
3061 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
3062 		ret = arm_smmu_init_strtab_2lvl(smmu);
3063 	else
3064 		ret = arm_smmu_init_strtab_linear(smmu);
3065 
3066 	if (ret)
3067 		return ret;
3068 
3069 	/* Set the strtab base address */
3070 	reg  = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
3071 	reg |= STRTAB_BASE_RA;
3072 	smmu->strtab_cfg.strtab_base = reg;
3073 
3074 	/* Allocate the first VMID for stage-2 bypass STEs */
3075 	set_bit(0, smmu->vmid_map);
3076 	return 0;
3077 }
3078 
3079 static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
3080 {
3081 	int ret;
3082 
3083 	mutex_init(&smmu->streams_mutex);
3084 	smmu->streams = RB_ROOT;
3085 
3086 	ret = arm_smmu_init_queues(smmu);
3087 	if (ret)
3088 		return ret;
3089 
3090 	return arm_smmu_init_strtab(smmu);
3091 }
3092 
3093 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
3094 				   unsigned int reg_off, unsigned int ack_off)
3095 {
3096 	u32 reg;
3097 
3098 	writel_relaxed(val, smmu->base + reg_off);
3099 	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
3100 					  1, ARM_SMMU_POLL_TIMEOUT_US);
3101 }
3102 
3103 /* GBPA is "special": it has its own UPDATE handshake instead of CR0/CR0ACK */
3104 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
3105 {
3106 	int ret;
3107 	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
3108 
3109 	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
3110 					 1, ARM_SMMU_POLL_TIMEOUT_US);
3111 	if (ret)
3112 		return ret;
3113 
3114 	reg &= ~clr;
3115 	reg |= set;
3116 	writel_relaxed(reg | GBPA_UPDATE, gbpa);
3117 	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
3118 					 1, ARM_SMMU_POLL_TIMEOUT_US);
3119 
3120 	if (ret)
3121 		dev_err(smmu->dev, "GBPA not responding to update\n");
3122 	return ret;
3123 }
3124 
3125 static void arm_smmu_free_msis(void *data)
3126 {
3127 	struct device *dev = data;
3128 	platform_msi_domain_free_irqs(dev);
3129 }
3130 
3131 static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
3132 {
3133 	phys_addr_t doorbell;
3134 	struct device *dev = msi_desc_to_dev(desc);
3135 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
3136 	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index];
3137 
3138 	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
3139 	doorbell &= MSI_CFG0_ADDR_MASK;
3140 
3141 	writeq_relaxed(doorbell, smmu->base + cfg[0]);
3142 	writel_relaxed(msg->data, smmu->base + cfg[1]);
3143 	writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
3144 }
3145 
3146 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
3147 {
3148 	int ret, nvec = ARM_SMMU_MAX_MSIS;
3149 	struct device *dev = smmu->dev;
3150 
3151 	/* Clear the MSI address regs */
3152 	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
3153 	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
3154 
3155 	if (smmu->features & ARM_SMMU_FEAT_PRI)
3156 		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
3157 	else
3158 		nvec--;
3159 
3160 	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
3161 		return;
3162 
3163 	if (!dev->msi.domain) {
3164 		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
3165 		return;
3166 	}
3167 
3168 	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
3169 	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
3170 	if (ret) {
3171 		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
3172 		return;
3173 	}
3174 
3175 	smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
3176 	smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
3177 	smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
3178 
3179 	/* Add callback to free MSIs on teardown */
3180 	devm_add_action(dev, arm_smmu_free_msis, dev);
3181 }
3182 
3183 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
3184 {
3185 	int irq, ret;
3186 
3187 	arm_smmu_setup_msis(smmu);
3188 
3189 	/* Request interrupt lines */
3190 	irq = smmu->evtq.q.irq;
3191 	if (irq) {
3192 		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3193 						arm_smmu_evtq_thread,
3194 						IRQF_ONESHOT,
3195 						"arm-smmu-v3-evtq", smmu);
3196 		if (ret < 0)
3197 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
3198 	} else {
3199 		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
3200 	}
3201 
3202 	irq = smmu->gerr_irq;
3203 	if (irq) {
3204 		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
3205 				       0, "arm-smmu-v3-gerror", smmu);
3206 		if (ret < 0)
3207 			dev_warn(smmu->dev, "failed to enable gerror irq\n");
3208 	} else {
3209 		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
3210 	}
3211 
3212 	if (smmu->features & ARM_SMMU_FEAT_PRI) {
3213 		irq = smmu->priq.q.irq;
3214 		if (irq) {
3215 			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3216 							arm_smmu_priq_thread,
3217 							IRQF_ONESHOT,
3218 							"arm-smmu-v3-priq",
3219 							smmu);
3220 			if (ret < 0)
3221 				dev_warn(smmu->dev,
3222 					 "failed to enable priq irq\n");
3223 		} else {
3224 			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
3225 		}
3226 	}
3227 }
3228 
3229 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
3230 {
3231 	int ret, irq;
3232 	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
3233 
3234 	/* Disable IRQs first */
3235 	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
3236 				      ARM_SMMU_IRQ_CTRLACK);
3237 	if (ret) {
3238 		dev_err(smmu->dev, "failed to disable irqs\n");
3239 		return ret;
3240 	}
3241 
3242 	irq = smmu->combined_irq;
3243 	if (irq) {
3244 		/*
3245 		 * Cavium ThunderX2 implementation doesn't support unique irq
3246 		 * lines. Use a single irq line for all the SMMUv3 interrupts.
3247 		 */
3248 		ret = devm_request_threaded_irq(smmu->dev, irq,
3249 					arm_smmu_combined_irq_handler,
3250 					arm_smmu_combined_irq_thread,
3251 					IRQF_ONESHOT,
3252 					"arm-smmu-v3-combined-irq", smmu);
3253 		if (ret < 0)
3254 			dev_warn(smmu->dev, "failed to enable combined irq\n");
3255 	} else
3256 		arm_smmu_setup_unique_irqs(smmu);
3257 
3258 	if (smmu->features & ARM_SMMU_FEAT_PRI)
3259 		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
3260 
3261 	/* Enable interrupt generation on the SMMU */
3262 	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
3263 				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
3264 	if (ret)
3265 		dev_warn(smmu->dev, "failed to enable irqs\n");
3266 
3267 	return 0;
3268 }
3269 
3270 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
3271 {
3272 	int ret;
3273 
3274 	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
3275 	if (ret)
3276 		dev_err(smmu->dev, "failed to clear cr0\n");
3277 
3278 	return ret;
3279 }
3280 
3281 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
3282 {
3283 	int ret;
3284 	u32 reg, enables;
3285 	struct arm_smmu_cmdq_ent cmd;
3286 
3287 	/* Clear CR0 and sync (disables SMMU and queue processing) */
3288 	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
3289 	if (reg & CR0_SMMUEN) {
3290 		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
3291 		WARN_ON(is_kdump_kernel() && !disable_bypass);
3292 		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
3293 	}
3294 
3295 	ret = arm_smmu_device_disable(smmu);
3296 	if (ret)
3297 		return ret;
3298 
3299 	/* CR1 (table and queue memory attributes) */
3300 	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
3301 	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
3302 	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
3303 	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
3304 	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
3305 	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
3306 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
3307 
3308 	/* CR2 (private TLB maintenance, record invalid SIDs, E2H if supported) */
3309 	reg = CR2_PTM | CR2_RECINVSID;
3310 
3311 	if (smmu->features & ARM_SMMU_FEAT_E2H)
3312 		reg |= CR2_E2H;
3313 
3314 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
3315 
3316 	/* Stream table */
3317 	writeq_relaxed(smmu->strtab_cfg.strtab_base,
3318 		       smmu->base + ARM_SMMU_STRTAB_BASE);
3319 	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
3320 		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
3321 
3322 	/* Command queue */
3323 	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
3324 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3325 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3326 
3327 	enables = CR0_CMDQEN;
3328 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3329 				      ARM_SMMU_CR0ACK);
3330 	if (ret) {
3331 		dev_err(smmu->dev, "failed to enable command queue\n");
3332 		return ret;
3333 	}
3334 
3335 	/* Invalidate any cached configuration */
3336 	cmd.opcode = CMDQ_OP_CFGI_ALL;
3337 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3338 
3339 	/* Invalidate any stale TLB entries */
3340 	if (smmu->features & ARM_SMMU_FEAT_HYP) {
3341 		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
3342 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3343 	}
3344 
3345 	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
3346 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3347 
3348 	/* Event queue */
3349 	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
3350 	writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
3351 	writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
3352 
3353 	enables |= CR0_EVTQEN;
3354 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3355 				      ARM_SMMU_CR0ACK);
3356 	if (ret) {
3357 		dev_err(smmu->dev, "failed to enable event queue\n");
3358 		return ret;
3359 	}
3360 
3361 	/* PRI queue */
3362 	if (smmu->features & ARM_SMMU_FEAT_PRI) {
3363 		writeq_relaxed(smmu->priq.q.q_base,
3364 			       smmu->base + ARM_SMMU_PRIQ_BASE);
3365 		writel_relaxed(smmu->priq.q.llq.prod,
3366 			       smmu->page1 + ARM_SMMU_PRIQ_PROD);
3367 		writel_relaxed(smmu->priq.q.llq.cons,
3368 			       smmu->page1 + ARM_SMMU_PRIQ_CONS);
3369 
3370 		enables |= CR0_PRIQEN;
3371 		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3372 					      ARM_SMMU_CR0ACK);
3373 		if (ret) {
3374 			dev_err(smmu->dev, "failed to enable PRI queue\n");
3375 			return ret;
3376 		}
3377 	}
3378 
3379 	if (smmu->features & ARM_SMMU_FEAT_ATS) {
3380 		enables |= CR0_ATSCHK;
3381 		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3382 					      ARM_SMMU_CR0ACK);
3383 		if (ret) {
3384 			dev_err(smmu->dev, "failed to enable ATS check\n");
3385 			return ret;
3386 		}
3387 	}
3388 
3389 	ret = arm_smmu_setup_irqs(smmu);
3390 	if (ret) {
3391 		dev_err(smmu->dev, "failed to setup irqs\n");
3392 		return ret;
3393 	}
3394 
3395 	if (is_kdump_kernel())
3396 		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
3397 
3398 	/* Enable the SMMU interface, or ensure bypass */
3399 	if (!bypass || disable_bypass) {
3400 		enables |= CR0_SMMUEN;
3401 	} else {
3402 		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
3403 		if (ret)
3404 			return ret;
3405 	}
3406 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3407 				      ARM_SMMU_CR0ACK);
3408 	if (ret) {
3409 		dev_err(smmu->dev, "failed to enable SMMU interface\n");
3410 		return ret;
3411 	}
3412 
3413 	return 0;
3414 }
3415 
3416 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
3417 {
3418 	u32 reg;
3419 	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
3420 
3421 	/* IDR0 */
3422 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
3423 
3424 	/* 2-level structures */
3425 	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
3426 		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
3427 
3428 	if (reg & IDR0_CD2L)
3429 		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
3430 
3431 	/*
3432 	 * Translation table endianness.
3433 	 * We currently require the same endianness as the CPU, but this
3434 	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
3435 	 */
3436 	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
3437 	case IDR0_TTENDIAN_MIXED:
3438 		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
3439 		break;
3440 #ifdef __BIG_ENDIAN
3441 	case IDR0_TTENDIAN_BE:
3442 		smmu->features |= ARM_SMMU_FEAT_TT_BE;
3443 		break;
3444 #else
3445 	case IDR0_TTENDIAN_LE:
3446 		smmu->features |= ARM_SMMU_FEAT_TT_LE;
3447 		break;
3448 #endif
3449 	default:
3450 		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
3451 		return -ENXIO;
3452 	}
3453 
3454 	/* Boolean feature flags */
3455 	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
3456 		smmu->features |= ARM_SMMU_FEAT_PRI;
3457 
3458 	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
3459 		smmu->features |= ARM_SMMU_FEAT_ATS;
3460 
3461 	if (reg & IDR0_SEV)
3462 		smmu->features |= ARM_SMMU_FEAT_SEV;
3463 
3464 	if (reg & IDR0_MSI) {
3465 		smmu->features |= ARM_SMMU_FEAT_MSI;
3466 		if (coherent && !disable_msipolling)
3467 			smmu->options |= ARM_SMMU_OPT_MSIPOLL;
3468 	}
3469 
3470 	if (reg & IDR0_HYP) {
3471 		smmu->features |= ARM_SMMU_FEAT_HYP;
3472 		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
3473 			smmu->features |= ARM_SMMU_FEAT_E2H;
3474 	}
3475 
3476 	/*
3477 	 * The coherency feature as set by FW is used in preference to the ID
3478 	 * register, but warn on mismatch.
3479 	 */
3480 	if (!!(reg & IDR0_COHACC) != coherent)
3481 		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
3482 			 coherent ? "true" : "false");
3483 
	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		fallthrough;
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		fallthrough;
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_CMDQS, reg));
	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
		/*
		 * We don't support splitting up batches, so one batch of
		 * commands plus an extra sync needs to fit inside the command
		 * queue. There's also no way we can handle the weird alignment
		 * restrictions on the base pointer for a unit-length queue.
		 */
		dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
			CMDQ_BATCH_ENTRIES);
		return -ENXIO;
	}

	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_EVTQS, reg));
	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_PRIQS, reg));

	/* SID/SSID sizes */
	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
	smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR3 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
	if (FIELD_GET(IDR3_RIL, reg))
		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

	/* Page sizes */
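	/*
	 * Each supported granule also enables the corresponding block
	 * mappings: 2MB/1GB for 4K, 32MB for 16K and 512MB for 64K granules.
	 */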
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	/* Input address size */
	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
		smmu->features |= ARM_SMMU_FEAT_VAX;

	/* Output address size */
	switch (FIELD_GET(IDR5_OAS, reg)) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_52_BIT:
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size, truncating to 48 bits\n");
		fallthrough;
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

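	/*
	 * arm_smmu_ops is shared by every SMMU instance, so merge this
	 * instance's supported page sizes into it: the first device probed
	 * initialises the bitmap, subsequent ones OR theirs in.
	 */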
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	if (arm_smmu_sva_supported(smmu))
		smmu->features |= ARM_SMMU_FEAT_SVA;

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

#ifdef CONFIG_ACPI
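/*
 * There are no vendor-specific DT-style properties in ACPI, so implementation
 * quirks are keyed off the IORT model field instead.
 */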
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

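	/*
	 * The arm,smmu-v3 binding uses a single "iommus" cell, carrying the
	 * device's StreamID.
	 */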
	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}

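/*
 * The architected register space is two 64K pages, but implementations with
 * the PAGE0_REGS_ONLY quirk (e.g. Cavium CN99xx) only decode page 0.
 */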
static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
				      resource_size_t size)
{
	struct resource res = DEFINE_RES_MEM(start, size);

	return devm_ioremap_resource(dev, &res);
}

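/*
 * Firmware may describe Reserved Memory Regions (RMRs) in the IORT: stream
 * IDs with ongoing DMA, set up by firmware, that must keep working once the
 * SMMU is enabled. Pre-populate bypass STEs for them here, before the device
 * is reset, so that their traffic is not aborted.
 */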
static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
{
	struct list_head rmr_list;
	struct iommu_resv_region *e;

	INIT_LIST_HEAD(&rmr_list);
	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);

	list_for_each_entry(e, &rmr_list, list) {
		__le64 *step;
		struct iommu_iort_rmr_data *rmr;
		int ret, i;

		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
		for (i = 0; i < rmr->num_sids; i++) {
			ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
			if (ret) {
				dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
					rmr->sids[i]);
				continue;
			}

			step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
			arm_smmu_init_bypass_stes(step, 1, true);
		}
	}

	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return -ENOMEM;
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	/*
	 * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
	 * the PMCG registers which are reserved by the PMU driver.
	 */
	smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

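	/*
	 * Register page 1 (the second 64K page) holds the event and PRI queue
	 * index registers. On broken implementations that only decode page 0,
	 * those registers live in page 0 instead, so simply alias the mapping.
	 */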
	if (arm_smmu_resource_size(smmu) > SZ_64K) {
		smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
					       ARM_SMMU_REG_SZ);
		if (IS_ERR(smmu->page1))
			return PTR_ERR(smmu->page1);
	} else {
		smmu->page1 = smmu->base;
	}

	/* Interrupt lines */

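	/*
	 * A "combined" interrupt, if provided, multiplexes the event queue,
	 * PRI queue and global error interrupts onto a single line; otherwise
	 * look each one up individually. MSIs, where supported, are set up
	 * later when the device is reset.
	 */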
	irq = platform_get_irq_byname_optional(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname_optional(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}
	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Check for RMRs and install bypass STEs if any */
	arm_smmu_rmr_install_bypass_ste(smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		iommu_device_sysfs_remove(&smmu->iommu);
		return ret;
	}

	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);
	arm_smmu_device_disable(smmu);
	iopf_queue_free(smmu->evtq.iopf);

	return 0;
}

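/*
 * Disable the SMMU on shutdown so that a subsequent kernel (e.g. entered via
 * kexec) does not inherit a live configuration.
 */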
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

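/*
 * Wait for any outstanding SVA mmu_notifier callbacks to complete before the
 * driver is unregistered and the module text can go away.
 */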
static void arm_smmu_driver_unregister(struct platform_driver *drv)
{
	arm_smmu_sva_notifier_synchronize();
	platform_driver_unregister(drv);
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu-v3",
		.of_match_table		= arm_smmu_of_match,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_driver(arm_smmu_driver, platform_driver_register,
	      arm_smmu_driver_unregister);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu-v3");
MODULE_LICENSE("GPL v2");