xref: /openbmc/linux/drivers/acpi/arm64/iort.c (revision ce746d43)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016, Semihalf
4  *	Author: Tomasz Nowicki <tn@semihalf.com>
5  *
6  * This file implements early detection/parsing of the I/O mapping
7  * reported to the OS through firmware via the I/O Remapping Table (IORT).
8  * IORT document number: ARM DEN 0049A
9  */
10 
11 #define pr_fmt(fmt)	"ACPI: IORT: " fmt
12 
13 #include <linux/acpi_iort.h>
14 #include <linux/bitfield.h>
15 #include <linux/iommu.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 
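/*
 * Node-type bitmasks used when walking ID mappings: IORT_MSI_TYPE matches
 * ITS group nodes (MSI targets) and IORT_IOMMU_TYPE matches SMMU/SMMUv3
 * nodes. IORT_TYPE_MASK(node->type) is tested against these masks to decide
 * when a mapping walk has reached a node of the requested category.
 */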
22 #define IORT_TYPE_MASK(type)	(1 << (type))
23 #define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
24 #define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
25 				(1 << ACPI_IORT_NODE_SMMU_V3))
26 
27 struct iort_its_msi_chip {
28 	struct list_head	list;
29 	struct fwnode_handle	*fw_node;
30 	phys_addr_t		base_addr;
31 	u32			translation_id;
32 };
33 
34 struct iort_fwnode {
35 	struct list_head list;
36 	struct acpi_iort_node *iort_node;
37 	struct fwnode_handle *fwnode;
38 };
39 static LIST_HEAD(iort_fwnode_list);
40 static DEFINE_SPINLOCK(iort_fwnode_lock);
41 
42 /**
43  * iort_set_fwnode() - Create iort_fwnode and use it to register
44  *		       iommu data in the iort_fwnode_list
45  *
46  * @iort_node: IORT table node associated with the IOMMU
47  * @fwnode: fwnode associated with the IORT node
48  *
49  * Returns: 0 on success
50  *          <0 on failure
51  */
52 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
53 				  struct fwnode_handle *fwnode)
54 {
55 	struct iort_fwnode *np;
56 
57 	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
58 
59 	if (WARN_ON(!np))
60 		return -ENOMEM;
61 
62 	INIT_LIST_HEAD(&np->list);
63 	np->iort_node = iort_node;
64 	np->fwnode = fwnode;
65 
66 	spin_lock(&iort_fwnode_lock);
67 	list_add_tail(&np->list, &iort_fwnode_list);
68 	spin_unlock(&iort_fwnode_lock);
69 
70 	return 0;
71 }
72 
73 /**
74  * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
75  *
76  * @node: IORT table node to be looked-up
77  *
78  * Returns: fwnode_handle pointer on success, NULL on failure
79  */
80 static inline struct fwnode_handle *iort_get_fwnode(
81 			struct acpi_iort_node *node)
82 {
83 	struct iort_fwnode *curr;
84 	struct fwnode_handle *fwnode = NULL;
85 
86 	spin_lock(&iort_fwnode_lock);
87 	list_for_each_entry(curr, &iort_fwnode_list, list) {
88 		if (curr->iort_node == node) {
89 			fwnode = curr->fwnode;
90 			break;
91 		}
92 	}
93 	spin_unlock(&iort_fwnode_lock);
94 
95 	return fwnode;
96 }
97 
98 /**
99  * iort_delete_fwnode() - Delete fwnode associated with an IORT node
100  *
101  * @node: IORT table node associated with fwnode to delete
102  */
103 static inline void iort_delete_fwnode(struct acpi_iort_node *node)
104 {
105 	struct iort_fwnode *curr, *tmp;
106 
107 	spin_lock(&iort_fwnode_lock);
108 	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
109 		if (curr->iort_node == node) {
110 			list_del(&curr->list);
111 			kfree(curr);
112 			break;
113 		}
114 	}
115 	spin_unlock(&iort_fwnode_lock);
116 }
117 
118 /**
119  * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
120  *
121  * @fwnode: fwnode associated with device to be looked-up
122  *
123  * Returns: iort_node pointer on success, NULL on failure
124  */
125 static inline struct acpi_iort_node *iort_get_iort_node(
126 			struct fwnode_handle *fwnode)
127 {
128 	struct iort_fwnode *curr;
129 	struct acpi_iort_node *iort_node = NULL;
130 
131 	spin_lock(&iort_fwnode_lock);
132 	list_for_each_entry(curr, &iort_fwnode_list, list) {
133 		if (curr->fwnode == fwnode) {
134 			iort_node = curr->iort_node;
135 			break;
136 		}
137 	}
138 	spin_unlock(&iort_fwnode_lock);
139 
140 	return iort_node;
141 }
142 
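/*
 * Callback used by iort_scan_node(): return AE_OK to stop the scan and have
 * the current node returned, or AE_NOT_FOUND to keep walking the table.
 */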
143 typedef acpi_status (*iort_find_node_callback)
144 	(struct acpi_iort_node *node, void *context);
145 
146 /* Root pointer to the mapped IORT table */
147 static struct acpi_table_header *iort_table;
148 
149 static LIST_HEAD(iort_msi_chip_list);
150 static DEFINE_SPINLOCK(iort_msi_chip_lock);
151 
152 /**
153  * iort_register_domain_token() - register domain token along with related
154  * ITS ID and base address on a list from which it can be retrieved later.
155  * @trans_id: ITS ID.
156  * @base: ITS base address.
157  * @fw_node: Domain token.
158  *
159  * Returns: 0 on success, -ENOMEM if no memory when allocating list element
160  * Returns: 0 on success, -ENOMEM if the list element allocation fails
161 int iort_register_domain_token(int trans_id, phys_addr_t base,
162 			       struct fwnode_handle *fw_node)
163 {
164 	struct iort_its_msi_chip *its_msi_chip;
165 
166 	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
167 	if (!its_msi_chip)
168 		return -ENOMEM;
169 
170 	its_msi_chip->fw_node = fw_node;
171 	its_msi_chip->translation_id = trans_id;
172 	its_msi_chip->base_addr = base;
173 
174 	spin_lock(&iort_msi_chip_lock);
175 	list_add(&its_msi_chip->list, &iort_msi_chip_list);
176 	spin_unlock(&iort_msi_chip_lock);
177 
178 	return 0;
179 }
180 
181 /**
182  * iort_deregister_domain_token() - Deregister domain token based on ITS ID
183  * @trans_id: ITS ID.
184  *
185  * Returns: none.
186  */
187 void iort_deregister_domain_token(int trans_id)
188 {
189 	struct iort_its_msi_chip *its_msi_chip, *t;
190 
191 	spin_lock(&iort_msi_chip_lock);
192 	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
193 		if (its_msi_chip->translation_id == trans_id) {
194 			list_del(&its_msi_chip->list);
195 			kfree(its_msi_chip);
196 			break;
197 		}
198 	}
199 	spin_unlock(&iort_msi_chip_lock);
200 }
201 
202 /**
203  * iort_find_domain_token() - Find domain token based on given ITS ID
204  * @trans_id: ITS ID.
205  *
206  * Returns: domain token if found on the list, NULL otherwise
207  */
208 struct fwnode_handle *iort_find_domain_token(int trans_id)
209 {
210 	struct fwnode_handle *fw_node = NULL;
211 	struct iort_its_msi_chip *its_msi_chip;
212 
213 	spin_lock(&iort_msi_chip_lock);
214 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
215 		if (its_msi_chip->translation_id == trans_id) {
216 			fw_node = its_msi_chip->fw_node;
217 			break;
218 		}
219 	}
220 	spin_unlock(&iort_msi_chip_lock);
221 
222 	return fw_node;
223 }
224 
225 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
226 					     iort_find_node_callback callback,
227 					     void *context)
228 {
229 	struct acpi_iort_node *iort_node, *iort_end;
230 	struct acpi_table_iort *iort;
231 	int i;
232 
233 	if (!iort_table)
234 		return NULL;
235 
236 	/* Get the first IORT node */
237 	iort = (struct acpi_table_iort *)iort_table;
238 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
239 				 iort->node_offset);
240 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
241 				iort_table->length);
242 
243 	for (i = 0; i < iort->node_count; i++) {
244 		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
245 			       "IORT node pointer overflows, bad table!\n"))
246 			return NULL;
247 
248 		if (iort_node->type == type &&
249 		    ACPI_SUCCESS(callback(iort_node, context)))
250 			return iort_node;
251 
252 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
253 					 iort_node->length);
254 	}
255 
256 	return NULL;
257 }
258 
259 static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
260 					    void *context)
261 {
262 	struct device *dev = context;
263 	acpi_status status = AE_NOT_FOUND;
264 
265 	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
266 		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
267 		struct acpi_device *adev;
268 		struct acpi_iort_named_component *ncomp;
269 		struct device *nc_dev = dev;
270 
271 		/*
272 		 * Walk the device tree to find a device with an
273 		 * ACPI companion; there is no point in scanning
274 		 * IORT for a device matching a named component if
275 		 * the device does not have an ACPI companion to
276 		 * start with.
277 		 */
278 		do {
279 			adev = ACPI_COMPANION(nc_dev);
280 			if (adev)
281 				break;
282 
283 			nc_dev = nc_dev->parent;
284 		} while (nc_dev);
285 
286 		if (!adev)
287 			goto out;
288 
289 		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
290 		if (ACPI_FAILURE(status)) {
291 			dev_warn(nc_dev, "Can't get device full path name\n");
292 			goto out;
293 		}
294 
295 		ncomp = (struct acpi_iort_named_component *)node->node_data;
296 		status = !strcmp(ncomp->device_name, buf.pointer) ?
297 							AE_OK : AE_NOT_FOUND;
298 		acpi_os_free(buf.pointer);
299 	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
300 		struct acpi_iort_root_complex *pci_rc;
301 		struct pci_bus *bus;
302 
303 		bus = to_pci_bus(dev);
304 		pci_rc = (struct acpi_iort_root_complex *)node->node_data;
305 
306 		/*
307 		 * It is assumed that PCI segment numbers map one-to-one
308 		 * with root complexes. Each segment number can represent only
309 		 * one root complex.
310 		 */
311 		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
312 							AE_OK : AE_NOT_FOUND;
313 	}
314 out:
315 	return status;
316 }
317 
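/*
 * Worked example (hypothetical values, not from any real table): a range
 * mapping with input_base = 0x0, id_count = 0xff (0x100 IDs, since id_count
 * holds the number of IDs minus one) and output_base = 0x800 translates
 * input ID 0x42 to output ID 0x842.
 */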
318 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
319 		       u32 *rid_out, bool check_overlap)
320 {
321 	/* A single mapping does not care about the input ID */
322 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
323 		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
324 		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
325 			*rid_out = map->output_base;
326 			return 0;
327 		}
328 
329 		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
330 			map, type);
331 		return -ENXIO;
332 	}
333 
334 	if (rid_in < map->input_base ||
335 	    (rid_in > map->input_base + map->id_count))
336 		return -ENXIO;
337 
338 	if (check_overlap) {
339 		/*
340 		 * We already found a mapping for this input ID at the end of
341 		 * another region. If it coincides with the start of this
342 		 * region, we assume the prior match was due to the off-by-1
343 		 * issue mentioned below, and allow it to be superseded.
344 		 * Otherwise, things are *really* broken, and we just disregard
345 		 * duplicate matches entirely to retain compatibility.
346 		 */
347 		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
348 		       map, rid_in);
349 		if (rid_in != map->input_base)
350 			return -ENXIO;
351 
352 		pr_err(FW_BUG "applying workaround.\n");
353 	}
354 
355 	*rid_out = map->output_base + (rid_in - map->input_base);
356 
357 	/*
358 	 * Due to confusion regarding the meaning of the id_count field (which
359 	 * carries the number of IDs *minus 1*), we may have to disregard this
360 	 * match if it is at the end of the range, and overlaps with the start
361 	 * of another one.
362 	 */
363 	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
364 		return -EAGAIN;
365 	return 0;
366 }
367 
368 static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
369 					       u32 *id_out, int index)
370 {
371 	struct acpi_iort_node *parent;
372 	struct acpi_iort_id_mapping *map;
373 
374 	if (!node->mapping_offset || !node->mapping_count ||
375 				     index >= node->mapping_count)
376 		return NULL;
377 
378 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
379 			   node->mapping_offset + index * sizeof(*map));
380 
381 	/* Firmware bug! */
382 	if (!map->output_reference) {
383 		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
384 		       node, node->type);
385 		return NULL;
386 	}
387 
388 	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
389 			       map->output_reference);
390 
391 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
392 		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
393 		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
394 		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
395 		    node->type == ACPI_IORT_NODE_PMCG) {
396 			*id_out = map->output_base;
397 			return parent;
398 		}
399 	}
400 
401 	return NULL;
402 }
403 
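/*
 * The "ID mapping index" denotes the single mapping entry that carries the
 * node's own device ID for MSI generation (SMMUv3 or PMCG), as opposed to
 * the entries that translate IDs of devices upstream of the node.
 */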
404 static int iort_get_id_mapping_index(struct acpi_iort_node *node)
405 {
406 	struct acpi_iort_smmu_v3 *smmu;
407 	struct acpi_iort_pmcg *pmcg;
408 
409 	switch (node->type) {
410 	case ACPI_IORT_NODE_SMMU_V3:
411 		/*
412 		 * The SMMUv3 dev ID mapping index was introduced in revision 1
413 		 * of the table and is not available in revision 0
414 		 */
415 		if (node->revision < 1)
416 			return -EINVAL;
417 
418 		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
419 		/*
420 		 * The ID mapping index is ignored only if all interrupts are
421 		 * GSIV based
422 		 */
423 		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
424 		    && smmu->sync_gsiv)
425 			return -EINVAL;
426 
427 		if (smmu->id_mapping_index >= node->mapping_count) {
428 			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
429 			       node, node->type);
430 			return -EINVAL;
431 		}
432 
433 		return smmu->id_mapping_index;
434 	case ACPI_IORT_NODE_PMCG:
435 		pmcg = (struct acpi_iort_pmcg *)node->node_data;
436 		if (pmcg->overflow_gsiv || node->mapping_count == 0)
437 			return -EINVAL;
438 
439 		return 0;
440 	default:
441 		return -EINVAL;
442 	}
443 }
444 
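/*
 * An ID mapping walk starts at a device node (named component or root
 * complex) and follows output_reference links through any intermediate
 * nodes (e.g. an SMMU) until a node matching type_mask is found,
 * translating the ID at each hop.
 */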
445 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
446 					       u32 id_in, u32 *id_out,
447 					       u8 type_mask)
448 {
449 	u32 id = id_in;
450 
451 	/* Parse the ID mapping tree to find specified node type */
452 	while (node) {
453 		struct acpi_iort_id_mapping *map;
454 		int i, index, rc = 0;
455 		u32 out_ref = 0, map_id = id;
456 
457 		if (IORT_TYPE_MASK(node->type) & type_mask) {
458 			if (id_out)
459 				*id_out = id;
460 			return node;
461 		}
462 
463 		if (!node->mapping_offset || !node->mapping_count)
464 			goto fail_map;
465 
466 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
467 				   node->mapping_offset);
468 
469 		/* Firmware bug! */
470 		if (!map->output_reference) {
471 			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
472 			       node, node->type);
473 			goto fail_map;
474 		}
475 
476 		/*
477 		 * Get the special ID mapping index (if any) and skip its
478 		 * associated ID map to prevent erroneous multi-stage
479 		 * IORT ID translations.
480 		 */
481 		index = iort_get_id_mapping_index(node);
482 
483 		/* Do the ID translation */
484 		for (i = 0; i < node->mapping_count; i++, map++) {
485 			/* if it is the special mapping index, skip it */
486 			if (i == index)
487 				continue;
488 
489 			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
490 			if (!rc)
491 				break;
492 			if (rc == -EAGAIN)
493 				out_ref = map->output_reference;
494 		}
495 
496 		if (i == node->mapping_count && !out_ref)
497 			goto fail_map;
498 
499 		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
500 				    rc ? out_ref : map->output_reference);
501 	}
502 
503 fail_map:
504 	/* Map input ID to output ID unchanged on mapping failure */
505 	if (id_out)
506 		*id_out = id_in;
507 
508 	return NULL;
509 }
510 
511 static struct acpi_iort_node *iort_node_map_platform_id(
512 		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
513 		int index)
514 {
515 	struct acpi_iort_node *parent;
516 	u32 id;
517 
518 	/* step 1: retrieve the initial dev id */
519 	parent = iort_node_get_id(node, &id, index);
520 	if (!parent)
521 		return NULL;
522 
523 	/*
524 	 * optional step 2: if the parent is not the target type we want,
525 	 * map the initial dev ID again, to handle use cases such as
526 	 * NC (named component) -> SMMU -> ITS. If the type matches,
527 	 * return the initial dev ID and its parent pointer directly.
528 	 */
529 	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
530 		parent = iort_node_map_id(parent, id, id_out, type_mask);
531 	else
532 		if (id_out)
533 			*id_out = id;
534 
535 	return parent;
536 }
537 
538 static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
539 {
540 	struct pci_bus *pbus;
541 
542 	if (!dev_is_pci(dev)) {
543 		struct acpi_iort_node *node;
544 		/*
545 		 * Scan iort_fwnode_list to see if the device is an IORT
546 		 * platform device (such as an SMMU or PMCG); its IORT node
547 		 * was already cached and associated with its fwnode when the
548 		 * IORT platform devices were initialized.
549 		 */
550 		node = iort_get_iort_node(dev->fwnode);
551 		if (node)
552 			return node;
553 		/*
554 		 * If not, it should be a platform device defined in
555 		 * DSDT/SSDT (with a Named Component node in the IORT)
556 		 */
557 		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
558 				      iort_match_node_callback, dev);
559 	}
560 
561 	pbus = to_pci_dev(dev)->bus;
562 
563 	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
564 			      iort_match_node_callback, &pbus->dev);
565 }
566 
567 /**
568  * iort_msi_map_id() - Map an MSI input ID for a device
569  * @dev: The device for which the mapping is to be done.
570  * @input_id: The device input ID.
571  *
572  * Returns: mapped MSI ID on success, input ID otherwise
573  */
574 u32 iort_msi_map_id(struct device *dev, u32 input_id)
575 {
576 	struct acpi_iort_node *node;
577 	u32 dev_id;
578 
579 	node = iort_find_dev_node(dev);
580 	if (!node)
581 		return input_id;
582 
583 	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
584 	return dev_id;
585 }
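
/*
 * Illustrative use (hypothetical caller, not part of this file): an MSI
 * layer could map a PCI requester ID before programming an ITS, e.g.
 *
 *	u32 dev_id = iort_msi_map_id(&pdev->dev, pci_dev_id(pdev));
 *
 * where pdev is an assumed struct pci_dev pointer.
 */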
586 
587 /**
588  * iort_pmsi_get_dev_id() - Get the device id for a device
589  * @dev: The device for which the mapping is to be done.
590  * @dev_id: The device ID found.
591  *
592  * Returns: 0 if a dev ID was found, -ENODEV otherwise
593  */
594 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
595 {
596 	int i, index;
597 	struct acpi_iort_node *node;
598 
599 	node = iort_find_dev_node(dev);
600 	if (!node)
601 		return -ENODEV;
602 
603 	index = iort_get_id_mapping_index(node);
604 	/* if there is a valid index, go get the dev_id directly */
605 	if (index >= 0) {
606 		if (iort_node_get_id(node, dev_id, index))
607 			return 0;
608 	} else {
609 		for (i = 0; i < node->mapping_count; i++) {
610 			if (iort_node_map_platform_id(node, dev_id,
611 						      IORT_MSI_TYPE, i))
612 				return 0;
613 		}
614 	}
615 
616 	return -ENODEV;
617 }
618 
619 static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
620 {
621 	struct iort_its_msi_chip *its_msi_chip;
622 	int ret = -ENODEV;
623 
624 	spin_lock(&iort_msi_chip_lock);
625 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
626 		if (its_msi_chip->translation_id == its_id) {
627 			*base = its_msi_chip->base_addr;
628 			ret = 0;
629 			break;
630 		}
631 	}
632 	spin_unlock(&iort_msi_chip_lock);
633 
634 	return ret;
635 }
636 
637 /**
638  * iort_dev_find_its_id() - Find the ITS identifier for a device
639  * @dev: The device.
640  * @id: Device's ID
641  * @idx: Index into the ITS identifier list.
642  * @its_id: ITS identifier.
643  *
644  * Returns: 0 on success, appropriate error value otherwise
645  */
646 static int iort_dev_find_its_id(struct device *dev, u32 id,
647 				unsigned int idx, int *its_id)
648 {
649 	struct acpi_iort_its_group *its;
650 	struct acpi_iort_node *node;
651 
652 	node = iort_find_dev_node(dev);
653 	if (!node)
654 		return -ENXIO;
655 
656 	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
657 	if (!node)
658 		return -ENXIO;
659 
660 	/* Move to ITS specific data */
661 	its = (struct acpi_iort_its_group *)node->node_data;
662 	if (idx >= its->its_count) {
663 		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
664 			idx, its->its_count);
665 		return -ENXIO;
666 	}
667 
668 	*its_id = its->identifiers[idx];
669 	return 0;
670 }
671 
672 /**
673  * iort_get_device_domain() - Find MSI domain related to a device
674  * @dev: The device.
675  * @id: Requester ID for the device.
676  * @bus_token: irq domain bus token.
677  * Returns: the MSI domain for this device, NULL otherwise
678  */
679 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
680 					  enum irq_domain_bus_token bus_token)
681 {
682 	struct fwnode_handle *handle;
683 	int its_id;
684 
685 	if (iort_dev_find_its_id(dev, id, 0, &its_id))
686 		return NULL;
687 
688 	handle = iort_find_domain_token(its_id);
689 	if (!handle)
690 		return NULL;
691 
692 	return irq_find_matching_fwnode(handle, bus_token);
693 }
694 
695 static void iort_set_device_domain(struct device *dev,
696 				   struct acpi_iort_node *node)
697 {
698 	struct acpi_iort_its_group *its;
699 	struct acpi_iort_node *msi_parent;
700 	struct acpi_iort_id_mapping *map;
701 	struct fwnode_handle *iort_fwnode;
702 	struct irq_domain *domain;
703 	int index;
704 
705 	index = iort_get_id_mapping_index(node);
706 	if (index < 0)
707 		return;
708 
709 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
710 			   node->mapping_offset + index * sizeof(*map));
711 
712 	/* Firmware bug! */
713 	if (!map->output_reference ||
714 	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
715 		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
716 		       node, node->type);
717 		return;
718 	}
719 
720 	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
721 				  map->output_reference);
722 
723 	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
724 		return;
725 
726 	/* Move to ITS specific data */
727 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
728 
729 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
730 	if (!iort_fwnode)
731 		return;
732 
733 	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
734 	if (domain)
735 		dev_set_msi_domain(dev, domain);
736 }
737 
738 /**
739  * iort_get_platform_device_domain() - Find MSI domain related to a
740  * platform device
741  * @dev: the dev pointer associated with the platform device
742  *
743  * Returns: the MSI domain for this device, NULL otherwise
744  */
745 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
746 {
747 	struct acpi_iort_node *node, *msi_parent = NULL;
748 	struct fwnode_handle *iort_fwnode;
749 	struct acpi_iort_its_group *its;
750 	int i;
751 
752 	/* find its associated iort node */
753 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
754 			      iort_match_node_callback, dev);
755 	if (!node)
756 		return NULL;
757 
758 	/* then find its msi parent node */
759 	for (i = 0; i < node->mapping_count; i++) {
760 		msi_parent = iort_node_map_platform_id(node, NULL,
761 						       IORT_MSI_TYPE, i);
762 		if (msi_parent)
763 			break;
764 	}
765 
766 	if (!msi_parent)
767 		return NULL;
768 
769 	/* Move to ITS specific data */
770 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
771 
772 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
773 	if (!iort_fwnode)
774 		return NULL;
775 
776 	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
777 }
778 
779 void acpi_configure_pmsi_domain(struct device *dev)
780 {
781 	struct irq_domain *msi_domain;
782 
783 	msi_domain = iort_get_platform_device_domain(dev);
784 	if (msi_domain)
785 		dev_set_msi_domain(dev, msi_domain);
786 }
787 
788 #ifdef CONFIG_IOMMU_API
789 static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
790 {
791 	struct acpi_iort_node *iommu;
792 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
793 
794 	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
795 
796 	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
797 		struct acpi_iort_smmu_v3 *smmu;
798 
799 		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
800 		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
801 			return iommu;
802 	}
803 
804 	return NULL;
805 }
806 
807 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
808 {
809 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
810 
811 	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
812 }
813 
814 static inline int iort_add_device_replay(const struct iommu_ops *ops,
815 					 struct device *dev)
816 {
817 	int err = 0;
818 
819 	if (dev->bus && !device_iommu_mapped(dev))
820 		err = iommu_probe_device(dev);
821 
822 	return err;
823 }
824 
825 /**
826  * iort_iommu_msi_get_resv_regions - Reserved region driver helper
827  * @dev: Device from iommu_get_resv_regions()
828  * @head: Reserved region list from iommu_get_resv_regions()
829  *
830  * Returns: Number of msi reserved regions on success (0 if platform
831  *          doesn't require the reservation or no associated msi regions),
832  *          appropriate error value otherwise. The ITS interrupt translation
833  *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
834  *          are the msi reserved regions.
835  */
836 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
837 {
838 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
839 	struct acpi_iort_its_group *its;
840 	struct acpi_iort_node *iommu_node, *its_node = NULL;
841 	int i, resv = 0;
842 
843 	iommu_node = iort_get_msi_resv_iommu(dev);
844 	if (!iommu_node)
845 		return 0;
846 
847 	/*
848 	 * Current logic to reserve ITS regions relies on HW topologies
849 	 * where a given PCI or named component maps its IDs to only one
850 	 * ITS group; if a PCI or named component can map its IDs to
851 	 * different ITS groups through IORT mappings this function has
852 	 * to be reworked to ensure we reserve regions for all ITS groups
853 	 * a given PCI or named component may map IDs to.
854 	 */
855 
856 	for (i = 0; i < fwspec->num_ids; i++) {
857 		its_node = iort_node_map_id(iommu_node,
858 					fwspec->ids[i],
859 					NULL, IORT_MSI_TYPE);
860 		if (its_node)
861 			break;
862 	}
863 
864 	if (!its_node)
865 		return 0;
866 
867 	/* Move to ITS specific data */
868 	its = (struct acpi_iort_its_group *)its_node->node_data;
869 
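	/*
	 * The MSI doorbell register (GITS_TRANSLATER) lives in the second
	 * 64K page of each ITS, hence the (base + SZ_64K, SZ_64K)
	 * reservation below.
	 */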
870 	for (i = 0; i < its->its_count; i++) {
871 		phys_addr_t base;
872 
873 		if (!iort_find_its_base(its->identifiers[i], &base)) {
874 			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
875 			struct iommu_resv_region *region;
876 
877 			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
878 							 prot, IOMMU_RESV_MSI);
879 			if (region) {
880 				list_add_tail(&region->list, head);
881 				resv++;
882 			}
883 		}
884 	}
885 
886 	return (resv == its->its_count) ? resv : -ENODEV;
887 }
888 
889 static inline bool iort_iommu_driver_enabled(u8 type)
890 {
891 	switch (type) {
892 	case ACPI_IORT_NODE_SMMU_V3:
893 		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
894 	case ACPI_IORT_NODE_SMMU:
895 		return IS_ENABLED(CONFIG_ARM_SMMU);
896 	default:
897 		pr_warn("IORT node type %u does not describe an SMMU\n", type);
898 		return false;
899 	}
900 }
901 
902 static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
903 			       struct fwnode_handle *fwnode,
904 			       const struct iommu_ops *ops)
905 {
906 	int ret = iommu_fwspec_init(dev, fwnode, ops);
907 
908 	if (!ret)
909 		ret = iommu_fwspec_add_ids(dev, &streamid, 1);
910 
911 	return ret;
912 }
913 
914 static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
915 {
916 	struct acpi_iort_root_complex *pci_rc;
917 
918 	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
919 	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
920 }
921 
922 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
923 			    u32 streamid)
924 {
925 	const struct iommu_ops *ops;
926 	struct fwnode_handle *iort_fwnode;
927 
928 	if (!node)
929 		return -ENODEV;
930 
931 	iort_fwnode = iort_get_fwnode(node);
932 	if (!iort_fwnode)
933 		return -ENODEV;
934 
935 	/*
936 	 * If the ops look-up fails, it means that either the SMMU
937 	 * driver has not been probed yet or that the SMMU driver is
938 	 * not built into the kernel; depending on whether the SMMU
939 	 * driver is built into the kernel or not, defer the IOMMU
940 	 * configuration (-EPROBE_DEFER) or just abort it
941 	 * (-ENODEV).
942 	 */
943 	ops = iommu_ops_from_fwnode(iort_fwnode);
944 	if (!ops)
945 		return iort_iommu_driver_enabled(node->type) ?
946 		       -EPROBE_DEFER : -ENODEV;
947 
948 	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
949 }
950 
951 struct iort_pci_alias_info {
952 	struct device *dev;
953 	struct acpi_iort_node *node;
954 };
955 
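/*
 * Called once per DMA alias via pci_for_each_dma_alias(): each alias RID is
 * translated to an SMMU stream ID and added to the device's IOMMU fwspec.
 */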
956 static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
957 {
958 	struct iort_pci_alias_info *info = data;
959 	struct acpi_iort_node *parent;
960 	u32 streamid;
961 
962 	parent = iort_node_map_id(info->node, alias, &streamid,
963 				  IORT_IOMMU_TYPE);
964 	return iort_iommu_xlate(info->dev, parent, streamid);
965 }
966 
967 static void iort_named_component_init(struct device *dev,
968 				      struct acpi_iort_node *node)
969 {
970 	struct acpi_iort_named_component *nc;
971 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
972 
973 	if (!fwspec)
974 		return;
975 
976 	nc = (struct acpi_iort_named_component *)node->node_data;
977 	fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
978 					   nc->node_flags);
979 }
980 
981 static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
982 {
983 	struct acpi_iort_node *parent;
984 	int err = -ENODEV, i = 0;
985 	u32 streamid = 0;
986 
987 	do {
988 
989 		parent = iort_node_map_platform_id(node, &streamid,
990 						   IORT_IOMMU_TYPE,
991 						   i++);
992 
993 		if (parent)
994 			err = iort_iommu_xlate(dev, parent, streamid);
995 	} while (parent && !err);
996 
997 	return err;
998 }
999 
1000 static int iort_nc_iommu_map_id(struct device *dev,
1001 				struct acpi_iort_node *node,
1002 				const u32 *in_id)
1003 {
1004 	struct acpi_iort_node *parent;
1005 	u32 streamid;
1006 
1007 	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
1008 	if (parent)
1009 		return iort_iommu_xlate(dev, parent, streamid);
1010 
1011 	return -ENODEV;
1012 }
1013 
1014 
1015 /**
1016  * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1017  *
1018  * @dev: device to configure
1019  * @id_in: optional input id const value pointer
1020  *
1021  * Returns: iommu_ops pointer on configuration success
1022  *          NULL on configuration failure
1023  */
1024 const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
1025 						const u32 *id_in)
1026 {
1027 	struct acpi_iort_node *node;
1028 	const struct iommu_ops *ops;
1029 	int err = -ENODEV;
1030 
1031 	/*
1032 	 * If we already translated the fwspec there
1033 	 * is nothing left to do, return the iommu_ops.
1034 	 */
1035 	ops = iort_fwspec_iommu_ops(dev);
1036 	if (ops)
1037 		return ops;
1038 
1039 	if (dev_is_pci(dev)) {
1040 		struct iommu_fwspec *fwspec;
1041 		struct pci_bus *bus = to_pci_dev(dev)->bus;
1042 		struct iort_pci_alias_info info = { .dev = dev };
1043 
1044 		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1045 				      iort_match_node_callback, &bus->dev);
1046 		if (!node)
1047 			return NULL;
1048 
1049 		info.node = node;
1050 		err = pci_for_each_dma_alias(to_pci_dev(dev),
1051 					     iort_pci_iommu_init, &info);
1052 
1053 		fwspec = dev_iommu_fwspec_get(dev);
1054 		if (fwspec && iort_pci_rc_supports_ats(node))
1055 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
1056 	} else {
1057 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1058 				      iort_match_node_callback, dev);
1059 		if (!node)
1060 			return NULL;
1061 
1062 		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
1063 			      iort_nc_iommu_map(dev, node);
1064 
1065 		if (!err)
1066 			iort_named_component_init(dev, node);
1067 	}
1068 
1069 	/*
1070 	 * If we have reason to believe the IOMMU driver missed the initial
1071 	 * add_device callback for dev, replay it to get things in order.
1072 	 */
1073 	if (!err) {
1074 		ops = iort_fwspec_iommu_ops(dev);
1075 		err = iort_add_device_replay(ops, dev);
1076 	}
1077 
1078 	/* Ignore all other errors apart from EPROBE_DEFER */
1079 	if (err == -EPROBE_DEFER) {
1080 		ops = ERR_PTR(err);
1081 	} else if (err) {
1082 		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
1083 		ops = NULL;
1084 	}
1085 
1086 	return ops;
1087 }
1088 
1089 #else
1090 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
1091 { return NULL; }
1092 static inline int iort_add_device_replay(const struct iommu_ops *ops,
1093 					 struct device *dev)
1094 { return 0; }
1095 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
1096 { return 0; }
1097 const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
1098 						const u32 *input_id)
1099 { return NULL; }
1100 #endif
1101 
1102 static int nc_dma_get_range(struct device *dev, u64 *size)
1103 {
1104 	struct acpi_iort_node *node;
1105 	struct acpi_iort_named_component *ncomp;
1106 
1107 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1108 			      iort_match_node_callback, dev);
1109 	if (!node)
1110 		return -ENODEV;
1111 
1112 	ncomp = (struct acpi_iort_named_component *)node->node_data;
1113 
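	/*
	 * memory_address_limit is the DMA address width in bits; a
	 * (hypothetical) limit of 48 yields a 2^48 byte range, while 64 or
	 * more means the full 64-bit space.
	 */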
1114 	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1115 			1ULL<<ncomp->memory_address_limit;
1116 
1117 	return 0;
1118 }
1119 
1120 static int rc_dma_get_range(struct device *dev, u64 *size)
1121 {
1122 	struct acpi_iort_node *node;
1123 	struct acpi_iort_root_complex *rc;
1124 	struct pci_bus *pbus = to_pci_dev(dev)->bus;
1125 
1126 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1127 			      iort_match_node_callback, &pbus->dev);
1128 	if (!node || node->revision < 1)
1129 		return -ENODEV;
1130 
1131 	rc = (struct acpi_iort_root_complex *)node->node_data;
1132 
1133 	*size = rc->memory_address_limit >= 64 ? U64_MAX :
1134 			1ULL<<rc->memory_address_limit;
1135 
1136 	return 0;
1137 }
1138 
1139 /**
1140  * iort_dma_setup() - Set-up device DMA parameters.
1141  *
1142  * @dev: device to configure
1143  * @dma_addr: device DMA address result pointer
1144  * @dma_size: DMA range size result pointer
1145  */
1146 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
1147 {
1148 	u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
1149 	int ret;
1150 
1151 	/*
1152 	 * If @dev is expected to be DMA-capable then the bus code that created
1153 	 * it should have initialised its dma_mask pointer by this point. For
1154 	 * now, we'll continue the legacy behaviour of coercing it to the
1155 	 * coherent mask if not, but we'll no longer do so quietly.
1156 	 */
1157 	if (!dev->dma_mask) {
1158 		dev_warn(dev, "DMA mask not set\n");
1159 		dev->dma_mask = &dev->coherent_dma_mask;
1160 	}
1161 
1162 	if (dev->coherent_dma_mask)
1163 		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
1164 	else
1165 		size = 1ULL << 32;
1166 
1167 	ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
1168 	if (ret == -ENODEV)
1169 		ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
1170 				      : nc_dma_get_range(dev, &size);
1171 
1172 	if (!ret) {
1173 		/*
1174 		 * Limit coherent and dma mask based on size retrieved from
1175 		 * firmware.
1176 		 */
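		/*
		 * For example (hypothetical values): dmaaddr = 0 and
		 * size = SZ_4G give end = 0xffffffff and mask =
		 * DMA_BIT_MASK(32).
		 */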
1177 		end = dmaaddr + size - 1;
1178 		mask = DMA_BIT_MASK(ilog2(end) + 1);
1179 		dev->bus_dma_limit = end;
1180 		dev->coherent_dma_mask = mask;
1181 		*dev->dma_mask = mask;
1182 	}
1183 
1184 	*dma_addr = dmaaddr;
1185 	*dma_size = size;
1186 
1187 	dev->dma_pfn_offset = PFN_DOWN(offset);
1188 	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
1189 }
1190 
1191 static void __init acpi_iort_register_irq(int hwirq, const char *name,
1192 					  int trigger,
1193 					  struct resource *res)
1194 {
1195 	int irq = acpi_register_gsi(NULL, hwirq, trigger,
1196 				    ACPI_ACTIVE_HIGH);
1197 
1198 	if (irq <= 0) {
1199 		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
1200 								      name);
1201 		return;
1202 	}
1203 
1204 	res->start = irq;
1205 	res->end = irq;
1206 	res->flags = IORESOURCE_IRQ;
1207 	res->name = name;
1208 }
1209 
1210 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
1211 {
1212 	struct acpi_iort_smmu_v3 *smmu;
1213 	/* Always present mem resource */
1214 	int num_res = 1;
1215 
1216 	/* Retrieve SMMUv3 specific data */
1217 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1218 
1219 	if (smmu->event_gsiv)
1220 		num_res++;
1221 
1222 	if (smmu->pri_gsiv)
1223 		num_res++;
1224 
1225 	if (smmu->gerr_gsiv)
1226 		num_res++;
1227 
1228 	if (smmu->sync_gsiv)
1229 		num_res++;
1230 
1231 	return num_res;
1232 }
1233 
1234 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
1235 {
1236 	/*
1237 	 * The Cavium ThunderX2 implementation doesn't support unique
1238 	 * irq lines. Use a single irq line for all the SMMUv3 interrupts.
1239 	 */
1240 	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1241 		return false;
1242 
1243 	/*
1244 	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1245 	 * SPI numbers here.
1246 	 */
1247 	return smmu->event_gsiv == smmu->pri_gsiv &&
1248 	       smmu->event_gsiv == smmu->gerr_gsiv &&
1249 	       smmu->event_gsiv == smmu->sync_gsiv;
1250 }
1251 
1252 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1253 {
1254 	/*
1255 	 * Override the size for the Cavium ThunderX2 implementation,
1256 	 * which doesn't support the page 1 SMMU register space.
1257 	 */
1258 	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1259 		return SZ_64K;
1260 
1261 	return SZ_128K;
1262 }
1263 
1264 static void __init arm_smmu_v3_init_resources(struct resource *res,
1265 					      struct acpi_iort_node *node)
1266 {
1267 	struct acpi_iort_smmu_v3 *smmu;
1268 	int num_res = 0;
1269 
1270 	/* Retrieve SMMUv3 specific data */
1271 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1272 
1273 	res[num_res].start = smmu->base_address;
1274 	res[num_res].end = smmu->base_address +
1275 				arm_smmu_v3_resource_size(smmu) - 1;
1276 	res[num_res].flags = IORESOURCE_MEM;
1277 
1278 	num_res++;
1279 	if (arm_smmu_v3_is_combined_irq(smmu)) {
1280 		if (smmu->event_gsiv)
1281 			acpi_iort_register_irq(smmu->event_gsiv, "combined",
1282 					       ACPI_EDGE_SENSITIVE,
1283 					       &res[num_res++]);
1284 	} else {
1285 
1286 		if (smmu->event_gsiv)
1287 			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1288 					       ACPI_EDGE_SENSITIVE,
1289 					       &res[num_res++]);
1290 
1291 		if (smmu->pri_gsiv)
1292 			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1293 					       ACPI_EDGE_SENSITIVE,
1294 					       &res[num_res++]);
1295 
1296 		if (smmu->gerr_gsiv)
1297 			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1298 					       ACPI_EDGE_SENSITIVE,
1299 					       &res[num_res++]);
1300 
1301 		if (smmu->sync_gsiv)
1302 			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1303 					       ACPI_EDGE_SENSITIVE,
1304 					       &res[num_res++]);
1305 	}
1306 }
1307 
1308 static void __init arm_smmu_v3_dma_configure(struct device *dev,
1309 					     struct acpi_iort_node *node)
1310 {
1311 	struct acpi_iort_smmu_v3 *smmu;
1312 	enum dev_dma_attr attr;
1313 
1314 	/* Retrieve SMMUv3 specific data */
1315 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1316 
1317 	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1318 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1319 
1320 	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1321 	dev->dma_mask = &dev->coherent_dma_mask;
1322 
1323 	/* Configure DMA for the page table walker */
1324 	acpi_dma_configure(dev, attr);
1325 }
1326 
1327 #if defined(CONFIG_ACPI_NUMA)
1328 /*
1329  * Set the NUMA proximity domain for an SMMUv3 device.
1330  */
1331 static int __init arm_smmu_v3_set_proximity(struct device *dev,
1332 					      struct acpi_iort_node *node)
1333 {
1334 	struct acpi_iort_smmu_v3 *smmu;
1335 
1336 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1337 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1338 		int dev_node = acpi_map_pxm_to_node(smmu->pxm);
1339 
1340 		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1341 			return -EINVAL;
1342 
1343 		set_dev_node(dev, dev_node);
1344 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1345 			smmu->base_address,
1346 			smmu->pxm);
1347 	}
1348 	return 0;
1349 }
1350 #else
1351 #define arm_smmu_v3_set_proximity NULL
1352 #endif
1353 
1354 static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1355 {
1356 	struct acpi_iort_smmu *smmu;
1357 
1358 	/* Retrieve SMMU specific data */
1359 	smmu = (struct acpi_iort_smmu *)node->node_data;
1360 
1361 	/*
1362 	 * Only consider the global fault interrupt and ignore the
1363 	 * configuration access interrupt.
1364 	 *
1365 	 * MMIO address and global fault interrupt resources are always
1366 	 * present so add them to the context interrupt count as a static
1367 	 * value.
1368 	 */
1369 	return smmu->context_interrupt_count + 2;
1370 }
1371 
1372 static void __init arm_smmu_init_resources(struct resource *res,
1373 					   struct acpi_iort_node *node)
1374 {
1375 	struct acpi_iort_smmu *smmu;
1376 	int i, hw_irq, trigger, num_res = 0;
1377 	u64 *ctx_irq, *glb_irq;
1378 
1379 	/* Retrieve SMMU specific data */
1380 	smmu = (struct acpi_iort_smmu *)node->node_data;
1381 
1382 	res[num_res].start = smmu->base_address;
1383 	res[num_res].end = smmu->base_address + smmu->span - 1;
1384 	res[num_res].flags = IORESOURCE_MEM;
1385 	num_res++;
1386 
1387 	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1388 	/* Global IRQs */
1389 	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1390 	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1391 
1392 	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1393 				     &res[num_res++]);
1394 
1395 	/* Context IRQs */
1396 	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1397 	for (i = 0; i < smmu->context_interrupt_count; i++) {
1398 		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1399 		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1400 
1401 		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1402 				       &res[num_res++]);
1403 	}
1404 }
1405 
1406 static void __init arm_smmu_dma_configure(struct device *dev,
1407 					  struct acpi_iort_node *node)
1408 {
1409 	struct acpi_iort_smmu *smmu;
1410 	enum dev_dma_attr attr;
1411 
1412 	/* Retrieve SMMU specific data */
1413 	smmu = (struct acpi_iort_smmu *)node->node_data;
1414 
1415 	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1416 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1417 
1418 	/* We expect the dma masks to be equivalent for SMMU set-ups */
1419 	dev->dma_mask = &dev->coherent_dma_mask;
1420 
1421 	/* Configure DMA for the page table walker */
1422 	acpi_dma_configure(dev, attr);
1423 }
1424 
1425 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1426 {
1427 	struct acpi_iort_pmcg *pmcg;
1428 
1429 	/* Retrieve PMCG specific data */
1430 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1431 
1432 	/*
1433 	 * There are always 2 memory resources.
1434 	 * If the overflow_gsiv is present then add that for a total of 3.
1435 	 */
1436 	return pmcg->overflow_gsiv ? 3 : 2;
1437 }
1438 
1439 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1440 						   struct acpi_iort_node *node)
1441 {
1442 	struct acpi_iort_pmcg *pmcg;
1443 
1444 	/* Retrieve PMCG specific data */
1445 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1446 
1447 	res[0].start = pmcg->page0_base_address;
1448 	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1449 	res[0].flags = IORESOURCE_MEM;
1450 	res[1].start = pmcg->page1_base_address;
1451 	res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1452 	res[1].flags = IORESOURCE_MEM;
1453 
1454 	if (pmcg->overflow_gsiv)
1455 		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1456 				       ACPI_EDGE_SENSITIVE, &res[2]);
1457 }
1458 
1459 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1460 	/* HiSilicon Hip08 Platform */
1461 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1462 	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
1463 	{ }
1464 };
1465 
1466 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1467 {
1468 	u32 model;
1469 	int idx;
1470 
1471 	idx = acpi_match_platform_list(pmcg_plat_info);
1472 	if (idx >= 0)
1473 		model = pmcg_plat_info[idx].data;
1474 	else
1475 		model = IORT_SMMU_V3_PMCG_GENERIC;
1476 
1477 	return platform_device_add_data(pdev, &model, sizeof(model));
1478 }
1479 
1480 struct iort_dev_config {
1481 	const char *name;
1482 	int (*dev_init)(struct acpi_iort_node *node);
1483 	void (*dev_dma_configure)(struct device *dev,
1484 				  struct acpi_iort_node *node);
1485 	int (*dev_count_resources)(struct acpi_iort_node *node);
1486 	void (*dev_init_resources)(struct resource *res,
1487 				     struct acpi_iort_node *node);
1488 	int (*dev_set_proximity)(struct device *dev,
1489 				    struct acpi_iort_node *node);
1490 	int (*dev_add_platdata)(struct platform_device *pdev);
1491 };
1492 
1493 static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1494 	.name = "arm-smmu-v3",
1495 	.dev_dma_configure = arm_smmu_v3_dma_configure,
1496 	.dev_count_resources = arm_smmu_v3_count_resources,
1497 	.dev_init_resources = arm_smmu_v3_init_resources,
1498 	.dev_set_proximity = arm_smmu_v3_set_proximity,
1499 };
1500 
1501 static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1502 	.name = "arm-smmu",
1503 	.dev_dma_configure = arm_smmu_dma_configure,
1504 	.dev_count_resources = arm_smmu_count_resources,
1505 	.dev_init_resources = arm_smmu_init_resources,
1506 };
1507 
1508 static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1509 	.name = "arm-smmu-v3-pmcg",
1510 	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1511 	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1512 	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1513 };
1514 
1515 static __init const struct iort_dev_config *iort_get_dev_cfg(
1516 			struct acpi_iort_node *node)
1517 {
1518 	switch (node->type) {
1519 	case ACPI_IORT_NODE_SMMU_V3:
1520 		return &iort_arm_smmu_v3_cfg;
1521 	case ACPI_IORT_NODE_SMMU:
1522 		return &iort_arm_smmu_cfg;
1523 	case ACPI_IORT_NODE_PMCG:
1524 		return &iort_arm_smmu_v3_pmcg_cfg;
1525 	default:
1526 		return NULL;
1527 	}
1528 }
1529 
1530 /**
1531  * iort_add_platform_device() - Allocate a platform device for IORT node
1532  * @node: Pointer to device ACPI IORT node
1533  * @ops: Pointer to IORT device config struct
1534  * Returns: 0 on success, <0 on failure
1535  */
1536 static int __init iort_add_platform_device(struct acpi_iort_node *node,
1537 					   const struct iort_dev_config *ops)
1538 {
1539 	struct fwnode_handle *fwnode;
1540 	struct platform_device *pdev;
1541 	struct resource *r;
1542 	int ret, count;
1543 
1544 	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1545 	if (!pdev)
1546 		return -ENOMEM;
1547 
1548 	if (ops->dev_set_proximity) {
1549 		ret = ops->dev_set_proximity(&pdev->dev, node);
1550 		if (ret)
1551 			goto dev_put;
1552 	}
1553 
1554 	count = ops->dev_count_resources(node);
1555 
1556 	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1557 	if (!r) {
1558 		ret = -ENOMEM;
1559 		goto dev_put;
1560 	}
1561 
1562 	ops->dev_init_resources(r, node);
1563 
1564 	ret = platform_device_add_resources(pdev, r, count);
1565 	/*
1566 	 * Resources are duplicated in platform_device_add_resources,
1567 	 * free their allocated memory
1568 	 */
1569 	kfree(r);
1570 
1571 	if (ret)
1572 		goto dev_put;
1573 
1574 	/*
1575 	 * Platform devices based on PMCG nodes use platform_data to
1576 	 * pass the hardware model info to the driver. For the others, add
1577 	 * a copy of the IORT node pointer to platform_data, to be used to
1578 	 * retrieve IORT data information.
1579 	 */
1580 	if (ops->dev_add_platdata)
1581 		ret = ops->dev_add_platdata(pdev);
1582 	else
1583 		ret = platform_device_add_data(pdev, &node, sizeof(node));
1584 
1585 	if (ret)
1586 		goto dev_put;
1587 
1588 	fwnode = iort_get_fwnode(node);
1589 
1590 	if (!fwnode) {
1591 		ret = -ENODEV;
1592 		goto dev_put;
1593 	}
1594 
1595 	pdev->dev.fwnode = fwnode;
1596 
1597 	if (ops->dev_dma_configure)
1598 		ops->dev_dma_configure(&pdev->dev, node);
1599 
1600 	iort_set_device_domain(&pdev->dev, node);
1601 
1602 	ret = platform_device_add(pdev);
1603 	if (ret)
1604 		goto dma_deconfigure;
1605 
1606 	return 0;
1607 
1608 dma_deconfigure:
1609 	arch_teardown_dma_ops(&pdev->dev);
1610 dev_put:
1611 	platform_device_put(pdev);
1612 
1613 	return ret;
1614 }
1615 
1616 #ifdef CONFIG_PCI
1617 static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
1618 {
1619 	static bool acs_enabled __initdata;
1620 
1621 	if (acs_enabled)
1622 		return;
1623 
1624 	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1625 		struct acpi_iort_node *parent;
1626 		struct acpi_iort_id_mapping *map;
1627 		int i;
1628 
1629 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1630 				   iort_node->mapping_offset);
1631 
1632 		for (i = 0; i < iort_node->mapping_count; i++, map++) {
1633 			if (!map->output_reference)
1634 				continue;
1635 
1636 			parent = ACPI_ADD_PTR(struct acpi_iort_node,
1637 					iort_table,  map->output_reference);
1638 			/*
1639 			 * If we detect a RC->SMMU mapping, make sure
1640 			 * we enable ACS on the system.
1641 			 */
1642 			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1643 				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1644 				pci_request_acs();
1645 				acs_enabled = true;
1646 				return;
1647 			}
1648 		}
1649 	}
1650 }
1651 #else
1652 static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
1653 #endif
1654 
1655 static void __init iort_init_platform_devices(void)
1656 {
1657 	struct acpi_iort_node *iort_node, *iort_end;
1658 	struct acpi_table_iort *iort;
1659 	struct fwnode_handle *fwnode;
1660 	int i, ret;
1661 	const struct iort_dev_config *ops;
1662 
1663 	/*
1664 	 * iort_table and iort both point to the start of the IORT table, but
1665 	 * have different struct types
1666 	 */
1667 	iort = (struct acpi_table_iort *)iort_table;
1668 
1669 	/* Get the first IORT node */
1670 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1671 				 iort->node_offset);
1672 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1673 				iort_table->length);
1674 
1675 	for (i = 0; i < iort->node_count; i++) {
1676 		if (iort_node >= iort_end) {
1677 			pr_err("iort node pointer overflows, bad table\n");
1678 			return;
1679 		}
1680 
1681 		iort_enable_acs(iort_node);
1682 
1683 		ops = iort_get_dev_cfg(iort_node);
1684 		if (ops) {
1685 			fwnode = acpi_alloc_fwnode_static();
1686 			if (!fwnode)
1687 				return;
1688 
1689 			iort_set_fwnode(iort_node, fwnode);
1690 
1691 			ret = iort_add_platform_device(iort_node, ops);
1692 			if (ret) {
1693 				iort_delete_fwnode(iort_node);
1694 				acpi_free_fwnode_static(fwnode);
1695 				return;
1696 			}
1697 		}
1698 
1699 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1700 					 iort_node->length);
1701 	}
1702 }
1703 
1704 void __init acpi_iort_init(void)
1705 {
1706 	acpi_status status;
1707 
1708 	/* iort_table will be used at runtime after the iort init,
1709 	 * so we don't need to call acpi_put_table() to release
1710 	 * the IORT table mapping.
1711 	 */
1712 	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
1713 	if (ACPI_FAILURE(status)) {
1714 		if (status != AE_NOT_FOUND) {
1715 			const char *msg = acpi_format_exception(status);
1716 
1717 			pr_err("Failed to get table, %s\n", msg);
1718 		}
1719 
1720 		return;
1721 	}
1722 
1723 	iort_init_platform_devices();
1724 }
1725