// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include "init.h"

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

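/*
 * Node type masks: lookups such as iort_node_map_id() walk the ID
 * mapping tree until they reach a node whose type bit is set in the
 * caller's mask, so a search can target either the MSI (ITS group)
 * or the IOMMU (SMMU/SMMUv3) layer.
 */
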
struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - Register a domain token, along with the
 * related ITS ID and base address, on a list from which it can be
 * retrieved later.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if the list element allocation fails
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
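
/*
 * The ITS translation ID is the key tying an IORT ITS group entry to the
 * irqdomain token registered via iort_register_domain_token() (by the
 * ITS irqchip driver), which enables the MSI domain lookups below.
 */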

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
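
/*
 * Worked example (illustrative values): a mapping with input_base 0x0,
 * id_count 0xffff and output_base 0x10000 translates rid_in 0x42 to
 * 0x10042. Because id_count encodes the number of IDs minus one, an
 * input of exactly input_base + id_count may really belong to the next
 * mapping, which is what the -EAGAIN overlap handling above deals with.
 */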

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			       map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
#endif
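
/*
 * The fallback definition above is presumably for builds against ACPICA
 * headers that do not yet define the DEVICEID_VALID flag, which was
 * added to the SMMUv3 node alongside IORT revision E.e.
 */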

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * The SMMUv3 dev ID mapping index was introduced with the
		 * revision 1 table; it is not available in revision 0.
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * Until IORT E.e (node rev. 5), the ID mapping index was
		 * defined to be valid unless all interrupts are GSIV-based.
		 */
		if (node->revision < 5) {
			if (smmu->event_gsiv && smmu->pri_gsiv &&
			    smmu->gerr_gsiv && smmu->sync_gsiv)
				return -EINVAL;
		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
			return -EINVAL;
		}

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
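
/*
 * Example walk (illustrative): for a named component behind an SMMU,
 * iort_node_map_id(nc_node, id, &out, IORT_MSI_TYPE) first maps the NC
 * input ID to an SMMU StreamID, then maps that StreamID to an ITS
 * DeviceID, stopping once the ITS group node matches the type mask.
 */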

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the initial dev id's parent is not the
	 * target type we want, map it again, covering use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * Scan iort_fwnode_list to see if the device is an iort
		 * platform device (such as an SMMU or PMCG); its iort node
		 * was already cached and associated with the fwnode when
		 * the iort platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * If not, then it should be a platform device defined in
		 * DSDT/SSDT (with a Named Component node in IORT).
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map an MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 when a device ID is found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static void iort_rmr_free(struct device *dev,
			  struct iommu_resv_region *region)
{
	struct iommu_iort_rmr_data *rmr_data;

	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
	kfree(rmr_data->sids);
	kfree(rmr_data);
}

static struct iommu_iort_rmr_data *iort_rmr_alloc(
					struct acpi_iort_rmr_desc *rmr_desc,
					int prot, enum iommu_resv_type type,
					u32 *sids, u32 num_sids)
{
	struct iommu_iort_rmr_data *rmr_data;
	struct iommu_resv_region *region;
	u32 *sids_copy;
	u64 addr = rmr_desc->base_address, size = rmr_desc->length;

	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
	if (!rmr_data)
		return NULL;

	/* Create a copy of SIDs array to associate with this rmr_data */
	sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
	if (!sids_copy) {
		kfree(rmr_data);
		return NULL;
	}
	rmr_data->sids = sids_copy;
	rmr_data->num_sids = num_sids;

	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
		/* PAGE align base addr and size */
		addr &= PAGE_MASK;
		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));

		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
		       rmr_desc->base_address,
		       rmr_desc->base_address + rmr_desc->length - 1,
		       addr, addr + size - 1);
	}

	region = &rmr_data->rr;
	INIT_LIST_HEAD(&region->list);
	region->start = addr;
	region->length = size;
	region->prot = prot;
	region->type = type;
	region->free = iort_rmr_free;

	return rmr_data;
}
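
/*
 * Note: the alignment fixup in iort_rmr_alloc() rounds the base down and
 * the size up to PAGE_SIZE rather than the 64K the spec mandates, so on
 * 4K-page kernels the reserved region is page-aligned only; it still
 * covers the entire range the (buggy) descriptor described.
 */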

static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
					u32 count)
{
	int i, j;

	for (i = 0; i < count; i++) {
		u64 end, start = desc[i].base_address, length = desc[i].length;

		if (!length) {
			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
			       start);
			continue;
		}

		end = start + length - 1;

		/* Check for address overlap */
		for (j = i + 1; j < count; j++) {
			u64 e_start = desc[j].base_address;
			u64 e_end = e_start + desc[j].length - 1;

			if (start <= e_end && end >= e_start)
				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
				       start, end);
		}
	}
}

/*
 * Please note, we will keep the already allocated RMR reserve
 * regions in case of a memory allocation failure.
 */
static void iort_get_rmrs(struct acpi_iort_node *node,
			  struct acpi_iort_node *smmu,
			  u32 *sids, u32 num_sids,
			  struct list_head *head)
{
	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
	struct acpi_iort_rmr_desc *rmr_desc;
	int i;

	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
				rmr->rmr_offset);

	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);

	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
		struct iommu_iort_rmr_data *rmr_data;
		enum iommu_resv_type type;
		int prot = IOMMU_READ | IOMMU_WRITE;

		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
			type = IOMMU_RESV_DIRECT_RELAXABLE;
		else
			type = IOMMU_RESV_DIRECT;

		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
			prot |= IOMMU_PRIV;

		/* Attributes 0x00 - 0x03 represent device memory */
		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
			prot |= IOMMU_MMIO;
		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
			prot |= IOMMU_CACHE;

		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
					  sids, num_sids);
		if (!rmr_data)
			return;

		list_add_tail(&rmr_data->rr.list, head);
	}
}

static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
				u32 new_count)
{
	u32 *new_sids;
	u32 total_count = count + new_count;
	int i;

	new_sids = krealloc_array(sids, count + new_count,
				  sizeof(*new_sids), GFP_KERNEL);
	if (!new_sids)
		return NULL;

	for (i = count; i < total_count; i++)
		new_sids[i] = id_start++;

	return new_sids;
}
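
/*
 * iort_rmr_alloc_sids() grows the SID array with the contiguous range
 * [id_start, id_start + new_count). As with krealloc(), a NULL return
 * leaves the original array allocated, so the caller owns it either way.
 */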

static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
			     u32 id_count)
{
	int i;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	/*
	 * Make sure the kernel has preserved the boot firmware PCIe
	 * configuration. This is required to ensure that the RMR PCIe
	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
	 */
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

		if (!host->preserve_config)
			return false;
	}

	for (i = 0; i < fwspec->num_ids; i++) {
		if (fwspec->ids[i] >= id_start &&
		    fwspec->ids[i] <= id_start + id_count)
			return true;
	}

	return false;
}

static void iort_node_get_rmr_info(struct acpi_iort_node *node,
				   struct acpi_iort_node *iommu,
				   struct device *dev, struct list_head *head)
{
	struct acpi_iort_node *smmu = NULL;
	struct acpi_iort_rmr *rmr;
	struct acpi_iort_id_mapping *map;
	u32 *sids = NULL;
	u32 num_sids = 0;
	int i;

	if (!node->mapping_offset || !node->mapping_count) {
		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
		       node);
		return;
	}

	rmr = (struct acpi_iort_rmr *)node->node_data;
	if (!rmr->rmr_offset || !rmr->rmr_count)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset);

	/*
	 * Go through the ID mappings and see if we have a match for the SMMU
	 * and dev (if dev is not NULL). If found, get the sids for the Node.
	 * Please note, id_count is equal to the number of IDs in the
	 * range minus one.
	 */
	for (i = 0; i < node->mapping_count; i++, map++) {
		struct acpi_iort_node *parent;

		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				      map->output_reference);
		if (parent != iommu)
			continue;

		/* If dev is valid, check RMR node corresponds to the dev SID */
		if (dev && !iort_rmr_has_dev(dev, map->output_base,
					     map->id_count))
			continue;

		/* Retrieve SIDs associated with the Node. */
		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
					   map->id_count + 1);
		if (!sids)
			return;

		num_sids += map->id_count + 1;
	}

	if (!sids)
		return;

	iort_get_rmrs(node, smmu, sids, num_sids, head);
	kfree(sids);
}
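
/*
 * Note: the smmu argument passed to iort_get_rmrs() above is the local
 * NULL placeholder; in this revision iort_get_rmrs() does not consume it,
 * as the SMMU match is already done here against map->output_reference.
 */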

static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
			   struct list_head *head)
{
	struct acpi_table_iort *iort;
	struct acpi_iort_node *iort_node, *iort_end;
	int i;

	/* Only supports ARM DEN 0049E.d onwards */
	if (iort_table->revision < 5)
		return;

	iort = (struct acpi_table_iort *)iort_table;

	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return;

		if (iort_node->type == ACPI_IORT_NODE_RMR)
			iort_node_get_rmr_info(iort_node, iommu, dev, head);

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

/*
 * Populate the RMR list associated with a given IOMMU and dev (if provided).
 * If dev is NULL, the function populates all the RMRs associated with the
 * given IOMMU.
 */
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
					    struct device *dev,
					    struct list_head *head)
{
	struct acpi_iort_node *iommu;

	iommu = iort_get_iort_node(iommu_fwnode);
	if (!iommu)
		return;

	iort_find_rmrs(iommu, dev, head);
}

static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

/*
 * Retrieve platform specific HW MSI reserve regions.
 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 * associated with the device are the HW MSI reserved regions.
 */
static void iort_iommu_msi_get_resv_regions(struct device *dev,
					    struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI,
							 GFP_KERNEL);
			if (region)
				list_add_tail(&region->list, head);
		}
	}
}

/**
 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 */
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iort_iommu_msi_get_resv_regions(dev, head);
	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
}

/**
 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
 *                     associated StreamIDs information.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head)
{
	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
}
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);

/**
 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		entry->free(NULL, entry);
}
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}
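
/*
 * The distinction matters in iort_iommu_xlate() below: a missing ops
 * pointer becomes -EPROBE_DEFER when the matching SMMU driver is built
 * in (it may simply not have probed yet) and -ENODEV when it is not.
 */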

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built into the kernel;
	 * depending on whether the SMMU drivers are built-in,
	 * defer the IOMMU configuration or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct property_entry props[3] = {};
	struct acpi_iort_named_component *nc;

	nc = (struct acpi_iort_named_component *)node->node_data;
	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
						nc->node_flags));
	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

	if (device_create_managed_software_node(dev, props, NULL))
		dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_iort_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	return err;
}

#else
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	if (!ncomp->memory_address_limit) {
		pr_warn(FW_BUG "Named component missing memory address limit\n");
		return -EINVAL;
	}

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL<<ncomp->memory_address_limit;

	return 0;
}
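
/*
 * Example (illustrative): memory_address_limit == 32 yields a DMA size
 * of 1ULL << 32 (4 GiB); limits of 64 or more saturate to U64_MAX to
 * avoid an undefined 64-bit shift.
 */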

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	if (!rc->memory_address_limit) {
		pr_warn(FW_BUG "Root complex missing memory address limit\n");
		return -EINVAL;
	}

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL<<rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @size: DMA range size result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
	if (dev_is_pci(dev))
		return rc_dma_get_range(dev, size);
	else
		return nc_dma_get_range(dev, size);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
								      name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation does not support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
				     &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}
1665  
arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node * node)1666  static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1667  {
1668  	struct acpi_iort_pmcg *pmcg;
1669  
1670  	/* Retrieve PMCG specific data */
1671  	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1672  
1673  	/*
1674  	 * There are always 2 memory resources.
1675  	 * If the overflow_gsiv is present then add that for a total of 3.
1676  	 */
1677  	return pmcg->overflow_gsiv ? 3 : 2;
1678  }
1679  
arm_smmu_v3_pmcg_init_resources(struct resource * res,struct acpi_iort_node * node)1680  static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1681  						   struct acpi_iort_node *node)
1682  {
1683  	struct acpi_iort_pmcg *pmcg;
1684  
1685  	/* Retrieve PMCG specific data */
1686  	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1687  
1688  	res[0].start = pmcg->page0_base_address;
1689  	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1690  	res[0].flags = IORESOURCE_MEM;
1691  	/*
1692  	 * The initial version in DEN0049C lacked a way to describe register
1693  	 * page 1, which makes it broken for most PMCG implementations; in
1694  	 * that case, just let the driver fail gracefully if it expects to
1695  	 * find a second memory resource.
1696  	 */
1697  	if (node->revision > 0) {
1698  		res[1].start = pmcg->page1_base_address;
1699  		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1700  		res[1].flags = IORESOURCE_MEM;
1701  	}
1702  
1703  	if (pmcg->overflow_gsiv)
1704  		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1705  				       ACPI_EDGE_SENSITIVE, &res[2]);
1706  }
1707  
1708  static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1709  	/* HiSilicon Hip08 Platform */
1710  	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1711  	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
1712  	/* HiSilicon Hip09 Platform */
1713  	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1714  	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1715  	{"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1716  	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1717  	/* HiSilicon Hip10/11 platforms use the same SMMU IP as Hip09 */
1718  	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1719  	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1720  	{"HISI  ", "HIP10C  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1721  	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1722  	{"HISI  ", "HIP11   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1723  	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1724  	{ }
1725  };
1726  
1727  static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1728  {
1729  	u32 model;
1730  	int idx;
1731  
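      	/*
      	 * acpi_match_platform_list() returns the index of the matching
      	 * quirk entry, or a negative value when this platform needs no
      	 * model-specific handling.
      	 */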
1732  	idx = acpi_match_platform_list(pmcg_plat_info);
1733  	if (idx >= 0)
1734  		model = pmcg_plat_info[idx].data;
1735  	else
1736  		model = IORT_SMMU_V3_PMCG_GENERIC;
1737  
1738  	return platform_device_add_data(pdev, &model, sizeof(model));
1739  }
1740  
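      /*
       * Per-node-type hooks used when creating a platform device for an
       * IORT node: counting and filling resources, DMA and proximity
       * setup, and attaching platform_data before the device is added.
       */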
1741  struct iort_dev_config {
1742  	const char *name;
1743  	int (*dev_init)(struct acpi_iort_node *node);
1744  	void (*dev_dma_configure)(struct device *dev,
1745  				  struct acpi_iort_node *node);
1746  	int (*dev_count_resources)(struct acpi_iort_node *node);
1747  	void (*dev_init_resources)(struct resource *res,
1748  				     struct acpi_iort_node *node);
1749  	int (*dev_set_proximity)(struct device *dev,
1750  				    struct acpi_iort_node *node);
1751  	int (*dev_add_platdata)(struct platform_device *pdev);
1752  };
1753  
1754  static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1755  	.name = "arm-smmu-v3",
1756  	.dev_dma_configure = arm_smmu_v3_dma_configure,
1757  	.dev_count_resources = arm_smmu_v3_count_resources,
1758  	.dev_init_resources = arm_smmu_v3_init_resources,
1759  	.dev_set_proximity = arm_smmu_v3_set_proximity,
1760  };
1761  
1762  static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1763  	.name = "arm-smmu",
1764  	.dev_dma_configure = arm_smmu_dma_configure,
1765  	.dev_count_resources = arm_smmu_count_resources,
1766  	.dev_init_resources = arm_smmu_init_resources,
1767  };
1768  
1769  static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1770  	.name = "arm-smmu-v3-pmcg",
1771  	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1772  	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1773  	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1774  };
1775  
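      /*
       * Only SMMUv1/v2, SMMUv3 and PMCG nodes are backed by platform
       * devices; every other node type gets NULL and no device.
       */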
1776  static __init const struct iort_dev_config *iort_get_dev_cfg(
1777  			struct acpi_iort_node *node)
1778  {
1779  	switch (node->type) {
1780  	case ACPI_IORT_NODE_SMMU_V3:
1781  		return &iort_arm_smmu_v3_cfg;
1782  	case ACPI_IORT_NODE_SMMU:
1783  		return &iort_arm_smmu_cfg;
1784  	case ACPI_IORT_NODE_PMCG:
1785  		return &iort_arm_smmu_v3_pmcg_cfg;
1786  	default:
1787  		return NULL;
1788  	}
1789  }
1790  
1791  /**
1792   * iort_add_platform_device() - Allocate a platform device for IORT node
1793   * @node: Pointer to device ACPI IORT node
1794   * @ops: Pointer to IORT device config struct
1795   *
1796   * Returns: 0 on success, <0 failure
1797   */
1798  static int __init iort_add_platform_device(struct acpi_iort_node *node,
1799  					   const struct iort_dev_config *ops)
1800  {
1801  	struct fwnode_handle *fwnode;
1802  	struct platform_device *pdev;
1803  	struct resource *r;
1804  	int ret, count;
1805  
1806  	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1807  	if (!pdev)
1808  		return -ENOMEM;
1809  
1810  	if (ops->dev_set_proximity) {
1811  		ret = ops->dev_set_proximity(&pdev->dev, node);
1812  		if (ret)
1813  			goto dev_put;
1814  	}
1815  
1816  	count = ops->dev_count_resources(node);
1817  
1818  	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1819  	if (!r) {
1820  		ret = -ENOMEM;
1821  		goto dev_put;
1822  	}
1823  
1824  	ops->dev_init_resources(r, node);
1825  
1826  	ret = platform_device_add_resources(pdev, r, count);
1827  	/*
1828  	 * platform_device_add_resources() duplicates the resources, so
1829  	 * free the memory we allocated for them here.
1830  	 */
1831  	kfree(r);
1832  
1833  	if (ret)
1834  		goto dev_put;
1835  
1836  	/*
1837  	 * Platform devices based on PMCG nodes use platform_data to
1838  	 * pass the hardware model info to the driver. For others, add
1839  	 * a copy of the IORT node pointer to platform_data, from which
1840  	 * the driver can retrieve the IORT data it needs.
1841  	 */
1842  	if (ops->dev_add_platdata)
1843  		ret = ops->dev_add_platdata(pdev);
1844  	else
1845  		ret = platform_device_add_data(pdev, &node, sizeof(node));
1846  
1847  	if (ret)
1848  		goto dev_put;
1849  
1850  	fwnode = iort_get_fwnode(node);
1851  
1852  	if (!fwnode) {
1853  		ret = -ENODEV;
1854  		goto dev_put;
1855  	}
1856  
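      	/*
      	 * Attach the fwnode registered via iort_set_fwnode() so the
      	 * device can later be matched back to its IORT node.
      	 */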
1857  	pdev->dev.fwnode = fwnode;
1858  
1859  	if (ops->dev_dma_configure)
1860  		ops->dev_dma_configure(&pdev->dev, node);
1861  
1862  	iort_set_device_domain(&pdev->dev, node);
1863  
1864  	ret = platform_device_add(pdev);
1865  	if (ret)
1866  		goto dma_deconfigure;
1867  
1868  	return 0;
1869  
1870  dma_deconfigure:
1871  	arch_teardown_dma_ops(&pdev->dev);
1872  dev_put:
1873  	platform_device_put(pdev);
1874  
1875  	return ret;
1876  }
1877  
1878  #ifdef CONFIG_PCI
1879  static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
1880  {
1881  	static bool acs_enabled __initdata;
1882  
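      	/* ACS only needs to be requested once, system-wide */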
1883  	if (acs_enabled)
1884  		return;
1885  
1886  	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1887  		struct acpi_iort_node *parent;
1888  		struct acpi_iort_id_mapping *map;
1889  		int i;
1890  
1891  		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1892  				   iort_node->mapping_offset);
1893  
1894  		for (i = 0; i < iort_node->mapping_count; i++, map++) {
1895  			if (!map->output_reference)
1896  				continue;
1897  
1898  			parent = ACPI_ADD_PTR(struct acpi_iort_node,
1899  					iort_table,  map->output_reference);
1900  			/*
1901  			 * If we detect an RC->SMMU mapping, make sure
1902  			 * we enable ACS on the system.
1903  			 */
1904  			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1905  				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1906  				pci_request_acs();
1907  				acs_enabled = true;
1908  				return;
1909  			}
1910  		}
1911  	}
1912  }
1913  #else
1914  static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
1915  #endif
1916  
1917  static void __init iort_init_platform_devices(void)
1918  {
1919  	struct acpi_iort_node *iort_node, *iort_end;
1920  	struct acpi_table_iort *iort;
1921  	struct fwnode_handle *fwnode;
1922  	int i, ret;
1923  	const struct iort_dev_config *ops;
1924  
1925  	/*
1926  	 * iort_table and iort both point to the start of the IORT table,
1927  	 * but through different struct types.
1928  	 */
1929  	iort = (struct acpi_table_iort *)iort_table;
1930  
1931  	/* Get the first IORT node */
1932  	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1933  				 iort->node_offset);
1934  	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1935  				iort_table->length);
1936  
1937  	for (i = 0; i < iort->node_count; i++) {
1938  		if (iort_node >= iort_end) {
1939  			pr_err("iort node pointer overflows, bad table\n");
1940  			return;
1941  		}
1942  
1943  		iort_enable_acs(iort_node);
1944  
1945  		ops = iort_get_dev_cfg(iort_node);
1946  		if (ops) {
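      			/*
      			 * An IORT node has no ACPI namespace object of its
      			 * own, so allocate a "static" fwnode to stand in
      			 * for it when drivers look the device up later.
      			 */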
1947  			fwnode = acpi_alloc_fwnode_static();
1948  			if (!fwnode)
1949  				return;
1950  
1951  			iort_set_fwnode(iort_node, fwnode);
1952  
1953  			ret = iort_add_platform_device(iort_node, ops);
1954  			if (ret) {
1955  				iort_delete_fwnode(iort_node);
1956  				acpi_free_fwnode_static(fwnode);
1957  				return;
1958  			}
1959  		}
1960  
1961  		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1962  					 iort_node->length);
1963  	}
1964  }
1965  
1966  void __init acpi_iort_init(void)
1967  {
1968  	acpi_status status;
1969  
1970  	/* iort_table will be used at runtime after IORT init,
1971  	 * so there is no need to call acpi_put_table() to release
1972  	 * the IORT table mapping.
1973  	 */
1974  	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
1975  	if (ACPI_FAILURE(status)) {
1976  		if (status != AE_NOT_FOUND) {
1977  			const char *msg = acpi_format_exception(status);
1978  
1979  			pr_err("Failed to get table, %s\n", msg);
1980  		}
1981  
1982  		return;
1983  	}
1984  
1985  	iort_init_platform_devices();
1986  }
1987  
1988  #ifdef CONFIG_ZONE_DMA
1989  /*
1990   * Extract the highest CPU physical address accessible to all DMA masters in
1991   * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
1992   */
1993  phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
1994  {
1995  	phys_addr_t limit = PHYS_ADDR_MAX;
1996  	struct acpi_iort_node *node, *end;
1997  	struct acpi_table_iort *iort;
1998  	acpi_status status;
1999  	int i;
2000  
2001  	if (acpi_disabled)
2002  		return limit;
2003  
2004  	status = acpi_get_table(ACPI_SIG_IORT, 0,
2005  				(struct acpi_table_header **)&iort);
2006  	if (ACPI_FAILURE(status))
2007  		return limit;
2008  
2009  	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
2010  	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
2011  
2012  	for (i = 0; i < iort->node_count; i++) {
2013  		if (node >= end)
2014  			break;
2015  
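      		/*
      		 * A zero memory_address_limit yields DMA_BIT_MASK(0) == 0,
      		 * which min_not_zero() skips, so nodes that specify no
      		 * limit do not constrain the result.
      		 */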
2016  		switch (node->type) {
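      			/*
      			 * These declarations sit before the first case
      			 * label so they are in scope for every case.
      			 */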
2017  			struct acpi_iort_named_component *ncomp;
2018  			struct acpi_iort_root_complex *rc;
2019  			phys_addr_t local_limit;
2020  
2021  		case ACPI_IORT_NODE_NAMED_COMPONENT:
2022  			ncomp = (struct acpi_iort_named_component *)node->node_data;
2023  			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
2024  			limit = min_not_zero(limit, local_limit);
2025  			break;
2026  
2027  		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
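      			/*
      			 * Revision 0 root complex nodes predate the
      			 * memory_address_limit field, so skip them.
      			 */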
2028  			if (node->revision < 1)
2029  				break;
2030  
2031  			rc = (struct acpi_iort_root_complex *)node->node_data;
2032  			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
2033  			limit = min_not_zero(limit, local_limit);
2034  			break;
2035  		}
2036  		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
2037  	}
2038  	acpi_put_table(&iort->header);
2039  	return limit;
2040  }
2041  #endif
2042