xref: /openbmc/linux/include/linux/iommu.h (revision 0cd08b10)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <joerg.roedel@amd.com>
5  */
6 
7 #ifndef __LINUX_IOMMU_H
8 #define __LINUX_IOMMU_H
9 
10 #include <linux/scatterlist.h>
11 #include <linux/device.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/of.h>
16 #include <linux/ioasid.h>
17 #include <uapi/linux/iommu.h>
18 
19 #define IOMMU_READ	(1 << 0)
20 #define IOMMU_WRITE	(1 << 1)
21 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
22 #define IOMMU_NOEXEC	(1 << 3)
23 #define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
24 /*
25  * Where the bus hardware includes a privilege level as part of its access type
26  * markings, and certain devices are capable of issuing transactions marked as
27  * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
28  * given permission flags only apply to accesses at the higher privilege level,
29  * and that unprivileged transactions should have as little access as possible.
30  * This would usually imply the same permissions as kernel mappings on the CPU,
31  * if the IOMMU page table format is equivalent.
32  */
33 #define IOMMU_PRIV	(1 << 5)
34 /*
35  * Non-coherent masters can use this page protection flag to set cacheable
36  * memory attributes for only a transparent outer level of cache, also known as
37  * the last-level or system cache.
38  */
39 #define IOMMU_SYS_CACHE_ONLY	(1 << 6)
40 
41 struct iommu_ops;
42 struct iommu_group;
43 struct bus_type;
44 struct device;
45 struct iommu_domain;
46 struct notifier_block;
47 struct iommu_sva;
48 struct iommu_fault_event;
49 
50 /* iommu fault flags */
51 #define IOMMU_FAULT_READ	0x0
52 #define IOMMU_FAULT_WRITE	0x1
53 
54 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
55 			struct device *, unsigned long, int, void *);
56 typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
57 
/* Addressable IOVA window of an IOMMU domain (see iommu_domain.geometry). */
58 struct iommu_domain_geometry {
59 	dma_addr_t aperture_start; /* First address that can be mapped    */
60 	dma_addr_t aperture_end;   /* Last address that can be mapped     */
61 	bool force_aperture;       /* DMA only allowed in mappable range? */
62 };
63 
64 /* Domain feature flags */
65 #define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
66 #define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
67 					      implementation              */
68 #define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
69 
70 /*
71  * These are the possible domain-types
72  *
73  *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
74  *				  devices
75  *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
76  *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
77  *				  for VMs
78  *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
79  *				  This flag allows IOMMU drivers to implement
80  *				  certain optimizations for these domains
81  */
82 #define IOMMU_DOMAIN_BLOCKED	(0U)
83 #define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
84 #define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
85 #define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
86 				 __IOMMU_DOMAIN_DMA_API)
87 
/* Core representation of one IOMMU translation domain. */
88 struct iommu_domain {
89 	unsigned type;	/* IOMMU_DOMAIN_* value */
90 	const struct iommu_ops *ops;	/* driver callbacks for this domain */
91 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
92 	iommu_fault_handler_t handler;	/* installed via iommu_set_fault_handler() */
93 	void *handler_token;	/* opaque token passed back to @handler */
94 	struct iommu_domain_geometry geometry;	/* mappable IOVA range */
95 	void *iova_cookie;	/* opaque; NOTE(review): presumably DMA-API IOVA state -- confirm */
96 };
97 
98 enum iommu_cap {
99 	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
100 					   transactions */
101 	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
102 	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
103 };
104 
105 /*
106  * The following constraints are specific to FSL_PAMUV1:
107  *  -aperture must be power of 2, and naturally aligned
108  *  -number of windows must be power of 2, and address space size
109  *   of each window is determined by aperture size / # of windows
110  *  -the actual size of the mapped region of a window must be power
111  *   of 2 starting with 4KB and physical address must be naturally
112  *   aligned.
113  * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
114  * The caller can invoke iommu_domain_get_attr to check if the underlying
115  * iommu implementation supports these constraints.
116  */
117 
/* Attributes queried/changed via iommu_domain_get_attr()/iommu_domain_set_attr() */
118 enum iommu_attr {
119 	DOMAIN_ATTR_GEOMETRY,	/* data: struct iommu_domain_geometry */
120 	DOMAIN_ATTR_PAGING,	/* whether the domain supports iommu_map/unmap */
121 	DOMAIN_ATTR_WINDOWS,	/* DMA window count (see domain_window_enable) */
122 	DOMAIN_ATTR_FSL_PAMU_STASH,	/* FSL PAMU specific */
123 	DOMAIN_ATTR_FSL_PAMU_ENABLE,	/* FSL PAMU specific */
124 	DOMAIN_ATTR_FSL_PAMUV1,	/* constraints described in the comment above */
125 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
126 	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
127 	DOMAIN_ATTR_MAX,	/* number of attributes -- keep last */
128 };
129 
130 /* These are the possible reserved region types */
131 enum iommu_resv_type {
132 	/* Memory regions which must be mapped 1:1 at all times */
133 	IOMMU_RESV_DIRECT,
134 	/*
135 	 * Memory regions which are advertised to be 1:1 but are
136 	 * commonly considered relaxable in some conditions,
137 	 * for instance in device assignment use case (USB, Graphics)
138 	 */
139 	IOMMU_RESV_DIRECT_RELAXABLE,
140 	/* Arbitrary "never map this or give it to a device" address ranges */
141 	IOMMU_RESV_RESERVED,
142 	/* Hardware MSI region (untranslated) */
143 	IOMMU_RESV_MSI,
144 	/* Software-managed MSI translation window */
145 	IOMMU_RESV_SW_MSI,
146 };
147 
148 /**
149  * struct iommu_resv_region - descriptor for a reserved memory region
150  * @list: Linked list pointers
151  * @start: System physical start address of the region
152  * @length: Length of the region in bytes
153  * @prot: IOMMU Protection flags (READ/WRITE/...)
154  * @type: Type of the reserved region
155  */
156 struct iommu_resv_region {
157 	struct list_head	list;
158 	phys_addr_t		start;
159 	size_t			length;
160 	int			prot;
161 	enum iommu_resv_type	type;
162 };
163 
164 /* Per device IOMMU features */
165 enum iommu_dev_features {
166 	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
167 	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
168 };
169 
170 #define IOMMU_PASID_INVALID	(-1U)
171 
172 #ifdef CONFIG_IOMMU_API
173 
174 /**
175  * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
176  *
177  * @start: IOVA representing the start of the range to be flushed
178  * @end: IOVA representing the end of the range to be flushed (exclusive)
179  * @pgsize: The interval at which to perform the flush
180  *
181  * This structure is intended to be updated by multiple calls to the
182  * ->unmap() function in struct iommu_ops before eventually being passed
183  * into ->iotlb_sync().
184  */
185 struct iommu_iotlb_gather {
186 	unsigned long		start;
187 	unsigned long		end;
188 	size_t			pgsize;
189 };
190 
191 /**
192  * struct iommu_ops - iommu ops and capabilities
193  * @capable: check capability
194  * @domain_alloc: allocate iommu domain
195  * @domain_free: free iommu domain
196  * @attach_dev: attach device to an iommu domain
197  * @detach_dev: detach device from an iommu domain
198  * @map: map a physically contiguous memory region to an iommu domain
199  * @unmap: unmap a physically contiguous memory region from an iommu domain
200  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
201  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
202  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
203  *            queue
204  * @iova_to_phys: translate iova to physical address
205  * @probe_device: Add device to iommu driver handling
206  * @release_device: Remove device from iommu driver handling
207  * @probe_finalize: Do final setup work after the device is added to an IOMMU
208  *                  group and attached to the groups domain
209  * @device_group: find iommu group for a particular device
210  * @domain_get_attr: Query domain attributes
211  * @domain_set_attr: Change domain attributes
212  * @get_resv_regions: Request list of reserved regions for a device
213  * @put_resv_regions: Free list of reserved regions for a device
214  * @apply_resv_region: Temporary helper call-back for iova reserved ranges
215  * @domain_window_enable: Configure and enable a particular window for a domain
216  * @domain_window_disable: Disable a particular window for a domain
217  * @of_xlate: add OF master IDs to iommu grouping
218  * @is_attach_deferred: Check if domain attach should be deferred from iommu
219  *                      driver init to device driver init (default no)
220  * @dev_has/enable/disable_feat: per device entries to check/enable/disable
221  *                               iommu specific features.
222  * @dev_feat_enabled: check enabled feature
223  * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
224  * @aux_get_pasid: get the pasid given an aux-domain
225  * @sva_bind: Bind process address space to device
226  * @sva_unbind: Unbind process address space from device
227  * @sva_get_pasid: Get PASID associated to a SVA handle
228  * @page_response: handle page request response
229  * @cache_invalidate: invalidate translation caches
230  * @sva_bind_gpasid: bind guest pasid and mm
231  * @sva_unbind_gpasid: unbind guest pasid and mm
232  * @def_domain_type: device default domain type, return value:
233  *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
234  *		- IOMMU_DOMAIN_DMA: must use a dma domain
235  *		- 0: use the default setting
236  * @pgsize_bitmap: bitmap of all possible supported page sizes
237  * @owner: Driver module providing these ops
238  */
/* Member semantics are described in the kernel-doc comment above. */
239 struct iommu_ops {
240 	bool (*capable)(enum iommu_cap);
241 
242 	/* Domain allocation and freeing by the iommu driver */
243 	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
244 	void (*domain_free)(struct iommu_domain *);
245 
246 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
247 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
248 	int (*map)(struct iommu_domain *domain, unsigned long iova,
249 		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
250 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
251 		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
252 	void (*flush_iotlb_all)(struct iommu_domain *domain);
253 	void (*iotlb_sync_map)(struct iommu_domain *domain);
254 	void (*iotlb_sync)(struct iommu_domain *domain,
255 			   struct iommu_iotlb_gather *iotlb_gather);
256 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
257 	struct iommu_device *(*probe_device)(struct device *dev);
258 	void (*release_device)(struct device *dev);
259 	void (*probe_finalize)(struct device *dev);
260 	struct iommu_group *(*device_group)(struct device *dev);
261 	int (*domain_get_attr)(struct iommu_domain *domain,
262 			       enum iommu_attr attr, void *data);
263 	int (*domain_set_attr)(struct iommu_domain *domain,
264 			       enum iommu_attr attr, void *data);
265 
266 	/* Request/Free a list of reserved regions for a device */
267 	void (*get_resv_regions)(struct device *dev, struct list_head *list);
268 	void (*put_resv_regions)(struct device *dev, struct list_head *list);
269 	void (*apply_resv_region)(struct device *dev,
270 				  struct iommu_domain *domain,
271 				  struct iommu_resv_region *region);
272 
273 	/* Window handling functions */
274 	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
275 				    phys_addr_t paddr, u64 size, int prot);
276 	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
277 
278 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
279 	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
280 
281 	/* Per device IOMMU features */
282 	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
283 	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
284 	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
285 	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
286 
287 	/* Aux-domain specific attach/detach entries */
288 	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
289 	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
290 	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
291 
	/* SVA: bind/unbind a process address space to a device */
292 	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
293 				      void *drvdata);
294 	void (*sva_unbind)(struct iommu_sva *handle);
295 	int (*sva_get_pasid)(struct iommu_sva *handle);
296 
	/* Recoverable-fault and guest-PASID handling */
297 	int (*page_response)(struct device *dev,
298 			     struct iommu_fault_event *evt,
299 			     struct iommu_page_response *msg);
300 	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
301 				struct iommu_cache_invalidate_info *inv_info);
302 	int (*sva_bind_gpasid)(struct iommu_domain *domain,
303 			struct device *dev, struct iommu_gpasid_bind_data *data);
304 
305 	int (*sva_unbind_gpasid)(struct device *dev, int pasid);
306 
307 	int (*def_domain_type)(struct device *dev);
308 
309 	unsigned long pgsize_bitmap;
310 	struct module *owner;
311 };
312 
313 /**
314  * struct iommu_device - IOMMU core representation of one IOMMU hardware
315  *			 instance
316  * @list: Used by the iommu-core to keep a list of registered iommus
317  * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware node handle for this IOMMU (set via iommu_device_set_fwnode())
318  * @dev: struct device for sysfs handling
319  */
320 struct iommu_device {
321 	struct list_head list;
322 	const struct iommu_ops *ops;
323 	struct fwnode_handle *fwnode;
324 	struct device *dev;
325 };
326 
327 /**
328  * struct iommu_fault_event - Generic fault event
329  *
330  * Can represent recoverable faults such as a page requests or
331  * unrecoverable faults such as DMA or IRQ remapping faults.
332  *
333  * @fault: fault descriptor
334  * @list: pending fault event list, used for tracking responses
335  */
336 struct iommu_fault_event {
337 	struct iommu_fault fault;
338 	struct list_head list;
339 };
340 
341 /**
342  * struct iommu_fault_param - per-device IOMMU fault data
343  * @handler: Callback function to handle IOMMU faults at device level
344  * @data: handler private data
345  * @faults: holds the pending faults which needs response
346  * @lock: protect pending faults list
347  */
348 struct iommu_fault_param {
349 	iommu_dev_fault_handler_t handler;
350 	void *data;
351 	struct list_head faults;
352 	struct mutex lock;
353 };
354 
355 /**
356  * struct dev_iommu - Collection of per-device IOMMU data
357  *
 * @lock: protects the fields of this structure
358  * @fault_param: IOMMU detected device fault reporting data
359  * @fwspec:	 IOMMU fwspec data
360  * @iommu_dev:	 IOMMU device this device is linked to
361  * @priv:	 IOMMU Driver private data
362  *
363  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
364  *	struct iommu_group	*iommu_group;
365  */
366 struct dev_iommu {
367 	struct mutex lock;
368 	struct iommu_fault_param	*fault_param;
369 	struct iommu_fwspec		*fwspec;
370 	struct iommu_device		*iommu_dev;
371 	void				*priv;
372 };
373 
374 int  iommu_device_register(struct iommu_device *iommu);
375 void iommu_device_unregister(struct iommu_device *iommu);
376 int  iommu_device_sysfs_add(struct iommu_device *iommu,
377 			    struct device *parent,
378 			    const struct attribute_group **groups,
379 			    const char *fmt, ...) __printf(4, 5);
380 void iommu_device_sysfs_remove(struct iommu_device *iommu);
381 int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
382 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
383 
/*
 * Low-level helper: installs @ops without recording an owning module.
 * Drivers should use the iommu_device_set_ops() macro below so that
 * ops->owner is filled in for them.
 */
384 static inline void __iommu_device_set_ops(struct iommu_device *iommu,
385 					  const struct iommu_ops *ops)
386 {
387 	iommu->ops = ops;
388 }
389 
/*
 * Install @ops on @iommu and record the calling module as the owner.
 * This must stay a macro so that THIS_MODULE expands in the caller's
 * translation unit, not in this header.
 */
390 #define iommu_device_set_ops(iommu, ops)				\
391 do {									\
392 	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
393 	__ops->owner = THIS_MODULE;					\
394 	__iommu_device_set_ops(iommu, __ops);				\
395 } while (0)
396 
/* Associate the IOMMU hardware's firmware node with its core handle. */
397 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
398 					   struct fwnode_handle *fwnode)
399 {
400 	iommu->fwnode = fwnode;
401 }
402 
/*
 * Reverse lookup from a struct device to its struct iommu_device.
 * NOTE(review): @dev is presumably the sysfs device registered via
 * iommu_device_sysfs_add(), whose drvdata holds the iommu_device -- confirm.
 */
403 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
404 {
405 	return (struct iommu_device *)dev_get_drvdata(dev);
406 }
407 
/*
 * Reset @gather to the "empty" state: start = ULONG_MAX while the
 * designated initializer zeroes end and pgsize, so the first page added
 * by iommu_iotlb_gather_add_page() initialises both range bounds.
 */
408 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
409 {
410 	*gather = (struct iommu_iotlb_gather) {
411 		.start	= ULONG_MAX,
412 	};
413 }
414 
415 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
416 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
417 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
418 #define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
419 #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
420 #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */
421 
422 extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
423 extern int bus_iommu_probe(struct bus_type *bus);
424 extern bool iommu_present(struct bus_type *bus);
425 extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
426 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
427 extern struct iommu_group *iommu_group_get_by_id(int id);
428 extern void iommu_domain_free(struct iommu_domain *domain);
429 extern int iommu_attach_device(struct iommu_domain *domain,
430 			       struct device *dev);
431 extern void iommu_detach_device(struct iommu_domain *domain,
432 				struct device *dev);
433 extern int iommu_cache_invalidate(struct iommu_domain *domain,
434 				  struct device *dev,
435 				  struct iommu_cache_invalidate_info *inv_info);
436 extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
437 		struct device *dev, struct iommu_gpasid_bind_data *data);
438 extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
439 				struct device *dev, ioasid_t pasid);
440 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
441 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
442 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
443 		     phys_addr_t paddr, size_t size, int prot);
444 extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
445 			    phys_addr_t paddr, size_t size, int prot);
446 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
447 			  size_t size);
448 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
449 			       unsigned long iova, size_t size,
450 			       struct iommu_iotlb_gather *iotlb_gather);
451 extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
452 			   struct scatterlist *sg,unsigned int nents, int prot);
453 extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
454 				  unsigned long iova, struct scatterlist *sg,
455 				  unsigned int nents, int prot);
456 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
457 extern void iommu_set_fault_handler(struct iommu_domain *domain,
458 			iommu_fault_handler_t handler, void *token);
459 
460 /**
461  * iommu_map_sgtable - Map the given buffer to the IOMMU domain
462  * @domain:	The IOMMU domain to perform the mapping
463  * @iova:	The start address to map the buffer
464  * @sgt:	The sg_table object describing the buffer
465  * @prot:	IOMMU protection bits
466  *
467  * Creates a mapping at @iova for the buffer described by a scatterlist
468  * stored in the given sg_table object in the provided IOMMU domain.
469  */
470 static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
471 			unsigned long iova, struct sg_table *sgt, int prot)
472 {
	/* Maps all orig_nents entries, i.e. the scatterlist as allocated. */
473 	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
474 }
475 
476 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
477 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
478 extern void generic_iommu_put_resv_regions(struct device *dev,
479 					   struct list_head *list);
480 extern void iommu_set_default_passthrough(bool cmd_line);
481 extern void iommu_set_default_translated(bool cmd_line);
482 extern bool iommu_default_passthrough(void);
483 extern struct iommu_resv_region *
484 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
485 			enum iommu_resv_type type);
486 extern int iommu_get_group_resv_regions(struct iommu_group *group,
487 					struct list_head *head);
488 
489 extern int iommu_attach_group(struct iommu_domain *domain,
490 			      struct iommu_group *group);
491 extern void iommu_detach_group(struct iommu_domain *domain,
492 			       struct iommu_group *group);
493 extern struct iommu_group *iommu_group_alloc(void);
494 extern void *iommu_group_get_iommudata(struct iommu_group *group);
495 extern void iommu_group_set_iommudata(struct iommu_group *group,
496 				      void *iommu_data,
497 				      void (*release)(void *iommu_data));
498 extern int iommu_group_set_name(struct iommu_group *group, const char *name);
499 extern int iommu_group_add_device(struct iommu_group *group,
500 				  struct device *dev);
501 extern void iommu_group_remove_device(struct device *dev);
502 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
503 				    int (*fn)(struct device *, void *));
504 extern struct iommu_group *iommu_group_get(struct device *dev);
505 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
506 extern void iommu_group_put(struct iommu_group *group);
507 extern int iommu_group_register_notifier(struct iommu_group *group,
508 					 struct notifier_block *nb);
509 extern int iommu_group_unregister_notifier(struct iommu_group *group,
510 					   struct notifier_block *nb);
511 extern int iommu_register_device_fault_handler(struct device *dev,
512 					iommu_dev_fault_handler_t handler,
513 					void *data);
514 
515 extern int iommu_unregister_device_fault_handler(struct device *dev);
516 
517 extern int iommu_report_device_fault(struct device *dev,
518 				     struct iommu_fault_event *evt);
519 extern int iommu_page_response(struct device *dev,
520 			       struct iommu_page_response *msg);
521 
522 extern int iommu_group_id(struct iommu_group *group);
523 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
524 
525 extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
526 				 void *data);
527 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
528 				 void *data);
529 
530 /* Window handling function prototypes */
531 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
532 				      phys_addr_t offset, u64 size,
533 				      int prot);
534 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
535 
536 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
537 			      unsigned long iova, int flags);
538 
/*
 * Synchronously flush the whole TLB for @domain; a no-op when the driver
 * does not implement flush_iotlb_all.
 */
539 static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
540 {
541 	if (domain->ops->flush_iotlb_all)
542 		domain->ops->flush_iotlb_all(domain);
543 }
544 
/*
 * Flush the ranges accumulated in @iotlb_gather (when the driver provides
 * iotlb_sync) and then re-initialise the gather structure for reuse.
 */
545 static inline void iommu_tlb_sync(struct iommu_domain *domain,
546 				  struct iommu_iotlb_gather *iotlb_gather)
547 {
548 	if (domain->ops->iotlb_sync)
549 		domain->ops->iotlb_sync(domain, iotlb_gather);
550 
551 	iommu_iotlb_gather_init(iotlb_gather);
552 }
553 
/*
 * Fold the page [@iova, @iova + @size) into @gather, flushing the pending
 * range first when the new page cannot be merged with it.
 */
554 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
555 					       struct iommu_iotlb_gather *gather,
556 					       unsigned long iova, size_t size)
557 {
558 	unsigned long start = iova, end = start + size;
559 
560 	/*
561 	 * If the new page is disjoint from the current range or is mapped at
562 	 * a different granularity, then sync the TLB so that the gather
563 	 * structure can be rewritten.
564 	 */
565 	if (gather->pgsize != size ||
566 	    end < gather->start || start > gather->end) {
		/* pgsize == 0 means a freshly initialised gather: nothing to sync */
567 		if (gather->pgsize)
568 			iommu_tlb_sync(domain, gather);
569 		gather->pgsize = size;
570 	}
571 
	/* Grow the gathered range to cover the new page. */
572 	if (gather->end < end)
573 		gather->end = end;
574 
575 	if (gather->start > start)
576 		gather->start = start;
577 }
578 
579 /* PCI device grouping function */
580 extern struct iommu_group *pci_device_group(struct device *dev);
581 /* Generic device grouping function */
582 extern struct iommu_group *generic_device_group(struct device *dev);
583 /* FSL-MC device grouping function */
584 struct iommu_group *fsl_mc_device_group(struct device *dev);
585 
586 /**
587  * struct iommu_fwspec - per-device IOMMU instance data
588  * @ops: ops for this device's IOMMU
589  * @iommu_fwnode: firmware handle for this device's IOMMU
590  * @flags: IOMMU_FWSPEC_* flags (e.g. IOMMU_FWSPEC_PCI_RC_ATS)
591  * @num_pasid_bits: number of PASID bits supported by this device
592  * @num_ids: number of associated device IDs
593  * @ids: IDs which this device may present to the IOMMU
594  */
595 struct iommu_fwspec {
596 	const struct iommu_ops	*ops;
597 	struct fwnode_handle	*iommu_fwnode;
598 	u32			flags;
599 	u32			num_pasid_bits;
600 	unsigned int		num_ids;
601 	u32			ids[];
602 };
603 
604 /* ATS is supported */
605 #define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
606 
607 /**
608  * struct iommu_sva - handle to a device-mm bond
609  */
610 struct iommu_sva {
611 	struct device			*dev;
612 };
613 
614 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
615 		      const struct iommu_ops *ops);
616 void iommu_fwspec_free(struct device *dev);
617 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
618 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
619 
620 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
621 {
622 	if (dev->iommu)
623 		return dev->iommu->fwspec;
624 	else
625 		return NULL;
626 }
627 
/*
 * NOTE(review): unlike dev_iommu_fwspec_get(), the accessors below
 * dereference dev->iommu unconditionally -- callers must guarantee the
 * per-device IOMMU data has already been allocated.
 */
628 static inline void dev_iommu_fwspec_set(struct device *dev,
629 					struct iommu_fwspec *fwspec)
630 {
631 	dev->iommu->fwspec = fwspec;
632 }
633 
/* Fetch the IOMMU driver's private per-device data. */
634 static inline void *dev_iommu_priv_get(struct device *dev)
635 {
636 	return dev->iommu->priv;
637 }
638 
/* Install the IOMMU driver's private per-device data. */
639 static inline void dev_iommu_priv_set(struct device *dev, void *priv)
640 {
641 	dev->iommu->priv = priv;
642 }
643 
644 int iommu_probe_device(struct device *dev);
645 void iommu_release_device(struct device *dev);
646 
647 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
648 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
649 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
650 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
651 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
652 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
653 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
654 
655 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
656 					struct mm_struct *mm,
657 					void *drvdata);
658 void iommu_sva_unbind_device(struct iommu_sva *handle);
659 int iommu_sva_get_pasid(struct iommu_sva *handle);
660 
661 #else /* CONFIG_IOMMU_API */
662 
663 struct iommu_ops {};
664 struct iommu_group {};
665 struct iommu_fwspec {};
666 struct iommu_device {};
667 struct iommu_fault_param {};
668 struct iommu_iotlb_gather {};
669 
670 static inline bool iommu_present(struct bus_type *bus)
671 {
672 	return false;
673 }
674 
675 static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
676 {
677 	return false;
678 }
679 
680 static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
681 {
682 	return NULL;
683 }
684 
685 static inline struct iommu_group *iommu_group_get_by_id(int id)
686 {
687 	return NULL;
688 }
689 
690 static inline void iommu_domain_free(struct iommu_domain *domain)
691 {
692 }
693 
694 static inline int iommu_attach_device(struct iommu_domain *domain,
695 				      struct device *dev)
696 {
697 	return -ENODEV;
698 }
699 
700 static inline void iommu_detach_device(struct iommu_domain *domain,
701 				       struct device *dev)
702 {
703 }
704 
705 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
706 {
707 	return NULL;
708 }
709 
710 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
711 			    phys_addr_t paddr, size_t size, int prot)
712 {
713 	return -ENODEV;
714 }
715 
716 static inline int iommu_map_atomic(struct iommu_domain *domain,
717 				   unsigned long iova, phys_addr_t paddr,
718 				   size_t size, int prot)
719 {
720 	return -ENODEV;
721 }
722 
723 static inline size_t iommu_unmap(struct iommu_domain *domain,
724 				 unsigned long iova, size_t size)
725 {
726 	return 0;
727 }
728 
/*
 * Stub for the !CONFIG_IOMMU_API case.
 *
 * The parameter list is kept in sync with the real declaration of
 * iommu_unmap_fast() above, which takes a size in bytes -- the previous
 * "int gfp_order" parameter made the two prototypes diverge between
 * configurations.  Returns 0, i.e. "nothing unmapped".
 */
static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}
735 
736 static inline size_t iommu_map_sg(struct iommu_domain *domain,
737 				  unsigned long iova, struct scatterlist *sg,
738 				  unsigned int nents, int prot)
739 {
740 	return 0;
741 }
742 
743 static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
744 				  unsigned long iova, struct scatterlist *sg,
745 				  unsigned int nents, int prot)
746 {
747 	return 0;
748 }
749 
750 static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
751 {
752 }
753 
754 static inline void iommu_tlb_sync(struct iommu_domain *domain,
755 				  struct iommu_iotlb_gather *iotlb_gather)
756 {
757 }
758 
759 static inline int iommu_domain_window_enable(struct iommu_domain *domain,
760 					     u32 wnd_nr, phys_addr_t paddr,
761 					     u64 size, int prot)
762 {
763 	return -ENODEV;
764 }
765 
766 static inline void iommu_domain_window_disable(struct iommu_domain *domain,
767 					       u32 wnd_nr)
768 {
769 }
770 
771 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
772 {
773 	return 0;
774 }
775 
776 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
777 				iommu_fault_handler_t handler, void *token)
778 {
779 }
780 
781 static inline void iommu_get_resv_regions(struct device *dev,
782 					struct list_head *list)
783 {
784 }
785 
786 static inline void iommu_put_resv_regions(struct device *dev,
787 					struct list_head *list)
788 {
789 }
790 
791 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
792 					       struct list_head *head)
793 {
794 	return -ENODEV;
795 }
796 
797 static inline void iommu_set_default_passthrough(bool cmd_line)
798 {
799 }
800 
801 static inline void iommu_set_default_translated(bool cmd_line)
802 {
803 }
804 
805 static inline bool iommu_default_passthrough(void)
806 {
807 	return true;
808 }
809 
810 static inline int iommu_attach_group(struct iommu_domain *domain,
811 				     struct iommu_group *group)
812 {
813 	return -ENODEV;
814 }
815 
816 static inline void iommu_detach_group(struct iommu_domain *domain,
817 				      struct iommu_group *group)
818 {
819 }
820 
821 static inline struct iommu_group *iommu_group_alloc(void)
822 {
823 	return ERR_PTR(-ENODEV);
824 }
825 
826 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
827 {
828 	return NULL;
829 }
830 
831 static inline void iommu_group_set_iommudata(struct iommu_group *group,
832 					     void *iommu_data,
833 					     void (*release)(void *iommu_data))
834 {
835 }
836 
837 static inline int iommu_group_set_name(struct iommu_group *group,
838 				       const char *name)
839 {
840 	return -ENODEV;
841 }
842 
/* !CONFIG_IOMMU_API stub: fail with -ENODEV. */
843 static inline int iommu_group_add_device(struct iommu_group *group,
844 					 struct device *dev)
845 {
846 	return -ENODEV;
847 }
848 
/* !CONFIG_IOMMU_API stub: no-op, the device can never have been added. */
849 static inline void iommu_group_remove_device(struct device *dev)
850 {
851 }
852 
/* !CONFIG_IOMMU_API stub: @fn is never called; fail with -ENODEV. */
853 static inline int iommu_group_for_each_dev(struct iommu_group *group,
854 					   void *data,
855 					   int (*fn)(struct device *, void *))
856 {
857 	return -ENODEV;
858 }
859 
/* !CONFIG_IOMMU_API stub: NULL here means "device has no group". */
860 static inline struct iommu_group *iommu_group_get(struct device *dev)
861 {
862 	return NULL;
863 }
864 
/* !CONFIG_IOMMU_API stub: no-op; pairs with iommu_group_get() == NULL. */
865 static inline void iommu_group_put(struct iommu_group *group)
866 {
867 }
868 
/* !CONFIG_IOMMU_API stub: registration fails with -ENODEV. */
869 static inline int iommu_group_register_notifier(struct iommu_group *group,
870 						struct notifier_block *nb)
871 {
872 	return -ENODEV;
873 }
874 
/*
 * !CONFIG_IOMMU_API stub: returns 0 (success), deliberately asymmetric with
 * the register stub — registration always failed, so there is nothing to
 * unregister and teardown paths should not see an error.
 */
875 static inline int iommu_group_unregister_notifier(struct iommu_group *group,
876 						  struct notifier_block *nb)
877 {
878 	return 0;
879 }
880 
/* !CONFIG_IOMMU_API stub: device faults are not reported; fail with -ENODEV. */
881 static inline
882 int iommu_register_device_fault_handler(struct device *dev,
883 					iommu_dev_fault_handler_t handler,
884 					void *data)
885 {
886 	return -ENODEV;
887 }
888 
/* !CONFIG_IOMMU_API stub: returns 0 — nothing was registered to remove. */
889 static inline int iommu_unregister_device_fault_handler(struct device *dev)
890 {
891 	return 0;
892 }
893 
/* !CONFIG_IOMMU_API stub: fault reporting unsupported, return -ENODEV. */
894 static inline
895 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
896 {
897 	return -ENODEV;
898 }
899 
/* !CONFIG_IOMMU_API stub: page-request responses unsupported, -ENODEV. */
900 static inline int iommu_page_response(struct device *dev,
901 				      struct iommu_page_response *msg)
902 {
903 	return -ENODEV;
904 }
905 
/* !CONFIG_IOMMU_API stub: no valid group id exists, return -ENODEV. */
906 static inline int iommu_group_id(struct iommu_group *group)
907 {
908 	return -ENODEV;
909 }
910 
/* !CONFIG_IOMMU_API stub: no attribute is valid here, hence -EINVAL. */
911 static inline int iommu_domain_get_attr(struct iommu_domain *domain,
912 					enum iommu_attr attr, void *data)
913 {
914 	return -EINVAL;
915 }
916 
/* !CONFIG_IOMMU_API stub: no attribute is valid here, hence -EINVAL. */
917 static inline int iommu_domain_set_attr(struct iommu_domain *domain,
918 					enum iommu_attr attr, void *data)
919 {
920 	return -EINVAL;
921 }
922 
/* !CONFIG_IOMMU_API stub: IOMMU drivers cannot register, return -ENODEV. */
923 static inline int  iommu_device_register(struct iommu_device *iommu)
924 {
925 	return -ENODEV;
926 }
927 
/* !CONFIG_IOMMU_API stub: no-op, the ops pointer is discarded. */
928 static inline void iommu_device_set_ops(struct iommu_device *iommu,
929 					const struct iommu_ops *ops)
930 {
931 }
932 
/* !CONFIG_IOMMU_API stub: no-op, the fwnode is discarded. */
933 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
934 					   struct fwnode_handle *fwnode)
935 {
936 }
937 
/* !CONFIG_IOMMU_API stub: no iommu_device backs any struct device. */
938 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
939 {
940 	return NULL;
941 }
942 
/* !CONFIG_IOMMU_API stub: no-op, there is no TLB-gather state to reset. */
943 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
944 {
945 }
946 
/* !CONFIG_IOMMU_API stub: no-op, no invalidation range is accumulated. */
947 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
948 					       struct iommu_iotlb_gather *gather,
949 					       unsigned long iova, size_t size)
950 {
951 }
952 
/* !CONFIG_IOMMU_API stub: no-op; registration could never have succeeded. */
953 static inline void iommu_device_unregister(struct iommu_device *iommu)
954 {
955 }
956 
/* !CONFIG_IOMMU_API stub: no sysfs representation; fail with -ENODEV. */
957 static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
958 					  struct device *parent,
959 					  const struct attribute_group **groups,
960 					  const char *fmt, ...)
961 {
962 	return -ENODEV;
963 }
964 
/* !CONFIG_IOMMU_API stub: no-op; nothing was added to sysfs. */
965 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
966 {
967 }
968 
/* !CONFIG_IOMMU_API stub: sysfs linking unsupported; returns -EINVAL. */
969 static inline int iommu_device_link(struct device *dev, struct device *link)
970 {
971 	return -EINVAL;
972 }
973 
/* !CONFIG_IOMMU_API stub: no-op; no link was ever created. */
974 static inline void iommu_device_unlink(struct device *dev, struct device *link)
975 {
976 }
977 
/* !CONFIG_IOMMU_API stub: firmware-spec setup fails with -ENODEV. */
978 static inline int iommu_fwspec_init(struct device *dev,
979 				    struct fwnode_handle *iommu_fwnode,
980 				    const struct iommu_ops *ops)
981 {
982 	return -ENODEV;
983 }
984 
/* !CONFIG_IOMMU_API stub: no-op; no fwspec can have been allocated. */
985 static inline void iommu_fwspec_free(struct device *dev)
986 {
987 }
988 
/* !CONFIG_IOMMU_API stub: fail with -ENODEV. */
989 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
990 				       int num_ids)
991 {
992 	return -ENODEV;
993 }
994 
/* !CONFIG_IOMMU_API stub: no ops are ever registered for a fwnode. */
995 static inline
996 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
997 {
998 	return NULL;
999 }
1000 
/* !CONFIG_IOMMU_API stub: no device feature is ever available. */
1001 static inline bool
1002 iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
1003 {
1004 	return false;
1005 }
1006 
/* !CONFIG_IOMMU_API stub: no device feature is ever enabled. */
1007 static inline bool
1008 iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
1009 {
1010 	return false;
1011 }
1012 
/* !CONFIG_IOMMU_API stub: enabling a feature fails with -ENODEV. */
1013 static inline int
1014 iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
1015 {
1016 	return -ENODEV;
1017 }
1018 
/* !CONFIG_IOMMU_API stub: disabling a feature fails with -ENODEV. */
1019 static inline int
1020 iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
1021 {
1022 	return -ENODEV;
1023 }
1024 
/* !CONFIG_IOMMU_API stub: auxiliary-domain attach fails with -ENODEV. */
1025 static inline int
1026 iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
1027 {
1028 	return -ENODEV;
1029 }
1030 
/* !CONFIG_IOMMU_API stub: no-op; aux attach can never have succeeded. */
1031 static inline void
1032 iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
1033 {
1034 }
1035 
/* !CONFIG_IOMMU_API stub: no PASID exists for an aux domain, -ENODEV. */
1036 static inline int
1037 iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
1038 {
1039 	return -ENODEV;
1040 }
1041 
/*
 * !CONFIG_IOMMU_API stub: returns NULL (not ERR_PTR) — callers of this
 * function NULL-check the handle rather than using IS_ERR().
 */
1042 static inline struct iommu_sva *
1043 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
1044 {
1045 	return NULL;
1046 }
1047 
/* !CONFIG_IOMMU_API stub: no-op; bind always returned NULL. */
1048 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1049 {
1050 }
1051 
/* !CONFIG_IOMMU_API stub: no PASID was ever allocated for the handle. */
1052 static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
1053 {
1054 	return IOMMU_PASID_INVALID;
1055 }
1056 
/* !CONFIG_IOMMU_API stub: guest cache invalidation unsupported, -ENODEV. */
1057 static inline int
1058 iommu_cache_invalidate(struct iommu_domain *domain,
1059 		       struct device *dev,
1060 		       struct iommu_cache_invalidate_info *inv_info)
1061 {
1062 	return -ENODEV;
1063 }
/* !CONFIG_IOMMU_API stub: guest PASID binding unsupported, -ENODEV. */
1064 static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
1065 				struct device *dev, struct iommu_gpasid_bind_data *data)
1066 {
1067 	return -ENODEV;
1068 }
1069 
/* !CONFIG_IOMMU_API stub: guest PASID unbinding unsupported, -ENODEV. */
1070 static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
1071 					   struct device *dev, int pasid)
1072 {
1073 	return -ENODEV;
1074 }
1075 
/* !CONFIG_IOMMU_API stub: no fwspec is ever attached to a device. */
1076 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1077 {
1078 	return NULL;
1079 }
1080 #endif /* CONFIG_IOMMU_API */
1081 
1082 #ifdef CONFIG_IOMMU_DEBUGFS
1083 extern	struct dentry *iommu_debugfs_dir;
1084 void iommu_debugfs_setup(void);
1085 #else
/* !CONFIG_IOMMU_DEBUGFS stub: no debugfs directory is created. */
1086 static inline void iommu_debugfs_setup(void) {}
1087 #endif
1088 
1089 #endif /* __LINUX_IOMMU_H */
1090