/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>

/* Per-mapping protection flags, passed to iommu_map() and friends */
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

/* Forward declarations; full definitions are not needed in this header */
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

/* Fault callback, installed via iommu_set_fault_handler() */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
/* mm-exit callback for SVA contexts; see struct iommu_sva_ops below */
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain-types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
	unsigned type;			/* IOMMU_DOMAIN_* value */
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;	/* set by iommu_set_fault_handler() */
	void *handler_token;		/* opaque token passed to @handler */
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

/* PASID value that never names a valid address space */
#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transaction
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm.
 *           Consider the following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_range_add: Add a given iova range to the flush queue for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to
 *	check/enable/disable iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_range_add)(struct iommu_domain *domain,
				unsigned long iova, size_t size);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	unsigned long pgsize_bitmap;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu, set via iommu_device_set_fwnode()
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

/* Flush the whole IOTLB for @domain, if the driver implements it */
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

/* Queue an IOVA range for flushing; drained later by iommu_tlb_sync() */
static inline void iommu_tlb_range_add(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	if (domain->ops->iotlb_range_add)
		domain->ops->iotlb_range_add(domain, iova, size);
}

/* Flush all ranges previously queued with iommu_tlb_range_add() */
static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain);
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec -
 *			per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	void			*iommu_priv;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[1];	/* trailing storage, sized by num_ids */
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device bound to the mm
 * @ops: driver callbacks, installed via iommu_sva_set_ops()
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return dev->iommu_fwspec;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu_fwspec = fwspec;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
492 493 struct iommu_sva *iommu_sva_bind_device(struct device *dev, 494 struct mm_struct *mm, 495 void *drvdata); 496 void iommu_sva_unbind_device(struct iommu_sva *handle); 497 int iommu_sva_set_ops(struct iommu_sva *handle, 498 const struct iommu_sva_ops *ops); 499 int iommu_sva_get_pasid(struct iommu_sva *handle); 500 501 #else /* CONFIG_IOMMU_API */ 502 503 struct iommu_ops {}; 504 struct iommu_group {}; 505 struct iommu_fwspec {}; 506 struct iommu_device {}; 507 508 static inline bool iommu_present(struct bus_type *bus) 509 { 510 return false; 511 } 512 513 static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 514 { 515 return false; 516 } 517 518 static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 519 { 520 return NULL; 521 } 522 523 static inline struct iommu_group *iommu_group_get_by_id(int id) 524 { 525 return NULL; 526 } 527 528 static inline void iommu_domain_free(struct iommu_domain *domain) 529 { 530 } 531 532 static inline int iommu_attach_device(struct iommu_domain *domain, 533 struct device *dev) 534 { 535 return -ENODEV; 536 } 537 538 static inline void iommu_detach_device(struct iommu_domain *domain, 539 struct device *dev) 540 { 541 } 542 543 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 544 { 545 return NULL; 546 } 547 548 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, 549 phys_addr_t paddr, size_t size, int prot) 550 { 551 return -ENODEV; 552 } 553 554 static inline size_t iommu_unmap(struct iommu_domain *domain, 555 unsigned long iova, size_t size) 556 { 557 return 0; 558 } 559 560 static inline size_t iommu_unmap_fast(struct iommu_domain *domain, 561 unsigned long iova, int gfp_order) 562 { 563 return 0; 564 } 565 566 static inline size_t iommu_map_sg(struct iommu_domain *domain, 567 unsigned long iova, struct scatterlist *sg, 568 unsigned int nents, int prot) 569 { 570 return 0; 571 } 572 573 static inline void 
iommu_flush_tlb_all(struct iommu_domain *domain) 574 { 575 } 576 577 static inline void iommu_tlb_range_add(struct iommu_domain *domain, 578 unsigned long iova, size_t size) 579 { 580 } 581 582 static inline void iommu_tlb_sync(struct iommu_domain *domain) 583 { 584 } 585 586 static inline int iommu_domain_window_enable(struct iommu_domain *domain, 587 u32 wnd_nr, phys_addr_t paddr, 588 u64 size, int prot) 589 { 590 return -ENODEV; 591 } 592 593 static inline void iommu_domain_window_disable(struct iommu_domain *domain, 594 u32 wnd_nr) 595 { 596 } 597 598 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 599 { 600 return 0; 601 } 602 603 static inline void iommu_set_fault_handler(struct iommu_domain *domain, 604 iommu_fault_handler_t handler, void *token) 605 { 606 } 607 608 static inline void iommu_get_resv_regions(struct device *dev, 609 struct list_head *list) 610 { 611 } 612 613 static inline void iommu_put_resv_regions(struct device *dev, 614 struct list_head *list) 615 { 616 } 617 618 static inline int iommu_get_group_resv_regions(struct iommu_group *group, 619 struct list_head *head) 620 { 621 return -ENODEV; 622 } 623 624 static inline int iommu_request_dm_for_dev(struct device *dev) 625 { 626 return -ENODEV; 627 } 628 629 static inline int iommu_attach_group(struct iommu_domain *domain, 630 struct iommu_group *group) 631 { 632 return -ENODEV; 633 } 634 635 static inline void iommu_detach_group(struct iommu_domain *domain, 636 struct iommu_group *group) 637 { 638 } 639 640 static inline struct iommu_group *iommu_group_alloc(void) 641 { 642 return ERR_PTR(-ENODEV); 643 } 644 645 static inline void *iommu_group_get_iommudata(struct iommu_group *group) 646 { 647 return NULL; 648 } 649 650 static inline void iommu_group_set_iommudata(struct iommu_group *group, 651 void *iommu_data, 652 void (*release)(void *iommu_data)) 653 { 654 } 655 656 static inline int iommu_group_set_name(struct iommu_group *group, 657 const char 
*name) 658 { 659 return -ENODEV; 660 } 661 662 static inline int iommu_group_add_device(struct iommu_group *group, 663 struct device *dev) 664 { 665 return -ENODEV; 666 } 667 668 static inline void iommu_group_remove_device(struct device *dev) 669 { 670 } 671 672 static inline int iommu_group_for_each_dev(struct iommu_group *group, 673 void *data, 674 int (*fn)(struct device *, void *)) 675 { 676 return -ENODEV; 677 } 678 679 static inline struct iommu_group *iommu_group_get(struct device *dev) 680 { 681 return NULL; 682 } 683 684 static inline void iommu_group_put(struct iommu_group *group) 685 { 686 } 687 688 static inline int iommu_group_register_notifier(struct iommu_group *group, 689 struct notifier_block *nb) 690 { 691 return -ENODEV; 692 } 693 694 static inline int iommu_group_unregister_notifier(struct iommu_group *group, 695 struct notifier_block *nb) 696 { 697 return 0; 698 } 699 700 static inline int iommu_group_id(struct iommu_group *group) 701 { 702 return -ENODEV; 703 } 704 705 static inline int iommu_domain_get_attr(struct iommu_domain *domain, 706 enum iommu_attr attr, void *data) 707 { 708 return -EINVAL; 709 } 710 711 static inline int iommu_domain_set_attr(struct iommu_domain *domain, 712 enum iommu_attr attr, void *data) 713 { 714 return -EINVAL; 715 } 716 717 static inline int iommu_device_register(struct iommu_device *iommu) 718 { 719 return -ENODEV; 720 } 721 722 static inline void iommu_device_set_ops(struct iommu_device *iommu, 723 const struct iommu_ops *ops) 724 { 725 } 726 727 static inline void iommu_device_set_fwnode(struct iommu_device *iommu, 728 struct fwnode_handle *fwnode) 729 { 730 } 731 732 static inline struct iommu_device *dev_to_iommu_device(struct device *dev) 733 { 734 return NULL; 735 } 736 737 static inline void iommu_device_unregister(struct iommu_device *iommu) 738 { 739 } 740 741 static inline int iommu_device_sysfs_add(struct iommu_device *iommu, 742 struct device *parent, 743 const struct attribute_group **groups, 
744 const char *fmt, ...) 745 { 746 return -ENODEV; 747 } 748 749 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) 750 { 751 } 752 753 static inline int iommu_device_link(struct device *dev, struct device *link) 754 { 755 return -EINVAL; 756 } 757 758 static inline void iommu_device_unlink(struct device *dev, struct device *link) 759 { 760 } 761 762 static inline int iommu_fwspec_init(struct device *dev, 763 struct fwnode_handle *iommu_fwnode, 764 const struct iommu_ops *ops) 765 { 766 return -ENODEV; 767 } 768 769 static inline void iommu_fwspec_free(struct device *dev) 770 { 771 } 772 773 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, 774 int num_ids) 775 { 776 return -ENODEV; 777 } 778 779 static inline 780 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 781 { 782 return NULL; 783 } 784 785 static inline bool 786 iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) 787 { 788 return false; 789 } 790 791 static inline bool 792 iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 793 { 794 return false; 795 } 796 797 static inline int 798 iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 799 { 800 return -ENODEV; 801 } 802 803 static inline int 804 iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 805 { 806 return -ENODEV; 807 } 808 809 static inline int 810 iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 811 { 812 return -ENODEV; 813 } 814 815 static inline void 816 iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 817 { 818 } 819 820 static inline int 821 iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 822 { 823 return -ENODEV; 824 } 825 826 static inline struct iommu_sva * 827 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 828 { 829 return NULL; 830 } 831 832 static inline void 
iommu_sva_unbind_device(struct iommu_sva *handle) 833 { 834 } 835 836 static inline int iommu_sva_set_ops(struct iommu_sva *handle, 837 const struct iommu_sva_ops *ops) 838 { 839 return -EINVAL; 840 } 841 842 static inline int iommu_sva_get_pasid(struct iommu_sva *handle) 843 { 844 return IOMMU_PASID_INVALID; 845 } 846 847 #endif /* CONFIG_IOMMU_API */ 848 849 #ifdef CONFIG_IOMMU_DEBUGFS 850 extern struct dentry *iommu_debugfs_dir; 851 void iommu_debugfs_setup(void); 852 #else 853 static inline void iommu_debugfs_setup(void) {} 854 #endif 855 856 #endif /* __LINUX_IOMMU_H */ 857