/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K      12
#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
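
/*
 * Illustrative example (not part of the kernel API): for a hypothetical
 * table "tbl" using 64K IOMMU pages, i.e. tbl->it_page_shift == 16, the
 * per-table macros above expand as:
 *
 *	IOMMU_PAGE_SIZE(tbl)		== 0x10000
 *	IOMMU_PAGE_MASK(tbl)		clears the low 16 address bits
 *	IOMMU_PAGE_ALIGN(0x12345, tbl)	== 0x20000
 */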

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equivalent to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};
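
/*
 * Illustrative sketch only: how a platform backend might wire up a minimal
 * iommu_table_ops before initialising a table.  The foo_* names below are
 * hypothetical; real implementations (such as iommu_table_pseries_ops
 * declared below) are provided by platform code.
 *
 *	static struct iommu_table_ops foo_iommu_ops = {
 *		.set	= foo_tce_build,
 *		.clear	= foo_tce_free,
 *		.get	= foo_tce_get,
 *	};
 *
 *	tbl->it_ops = &foo_iommu_ops;
 *	iommu_init_table(tbl, nid, 0, 0);
 */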

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;

/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages of 4096 bytes each (16 MB)
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;     /* Bus number this table belongs to */
	unsigned long  it_size;      /* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;    /* Offset into global table */
	unsigned long  it_base;      /* mapped address of tce table */
	unsigned long  it_index;     /* which iommu table this is */
	unsigned long  it_type;      /* type: PCI or Virtual Bus */
	unsigned long  it_blocksize; /* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;       /* A simple allocation bitmap for now */
	unsigned long  it_page_shift;/* table iommu page size */
	struct list_head it_group_list;/* List of iommu_table_group_link */
	__be64 *it_userspace; /* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
	unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
	unsigned long it_reserved_end;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}
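
/*
 * Worked example (assuming a table whose it_page_shift is
 * IOMMU_PAGE_SHIFT_4K, i.e. 4K IOMMU pages):
 *
 *	get_iommu_order(0x1000, tbl) == 0	- one page
 *	get_iommu_order(0x1001, tbl) == 1	- rounds up to two pages
 *	get_iommu_order(0x4000, tbl) == 2	- four pages
 */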

struct scatterlist;

#ifdef CONFIG_PPC64

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

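/*
 * Typical use (illustrative sketch; "pdev" and "tbl" are hypothetical
 * locals): platform PCI setup code points a device at the table that
 * translates its DMA and then installs the IOMMU DMA ops:
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 */
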
extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
		int nid, unsigned long res_start, unsigned long res_end);
bool iommu_table_in_use(struct iommu_table *tbl);

#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;

struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	long (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes; /* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);

extern struct iommu_table_group_ops spapr_tce_table_group_ops;
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}
#endif /* !CONFIG_IOMMU_API */

u64 dma_iommu_get_required_mask(struct device *dev);
#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);
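
/*
 * Illustrative sketch of how the single-page helpers are typically driven
 * (roughly what the dma_iommu_ops wrappers do; local names are hypothetical):
 *
 *	struct iommu_table *tbl = get_iommu_table_base(dev);
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, offset, size,
 *					dma_get_mask(dev), DMA_TO_DEVICE, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		goto fail;
 *	...
 *	iommu_unmap_page(tbl, dma, size, DMA_TO_DEVICE, 0);
 */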

void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);

#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
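
/*
 * Illustrative sketch (hypothetical locals): a VFIO-style caller would
 * validate a guest-supplied (ioba, gpa) pair against the table before
 * programming a TCE:
 *
 *	if (iommu_tce_put_param_check(tbl, ioba, gpa))
 *		return -EINVAL;
 *	entry = ioba >> tbl->it_page_shift;
 *	ret = iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
 */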

extern void iommu_flush_tce(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
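
/*
 * Illustrative summary: iommu_direction_to_tce_perm() maps DMA_TO_DEVICE to
 * a device-readable TCE, DMA_FROM_DEVICE to a device-writable one,
 * DMA_BIDIRECTIONAL to both and DMA_NONE to neither; iommu_tce_direction()
 * performs the reverse lookup from TCE permission bits.
 */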

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif

extern const struct dma_map_ops dma_iommu_ops;

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */