// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

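/*
 * Flush the device's translation cache for the complete DMA aperture.
 * Used before DMA addresses are reused, e.g. after an IOMMU bitmap
 * wrap-around in lazy (non-strict) mode.
 */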
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

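/*
 * Return the segment table linked from the given region-table entry,
 * allocating and linking a new one if the entry is still invalid. The
 * cmpxchg() makes concurrent lockless population safe: if another CPU
 * installed an entry first, the loser frees its table and uses theirs.
 */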
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

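/*
 * Walk the three-level translation table (region table -> segment table ->
 * page table) for a DMA address, creating missing intermediate tables on
 * the way, and return a pointer to the page-table entry for that address.
 */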
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
				  gfp_t gfp)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

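/*
 * Build the new page-table entry (invalid, or valid with the page frame
 * address and protection bit according to flags) and install it atomically
 * with xchg().
 */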
void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

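/*
 * Update the CPU-side translation entries for a physically contiguous
 * range, page by page. On failure any already-written valid entries are
 * invalidated again, so the caller never sees a half-established mapping.
 */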
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	phys_addr_t page_addr = (pa & PAGE_MASK);
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	if (!zdev->dma_table)
		return -EINVAL;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
					   GFP_ATOMIC);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
						   GFP_ATOMIC);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

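/*
 * Allocate a contiguous range of IOMMU pages from the bitmap and return
 * the corresponding DMA address. On wrap-around in lazy mode, addresses
 * that were only marked in the lazy bitmap are flushed globally and
 * returned to the allocation bitmap before retrying from the start of
 * the aperture.
 */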
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

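/*
 * dma_map_ops.map_page: allocate a DMA address range covering the buffer,
 * establish the translation entries and flush the device TLB as needed.
 * DMA_NONE and DMA_TO_DEVICE mappings get the protection bit set so the
 * device cannot store through them.
 */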
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

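/*
 * dma_map_ops.alloc: coherent allocations are plain zeroed pages mapped
 * bidirectionally through the same translation tables as streaming DMA.
 */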
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	phys_addr_t pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return phys_to_virt(pa);
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	phys_addr_t pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

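/*
 * dma_map_ops.map_sg: merge scatterlist elements into the largest possible
 * DMA-contiguous chunks. A new chunk is started whenever an element does
 * not continue page-aligned from the previous one or the chunk would
 * exceed the device's maximum segment size.
 */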
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i, ret;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			ret = __s390_dma_map_sg(dev, start, size,
						&dma->dma_address, dir);
			if (ret)
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
	if (ret)
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
		return NULL;

	return vzalloc(bytes);
}

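/*
 * Set up DMA translation for a PCI function: allocate the root translation
 * table and the IOMMU allocation bitmap(s), size the usable DMA aperture
 * and register the translation table with the hardware (IOAT).
 */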
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);

	zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}

	}
	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			       virt_to_phys(zdev->dma_table), &status)) {
		rc = -EIO;
		goto free_bitmap;
	}

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

int zpci_dma_exit_device(struct zpci_dev *zdev)
{
	int cc = 0;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);
	if (zdev_enabled(zdev))
		cc = zpci_unregister_ioat(zdev, 0);
	/*
	 * cc == 3 indicates the function is gone already. This can happen
	 * if the function was deconfigured/disabled suddenly and we have not
	 * received a new handle yet.
	 */
	if (cc && cc != 3)
		return -EIO;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
	return 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

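/*
 * "s390_iommu=strict" on the kernel command line switches from the default
 * lazy TLB flushing to flushing on every unmap.
 */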
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);