xref: /openbmc/linux/arch/s390/pci/pci_dma.c (revision 4d7b04c0cda365f190c4a8f7fddc535b93aae9f9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

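/*
 * Refresh the device's I/O translation cache for the entire DMA aperture
 * (start_dma .. start_dma + iommu_pages * PAGE_SIZE) via zpci_refresh_trans().
 */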
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

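/*
 * Allocate a region/segment table from its slab cache and initialize all
 * entries to ZPCI_TABLE_INVALID.
 */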
unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

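/*
 * Return the segment table referenced by a region-table entry, allocating
 * and installing a new one with cmpxchg() if the entry is still invalid.
 * A concurrent walker may win the race; in that case the local allocation
 * is freed and the winner's table is used instead.
 */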
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

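/*
 * Walk the three-level translation table for @dma_addr, creating missing
 * segment and page tables on the way, and return a pointer to the page
 * table entry (or NULL if a table allocation failed).
 */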
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
				  gfp_t gfp)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

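/*
 * Build the new page table entry (invalid, or valid with the page frame
 * address and optional protection bit) and install it atomically with xchg().
 */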
void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

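/*
 * Update the translation entries for a [pa, pa + size) range, one page at a
 * time. If a table allocation fails while mapping, the pages mapped so far
 * are invalidated again before returning -ENOMEM.
 */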
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	phys_addr_t page_addr = (pa & PAGE_MASK);
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	if (!zdev->dma_table)
		return -EINVAL;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
					   GFP_ATOMIC);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
						   GFP_ATOMIC);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

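/*
 * Update the translation table for a range and purge the I/O TLB for it.
 * If the TLB purge fails for a newly established mapping, the translation
 * entries are invalidated again before the error is returned.
 */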
static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

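/*
 * DMA address allocation: addresses are handed out from a per-device bitmap
 * covering the IOMMU aperture. dma_alloc_address() first searches from the
 * last allocation onwards; before retrying from the start in lazy
 * (non-strict) mode it performs a global TLB flush so that lazily freed
 * addresses may be reused.
 */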
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

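/*
 * dma_map_ops .map_page callback: allocate a DMA address range for the
 * buffer, establish the translations and return the DMA address of the
 * first byte (DMA_MAPPING_ERROR on failure). For DMA_TO_DEVICE and DMA_NONE
 * the entries are installed with the protection bit set.
 */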
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

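/*
 * Coherent allocation (.alloc/.free): back the buffer with zeroed pages and
 * map them DMA_BIDIRECTIONAL through s390_dma_map_pages(); the returned
 * kernel address and DMA handle therefore refer to the same physical pages.
 */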
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	phys_addr_t pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return phys_to_virt(pa);
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	phys_addr_t pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

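/*
 * .map_sg callback: walk the scatterlist and merge as many consecutive
 * elements as possible into one contiguous DMA address range. A new range
 * is started whenever an element does not start at a page boundary, the
 * accumulated size is not a multiple of PAGE_SIZE, or dma_get_max_seg_size()
 * would be exceeded. Returns the number of DMA segments produced, or a
 * negative error code with everything mapped so far unmapped again.
 */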
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i, ret;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			ret = __s390_dma_map_sg(dev, start, size,
						&dma->dma_address, dir);
			if (ret)
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
	if (ret)
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
		return NULL;

	return vzalloc(bytes);
}

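/*
 * Set up the per-device DMA state: allocate the root translation table,
 * size the IOMMU aperture, allocate the allocation (and, in lazy mode, the
 * lazy-free) bitmaps and register the translation table with the hardware
 * via zpci_register_ioat().
 */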
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);

	zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			       virt_to_phys(zdev->dma_table), &status)) {
		rc = -EIO;
		goto free_bitmap;
	}

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

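/*
 * Tear down the per-device DMA state again: unregister the translation
 * table from the hardware (tolerating a function that has already
 * disappeared), free the translation tables and bitmaps, and reset the
 * allocation cursor.
 */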
int zpci_dma_exit_device(struct zpci_dev *zdev)
{
	int cc = 0;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);
	if (zdev_enabled(zdev))
		cc = zpci_unregister_ioat(zdev, 0);
	/*
	 * cc == 3 indicates the function is gone already. This can happen
	 * if the function was deconfigured/disabled suddenly and we have not
	 * received a new handle yet.
	 */
	if (cc && cc != 3)
		return -EIO;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
	return 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

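/*
 * Kernel command-line handling: "s390_iommu=strict" disables lazy unmapping,
 * "s390_iommu_aperture=<factor>" scales the default aperture; a factor of 0
 * selects the maximum possible aperture (see zpci_dma_init()).
 */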
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);