xref: /openbmc/linux/arch/s390/pci/pci_dma.c (revision 4bce6fce)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

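/* Refresh the device's translations for the complete DMA aperture. */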
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

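/*
 * Allocate a region/segment table with every entry initialized to
 * invalid and protected, ready to be hooked into the table hierarchy.
 */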
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

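/* Allocate a page table with every entry initialized to invalid and protected. */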
static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

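/*
 * Return the segment table addressed by a region table entry, allocating
 * and linking a new one if the entry is still invalid.
 */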
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

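/*
 * Return the page table addressed by a segment table entry, allocating
 * and linking a new one if the entry is still invalid.
 */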
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

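/*
 * Walk the translation tables down to the page table entry for dma_addr,
 * allocating intermediate tables on demand. Returns NULL on allocation failure.
 */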
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

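/*
 * Update a single page table entry: invalidate it, or make it point to
 * page_addr and set/clear the protection bit, depending on flags.
 */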
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

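/*
 * Map or unmap a range of nr_pages pages in the translation tables and,
 * unless the refresh can be skipped (see the comment below), flush the
 * affected translations via rpcit.
 */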
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table)
		goto no_refresh;

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
			(!s390_iommu_strict ||
			((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

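/*
 * Free the segment table referenced by a region table entry, including
 * all page tables still attached to it.
 */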
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

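/* Free the complete translation table hierarchy of a device. */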
static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

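/*
 * Search the iommu bitmap for a free range of size pages, starting at
 * start and honouring the device's DMA segment boundary.
 */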
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
				       unsigned long start, int size)
{
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

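/*
 * Allocate an iommu page range, retrying once from the start of the
 * bitmap (wrap-around); with lazy unmap a wrap triggers a global refresh
 * so that previously unmapped addresses may be handed out again.
 */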
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;
	int wrap = 0;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(zdev, 0, size);
		wrap = 1;
	}

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

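/*
 * map_page callback: reserve an iommu address range, program the
 * translation entries for it and return the bus address of the mapped data.
 */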
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma)
		goto out_free;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, &zdev->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_hex(&pa, sizeof(pa));
	return DMA_ERROR_CODE;
}

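/*
 * unmap_page callback: invalidate the translation entries of a mapping
 * and give its iommu address range back to the allocator.
 */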
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
		zpci_err("unmap error:\n");
		zpci_err_hex(&dma_addr, sizeof(dma_addr));
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

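/*
 * Coherent allocation: hand out zeroed pages mapped bidirectionally through
 * the iommu; the returned CPU pointer is the pages' (identity-mapped)
 * physical address.
 */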
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

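/*
 * Map a scatterlist element by element; if any element fails, everything
 * mapped so far is unmapped again and 0 is returned.
 */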
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

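/*
 * Set up DMA translation for a device: allocate the root translation table
 * and the iommu bitmap, then register the DMA address space with the
 * hardware (ioat).
 */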
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_free_bitmap;
	return 0;

out_free_bitmap:
	/* undo the bitmap allocation as well when registration fails */
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
out_reg:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

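/* Create the kmem caches backing the region/segment tables and page tables. */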
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

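/* The dma_map_ops wired up for s390 PCI devices. */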
struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);

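/*
 * "s390_iommu=strict" selects strict mode: translations are flushed on
 * every unmap instead of lazily at iommu bitmap wrap-around.
 */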
static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);
529