xref: /openbmc/linux/arch/s390/pci/pci_dma.c (revision 206204a1)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

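/*
 * Allocate a CPU-side region or segment table. All ZPCI_TABLE_ENTRIES
 * entries start out invalid and protected. GFP_ATOMIC is needed because
 * the allocator may run under the dma_table_lock spinlock with interrupts
 * disabled; dma_alloc_page_table() below works the same way for the
 * lowest-level page tables (ZPCI_PT_ENTRIES entries).
 */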
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

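/*
 * Return the segment table behind a region-table entry, or the page table
 * behind a segment-table entry, allocating it on first use. A freshly
 * allocated table is hooked into the entry, which is then validated and
 * unprotected.
 */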
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

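/*
 * Walk the three-level translation table for a DMA address and return a
 * pointer to its page-table entry: the region-table index (rtx) selects
 * the segment table, the segment index (sx) selects the page table and
 * the page index (px) selects the final entry. Missing intermediate
 * tables are allocated on the way down.
 */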
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

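/*
 * Update a single page-table entry: either invalidate it, or point it at
 * the page frame of page_addr, validate it and apply the requested
 * protection state.
 */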
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}

	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

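/*
 * Update the translation entries for a range of nr_pages pages under the
 * dma_table_lock and, where necessary, refresh the device's I/O
 * translations (rpcit) for the affected DMA address range.
 */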
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table)
		goto no_refresh;

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when
	 * previously invalid translation-table entries are validated; it
	 * is, however, required when altering previously valid entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: we also need to check that the old entry is indeed
		 * invalid, and not just for one page but for the whole
		 * range. For now we WARN_ON in that case, but with lazy
		 * unmap this needs to be redone.
		 */
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

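/*
 * Free all page tables reachable from a segment table, then the segment
 * table itself; dma_cleanup_tables() does the same one level up for the
 * region table and finally drops the device's dma_table pointer.
 */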
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

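/*
 * Allocate size pages from the per-device iommu bitmap. Allocation is
 * next-fit: the search starts at next_bit, falls back to the start of
 * the bitmap once, and respects the device's segment boundary via
 * iommu_area_alloc().
 */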
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
				       unsigned long start, int size)
{
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

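/*
 * Release size pages at offset in the iommu bitmap. The bitmap may
 * already be gone when the device is being torn down concurrently, hence
 * the NULL check under the lock.
 */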
static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

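/*
 * Map a page range for DMA: reserve an I/O address range in the iommu
 * bitmap, write the translation entries and return the DMA address. For
 * DMA_NONE and DMA_TO_DEVICE the entries are write-protected against the
 * device.
 */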
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up the number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use the rounded-up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma)
		goto out_free;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_hex(&pa, sizeof(pa));
	return DMA_ERROR_CODE;
}

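/*
 * Undo a mapping established by s390_dma_map_pages(): invalidate the
 * translation entries for the range and return the pages to the iommu
 * bitmap.
 */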
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
		zpci_err("unmap error:\n");
		zpci_err_hex(&dma_addr, sizeof(dma_addr));
	}

	atomic64_add(npages, &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

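/*
 * Coherent allocation: allocate zeroed pages and create a bidirectional
 * DMA mapping for them. The returned CPU pointer relies on the physical
 * address being directly mapped.
 */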
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

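/*
 * Map a scatterlist element by element through s390_dma_map_pages(); no
 * merging of adjacent elements is attempted. On failure all elements
 * mapped so far are unmapped again and 0 is returned.
 */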
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);

		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (dma_mapping_error(dev, s->dma_address))
			goto unmap;
		s->dma_length = s->length;
		mapped_elements++;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

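/*
 * Set up the DMA translation for a PCI device: allocate the root
 * translation table and the iommu bitmap and register the I/O address
 * translation parameters with the hardware. The I/O address space is
 * sized to match the directly mapped kernel memory.
 */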
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_free_table;
	}

	rc = zpci_register_ioat(zdev, 0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_free_bitmap;
	return 0;

out_free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
out_free_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we ever support direct DMA this must become conditional */
	.is_phys	= 0,
	/* without a .dma_supported callback, dma_supported() is always true */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);