xref: /openbmc/linux/arch/s390/pci/pci_dma.c (revision 9d749629)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

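/*
 * The CPU-side DMA translation table is a three-level structure: a region
 * table whose entries point to segment tables, whose entries in turn point
 * to page tables.  Region and segment tables have the same format and are
 * served from dma_region_table_cache; page tables come from
 * dma_page_table_cache.  Freshly allocated tables start out with every
 * entry marked invalid and protected.
 */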
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

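/*
 * Given a region table entry, return the segment table it points to.  If
 * the entry is still invalid, a new segment table is allocated, hooked up
 * and validated first.  dma_get_page_table_origin() below does the same
 * for segment table entries and page tables.
 */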
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

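/*
 * Walk the translation table for a DMA address and return a pointer to its
 * page table entry:
 *
 *	region table[calc_rtx(addr)] -> segment table[calc_sx(addr)]
 *		-> page table[calc_px(addr)]
 *
 * Missing segment and page tables are allocated on the fly; NULL is
 * returned if an allocation fails.
 */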
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

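/*
 * Update the single page table entry for dma_addr: either invalidate it
 * (ZPCI_PTE_INVALID) or set the page address, validate the entry and
 * update its protection bit according to ZPCI_TABLE_PROTECTED.
 */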
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

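/*
 * Apply a mapping update to the range [dma_addr, dma_addr + size) page by
 * page and, when required, refresh the translations seen by the device for
 * that range with the RPCIT instruction (rpcit_instr).
 */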
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when previously
	 * invalid translation-table entries are validated, however it is
	 * required when altering previously valid entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed INVALID
		 * and not only for one page but for the whole range...
		 * -> now we WARN_ON in that case but with lazy unmap that
		 * needs to be redone!
		 */
		goto no_refresh;
	rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
			 nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

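/*
 * Free the segment table referenced by a region table entry, including all
 * page tables hanging off its valid entries.
 */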
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

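/*
 * Tear down the complete translation-table hierarchy of a device.
 */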
static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

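/*
 * The DMA address space of a device is managed with a simple bitmap, one
 * bit per IOMMU page.  Allocation is next-fit: the search starts at
 * zdev->next_bit and is retried once from the beginning if nothing is
 * found.  The helpers return an IOMMU page index, or -1 on failure.
 */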
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				   int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

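/*
 * Map part of a page for DMA: reserve a range in the device's DMA address
 * space, program the translation table for it and return the resulting DMA
 * address.  DMA_NONE and DMA_TO_DEVICE mappings are created with the
 * protection bit set.  DMA_ERROR_CODE is returned on failure.
 */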
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	WARN_ON_ONCE(offset > PAGE_SIZE);

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			 dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + offset;
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

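/*
 * Undo a mapping created by s390_dma_map_pages(): invalidate the
 * translation table entries for the range and return the DMA address range
 * to the bitmap allocator.
 */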
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

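/*
 * Coherent allocation: get pages, clear them, map them bidirectionally and
 * hand back the kernel address together with the DMA handle.
 */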
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

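/*
 * Map a scatterlist element by element.  Returns the number of mapped
 * elements; on error everything mapped so far is unmapped again and 0 is
 * returned.
 */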
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

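/*
 * Per-device setup: allocate the root translation table and an IOMMU
 * bitmap large enough to cover all of memory, then register the
 * translation table with the device via zpci_register_ioat().
 */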
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	unsigned int bitmap_order;
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	bitmap_order = get_order(zdev->iommu_pages / 8);
	pr_info("iommu_size: 0x%lx  iommu_pages: 0x%lx  bitmap_order: %i\n",
		 zdev->iommu_size, zdev->iommu_pages, bitmap_order);

	zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       bitmap_order);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	free_pages((unsigned long) zdev->iommu_bitmap,
		   get_order(zdev->iommu_pages / 8));
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

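/*
 * Region/segment tables and page tables are served from their own kmem
 * caches, created here at boot via zpci_dma_init().
 */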
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

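/* DMA mapping operations for zPCI devices. */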
struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
511