/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**      (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
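
/*
** Example usage (illustrative; assumes the platform glue points
** hppa_dma_ops at one of the two ops tables defined at the bottom of
** this file).  A driver reaches these ops through the generic wrappers
** from Documentation/DMA-API-HOWTO.txt:
**
**	dma_addr_t ring_dma;
**	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
**
**	... hand ring_dma to the device, access the buffer via ring ...
**
**	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
**
** On PA7100LC/PA7300LC these end up in pa11_dma_alloc_consistent() and
** pa11_dma_free_consistent() below.
*/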

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry *proc_gsc_root __read_mostly;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (u_long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for (; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) { }
#endif

static int pa11_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline int map_pte_uncached(pte_t *pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t *pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

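/*
** Summary of the helpers above: map_uncached_pages() walks
** pgd -> pmd -> pte and installs PAGE_KERNEL_UNC translations covering
** [vaddr, vaddr + size), backed by the already-allocated physical range
** starting at paddr, purging stale kernel TLB entries as it goes.
*/
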
static inline void unmap_uncached_pte(pmd_t *pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t *pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t *dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) \
	{ \
		if (0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

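/*
** Worked example (illustrative): for pages_needed = 3, pcxl_alloc_range()
** below computes
**
**	mask = ~0UL >> (BITS_PER_LONG - 3) = 0x7
**
** and PCXL_FIND_FREE_MAPPING(res_idx, mask, 8) scans the map a byte at a
** time, starting at pcxl_res_hint and wrapping once to the start, for a
** u8 whose low three bits are clear.  Because the mask is only tested at
** byte/halfword/word granularity, allocations are aligned to their
** bucket size (8, 16 or 32 pages).
*/
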
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %zu pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if (pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if (pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if (pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

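/*
** Worked example (illustrative): each byte of pcxl_res_map accounts for
** 8 pages of the mapping area, so with 4KB pages res_idx = 2 gives
**
**	vaddr = pcxl_dma_start + (2 << (12 + 3)) = pcxl_dma_start + 0x10000
**
** i.e. the third 8-page (32KB) chunk of the pcxl dma region.
** pcxl_free_range() below inverts this to recover res_idx from vaddr.
*/
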
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %zu pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if (pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if (pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if (pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m, "     	  total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for (; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m, "\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	if (!pcxl_res_map)
		panic("%s: unable to allocate pcxl resource map\n", __FILE__);
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry *ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

static void *pa11_dma_alloc_consistent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	if (!paddr) {
		pcxl_free_range(vaddr, size);
		return NULL;
	}
	/*
	** At this point paddr is still the cached kernel virtual address
	** returned by __get_free_pages(); flush it before converting it to
	** a physical address and remapping the memory uncached.
	*/
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || dev->coherent_dma_mask < 0xffffffff)
		flag |= GFP_DMA;
#endif
	return (void *)vaddr;
}

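/*
** Address bookkeeping in pa11_dma_alloc_consistent() (illustrative
** summary): __get_free_pages() yields the cached kernel vaddr, __pa()
** turns it into the physical address returned via *dma_handle, and
** pcxl_alloc_range()/map_uncached_pages() produce the uncached vaddr
** handed back to the caller.  pa11_dma_free_consistent() below reverses
** each step, recovering the cached mapping from the handle via __va().
*/
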
static void pa11_dma_free_consistent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For DMA_FROM_DEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;
		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};

static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *addr;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t iova)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
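
/*
** Illustrative sketch (an assumption about the platform glue, not
** something defined in this file): early setup code is expected to pick
** one of the two tables above for the running CPU, along the lines of:
*/
#if 0
	switch (boot_cpu_data.cpu_type) {
	case pcxl:
	case pcxl2:
		hppa_dma_ops = &pcxl_dma_ops;	/* PA7100LC/PA7300LC */
		break;
	default:
		hppa_dma_ops = &pcx_dma_ops;	/* no uncached-mapping support */
		break;
	}
#endif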