/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of these two macros is an Emu page (4096 bytes),
 * not the (possibly larger) kernel-aligned page used elsewhere in this
 * file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
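
/*
 * Illustrative example (not from the original source): each PTB entry
 * packs the page's DMA address together with its own index, exactly as
 * the macro above defines.  For page = 3 and addr = 0x12345000 the
 * stored little-endian value is
 *
 *	cpu_to_le32((0x12345000 << 1) | 3) == cpu_to_le32(0x2468a003)
 *
 * i.e. the address shifted up by one bit with the Emu page index in
 * the low bits.
 */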

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
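
/*
 * Worked example (illustrative only): with PAGE_SIZE == 4096,
 * get_aligned_page(0x3000) == 3 and aligned_page_offset(3) == 0x3000.
 * On an architecture with PAGE_SIZE == 8192, UNIT_PAGES == 2, so one
 * aligned page covers two Emu pages and MAX_ALIGN_PAGES is halved.
 */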

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to the page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries per aligned page */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address -- every entry points at the same silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
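
/*
 * Illustrative example (not part of the original source): on a kernel
 * with PAGE_SIZE == 8192 (UNIT_PAGES == 2), set_ptb_entry(emu, 5, addr)
 * fills Emu pages 10 and 11 with addr and addr + EMUPAGESIZE
 * respectively, so one aligned page always maps to a contiguous run of
 * UNIT_PAGES PTB entries.
 */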

/* prototypes */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of a memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
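
/*
 * Worked example (illustrative only): a block with mem.offset == 0x2800
 * and mem.size == 0x2000 spans bytes 0x2800..0x47ff, so with 4 KiB
 * pages first_page == 2, last_page == 4 and pages == 3.
 */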

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its first page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		snd_assert(blk->mapped_page >= 0, continue);
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* keep track of the largest empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
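
/*
 * Illustrative example (not from the original source): suppose blocks
 * are mapped at aligned pages [2..3] and [8..9].  A request for 2 pages
 * hits the exact-fit hole at page 0 and returns immediately.  A request
 * for 3 pages records the 4-page hole at pages 4..7 as the best
 * candidate, but the tail region starting at page 10 is preferred
 * whenever it is at least as large as that candidate.
 */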

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
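
/*
 * Worked example (illustrative only): with neighbors mapped at pages
 * [0..1] and [6..9], unmapping a block at pages [3..4] merges the
 * surrounding gaps and returns 6 - 2 == 4 empty pages.
 */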

/*
 * search for empty pages of the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)
		__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
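
/*
 * Illustrative note (not from the original source): psize rounds the
 * byte count up to whole aligned pages, e.g. size == 0x2100 with 4 KiB
 * pages gives psize == get_aligned_page(0x2100 + 0xfff) == 3.
 */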

/*
 * check if the given DMA address is valid as an Emu page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
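
/*
 * Illustrative example (assumption: dma_mask is the emu10k1's
 * traditional 31-bit mask, 0x7fffffff): an address of 0x80000000 or
 * above is rejected by the mask test, and an address that is not
 * EMUPAGESIZE-aligned, e.g. 0x12345800, fails the alignment test.
 */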

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update the order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
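
/*
 * Hypothetical usage sketch (not from the original source; the calling
 * context is illustrative): a voice-start path would ensure the block
 * is mapped before programming the channel, e.g.:
 *
 *	err = snd_emu10k1_memblk_map(emu, blk);
 *	if (err < 0)
 *		return err;	// no PTB space even after evicting old blocks
 *
 * Blocks with map_locked set (e.g. active PCM buffers) are never
 * evicted by the retry loop above.
 */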

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	snd_assert(emu, return NULL);
	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE,
		   return NULL);
	hdr = emu->memhdr;
	snd_assert(hdr, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill the buffer addresses, but do not store the kernel page
	 * pointers, so that synth_free_pages() will not free these pages
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
		if (idx >= sgbuf->pages) {
			printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
			       blk->first_page, blk->last_page, sgbuf->pages);
			__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
#endif
		addr = sgbuf->table[idx].addr;
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}


/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	snd_assert(emu && blk, return -EINVAL);
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* calculate the page range that actually needs new allocations,
 * excluding first/last pages shared with neighboring blocks
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
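
/*
 * Worked example (illustrative only): synth blocks are byte-granular,
 * so two adjacent blocks can share an aligned page.  If the previous
 * block already covers pages 2..4 and the new block spans pages 4..6,
 * only pages 5..6 need fresh allocations here.
 */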

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
					PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the previously allocated pages */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++) {
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = snd_dma_pci_data(emu->pci);
	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
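
/*
 * Illustrative example (not from the original source): for page == 7
 * and offset == 0x1234 with 4 KiB pages, the returned pointer is
 * page_ptr_table[7] + 0x234 -- only the offset within the aligned page
 * is used.
 */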

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
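
/*
 * Worked example (illustrative only): the loop above clears at most one
 * aligned page per iteration.  Clearing size == 0x300 bytes starting at
 * offset == 0xf00 is split into a 0x100-byte memset at the end of the
 * first page and a 0x200-byte memset at the start of the next.
 */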

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);