/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of this macro is an Emu page (4096 bytes), not an
 * aligned kernel page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
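/*
 * Illustrative example (not part of the driver): each 32-bit PTB entry
 * packs the DMA address shifted left by one together with the Emu page
 * index in the low bits.  With addr == 0x12340000 and page == 5, the
 * entry written is cpu_to_le32((0x12340000 << 1) | 5) == 0x24680005 on
 * a little-endian host.
 */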

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
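/*
 * Example (illustrative): with PAGE_SIZE == 4096 (PAGE_SHIFT == 12),
 * get_aligned_page(0x3100) == 3 and aligned_page_offset(3) == 0x3000,
 * i.e. the conversion simply truncates the offset within the page.
 */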

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to the page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address -- every entry points
		 * at the same silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
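/*
 * Illustrative note: on a system with PAGE_SIZE == 8192, UNIT_PAGES is 2,
 * so set_ptb_entry(emu, 3, addr) fills PTB entries 6 and 7 with addr and
 * addr + EMUPAGESIZE respectively.
 */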


/*
 * prototypes
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1-specific part of the memblk */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
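/*
 * Worked example (illustrative, PAGE_SIZE == 4096): a block with
 * mem.offset == 0x1800 and mem.size == 0x2000 spans bytes
 * 0x1800..0x37ff, so first_page == 1, last_page == 3 and pages == 3.
 */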

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp; if none is found, return a negative error code.
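 *
 * (an exactly fitting hole is taken immediately; otherwise the block is
 * placed in the largest hole found, with the free area at the end of the
 * PTB preferred when it is at least as large)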
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		snd_assert(blk->mapped_page >= 0, continue);
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
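	/* (when PAGE_SIZE > EMUPAGESIZE, set_ptb_entry() expands each aligned
	 * page into UNIT_PAGES consecutive Emu-page entries) */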
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to a kernel page boundary
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr,
								 psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check whether the given DMA address is valid for the page table:
 * it must fit within the card's DMA mask and be EMUPAGESIZE-aligned
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages -- try to unmap some blocks,
		 * starting from the oldest one */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
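 *
 * (the PCM buffer is a scatter-gather buffer here; the DMA address of
 * each of its kernel pages is entered into the card's page table below)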
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	snd_assert(emu, return NULL);
	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
	hdr = emu->memhdr;
	snd_assert(hdr, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses, but the kernel pointers are not stored,
	 * so that snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
		if (idx >= sgbuf->pages) {
			printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
			       blk->first_page, blk->last_page, sgbuf->pages);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
#endif
		addr = sgbuf->table[idx].addr;
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	snd_assert(emu && blk, return -EINVAL);
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* determine the range of pages that actually need a new allocation
 * (boundary pages may already be allocated by neighbouring blocks)
 */
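/*
 * Worked example (illustrative): if the preceding block in the list ends
 * on aligned page 4 and this block also starts on page 4, page 4 already
 * has backing memory, so *first_page_ret is bumped to 5.
 */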
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
					PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release allocated pages */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++) {
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = snd_dma_pci_data(emu->pci);
	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return 0;
}

/* calculate the buffer pointer from the offset address */
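/*
 * Example (illustrative): with PAGE_SIZE == 4096, offset 0x1234 within
 * aligned page p resolves to emu->page_ptr_table[p] + 0x234.
 */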
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
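 *
 * Worked example (illustrative, PAGE_SIZE == 4096): offset 0xf00 with
 * size 0x300 is split into two chunks: 0x100 bytes at the end of the
 * first page and 0x200 bytes at the start of the next.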
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
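 *
 * (the copy is chunked per kernel page, exactly like
 * snd_emu10k1_synth_bzero() above)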
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);