/*
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * EMU10K1 memory page allocation (PTB area)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of these two macros is an EMU page (4096 bytes),
 * not an aligned (kernel) page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the silent page address */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
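
/*
 * Worked example (illustrative only, not used by the driver): with the
 * entry layout ((addr << 1) | page) from __set_ptb_entry above, mapping
 * the EMU page at bus address 0x12345000 into PTB slot 3 stores
 * (0x12345000 << 1) | 3 = 0x2468a003; the page address occupies the
 * upper bits and the slot index the lower bits.
 */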

/*
 * prototypes
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the
 * following mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each(pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
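
/*
 * Worked example (illustrative): with blocks mapped at aligned pages
 * [0..3] and [8..11], a 4-page request returns page 4 immediately (exact
 * fit).  If no hole fits exactly, the tail region after the last block
 * is preferred as long as it is at least as large as the biggest
 * interior hole; the biggest interior hole is used only as a last resort.
 */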

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr,
						psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
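
/*
 * Minimal usage sketch (hypothetical caller, illustration only, not part
 * of this file): a voice-start path would (re)map the block right before
 * programming the voice; the call also refreshes the eviction order when
 * the block is already mapped.
 */
#if 0
static int example_start_voice(struct snd_emu10k1 *emu,
			       struct snd_emu10k1_memblk *blk)
{
	int err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0)
		return err;	/* PTB full even after evicting old blocks */
	/* ... program the voice using blk->mapped_page ... */
	return 0;
}
#endif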

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
		(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill the buffer addresses; the page pointers are not stored,
	 * so that snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}


/*
 * release the DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
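
/*
 * Minimal usage sketch (hypothetical caller, illustration only): the
 * expected lifecycle of a synth sample block is alloc, fill from user
 * space, and free; the function name and error handling here are made up.
 */
#if 0
static int example_load_sample(struct snd_emu10k1 *emu,
			       const char __user *data, int size)
{
	struct snd_util_memblk *blk;
	int err;

	blk = snd_emu10k1_synth_alloc(emu, size);
	if (blk == NULL)
		return -ENOMEM;
	err = snd_emu10k1_synth_copy_from_user(emu, blk, 0, data, size);
	if (err < 0) {
		snd_emu10k1_synth_free(emu, blk);
		return err;
	}
	/* ... use the sample, then release it ... */
	return snd_emu10k1_synth_free(emu, blk);
}
#endif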

/* check the new allocation range; pages shared with neighboring blocks
 * are excluded, since they were already allocated
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;  /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}

/* release the allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
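
/*
 * Worked example for the page walk in the two functions above
 * (illustrative, assuming 4 kB pages): writing 6000 bytes at block
 * offset 3000 into a block that itself starts 512 bytes into its first
 * page visits offsets 3512..4095 (584 bytes on page 0), 4096..8191
 * (4096 bytes on page 1) and 8192..9511 (1320 bytes on page 2), with
 * the last chunk clipped to end_offset.
 */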