/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of this macro is an Emu page (EMUPAGESIZE = 4096
 * bytes), not an aligned kernel page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
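
/*
 * Illustration of the entry format (a worked example derived from the
 * macro above, not an extra code path): each 32-bit PTB entry packs the
 * DMA address of a 4 kB page together with the entry's own index.  A
 * page-aligned address has its low 12 bits clear, so after the << 1
 * shift the low 13 bits are free for the index.  E.g. mapping the
 * physical page at 0x12345000 as Emu page 5 stores:
 *
 *      __set_ptb_entry(emu, 5, 0x12345000)
 *              => cpu_to_le32((0x12345000 << 1) | 5)
 *              => cpu_to_le32(0x2468a005)
 */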

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get the aligned page from an offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get the offset address from an aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries per aligned page */
static inline void set_ptb_entry(emu10k1_t *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(emu10k1_t *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment addr: each entry points at the same silent page */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */


/* local prototypes */
static int synth_alloc_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);
static int synth_free_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, emu10k1_memblk_t, member)


/* initialize the emu10k1-specific part of a memory block */
static void emu10k1_memblk_init(emu10k1_memblk_t *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the start page and store the next
 * mapped block in nextp;
 * if none is found, return a negative error code
 */
static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                emu10k1_memblk_t *blk = get_emu10k1_memblk(pos, mapped_link);
                snd_assert(blk->mapped_page >= 0, continue);
                size = blk->mapped_page - page;
                if (size == npages) {
                        /* exact fit: take this hole immediately */
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                /* the tail region is the largest hole -- append there */
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
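
/*
 * A worked example of the search above (hypothetical PTB state): with
 * blocks mapped at pages 0-3 and 10-11 and a request for npages = 4,
 * the 6-page hole at pages 4-9 is not an exact fit, so it is only
 * remembered as the largest candidate; the tail starting at page 12 is
 * then compared against it, and since the tail is larger here, page 12
 * is returned.  An exact-fit hole would have been taken immediately.
 */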

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        emu10k1_memblk_t *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the kernel page start
 */
static emu10k1_memblk_t *
search_empty(emu10k1_t *emu, int size)
{
        struct list_head *p;
        emu10k1_memblk_t *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (emu10k1_memblk_t *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(emu10k1_t *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk("max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                snd_printk("page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, just update the order link.
 * if no empty region is found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        emu10k1_memblk_t *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_del(&blk->mapped_order_link);
                list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest one
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* the empty region is now large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}
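
/*
 * Note on the eviction above (a sketch of the behaviour, not a strict
 * guarantee of placement): the mapped_order_link list acts as an LRU
 * queue.  On a failed mapping, the oldest blocks are unmapped one by
 * one, skipping map_locked blocks, until unmap_memblk() reports a
 * merged empty region of at least blk->pages; only then is
 * map_memblk() retried (which performs a fresh hole search).
 */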

/*
 * page allocation for DMA
 */
snd_util_memblk_t *
snd_emu10k1_alloc_pages(emu10k1_t *emu, snd_pcm_substream_t *substream)
{
        snd_pcm_runtime_t *runtime = substream->runtime;
        struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
        snd_util_memhdr_t *hdr;
        emu10k1_memblk_t *blk;
        int page, err, idx;

        snd_assert(emu, return NULL);
        snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
        hdr = emu->memhdr;
        snd_assert(hdr, return NULL);

        down(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes);
        if (blk == NULL) {
                up(&hdr->block_mutex);
                return NULL;
        }
        /* fill the buffer addresses; the kernel-space pointers are left
         * NULL so that these pages are not freed in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
                if (idx >= sgbuf->pages) {
                        printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
                               blk->first_page, blk->last_page, sgbuf->pages);
                        up(&hdr->block_mutex);
                        return NULL;
                }
#endif
                addr = sgbuf->table[idx].addr;
                if (! is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        up(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
                up(&hdr->block_mutex);
                return NULL;
        }
        up(&hdr->block_mutex);
        return (snd_util_memblk_t *)blk;
}


/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(emu10k1_t *emu, snd_util_memblk_t *blk)
{
        snd_assert(emu && blk, return -EINVAL);
        return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
snd_util_memblk_t *
snd_emu10k1_synth_alloc(emu10k1_t *hw, unsigned int size)
{
        emu10k1_memblk_t *blk;
        snd_util_memhdr_t *hdr = hw->memhdr;

        down(&hdr->block_mutex);
        blk = (emu10k1_memblk_t *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                up(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk);
                up(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        up(&hdr->block_mutex);
        return (snd_util_memblk_t *)blk;
}


/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(emu10k1_t *emu, snd_util_memblk_t *memblk)
{
        snd_util_memhdr_t *hdr = emu->memhdr;
        emu10k1_memblk_t *blk = (emu10k1_memblk_t *)memblk;
        unsigned long flags;

        down(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        up(&hdr->block_mutex);
        return 0;
}


/* calculate the range of pages that this block alone must allocate;
 * boundary pages shared with a neighbouring block are excluded
 */
static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        emu10k1_memblk_t *q;
        int first_page, last_page;
        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;   /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;    /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}
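
/*
 * Worked example for get_single_page_range() (hypothetical layout):
 * synth blocks are not kernel-page aligned, so two neighbouring blocks
 * may share one aligned page.  If the previous block ends on aligned
 * page 7 and this block also starts on page 7, that page is already
 * backed by memory, and allocation (or freeing) must start at page 8
 * instead.
 */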

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
                                        PAGE_SIZE, &dmab) < 0)
                        goto __fail;
                if (! is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release the previously allocated pages */
        last_page = page - 1;
        for (page = first_page; page <= last_page; page++) {
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(emu10k1_t *emu, emu10k1_memblk_t *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        dmab.dev.type = SNDRV_DMA_TYPE_DEV;
        dmab.dev.dev = snd_dma_pci_data(emu->pci);
        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;       /* not allocated here (e.g. a DMA buffer page) */
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return 0;
}

/* calculate the buffer pointer from an offset address */
static inline void *offset_ptr(emu10k1_t *emu, int page, int offset)
{
        char *ptr;
        snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;   /* clip to the end of the region */
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;   /* clip to the end of the region */
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}
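
/*
 * Typical call sequence for the synth allocator above (a sketch; the
 * actual callers live in the emu10k1 synth/wavetable code): a sample
 * area is created with snd_emu10k1_synth_alloc(), filled with
 * snd_emu10k1_synth_copy_from_user() and/or cleared with
 * snd_emu10k1_synth_bzero(), and finally released with
 * snd_emu10k1_synth_free().
 */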