/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *  Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
 *
 *  Trident 4DWave-NX memory page allocation (TLB area)
 *  The Trident chip can address only 16 MBytes of memory at a time.
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <asm/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/trident.h>

/* The page arguments of these two macros refer to Trident pages (4096 bytes),
 * not to the aligned pages used elsewhere in this file.
 */
#define __set_tlb_bus(trident,page,ptr,addr) \
        do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
             (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
#define __tlb_to_ptr(trident,page) \
        (void*)((trident)->tlb.shadow_entries[page])
#define __tlb_to_addr(trident,page) \
        (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE         PAGE_SIZE       /* minimum page size for allocation */
#define MAX_ALIGN_PAGES         SNDRV_TRIDENT_MAX_PAGES /* maximum aligned pages */
/* fill the TLB entry (or entries) corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill the TLB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_tlb(trident,page)    __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page)       __tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)      __tlb_to_addr(trident, page)

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE         PAGE_SIZE
#define MAX_ALIGN_PAGES         (SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset)        ((offset) >> 13)
#define aligned_page_offset(page)       ((page) << 13)
#define page_to_ptr(trident,page)       __tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page)      __tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
                               unsigned long ptr, dma_addr_t addr)
{
        page <<= 1;
        __set_tlb_bus(trident, page, ptr, addr);
        __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
        page <<= 1;
        __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
        __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#else
/* arbitrary size */
#define UNIT_PAGES              (PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE         (SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES         (SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if the alignment doesn't match the maximum size, the last few blocks
 * become unusable.  To use such blocks, the validity of the accessed page
 * would have to be checked in set_tlb_bus and set_silent_tlb, and
 * search_empty() would have to check it as well.
 */
#define get_aligned_page(offset)        ((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)       ((page) * ALIGN_PAGE_SIZE)
#define page_to_ptr(trident,page)       __tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page)      __tlb_to_addr(trident, (page) * UNIT_PAGES)

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
                               unsigned long ptr, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_tlb_bus(trident, page, ptr, addr);
                ptr += SNDRV_TRIDENT_PAGE_SIZE;
                addr += SNDRV_TRIDENT_PAGE_SIZE;
        }
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#endif /* PAGE_SIZE */

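/* For illustration (assuming PAGE_SIZE == 4096, so aligned pages and Trident
 * pages coincide): a buffer offset of 0x3010 maps to aligned page
 * get_aligned_page(0x3010) == 3, whose base offset is
 * aligned_page_offset(3) == 0x3000; the remaining 0x10 bytes are the offset
 * within that page, as used by offset_ptr() below.
 */
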
/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_trident *trident, int offset)
{
        char *ptr;
        ptr = page_to_ptr(trident, get_aligned_page(offset));
        ptr += offset % ALIGN_PAGE_SIZE;
        return (void*)ptr;
}

/* first and last (aligned) pages of memory block */
#define firstpg(blk)    (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)     (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

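/* The allocator below walks the block list, which is kept ordered by offset,
 * and takes the first gap of aligned pages large enough for the request
 * (first fit).  For example, with blocks occupying aligned pages 0-2 and
 * 5-9, a two-page request would be placed at pages 3-4.
 */
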
/*
 * search for empty pages large enough to hold the given size
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
        struct snd_util_memblk *blk, *prev;
        int page, psize;
        struct list_head *p;

        psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
        prev = NULL;
        page = 0;
        list_for_each(p, &hdr->block) {
                blk = list_entry(p, struct snd_util_memblk, list);
                if (page + psize <= firstpg(blk))
                        goto __found_pages;
                page = lastpg(blk) + 1;
        }
        if (page + psize > MAX_ALIGN_PAGES)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
        if (blk == NULL)
                return NULL;
        blk->offset = aligned_page_offset(page);        /* set aligned offset */
        firstpg(blk) = page;
        lastpg(blk) = page + psize - 1;
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(unsigned long ptr)
{
        if (ptr & ~0x3fffffffUL) {
                snd_printk(KERN_ERR "max memory size is 1GB!!\n");
                return 0;
        }
        if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
                           struct snd_pcm_substream *substream)
{
        struct snd_util_memhdr *hdr;
        struct snd_util_memblk *blk;
        struct snd_pcm_runtime *runtime = substream->runtime;
        int idx, page;
        struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);

        snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
        hdr = trident->tlb.memhdr;
        snd_assert(hdr != NULL, return NULL);

        mutex_lock(&hdr->block_mutex);
        blk = search_empty(hdr, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (lastpg(blk) - firstpg(blk) >= sgbuf->pages) {
                snd_printk(KERN_ERR "page calculation doesn't match: allocated pages = %d, trident = %d/%d\n", sgbuf->pages, firstpg(blk), lastpg(blk));
                __snd_util_mem_free(hdr, blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }

        /* set TLB entries */
        idx = 0;
        for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
                dma_addr_t addr = sgbuf->table[idx].addr;
                unsigned long ptr = (unsigned long)sgbuf->table[idx].buf;
                if (! is_valid_page(addr)) {
                        __snd_util_mem_free(hdr, blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                set_tlb_bus(trident, page, ptr, addr);
        }
        mutex_unlock(&hdr->block_mutex);
        return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
                             struct snd_pcm_substream *substream)
{
        struct snd_util_memhdr *hdr;
        struct snd_util_memblk *blk;
        int page;
        struct snd_pcm_runtime *runtime = substream->runtime;
        dma_addr_t addr;
        unsigned long ptr;

        snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
        hdr = trident->tlb.memhdr;
        snd_assert(hdr != NULL, return NULL);

        mutex_lock(&hdr->block_mutex);
        blk = search_empty(hdr, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }

        /* set TLB entries */
        addr = runtime->dma_addr;
        ptr = (unsigned long)runtime->dma_area;
        for (page = firstpg(blk); page <= lastpg(blk); page++,
             ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
                if (! is_valid_page(addr)) {
                        __snd_util_mem_free(hdr, blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                set_tlb_bus(trident, page, ptr, addr);
        }
        mutex_unlock(&hdr->block_mutex);
        return blk;
}

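/* Both allocators above only program the on-card TLB; the PCM buffer itself
 * is allocated elsewhere, before these functions are called.  Whether the
 * backing pages are physically contiguous or scatter-gathered, the chip sees
 * them through the TLB as a linear range inside its 16 MByte addressable
 * window.
 */
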
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
                        struct snd_pcm_substream *substream)
{
        snd_assert(trident != NULL, return NULL);
        snd_assert(substream != NULL, return NULL);
        if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
                return snd_trident_alloc_sg_pages(trident, substream);
        else
                return snd_trident_alloc_cont_pages(trident, substream);
}


/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
                           struct snd_util_memblk *blk)
{
        struct snd_util_memhdr *hdr;
        int page;

        snd_assert(trident != NULL, return -EINVAL);
        snd_assert(blk != NULL, return -EINVAL);

        hdr = trident->tlb.memhdr;
        mutex_lock(&hdr->block_mutex);
        /* reset TLB entries */
        for (page = firstpg(blk); page <= lastpg(blk); page++)
                set_silent_tlb(trident, page);
        /* free memory block */
        __snd_util_mem_free(hdr, blk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}


/*----------------------------------------------------------------
 * memory allocation using multiple pages (for synth)
 *----------------------------------------------------------------
 * Unlike the DMA allocation above, non-contiguous pages are
 * assigned to the TLB.
 *----------------------------------------------------------------*/

/*
 * local prototypes
 */
static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk);
static int synth_free_pages(struct snd_trident *hw, struct snd_util_memblk *blk);

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_trident_synth_alloc(struct snd_trident *hw, unsigned int size)
{
        struct snd_util_memblk *blk;
        struct snd_util_memhdr *hdr = hw->tlb.memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = __snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return blk;
}

EXPORT_SYMBOL(snd_trident_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_trident_synth_free(struct snd_trident *hw, struct snd_util_memblk *blk)
{
        struct snd_util_memhdr *hdr = hw->tlb.memhdr;

        mutex_lock(&hdr->block_mutex);
        synth_free_pages(hw, blk);
        __snd_util_mem_free(hdr, blk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_trident_synth_free);

/*
 * reset TLB entry and free kernel page
 */
static void clear_tlb(struct snd_trident *trident, int page)
{
        void *ptr = page_to_ptr(trident, page);
        dma_addr_t addr = page_to_addr(trident, page);
        set_silent_tlb(trident, page);
        if (ptr) {
                struct snd_dma_buffer dmab;
                dmab.dev.type = SNDRV_DMA_TYPE_DEV;
                dmab.dev.dev = snd_dma_pci_data(trident->pci);
                dmab.area = ptr;
                dmab.addr = addr;
                dmab.bytes = ALIGN_PAGE_SIZE;
                snd_dma_free_pages(&dmab);
        }
}

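/* Example of the range check done below: synth blocks returned by
 * __snd_util_mem_alloc() are byte-granular, so neighbouring blocks may share
 * an aligned page.  If this block covers aligned pages 5-8 and the previous
 * block in the list already ends on page 5, the kernel page for page 5
 * exists and the new allocation must start at page 6; the last page is
 * handled the same way.
 */
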
/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_util_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_util_memblk *q;
        int first_page, last_page;
        first_page = firstpg(blk);
        if ((p = blk->list.prev) != &hdr->block) {
                q = list_entry(p, struct snd_util_memblk, list);
                if (lastpg(q) == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = lastpg(blk);
        if ((p = blk->list.next) != &hdr->block) {
                q = list_entry(p, struct snd_util_memblk, list);
                if (firstpg(q) == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/*
 * allocate kernel pages and assign them to TLB
 */
static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        firstpg(blk) = get_aligned_page(blk->offset);
        lastpg(blk) = get_aligned_page(blk->offset + blk->size - 1);
        get_single_page_range(hw->tlb.memhdr, blk, &first_page, &last_page);

        /* allocate a kernel page for each Trident page -
         * fortunately, the Trident page size and the kernel PAGE_SIZE are identical!
         */
        for (page = first_page; page <= last_page; page++) {
                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(hw->pci),
                                        ALIGN_PAGE_SIZE, &dmab) < 0)
                        goto __fail;
                if (! is_valid_page(dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                set_tlb_bus(hw, page, (unsigned long)dmab.area, dmab.addr);
        }
        return 0;

__fail:
        /* release allocated pages */
        last_page = page - 1;
        for (page = first_page; page <= last_page; page++)
                clear_tlb(hw, page);

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_trident *trident, struct snd_util_memblk *blk)
{
        int page, first_page, last_page;

        get_single_page_range(trident->tlb.memhdr, blk, &first_page, &last_page);
        for (page = first_page; page <= last_page; page++)
                clear_tlb(trident, page);

        return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_trident_synth_copy_from_user(struct snd_trident *trident,
                                     struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;

        offset += blk->offset;
        end_offset = offset + size;
        page = get_aligned_page(offset) + 1;
        do {
                nextofs = aligned_page_offset(page);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                if (copy_from_user(offset_ptr(trident, offset), data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_trident_synth_copy_from_user);