1 /* 2 * SPU file system -- file contents 3 * 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 5 * 6 * Author: Arnd Bergmann <arndb@de.ibm.com> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2, or (at your option) 11 * any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #undef DEBUG 24 25 #include <linux/fs.h> 26 #include <linux/ioctl.h> 27 #include <linux/module.h> 28 #include <linux/pagemap.h> 29 #include <linux/poll.h> 30 #include <linux/ptrace.h> 31 #include <linux/seq_file.h> 32 #include <linux/marker.h> 33 34 #include <asm/io.h> 35 #include <asm/time.h> 36 #include <asm/spu.h> 37 #include <asm/spu_info.h> 38 #include <asm/uaccess.h> 39 40 #include "spufs.h" 41 42 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) 43 44 /* Simple attribute files */ 45 struct spufs_attr { 46 int (*get)(void *, u64 *); 47 int (*set)(void *, u64); 48 char get_buf[24]; /* enough to store a u64 and "\n\0" */ 49 char set_buf[24]; 50 void *data; 51 const char *fmt; /* format for read operation */ 52 struct mutex mutex; /* protects access to these buffers */ 53 }; 54 55 static int spufs_attr_open(struct inode *inode, struct file *file, 56 int (*get)(void *, u64 *), int (*set)(void *, u64), 57 const char *fmt) 58 { 59 struct spufs_attr *attr; 60 61 attr = kmalloc(sizeof(*attr), GFP_KERNEL); 62 if (!attr) 63 return -ENOMEM; 64 65 attr->get = get; 66 attr->set = set; 67 attr->data = inode->i_private; 68 attr->fmt = fmt; 69 mutex_init(&attr->mutex); 70 file->private_data = attr; 71 72 return nonseekable_open(inode, file); 73 } 74 75 static int spufs_attr_release(struct inode *inode, struct file *file) 76 { 77 kfree(file->private_data); 78 return 0; 79 } 80 81 static ssize_t spufs_attr_read(struct file *file, char __user *buf, 82 size_t len, loff_t *ppos) 83 { 84 struct spufs_attr *attr; 85 size_t size; 86 ssize_t ret; 87 88 attr = file->private_data; 89 if (!attr->get) 90 return -EACCES; 91 92 ret = mutex_lock_interruptible(&attr->mutex); 93 if (ret) 94 return ret; 95 96 if (*ppos) { /* continued read */ 97 size = strlen(attr->get_buf); 98 } else { /* first read */ 99 u64 val; 100 ret = attr->get(attr->data, &val); 101 if (ret) 102 goto out; 103 104 size = scnprintf(attr->get_buf, sizeof(attr->get_buf), 105 attr->fmt, (unsigned long long)val); 106 } 107 108 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); 109 out: 110 mutex_unlock(&attr->mutex); 111 return ret; 112 } 113 114 static ssize_t spufs_attr_write(struct file *file, const char __user *buf, 115 size_t len, loff_t *ppos) 116 { 117 struct spufs_attr *attr; 118 u64 val; 119 size_t size; 120 ssize_t ret; 121 122 attr = file->private_data; 123 if (!attr->set) 124 return -EACCES; 125 126 ret = mutex_lock_interruptible(&attr->mutex); 127 if (ret) 128 return ret; 129 130 ret = -EFAULT; 131 size = min(sizeof(attr->set_buf) - 1, len); 132 if (copy_from_user(attr->set_buf, buf, size)) 133 goto out; 134 135 ret = len; /* claim we got the whole input */ 136 attr->set_buf[size] = '\0'; 137 val = simple_strtol(attr->set_buf, NULL, 0); 138 attr->set(attr->data, val); 139 out: 140 mutex_unlock(&attr->mutex); 141 return ret; 142 } 143 144 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ 145 static int __fops ## _open(struct inode *inode, struct file *file) \ 146 { \ 147 __simple_attr_check_format(__fmt, 0ull); \ 148 return spufs_attr_open(inode, file, __get, __set, __fmt); \ 149 } \ 150 static struct file_operations __fops = { \ 151 .owner = THIS_MODULE, \ 152 .open = __fops ## _open, \ 153 .release = spufs_attr_release, \ 154 .read = spufs_attr_read, \ 155 .write = spufs_attr_write, \ 156 }; 157 158 159 static int 160 spufs_mem_open(struct inode *inode, struct file *file) 161 { 162 struct spufs_inode_info *i = SPUFS_I(inode); 163 struct spu_context *ctx = i->i_ctx; 164 165 mutex_lock(&ctx->mapping_lock); 166 file->private_data = ctx; 167 if (!i->i_openers++) 168 ctx->local_store = inode->i_mapping; 169 mutex_unlock(&ctx->mapping_lock); 170 return 0; 171 } 172 173 static int 174 spufs_mem_release(struct inode *inode, struct file *file) 175 { 176 struct spufs_inode_info *i = SPUFS_I(inode); 177 struct spu_context *ctx = i->i_ctx; 178 179 mutex_lock(&ctx->mapping_lock); 180 if (!--i->i_openers) 181 ctx->local_store = NULL; 182 mutex_unlock(&ctx->mapping_lock); 183 return 0; 184 } 185 186 static ssize_t 187 __spufs_mem_read(struct spu_context *ctx, char __user *buffer, 188 size_t size, loff_t *pos) 189 { 190 char *local_store = ctx->ops->get_ls(ctx); 191 return simple_read_from_buffer(buffer, size, pos, local_store, 192 LS_SIZE); 193 } 194 195 static ssize_t 196 spufs_mem_read(struct file *file, char __user *buffer, 197 size_t size, loff_t *pos) 198 { 199 struct spu_context *ctx = file->private_data; 200 ssize_t ret; 201 202 ret = spu_acquire(ctx); 203 if (ret) 204 return ret; 205 ret = __spufs_mem_read(ctx, buffer, size, pos); 206 spu_release(ctx); 207 208 return ret; 209 } 210 211 static ssize_t 212 spufs_mem_write(struct file *file, const char __user *buffer, 213 size_t size, loff_t *ppos) 214 { 215 struct spu_context *ctx = file->private_data; 216 char *local_store; 217 loff_t pos = *ppos; 218 int ret; 219 220 if (pos < 0) 221 return -EINVAL; 222 if (pos > LS_SIZE) 223 return -EFBIG; 224 if (size > LS_SIZE - pos) 225 size = LS_SIZE - pos; 226 227 ret = spu_acquire(ctx); 228 if (ret) 229 return ret; 230 231 local_store = ctx->ops->get_ls(ctx); 232 ret = copy_from_user(local_store + pos, buffer, size); 233 spu_release(ctx); 234 235 if (ret) 236 return -EFAULT; 237 *ppos = pos + size; 238 return size; 239 } 240 241 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, 242 unsigned long address) 243 { 244 struct spu_context *ctx = vma->vm_file->private_data; 245 unsigned long pfn, offset, addr0 = address; 246 #ifdef CONFIG_SPU_FS_64K_LS 247 struct spu_state *csa = &ctx->csa; 248 int psize; 249 250 /* Check what page size we are using */ 251 psize = get_slice_psize(vma->vm_mm, address); 252 253 /* Some sanity checking */ 254 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K)); 255 256 /* Wow, 64K, cool, we need to align the address though */ 257 if (csa->use_big_pages) { 258 BUG_ON(vma->vm_start & 0xffff); 259 address &= ~0xfffful; 260 } 261 #endif /* CONFIG_SPU_FS_64K_LS */ 262 263 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); 264 if (offset >= LS_SIZE) 265 return NOPFN_SIGBUS; 266 267 pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n", 268 addr0, address, offset); 269 270 if (spu_acquire(ctx)) 271 return NOPFN_REFAULT; 272 273 if (ctx->state == SPU_STATE_SAVED) { 274 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 275 & ~_PAGE_NO_CACHE); 276 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); 277 } else { 278 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 279 | _PAGE_NO_CACHE); 280 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; 281 } 282 vm_insert_pfn(vma, address, pfn); 283 284 spu_release(ctx); 285 286 return NOPFN_REFAULT; 287 } 288 289 290 static struct vm_operations_struct spufs_mem_mmap_vmops = { 291 .nopfn = spufs_mem_mmap_nopfn, 292 }; 293 294 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) 295 { 296 #ifdef CONFIG_SPU_FS_64K_LS 297 struct spu_context *ctx = file->private_data; 298 struct spu_state *csa = &ctx->csa; 299 300 /* Sanity check VMA alignment */ 301 if (csa->use_big_pages) { 302 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx," 303 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end, 304 vma->vm_pgoff); 305 if (vma->vm_start & 0xffff) 306 return -EINVAL; 307 if (vma->vm_pgoff & 0xf) 308 return -EINVAL; 309 } 310 #endif /* CONFIG_SPU_FS_64K_LS */ 311 312 if (!(vma->vm_flags & VM_SHARED)) 313 return -EINVAL; 314 315 vma->vm_flags |= VM_IO | VM_PFNMAP; 316 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 317 | _PAGE_NO_CACHE); 318 319 vma->vm_ops = &spufs_mem_mmap_vmops; 320 return 0; 321 } 322 323 #ifdef CONFIG_SPU_FS_64K_LS 324 static unsigned long spufs_get_unmapped_area(struct file *file, 325 unsigned long addr, unsigned long len, unsigned long pgoff, 326 unsigned long flags) 327 { 328 struct spu_context *ctx = file->private_data; 329 struct spu_state *csa = &ctx->csa; 330 331 /* If not using big pages, fallback to normal MM g_u_a */ 332 if (!csa->use_big_pages) 333 return current->mm->get_unmapped_area(file, addr, len, 334 pgoff, flags); 335 336 /* Else, try to obtain a 64K pages slice */ 337 return slice_get_unmapped_area(addr, len, flags, 338 MMU_PAGE_64K, 1, 0); 339 } 340 #endif /* CONFIG_SPU_FS_64K_LS */ 341 342 static const struct file_operations spufs_mem_fops = { 343 .open = spufs_mem_open, 344 .release = spufs_mem_release, 345 .read = spufs_mem_read, 346 .write = spufs_mem_write, 347 .llseek = generic_file_llseek, 348 .mmap = spufs_mem_mmap, 349 #ifdef CONFIG_SPU_FS_64K_LS 350 .get_unmapped_area = spufs_get_unmapped_area, 351 #endif 352 }; 353 354 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, 355 unsigned long address, 356 unsigned long ps_offs, 357 unsigned long ps_size) 358 { 359 struct spu_context *ctx = vma->vm_file->private_data; 360 unsigned long area, offset = address - vma->vm_start; 361 int ret = 0; 362 363 spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx); 364 365 offset += vma->vm_pgoff << PAGE_SHIFT; 366 if (offset >= ps_size) 367 return NOPFN_SIGBUS; 368 369 /* 370 * Because we release the mmap_sem, the context may be destroyed while 371 * we're in spu_wait. Grab an extra reference so it isn't destroyed 372 * in the meantime. 373 */ 374 get_spu_context(ctx); 375 376 /* 377 * We have to wait for context to be loaded before we have 378 * pages to hand out to the user, but we don't want to wait 379 * with the mmap_sem held. 380 * It is possible to drop the mmap_sem here, but then we need 381 * to return NOPFN_REFAULT because the mappings may have 382 * hanged. 383 */ 384 if (spu_acquire(ctx)) 385 goto refault; 386 387 if (ctx->state == SPU_STATE_SAVED) { 388 up_read(¤t->mm->mmap_sem); 389 spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx); 390 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); 391 spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu); 392 down_read(¤t->mm->mmap_sem); 393 } else { 394 area = ctx->spu->problem_phys + ps_offs; 395 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); 396 spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu); 397 } 398 399 if (!ret) 400 spu_release(ctx); 401 402 refault: 403 put_spu_context(ctx); 404 return NOPFN_REFAULT; 405 } 406 407 #if SPUFS_MMAP_4K 408 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma, 409 unsigned long address) 410 { 411 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000); 412 } 413 414 static struct vm_operations_struct spufs_cntl_mmap_vmops = { 415 .nopfn = spufs_cntl_mmap_nopfn, 416 }; 417 418 /* 419 * mmap support for problem state control area [0x4000 - 0x4fff]. 420 */ 421 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) 422 { 423 if (!(vma->vm_flags & VM_SHARED)) 424 return -EINVAL; 425 426 vma->vm_flags |= VM_IO | VM_PFNMAP; 427 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 428 | _PAGE_NO_CACHE | _PAGE_GUARDED); 429 430 vma->vm_ops = &spufs_cntl_mmap_vmops; 431 return 0; 432 } 433 #else /* SPUFS_MMAP_4K */ 434 #define spufs_cntl_mmap NULL 435 #endif /* !SPUFS_MMAP_4K */ 436 437 static int spufs_cntl_get(void *data, u64 *val) 438 { 439 struct spu_context *ctx = data; 440 int ret; 441 442 ret = spu_acquire(ctx); 443 if (ret) 444 return ret; 445 *val = ctx->ops->status_read(ctx); 446 spu_release(ctx); 447 448 return 0; 449 } 450 451 static int spufs_cntl_set(void *data, u64 val) 452 { 453 struct spu_context *ctx = data; 454 int ret; 455 456 ret = spu_acquire(ctx); 457 if (ret) 458 return ret; 459 ctx->ops->runcntl_write(ctx, val); 460 spu_release(ctx); 461 462 return 0; 463 } 464 465 static int spufs_cntl_open(struct inode *inode, struct file *file) 466 { 467 struct spufs_inode_info *i = SPUFS_I(inode); 468 struct spu_context *ctx = i->i_ctx; 469 470 mutex_lock(&ctx->mapping_lock); 471 file->private_data = ctx; 472 if (!i->i_openers++) 473 ctx->cntl = inode->i_mapping; 474 mutex_unlock(&ctx->mapping_lock); 475 return simple_attr_open(inode, file, spufs_cntl_get, 476 spufs_cntl_set, "0x%08lx"); 477 } 478 479 static int 480 spufs_cntl_release(struct inode *inode, struct file *file) 481 { 482 struct spufs_inode_info *i = SPUFS_I(inode); 483 struct spu_context *ctx = i->i_ctx; 484 485 simple_attr_release(inode, file); 486 487 mutex_lock(&ctx->mapping_lock); 488 if (!--i->i_openers) 489 ctx->cntl = NULL; 490 mutex_unlock(&ctx->mapping_lock); 491 return 0; 492 } 493 494 static const struct file_operations spufs_cntl_fops = { 495 .open = spufs_cntl_open, 496 .release = spufs_cntl_release, 497 .read = simple_attr_read, 498 .write = simple_attr_write, 499 .mmap = spufs_cntl_mmap, 500 }; 501 502 static int 503 spufs_regs_open(struct inode *inode, struct file *file) 504 { 505 struct spufs_inode_info *i = SPUFS_I(inode); 506 file->private_data = i->i_ctx; 507 return 0; 508 } 509 510 static ssize_t 511 __spufs_regs_read(struct spu_context *ctx, char __user *buffer, 512 size_t size, loff_t *pos) 513 { 514 struct spu_lscsa *lscsa = ctx->csa.lscsa; 515 return simple_read_from_buffer(buffer, size, pos, 516 lscsa->gprs, sizeof lscsa->gprs); 517 } 518 519 static ssize_t 520 spufs_regs_read(struct file *file, char __user *buffer, 521 size_t size, loff_t *pos) 522 { 523 int ret; 524 struct spu_context *ctx = file->private_data; 525 526 ret = spu_acquire_saved(ctx); 527 if (ret) 528 return ret; 529 ret = __spufs_regs_read(ctx, buffer, size, pos); 530 spu_release_saved(ctx); 531 return ret; 532 } 533 534 static ssize_t 535 spufs_regs_write(struct file *file, const char __user *buffer, 536 size_t size, loff_t *pos) 537 { 538 struct spu_context *ctx = file->private_data; 539 struct spu_lscsa *lscsa = ctx->csa.lscsa; 540 int ret; 541 542 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size); 543 if (size <= 0) 544 return -EFBIG; 545 *pos += size; 546 547 ret = spu_acquire_saved(ctx); 548 if (ret) 549 return ret; 550 551 ret = copy_from_user(lscsa->gprs + *pos - size, 552 buffer, size) ? -EFAULT : size; 553 554 spu_release_saved(ctx); 555 return ret; 556 } 557 558 static const struct file_operations spufs_regs_fops = { 559 .open = spufs_regs_open, 560 .read = spufs_regs_read, 561 .write = spufs_regs_write, 562 .llseek = generic_file_llseek, 563 }; 564 565 static ssize_t 566 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer, 567 size_t size, loff_t * pos) 568 { 569 struct spu_lscsa *lscsa = ctx->csa.lscsa; 570 return simple_read_from_buffer(buffer, size, pos, 571 &lscsa->fpcr, sizeof(lscsa->fpcr)); 572 } 573 574 static ssize_t 575 spufs_fpcr_read(struct file *file, char __user * buffer, 576 size_t size, loff_t * pos) 577 { 578 int ret; 579 struct spu_context *ctx = file->private_data; 580 581 ret = spu_acquire_saved(ctx); 582 if (ret) 583 return ret; 584 ret = __spufs_fpcr_read(ctx, buffer, size, pos); 585 spu_release_saved(ctx); 586 return ret; 587 } 588 589 static ssize_t 590 spufs_fpcr_write(struct file *file, const char __user * buffer, 591 size_t size, loff_t * pos) 592 { 593 struct spu_context *ctx = file->private_data; 594 struct spu_lscsa *lscsa = ctx->csa.lscsa; 595 int ret; 596 597 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size); 598 if (size <= 0) 599 return -EFBIG; 600 601 ret = spu_acquire_saved(ctx); 602 if (ret) 603 return ret; 604 605 *pos += size; 606 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size, 607 buffer, size) ? -EFAULT : size; 608 609 spu_release_saved(ctx); 610 return ret; 611 } 612 613 static const struct file_operations spufs_fpcr_fops = { 614 .open = spufs_regs_open, 615 .read = spufs_fpcr_read, 616 .write = spufs_fpcr_write, 617 .llseek = generic_file_llseek, 618 }; 619 620 /* generic open function for all pipe-like files */ 621 static int spufs_pipe_open(struct inode *inode, struct file *file) 622 { 623 struct spufs_inode_info *i = SPUFS_I(inode); 624 file->private_data = i->i_ctx; 625 626 return nonseekable_open(inode, file); 627 } 628 629 /* 630 * Read as many bytes from the mailbox as possible, until 631 * one of the conditions becomes true: 632 * 633 * - no more data available in the mailbox 634 * - end of the user provided buffer 635 * - end of the mapped area 636 */ 637 static ssize_t spufs_mbox_read(struct file *file, char __user *buf, 638 size_t len, loff_t *pos) 639 { 640 struct spu_context *ctx = file->private_data; 641 u32 mbox_data, __user *udata; 642 ssize_t count; 643 644 if (len < 4) 645 return -EINVAL; 646 647 if (!access_ok(VERIFY_WRITE, buf, len)) 648 return -EFAULT; 649 650 udata = (void __user *)buf; 651 652 count = spu_acquire(ctx); 653 if (count) 654 return count; 655 656 for (count = 0; (count + 4) <= len; count += 4, udata++) { 657 int ret; 658 ret = ctx->ops->mbox_read(ctx, &mbox_data); 659 if (ret == 0) 660 break; 661 662 /* 663 * at the end of the mapped area, we can fault 664 * but still need to return the data we have 665 * read successfully so far. 666 */ 667 ret = __put_user(mbox_data, udata); 668 if (ret) { 669 if (!count) 670 count = -EFAULT; 671 break; 672 } 673 } 674 spu_release(ctx); 675 676 if (!count) 677 count = -EAGAIN; 678 679 return count; 680 } 681 682 static const struct file_operations spufs_mbox_fops = { 683 .open = spufs_pipe_open, 684 .read = spufs_mbox_read, 685 }; 686 687 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, 688 size_t len, loff_t *pos) 689 { 690 struct spu_context *ctx = file->private_data; 691 ssize_t ret; 692 u32 mbox_stat; 693 694 if (len < 4) 695 return -EINVAL; 696 697 ret = spu_acquire(ctx); 698 if (ret) 699 return ret; 700 701 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; 702 703 spu_release(ctx); 704 705 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) 706 return -EFAULT; 707 708 return 4; 709 } 710 711 static const struct file_operations spufs_mbox_stat_fops = { 712 .open = spufs_pipe_open, 713 .read = spufs_mbox_stat_read, 714 }; 715 716 /* low-level ibox access function */ 717 size_t spu_ibox_read(struct spu_context *ctx, u32 *data) 718 { 719 return ctx->ops->ibox_read(ctx, data); 720 } 721 722 static int spufs_ibox_fasync(int fd, struct file *file, int on) 723 { 724 struct spu_context *ctx = file->private_data; 725 726 return fasync_helper(fd, file, on, &ctx->ibox_fasync); 727 } 728 729 /* interrupt-level ibox callback function. */ 730 void spufs_ibox_callback(struct spu *spu) 731 { 732 struct spu_context *ctx = spu->ctx; 733 734 if (!ctx) 735 return; 736 737 wake_up_all(&ctx->ibox_wq); 738 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); 739 } 740 741 /* 742 * Read as many bytes from the interrupt mailbox as possible, until 743 * one of the conditions becomes true: 744 * 745 * - no more data available in the mailbox 746 * - end of the user provided buffer 747 * - end of the mapped area 748 * 749 * If the file is opened without O_NONBLOCK, we wait here until 750 * any data is available, but return when we have been able to 751 * read something. 752 */ 753 static ssize_t spufs_ibox_read(struct file *file, char __user *buf, 754 size_t len, loff_t *pos) 755 { 756 struct spu_context *ctx = file->private_data; 757 u32 ibox_data, __user *udata; 758 ssize_t count; 759 760 if (len < 4) 761 return -EINVAL; 762 763 if (!access_ok(VERIFY_WRITE, buf, len)) 764 return -EFAULT; 765 766 udata = (void __user *)buf; 767 768 count = spu_acquire(ctx); 769 if (count) 770 goto out; 771 772 /* wait only for the first element */ 773 count = 0; 774 if (file->f_flags & O_NONBLOCK) { 775 if (!spu_ibox_read(ctx, &ibox_data)) { 776 count = -EAGAIN; 777 goto out_unlock; 778 } 779 } else { 780 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); 781 if (count) 782 goto out; 783 } 784 785 /* if we can't write at all, return -EFAULT */ 786 count = __put_user(ibox_data, udata); 787 if (count) 788 goto out_unlock; 789 790 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { 791 int ret; 792 ret = ctx->ops->ibox_read(ctx, &ibox_data); 793 if (ret == 0) 794 break; 795 /* 796 * at the end of the mapped area, we can fault 797 * but still need to return the data we have 798 * read successfully so far. 799 */ 800 ret = __put_user(ibox_data, udata); 801 if (ret) 802 break; 803 } 804 805 out_unlock: 806 spu_release(ctx); 807 out: 808 return count; 809 } 810 811 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait) 812 { 813 struct spu_context *ctx = file->private_data; 814 unsigned int mask; 815 816 poll_wait(file, &ctx->ibox_wq, wait); 817 818 /* 819 * For now keep this uninterruptible and also ignore the rule 820 * that poll should not sleep. Will be fixed later. 821 */ 822 mutex_lock(&ctx->state_mutex); 823 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); 824 spu_release(ctx); 825 826 return mask; 827 } 828 829 static const struct file_operations spufs_ibox_fops = { 830 .open = spufs_pipe_open, 831 .read = spufs_ibox_read, 832 .poll = spufs_ibox_poll, 833 .fasync = spufs_ibox_fasync, 834 }; 835 836 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, 837 size_t len, loff_t *pos) 838 { 839 struct spu_context *ctx = file->private_data; 840 ssize_t ret; 841 u32 ibox_stat; 842 843 if (len < 4) 844 return -EINVAL; 845 846 ret = spu_acquire(ctx); 847 if (ret) 848 return ret; 849 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; 850 spu_release(ctx); 851 852 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) 853 return -EFAULT; 854 855 return 4; 856 } 857 858 static const struct file_operations spufs_ibox_stat_fops = { 859 .open = spufs_pipe_open, 860 .read = spufs_ibox_stat_read, 861 }; 862 863 /* low-level mailbox write */ 864 size_t spu_wbox_write(struct spu_context *ctx, u32 data) 865 { 866 return ctx->ops->wbox_write(ctx, data); 867 } 868 869 static int spufs_wbox_fasync(int fd, struct file *file, int on) 870 { 871 struct spu_context *ctx = file->private_data; 872 int ret; 873 874 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); 875 876 return ret; 877 } 878 879 /* interrupt-level wbox callback function. */ 880 void spufs_wbox_callback(struct spu *spu) 881 { 882 struct spu_context *ctx = spu->ctx; 883 884 if (!ctx) 885 return; 886 887 wake_up_all(&ctx->wbox_wq); 888 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); 889 } 890 891 /* 892 * Write as many bytes to the interrupt mailbox as possible, until 893 * one of the conditions becomes true: 894 * 895 * - the mailbox is full 896 * - end of the user provided buffer 897 * - end of the mapped area 898 * 899 * If the file is opened without O_NONBLOCK, we wait here until 900 * space is availabyl, but return when we have been able to 901 * write something. 902 */ 903 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, 904 size_t len, loff_t *pos) 905 { 906 struct spu_context *ctx = file->private_data; 907 u32 wbox_data, __user *udata; 908 ssize_t count; 909 910 if (len < 4) 911 return -EINVAL; 912 913 udata = (void __user *)buf; 914 if (!access_ok(VERIFY_READ, buf, len)) 915 return -EFAULT; 916 917 if (__get_user(wbox_data, udata)) 918 return -EFAULT; 919 920 count = spu_acquire(ctx); 921 if (count) 922 goto out; 923 924 /* 925 * make sure we can at least write one element, by waiting 926 * in case of !O_NONBLOCK 927 */ 928 count = 0; 929 if (file->f_flags & O_NONBLOCK) { 930 if (!spu_wbox_write(ctx, wbox_data)) { 931 count = -EAGAIN; 932 goto out_unlock; 933 } 934 } else { 935 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); 936 if (count) 937 goto out; 938 } 939 940 941 /* write as much as possible */ 942 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { 943 int ret; 944 ret = __get_user(wbox_data, udata); 945 if (ret) 946 break; 947 948 ret = spu_wbox_write(ctx, wbox_data); 949 if (ret == 0) 950 break; 951 } 952 953 out_unlock: 954 spu_release(ctx); 955 out: 956 return count; 957 } 958 959 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait) 960 { 961 struct spu_context *ctx = file->private_data; 962 unsigned int mask; 963 964 poll_wait(file, &ctx->wbox_wq, wait); 965 966 /* 967 * For now keep this uninterruptible and also ignore the rule 968 * that poll should not sleep. Will be fixed later. 969 */ 970 mutex_lock(&ctx->state_mutex); 971 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); 972 spu_release(ctx); 973 974 return mask; 975 } 976 977 static const struct file_operations spufs_wbox_fops = { 978 .open = spufs_pipe_open, 979 .write = spufs_wbox_write, 980 .poll = spufs_wbox_poll, 981 .fasync = spufs_wbox_fasync, 982 }; 983 984 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, 985 size_t len, loff_t *pos) 986 { 987 struct spu_context *ctx = file->private_data; 988 ssize_t ret; 989 u32 wbox_stat; 990 991 if (len < 4) 992 return -EINVAL; 993 994 ret = spu_acquire(ctx); 995 if (ret) 996 return ret; 997 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; 998 spu_release(ctx); 999 1000 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) 1001 return -EFAULT; 1002 1003 return 4; 1004 } 1005 1006 static const struct file_operations spufs_wbox_stat_fops = { 1007 .open = spufs_pipe_open, 1008 .read = spufs_wbox_stat_read, 1009 }; 1010 1011 static int spufs_signal1_open(struct inode *inode, struct file *file) 1012 { 1013 struct spufs_inode_info *i = SPUFS_I(inode); 1014 struct spu_context *ctx = i->i_ctx; 1015 1016 mutex_lock(&ctx->mapping_lock); 1017 file->private_data = ctx; 1018 if (!i->i_openers++) 1019 ctx->signal1 = inode->i_mapping; 1020 mutex_unlock(&ctx->mapping_lock); 1021 return nonseekable_open(inode, file); 1022 } 1023 1024 static int 1025 spufs_signal1_release(struct inode *inode, struct file *file) 1026 { 1027 struct spufs_inode_info *i = SPUFS_I(inode); 1028 struct spu_context *ctx = i->i_ctx; 1029 1030 mutex_lock(&ctx->mapping_lock); 1031 if (!--i->i_openers) 1032 ctx->signal1 = NULL; 1033 mutex_unlock(&ctx->mapping_lock); 1034 return 0; 1035 } 1036 1037 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, 1038 size_t len, loff_t *pos) 1039 { 1040 int ret = 0; 1041 u32 data; 1042 1043 if (len < 4) 1044 return -EINVAL; 1045 1046 if (ctx->csa.spu_chnlcnt_RW[3]) { 1047 data = ctx->csa.spu_chnldata_RW[3]; 1048 ret = 4; 1049 } 1050 1051 if (!ret) 1052 goto out; 1053 1054 if (copy_to_user(buf, &data, 4)) 1055 return -EFAULT; 1056 1057 out: 1058 return ret; 1059 } 1060 1061 static ssize_t spufs_signal1_read(struct file *file, char __user *buf, 1062 size_t len, loff_t *pos) 1063 { 1064 int ret; 1065 struct spu_context *ctx = file->private_data; 1066 1067 ret = spu_acquire_saved(ctx); 1068 if (ret) 1069 return ret; 1070 ret = __spufs_signal1_read(ctx, buf, len, pos); 1071 spu_release_saved(ctx); 1072 1073 return ret; 1074 } 1075 1076 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, 1077 size_t len, loff_t *pos) 1078 { 1079 struct spu_context *ctx; 1080 ssize_t ret; 1081 u32 data; 1082 1083 ctx = file->private_data; 1084 1085 if (len < 4) 1086 return -EINVAL; 1087 1088 if (copy_from_user(&data, buf, 4)) 1089 return -EFAULT; 1090 1091 ret = spu_acquire(ctx); 1092 if (ret) 1093 return ret; 1094 ctx->ops->signal1_write(ctx, data); 1095 spu_release(ctx); 1096 1097 return 4; 1098 } 1099 1100 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma, 1101 unsigned long address) 1102 { 1103 #if PAGE_SIZE == 0x1000 1104 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000); 1105 #elif PAGE_SIZE == 0x10000 1106 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1107 * signal 1 and 2 area 1108 */ 1109 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000); 1110 #else 1111 #error unsupported page size 1112 #endif 1113 } 1114 1115 static struct vm_operations_struct spufs_signal1_mmap_vmops = { 1116 .nopfn = spufs_signal1_mmap_nopfn, 1117 }; 1118 1119 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) 1120 { 1121 if (!(vma->vm_flags & VM_SHARED)) 1122 return -EINVAL; 1123 1124 vma->vm_flags |= VM_IO | VM_PFNMAP; 1125 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1126 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1127 1128 vma->vm_ops = &spufs_signal1_mmap_vmops; 1129 return 0; 1130 } 1131 1132 static const struct file_operations spufs_signal1_fops = { 1133 .open = spufs_signal1_open, 1134 .release = spufs_signal1_release, 1135 .read = spufs_signal1_read, 1136 .write = spufs_signal1_write, 1137 .mmap = spufs_signal1_mmap, 1138 }; 1139 1140 static const struct file_operations spufs_signal1_nosched_fops = { 1141 .open = spufs_signal1_open, 1142 .release = spufs_signal1_release, 1143 .write = spufs_signal1_write, 1144 .mmap = spufs_signal1_mmap, 1145 }; 1146 1147 static int spufs_signal2_open(struct inode *inode, struct file *file) 1148 { 1149 struct spufs_inode_info *i = SPUFS_I(inode); 1150 struct spu_context *ctx = i->i_ctx; 1151 1152 mutex_lock(&ctx->mapping_lock); 1153 file->private_data = ctx; 1154 if (!i->i_openers++) 1155 ctx->signal2 = inode->i_mapping; 1156 mutex_unlock(&ctx->mapping_lock); 1157 return nonseekable_open(inode, file); 1158 } 1159 1160 static int 1161 spufs_signal2_release(struct inode *inode, struct file *file) 1162 { 1163 struct spufs_inode_info *i = SPUFS_I(inode); 1164 struct spu_context *ctx = i->i_ctx; 1165 1166 mutex_lock(&ctx->mapping_lock); 1167 if (!--i->i_openers) 1168 ctx->signal2 = NULL; 1169 mutex_unlock(&ctx->mapping_lock); 1170 return 0; 1171 } 1172 1173 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, 1174 size_t len, loff_t *pos) 1175 { 1176 int ret = 0; 1177 u32 data; 1178 1179 if (len < 4) 1180 return -EINVAL; 1181 1182 if (ctx->csa.spu_chnlcnt_RW[4]) { 1183 data = ctx->csa.spu_chnldata_RW[4]; 1184 ret = 4; 1185 } 1186 1187 if (!ret) 1188 goto out; 1189 1190 if (copy_to_user(buf, &data, 4)) 1191 return -EFAULT; 1192 1193 out: 1194 return ret; 1195 } 1196 1197 static ssize_t spufs_signal2_read(struct file *file, char __user *buf, 1198 size_t len, loff_t *pos) 1199 { 1200 struct spu_context *ctx = file->private_data; 1201 int ret; 1202 1203 ret = spu_acquire_saved(ctx); 1204 if (ret) 1205 return ret; 1206 ret = __spufs_signal2_read(ctx, buf, len, pos); 1207 spu_release_saved(ctx); 1208 1209 return ret; 1210 } 1211 1212 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, 1213 size_t len, loff_t *pos) 1214 { 1215 struct spu_context *ctx; 1216 ssize_t ret; 1217 u32 data; 1218 1219 ctx = file->private_data; 1220 1221 if (len < 4) 1222 return -EINVAL; 1223 1224 if (copy_from_user(&data, buf, 4)) 1225 return -EFAULT; 1226 1227 ret = spu_acquire(ctx); 1228 if (ret) 1229 return ret; 1230 ctx->ops->signal2_write(ctx, data); 1231 spu_release(ctx); 1232 1233 return 4; 1234 } 1235 1236 #if SPUFS_MMAP_4K 1237 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma, 1238 unsigned long address) 1239 { 1240 #if PAGE_SIZE == 0x1000 1241 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000); 1242 #elif PAGE_SIZE == 0x10000 1243 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1244 * signal 1 and 2 area 1245 */ 1246 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000); 1247 #else 1248 #error unsupported page size 1249 #endif 1250 } 1251 1252 static struct vm_operations_struct spufs_signal2_mmap_vmops = { 1253 .nopfn = spufs_signal2_mmap_nopfn, 1254 }; 1255 1256 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) 1257 { 1258 if (!(vma->vm_flags & VM_SHARED)) 1259 return -EINVAL; 1260 1261 vma->vm_flags |= VM_IO | VM_PFNMAP; 1262 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1263 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1264 1265 vma->vm_ops = &spufs_signal2_mmap_vmops; 1266 return 0; 1267 } 1268 #else /* SPUFS_MMAP_4K */ 1269 #define spufs_signal2_mmap NULL 1270 #endif /* !SPUFS_MMAP_4K */ 1271 1272 static const struct file_operations spufs_signal2_fops = { 1273 .open = spufs_signal2_open, 1274 .release = spufs_signal2_release, 1275 .read = spufs_signal2_read, 1276 .write = spufs_signal2_write, 1277 .mmap = spufs_signal2_mmap, 1278 }; 1279 1280 static const struct file_operations spufs_signal2_nosched_fops = { 1281 .open = spufs_signal2_open, 1282 .release = spufs_signal2_release, 1283 .write = spufs_signal2_write, 1284 .mmap = spufs_signal2_mmap, 1285 }; 1286 1287 /* 1288 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the 1289 * work of acquiring (or not) the SPU context before calling through 1290 * to the actual get routine. The set routine is called directly. 1291 */ 1292 #define SPU_ATTR_NOACQUIRE 0 1293 #define SPU_ATTR_ACQUIRE 1 1294 #define SPU_ATTR_ACQUIRE_SAVED 2 1295 1296 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \ 1297 static int __##__get(void *data, u64 *val) \ 1298 { \ 1299 struct spu_context *ctx = data; \ 1300 int ret = 0; \ 1301 \ 1302 if (__acquire == SPU_ATTR_ACQUIRE) { \ 1303 ret = spu_acquire(ctx); \ 1304 if (ret) \ 1305 return ret; \ 1306 *val = __get(ctx); \ 1307 spu_release(ctx); \ 1308 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \ 1309 ret = spu_acquire_saved(ctx); \ 1310 if (ret) \ 1311 return ret; \ 1312 *val = __get(ctx); \ 1313 spu_release_saved(ctx); \ 1314 } else \ 1315 *val = __get(ctx); \ 1316 \ 1317 return 0; \ 1318 } \ 1319 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt); 1320 1321 static int spufs_signal1_type_set(void *data, u64 val) 1322 { 1323 struct spu_context *ctx = data; 1324 int ret; 1325 1326 ret = spu_acquire(ctx); 1327 if (ret) 1328 return ret; 1329 ctx->ops->signal1_type_set(ctx, val); 1330 spu_release(ctx); 1331 1332 return 0; 1333 } 1334 1335 static u64 spufs_signal1_type_get(struct spu_context *ctx) 1336 { 1337 return ctx->ops->signal1_type_get(ctx); 1338 } 1339 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, 1340 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE); 1341 1342 1343 static int spufs_signal2_type_set(void *data, u64 val) 1344 { 1345 struct spu_context *ctx = data; 1346 int ret; 1347 1348 ret = spu_acquire(ctx); 1349 if (ret) 1350 return ret; 1351 ctx->ops->signal2_type_set(ctx, val); 1352 spu_release(ctx); 1353 1354 return 0; 1355 } 1356 1357 static u64 spufs_signal2_type_get(struct spu_context *ctx) 1358 { 1359 return ctx->ops->signal2_type_get(ctx); 1360 } 1361 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, 1362 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); 1363 1364 #if SPUFS_MMAP_4K 1365 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma, 1366 unsigned long address) 1367 { 1368 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000); 1369 } 1370 1371 static struct vm_operations_struct spufs_mss_mmap_vmops = { 1372 .nopfn = spufs_mss_mmap_nopfn, 1373 }; 1374 1375 /* 1376 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 1377 */ 1378 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) 1379 { 1380 if (!(vma->vm_flags & VM_SHARED)) 1381 return -EINVAL; 1382 1383 vma->vm_flags |= VM_IO | VM_PFNMAP; 1384 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1385 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1386 1387 vma->vm_ops = &spufs_mss_mmap_vmops; 1388 return 0; 1389 } 1390 #else /* SPUFS_MMAP_4K */ 1391 #define spufs_mss_mmap NULL 1392 #endif /* !SPUFS_MMAP_4K */ 1393 1394 static int spufs_mss_open(struct inode *inode, struct file *file) 1395 { 1396 struct spufs_inode_info *i = SPUFS_I(inode); 1397 struct spu_context *ctx = i->i_ctx; 1398 1399 file->private_data = i->i_ctx; 1400 1401 mutex_lock(&ctx->mapping_lock); 1402 if (!i->i_openers++) 1403 ctx->mss = inode->i_mapping; 1404 mutex_unlock(&ctx->mapping_lock); 1405 return nonseekable_open(inode, file); 1406 } 1407 1408 static int 1409 spufs_mss_release(struct inode *inode, struct file *file) 1410 { 1411 struct spufs_inode_info *i = SPUFS_I(inode); 1412 struct spu_context *ctx = i->i_ctx; 1413 1414 mutex_lock(&ctx->mapping_lock); 1415 if (!--i->i_openers) 1416 ctx->mss = NULL; 1417 mutex_unlock(&ctx->mapping_lock); 1418 return 0; 1419 } 1420 1421 static const struct file_operations spufs_mss_fops = { 1422 .open = spufs_mss_open, 1423 .release = spufs_mss_release, 1424 .mmap = spufs_mss_mmap, 1425 }; 1426 1427 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma, 1428 unsigned long address) 1429 { 1430 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000); 1431 } 1432 1433 static struct vm_operations_struct spufs_psmap_mmap_vmops = { 1434 .nopfn = spufs_psmap_mmap_nopfn, 1435 }; 1436 1437 /* 1438 * mmap support for full problem state area [0x00000 - 0x1ffff]. 1439 */ 1440 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) 1441 { 1442 if (!(vma->vm_flags & VM_SHARED)) 1443 return -EINVAL; 1444 1445 vma->vm_flags |= VM_IO | VM_PFNMAP; 1446 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1447 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1448 1449 vma->vm_ops = &spufs_psmap_mmap_vmops; 1450 return 0; 1451 } 1452 1453 static int spufs_psmap_open(struct inode *inode, struct file *file) 1454 { 1455 struct spufs_inode_info *i = SPUFS_I(inode); 1456 struct spu_context *ctx = i->i_ctx; 1457 1458 mutex_lock(&ctx->mapping_lock); 1459 file->private_data = i->i_ctx; 1460 if (!i->i_openers++) 1461 ctx->psmap = inode->i_mapping; 1462 mutex_unlock(&ctx->mapping_lock); 1463 return nonseekable_open(inode, file); 1464 } 1465 1466 static int 1467 spufs_psmap_release(struct inode *inode, struct file *file) 1468 { 1469 struct spufs_inode_info *i = SPUFS_I(inode); 1470 struct spu_context *ctx = i->i_ctx; 1471 1472 mutex_lock(&ctx->mapping_lock); 1473 if (!--i->i_openers) 1474 ctx->psmap = NULL; 1475 mutex_unlock(&ctx->mapping_lock); 1476 return 0; 1477 } 1478 1479 static const struct file_operations spufs_psmap_fops = { 1480 .open = spufs_psmap_open, 1481 .release = spufs_psmap_release, 1482 .mmap = spufs_psmap_mmap, 1483 }; 1484 1485 1486 #if SPUFS_MMAP_4K 1487 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma, 1488 unsigned long address) 1489 { 1490 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000); 1491 } 1492 1493 static struct vm_operations_struct spufs_mfc_mmap_vmops = { 1494 .nopfn = spufs_mfc_mmap_nopfn, 1495 }; 1496 1497 /* 1498 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 1499 */ 1500 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) 1501 { 1502 if (!(vma->vm_flags & VM_SHARED)) 1503 return -EINVAL; 1504 1505 vma->vm_flags |= VM_IO | VM_PFNMAP; 1506 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1507 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1508 1509 vma->vm_ops = &spufs_mfc_mmap_vmops; 1510 return 0; 1511 } 1512 #else /* SPUFS_MMAP_4K */ 1513 #define spufs_mfc_mmap NULL 1514 #endif /* !SPUFS_MMAP_4K */ 1515 1516 static int spufs_mfc_open(struct inode *inode, struct file *file) 1517 { 1518 struct spufs_inode_info *i = SPUFS_I(inode); 1519 struct spu_context *ctx = i->i_ctx; 1520 1521 /* we don't want to deal with DMA into other processes */ 1522 if (ctx->owner != current->mm) 1523 return -EINVAL; 1524 1525 if (atomic_read(&inode->i_count) != 1) 1526 return -EBUSY; 1527 1528 mutex_lock(&ctx->mapping_lock); 1529 file->private_data = ctx; 1530 if (!i->i_openers++) 1531 ctx->mfc = inode->i_mapping; 1532 mutex_unlock(&ctx->mapping_lock); 1533 return nonseekable_open(inode, file); 1534 } 1535 1536 static int 1537 spufs_mfc_release(struct inode *inode, struct file *file) 1538 { 1539 struct spufs_inode_info *i = SPUFS_I(inode); 1540 struct spu_context *ctx = i->i_ctx; 1541 1542 mutex_lock(&ctx->mapping_lock); 1543 if (!--i->i_openers) 1544 ctx->mfc = NULL; 1545 mutex_unlock(&ctx->mapping_lock); 1546 return 0; 1547 } 1548 1549 /* interrupt-level mfc callback function. */ 1550 void spufs_mfc_callback(struct spu *spu) 1551 { 1552 struct spu_context *ctx = spu->ctx; 1553 1554 if (!ctx) 1555 return; 1556 1557 wake_up_all(&ctx->mfc_wq); 1558 1559 pr_debug("%s %s\n", __func__, spu->name); 1560 if (ctx->mfc_fasync) { 1561 u32 free_elements, tagstatus; 1562 unsigned int mask; 1563 1564 /* no need for spu_acquire in interrupt context */ 1565 free_elements = ctx->ops->get_mfc_free_elements(ctx); 1566 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); 1567 1568 mask = 0; 1569 if (free_elements & 0xffff) 1570 mask |= POLLOUT; 1571 if (tagstatus & ctx->tagwait) 1572 mask |= POLLIN; 1573 1574 kill_fasync(&ctx->mfc_fasync, SIGIO, mask); 1575 } 1576 } 1577 1578 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) 1579 { 1580 /* See if there is one tag group is complete */ 1581 /* FIXME we need locking around tagwait */ 1582 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; 1583 ctx->tagwait &= ~*status; 1584 if (*status) 1585 return 1; 1586 1587 /* enable interrupt waiting for any tag group, 1588 may silently fail if interrupts are already enabled */ 1589 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); 1590 return 0; 1591 } 1592 1593 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, 1594 size_t size, loff_t *pos) 1595 { 1596 struct spu_context *ctx = file->private_data; 1597 int ret = -EINVAL; 1598 u32 status; 1599 1600 if (size != 4) 1601 goto out; 1602 1603 ret = spu_acquire(ctx); 1604 if (ret) 1605 return ret; 1606 1607 ret = -EINVAL; 1608 if (file->f_flags & O_NONBLOCK) { 1609 status = ctx->ops->read_mfc_tagstatus(ctx); 1610 if (!(status & ctx->tagwait)) 1611 ret = -EAGAIN; 1612 else 1613 /* XXX(hch): shouldn't we clear ret here? */ 1614 ctx->tagwait &= ~status; 1615 } else { 1616 ret = spufs_wait(ctx->mfc_wq, 1617 spufs_read_mfc_tagstatus(ctx, &status)); 1618 if (ret) 1619 goto out; 1620 } 1621 spu_release(ctx); 1622 1623 ret = 4; 1624 if (copy_to_user(buffer, &status, 4)) 1625 ret = -EFAULT; 1626 1627 out: 1628 return ret; 1629 } 1630 1631 static int spufs_check_valid_dma(struct mfc_dma_command *cmd) 1632 { 1633 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa, 1634 cmd->ea, cmd->size, cmd->tag, cmd->cmd); 1635 1636 switch (cmd->cmd) { 1637 case MFC_PUT_CMD: 1638 case MFC_PUTF_CMD: 1639 case MFC_PUTB_CMD: 1640 case MFC_GET_CMD: 1641 case MFC_GETF_CMD: 1642 case MFC_GETB_CMD: 1643 break; 1644 default: 1645 pr_debug("invalid DMA opcode %x\n", cmd->cmd); 1646 return -EIO; 1647 } 1648 1649 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) { 1650 pr_debug("invalid DMA alignment, ea %lx lsa %x\n", 1651 cmd->ea, cmd->lsa); 1652 return -EIO; 1653 } 1654 1655 switch (cmd->size & 0xf) { 1656 case 1: 1657 break; 1658 case 2: 1659 if (cmd->lsa & 1) 1660 goto error; 1661 break; 1662 case 4: 1663 if (cmd->lsa & 3) 1664 goto error; 1665 break; 1666 case 8: 1667 if (cmd->lsa & 7) 1668 goto error; 1669 break; 1670 case 0: 1671 if (cmd->lsa & 15) 1672 goto error; 1673 break; 1674 error: 1675 default: 1676 pr_debug("invalid DMA alignment %x for size %x\n", 1677 cmd->lsa & 0xf, cmd->size); 1678 return -EIO; 1679 } 1680 1681 if (cmd->size > 16 * 1024) { 1682 pr_debug("invalid DMA size %x\n", cmd->size); 1683 return -EIO; 1684 } 1685 1686 if (cmd->tag & 0xfff0) { 1687 /* we reserve the higher tag numbers for kernel use */ 1688 pr_debug("invalid DMA tag\n"); 1689 return -EIO; 1690 } 1691 1692 if (cmd->class) { 1693 /* not supported in this version */ 1694 pr_debug("invalid DMA class\n"); 1695 return -EIO; 1696 } 1697 1698 return 0; 1699 } 1700 1701 static int spu_send_mfc_command(struct spu_context *ctx, 1702 struct mfc_dma_command cmd, 1703 int *error) 1704 { 1705 *error = ctx->ops->send_mfc_command(ctx, &cmd); 1706 if (*error == -EAGAIN) { 1707 /* wait for any tag group to complete 1708 so we have space for the new command */ 1709 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); 1710 /* try again, because the queue might be 1711 empty again */ 1712 *error = ctx->ops->send_mfc_command(ctx, &cmd); 1713 if (*error == -EAGAIN) 1714 return 0; 1715 } 1716 return 1; 1717 } 1718 1719 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, 1720 size_t size, loff_t *pos) 1721 { 1722 struct spu_context *ctx = file->private_data; 1723 struct mfc_dma_command cmd; 1724 int ret = -EINVAL; 1725 1726 if (size != sizeof cmd) 1727 goto out; 1728 1729 ret = -EFAULT; 1730 if (copy_from_user(&cmd, buffer, sizeof cmd)) 1731 goto out; 1732 1733 ret = spufs_check_valid_dma(&cmd); 1734 if (ret) 1735 goto out; 1736 1737 ret = spu_acquire(ctx); 1738 if (ret) 1739 goto out; 1740 1741 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); 1742 if (ret) 1743 goto out; 1744 1745 if (file->f_flags & O_NONBLOCK) { 1746 ret = ctx->ops->send_mfc_command(ctx, &cmd); 1747 } else { 1748 int status; 1749 ret = spufs_wait(ctx->mfc_wq, 1750 spu_send_mfc_command(ctx, cmd, &status)); 1751 if (ret) 1752 goto out; 1753 if (status) 1754 ret = status; 1755 } 1756 1757 if (ret) 1758 goto out_unlock; 1759 1760 ctx->tagwait |= 1 << cmd.tag; 1761 ret = size; 1762 1763 out_unlock: 1764 spu_release(ctx); 1765 out: 1766 return ret; 1767 } 1768 1769 static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait) 1770 { 1771 struct spu_context *ctx = file->private_data; 1772 u32 free_elements, tagstatus; 1773 unsigned int mask; 1774 1775 poll_wait(file, &ctx->mfc_wq, wait); 1776 1777 /* 1778 * For now keep this uninterruptible and also ignore the rule 1779 * that poll should not sleep. Will be fixed later. 1780 */ 1781 mutex_lock(&ctx->state_mutex); 1782 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); 1783 free_elements = ctx->ops->get_mfc_free_elements(ctx); 1784 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); 1785 spu_release(ctx); 1786 1787 mask = 0; 1788 if (free_elements & 0xffff) 1789 mask |= POLLOUT | POLLWRNORM; 1790 if (tagstatus & ctx->tagwait) 1791 mask |= POLLIN | POLLRDNORM; 1792 1793 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, 1794 free_elements, tagstatus, ctx->tagwait); 1795 1796 return mask; 1797 } 1798 1799 static int spufs_mfc_flush(struct file *file, fl_owner_t id) 1800 { 1801 struct spu_context *ctx = file->private_data; 1802 int ret; 1803 1804 ret = spu_acquire(ctx); 1805 if (ret) 1806 goto out; 1807 #if 0 1808 /* this currently hangs */ 1809 ret = spufs_wait(ctx->mfc_wq, 1810 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); 1811 if (ret) 1812 goto out; 1813 ret = spufs_wait(ctx->mfc_wq, 1814 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); 1815 if (ret) 1816 goto out; 1817 #else 1818 ret = 0; 1819 #endif 1820 spu_release(ctx); 1821 out: 1822 return ret; 1823 } 1824 1825 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry, 1826 int datasync) 1827 { 1828 return spufs_mfc_flush(file, NULL); 1829 } 1830 1831 static int spufs_mfc_fasync(int fd, struct file *file, int on) 1832 { 1833 struct spu_context *ctx = file->private_data; 1834 1835 return fasync_helper(fd, file, on, &ctx->mfc_fasync); 1836 } 1837 1838 static const struct file_operations spufs_mfc_fops = { 1839 .open = spufs_mfc_open, 1840 .release = spufs_mfc_release, 1841 .read = spufs_mfc_read, 1842 .write = spufs_mfc_write, 1843 .poll = spufs_mfc_poll, 1844 .flush = spufs_mfc_flush, 1845 .fsync = spufs_mfc_fsync, 1846 .fasync = spufs_mfc_fasync, 1847 .mmap = spufs_mfc_mmap, 1848 }; 1849 1850 static int spufs_npc_set(void *data, u64 val) 1851 { 1852 struct spu_context *ctx = data; 1853 int ret; 1854 1855 ret = spu_acquire(ctx); 1856 if (ret) 1857 return ret; 1858 ctx->ops->npc_write(ctx, val); 1859 spu_release(ctx); 1860 1861 return 0; 1862 } 1863 1864 static u64 spufs_npc_get(struct spu_context *ctx) 1865 { 1866 return ctx->ops->npc_read(ctx); 1867 } 1868 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, 1869 "0x%llx\n", SPU_ATTR_ACQUIRE); 1870 1871 static int spufs_decr_set(void *data, u64 val) 1872 { 1873 struct spu_context *ctx = data; 1874 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1875 int ret; 1876 1877 ret = spu_acquire_saved(ctx); 1878 if (ret) 1879 return ret; 1880 lscsa->decr.slot[0] = (u32) val; 1881 spu_release_saved(ctx); 1882 1883 return 0; 1884 } 1885 1886 static u64 spufs_decr_get(struct spu_context *ctx) 1887 { 1888 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1889 return lscsa->decr.slot[0]; 1890 } 1891 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, 1892 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); 1893 1894 static int spufs_decr_status_set(void *data, u64 val) 1895 { 1896 struct spu_context *ctx = data; 1897 int ret; 1898 1899 ret = spu_acquire_saved(ctx); 1900 if (ret) 1901 return ret; 1902 if (val) 1903 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; 1904 else 1905 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; 1906 spu_release_saved(ctx); 1907 1908 return 0; 1909 } 1910 1911 static u64 spufs_decr_status_get(struct spu_context *ctx) 1912 { 1913 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) 1914 return SPU_DECR_STATUS_RUNNING; 1915 else 1916 return 0; 1917 } 1918 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, 1919 spufs_decr_status_set, "0x%llx\n", 1920 SPU_ATTR_ACQUIRE_SAVED); 1921 1922 static int spufs_event_mask_set(void *data, u64 val) 1923 { 1924 struct spu_context *ctx = data; 1925 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1926 int ret; 1927 1928 ret = spu_acquire_saved(ctx); 1929 if (ret) 1930 return ret; 1931 lscsa->event_mask.slot[0] = (u32) val; 1932 spu_release_saved(ctx); 1933 1934 return 0; 1935 } 1936 1937 static u64 spufs_event_mask_get(struct spu_context *ctx) 1938 { 1939 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1940 return lscsa->event_mask.slot[0]; 1941 } 1942 1943 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, 1944 spufs_event_mask_set, "0x%llx\n", 1945 SPU_ATTR_ACQUIRE_SAVED); 1946 1947 static u64 spufs_event_status_get(struct spu_context *ctx) 1948 { 1949 struct spu_state *state = &ctx->csa; 1950 u64 stat; 1951 stat = state->spu_chnlcnt_RW[0]; 1952 if (stat) 1953 return state->spu_chnldata_RW[0]; 1954 return 0; 1955 } 1956 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, 1957 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) 1958 1959 static int spufs_srr0_set(void *data, u64 val) 1960 { 1961 struct spu_context *ctx = data; 1962 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1963 int ret; 1964 1965 ret = spu_acquire_saved(ctx); 1966 if (ret) 1967 return ret; 1968 lscsa->srr0.slot[0] = (u32) val; 1969 spu_release_saved(ctx); 1970 1971 return 0; 1972 } 1973 1974 static u64 spufs_srr0_get(struct spu_context *ctx) 1975 { 1976 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1977 return lscsa->srr0.slot[0]; 1978 } 1979 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 1980 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) 1981 1982 static u64 spufs_id_get(struct spu_context *ctx) 1983 { 1984 u64 num; 1985 1986 if (ctx->state == SPU_STATE_RUNNABLE) 1987 num = ctx->spu->number; 1988 else 1989 num = (unsigned int)-1; 1990 1991 return num; 1992 } 1993 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n", 1994 SPU_ATTR_ACQUIRE) 1995 1996 static u64 spufs_object_id_get(struct spu_context *ctx) 1997 { 1998 /* FIXME: Should there really be no locking here? */ 1999 return ctx->object_id; 2000 } 2001 2002 static int spufs_object_id_set(void *data, u64 id) 2003 { 2004 struct spu_context *ctx = data; 2005 ctx->object_id = id; 2006 2007 return 0; 2008 } 2009 2010 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 2011 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); 2012 2013 static u64 spufs_lslr_get(struct spu_context *ctx) 2014 { 2015 return ctx->csa.priv2.spu_lslr_RW; 2016 } 2017 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", 2018 SPU_ATTR_ACQUIRE_SAVED); 2019 2020 static int spufs_info_open(struct inode *inode, struct file *file) 2021 { 2022 struct spufs_inode_info *i = SPUFS_I(inode); 2023 struct spu_context *ctx = i->i_ctx; 2024 file->private_data = ctx; 2025 return 0; 2026 } 2027 2028 static int spufs_caps_show(struct seq_file *s, void *private) 2029 { 2030 struct spu_context *ctx = s->private; 2031 2032 if (!(ctx->flags & SPU_CREATE_NOSCHED)) 2033 seq_puts(s, "sched\n"); 2034 if (!(ctx->flags & SPU_CREATE_ISOLATE)) 2035 seq_puts(s, "step\n"); 2036 return 0; 2037 } 2038 2039 static int spufs_caps_open(struct inode *inode, struct file *file) 2040 { 2041 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); 2042 } 2043 2044 static const struct file_operations spufs_caps_fops = { 2045 .open = spufs_caps_open, 2046 .read = seq_read, 2047 .llseek = seq_lseek, 2048 .release = single_release, 2049 }; 2050 2051 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, 2052 char __user *buf, size_t len, loff_t *pos) 2053 { 2054 u32 data; 2055 2056 /* EOF if there's no entry in the mbox */ 2057 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) 2058 return 0; 2059 2060 data = ctx->csa.prob.pu_mb_R; 2061 2062 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2063 } 2064 2065 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, 2066 size_t len, loff_t *pos) 2067 { 2068 int ret; 2069 struct spu_context *ctx = file->private_data; 2070 2071 if (!access_ok(VERIFY_WRITE, buf, len)) 2072 return -EFAULT; 2073 2074 ret = spu_acquire_saved(ctx); 2075 if (ret) 2076 return ret; 2077 spin_lock(&ctx->csa.register_lock); 2078 ret = __spufs_mbox_info_read(ctx, buf, len, pos); 2079 spin_unlock(&ctx->csa.register_lock); 2080 spu_release_saved(ctx); 2081 2082 return ret; 2083 } 2084 2085 static const struct file_operations spufs_mbox_info_fops = { 2086 .open = spufs_info_open, 2087 .read = spufs_mbox_info_read, 2088 .llseek = generic_file_llseek, 2089 }; 2090 2091 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx, 2092 char __user *buf, size_t len, loff_t *pos) 2093 { 2094 u32 data; 2095 2096 /* EOF if there's no entry in the ibox */ 2097 if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) 2098 return 0; 2099 2100 data = ctx->csa.priv2.puint_mb_R; 2101 2102 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2103 } 2104 2105 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, 2106 size_t len, loff_t *pos) 2107 { 2108 struct spu_context *ctx = file->private_data; 2109 int ret; 2110 2111 if (!access_ok(VERIFY_WRITE, buf, len)) 2112 return -EFAULT; 2113 2114 ret = spu_acquire_saved(ctx); 2115 if (ret) 2116 return ret; 2117 spin_lock(&ctx->csa.register_lock); 2118 ret = __spufs_ibox_info_read(ctx, buf, len, pos); 2119 spin_unlock(&ctx->csa.register_lock); 2120 spu_release_saved(ctx); 2121 2122 return ret; 2123 } 2124 2125 static const struct file_operations spufs_ibox_info_fops = { 2126 .open = spufs_info_open, 2127 .read = spufs_ibox_info_read, 2128 .llseek = generic_file_llseek, 2129 }; 2130 2131 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, 2132 char __user *buf, size_t len, loff_t *pos) 2133 { 2134 int i, cnt; 2135 u32 data[4]; 2136 u32 wbox_stat; 2137 2138 wbox_stat = ctx->csa.prob.mb_stat_R; 2139 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); 2140 for (i = 0; i < cnt; i++) { 2141 data[i] = ctx->csa.spu_mailbox_data[i]; 2142 } 2143 2144 return simple_read_from_buffer(buf, len, pos, &data, 2145 cnt * sizeof(u32)); 2146 } 2147 2148 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, 2149 size_t len, loff_t *pos) 2150 { 2151 struct spu_context *ctx = file->private_data; 2152 int ret; 2153 2154 if (!access_ok(VERIFY_WRITE, buf, len)) 2155 return -EFAULT; 2156 2157 ret = spu_acquire_saved(ctx); 2158 if (ret) 2159 return ret; 2160 spin_lock(&ctx->csa.register_lock); 2161 ret = __spufs_wbox_info_read(ctx, buf, len, pos); 2162 spin_unlock(&ctx->csa.register_lock); 2163 spu_release_saved(ctx); 2164 2165 return ret; 2166 } 2167 2168 static const struct file_operations spufs_wbox_info_fops = { 2169 .open = spufs_info_open, 2170 .read = spufs_wbox_info_read, 2171 .llseek = generic_file_llseek, 2172 }; 2173 2174 static ssize_t __spufs_dma_info_read(struct spu_context *ctx, 2175 char __user *buf, size_t len, loff_t *pos) 2176 { 2177 struct spu_dma_info info; 2178 struct mfc_cq_sr *qp, *spuqp; 2179 int i; 2180 2181 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; 2182 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; 2183 info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; 2184 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; 2185 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; 2186 for (i = 0; i < 16; i++) { 2187 qp = &info.dma_info_command_data[i]; 2188 spuqp = &ctx->csa.priv2.spuq[i]; 2189 2190 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; 2191 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; 2192 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; 2193 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; 2194 } 2195 2196 return simple_read_from_buffer(buf, len, pos, &info, 2197 sizeof info); 2198 } 2199 2200 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, 2201 size_t len, loff_t *pos) 2202 { 2203 struct spu_context *ctx = file->private_data; 2204 int ret; 2205 2206 if (!access_ok(VERIFY_WRITE, buf, len)) 2207 return -EFAULT; 2208 2209 ret = spu_acquire_saved(ctx); 2210 if (ret) 2211 return ret; 2212 spin_lock(&ctx->csa.register_lock); 2213 ret = __spufs_dma_info_read(ctx, buf, len, pos); 2214 spin_unlock(&ctx->csa.register_lock); 2215 spu_release_saved(ctx); 2216 2217 return ret; 2218 } 2219 2220 static const struct file_operations spufs_dma_info_fops = { 2221 .open = spufs_info_open, 2222 .read = spufs_dma_info_read, 2223 }; 2224 2225 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, 2226 char __user *buf, size_t len, loff_t *pos) 2227 { 2228 struct spu_proxydma_info info; 2229 struct mfc_cq_sr *qp, *puqp; 2230 int ret = sizeof info; 2231 int i; 2232 2233 if (len < ret) 2234 return -EINVAL; 2235 2236 if (!access_ok(VERIFY_WRITE, buf, len)) 2237 return -EFAULT; 2238 2239 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; 2240 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; 2241 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; 2242 for (i = 0; i < 8; i++) { 2243 qp = &info.proxydma_info_command_data[i]; 2244 puqp = &ctx->csa.priv2.puq[i]; 2245 2246 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; 2247 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; 2248 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; 2249 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; 2250 } 2251 2252 return simple_read_from_buffer(buf, len, pos, &info, 2253 sizeof info); 2254 } 2255 2256 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, 2257 size_t len, loff_t *pos) 2258 { 2259 struct spu_context *ctx = file->private_data; 2260 int ret; 2261 2262 ret = spu_acquire_saved(ctx); 2263 if (ret) 2264 return ret; 2265 spin_lock(&ctx->csa.register_lock); 2266 ret = __spufs_proxydma_info_read(ctx, buf, len, pos); 2267 spin_unlock(&ctx->csa.register_lock); 2268 spu_release_saved(ctx); 2269 2270 return ret; 2271 } 2272 2273 static const struct file_operations spufs_proxydma_info_fops = { 2274 .open = spufs_info_open, 2275 .read = spufs_proxydma_info_read, 2276 }; 2277 2278 static int spufs_show_tid(struct seq_file *s, void *private) 2279 { 2280 struct spu_context *ctx = s->private; 2281 2282 seq_printf(s, "%d\n", ctx->tid); 2283 return 0; 2284 } 2285 2286 static int spufs_tid_open(struct inode *inode, struct file *file) 2287 { 2288 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); 2289 } 2290 2291 static const struct file_operations spufs_tid_fops = { 2292 .open = spufs_tid_open, 2293 .read = seq_read, 2294 .llseek = seq_lseek, 2295 .release = single_release, 2296 }; 2297 2298 static const char *ctx_state_names[] = { 2299 "user", "system", "iowait", "loaded" 2300 }; 2301 2302 static unsigned long long spufs_acct_time(struct spu_context *ctx, 2303 enum spu_utilization_state state) 2304 { 2305 struct timespec ts; 2306 unsigned long long time = ctx->stats.times[state]; 2307 2308 /* 2309 * In general, utilization statistics are updated by the controlling 2310 * thread as the spu context moves through various well defined 2311 * state transitions, but if the context is lazily loaded its 2312 * utilization statistics are not updated as the controlling thread 2313 * is not tightly coupled with the execution of the spu context. We 2314 * calculate and apply the time delta from the last recorded state 2315 * of the spu context. 2316 */ 2317 if (ctx->spu && ctx->stats.util_state == state) { 2318 ktime_get_ts(&ts); 2319 time += timespec_to_ns(&ts) - ctx->stats.tstamp; 2320 } 2321 2322 return time / NSEC_PER_MSEC; 2323 } 2324 2325 static unsigned long long spufs_slb_flts(struct spu_context *ctx) 2326 { 2327 unsigned long long slb_flts = ctx->stats.slb_flt; 2328 2329 if (ctx->state == SPU_STATE_RUNNABLE) { 2330 slb_flts += (ctx->spu->stats.slb_flt - 2331 ctx->stats.slb_flt_base); 2332 } 2333 2334 return slb_flts; 2335 } 2336 2337 static unsigned long long spufs_class2_intrs(struct spu_context *ctx) 2338 { 2339 unsigned long long class2_intrs = ctx->stats.class2_intr; 2340 2341 if (ctx->state == SPU_STATE_RUNNABLE) { 2342 class2_intrs += (ctx->spu->stats.class2_intr - 2343 ctx->stats.class2_intr_base); 2344 } 2345 2346 return class2_intrs; 2347 } 2348 2349 2350 static int spufs_show_stat(struct seq_file *s, void *private) 2351 { 2352 struct spu_context *ctx = s->private; 2353 int ret; 2354 2355 ret = spu_acquire(ctx); 2356 if (ret) 2357 return ret; 2358 2359 seq_printf(s, "%s %llu %llu %llu %llu " 2360 "%llu %llu %llu %llu %llu %llu %llu %llu\n", 2361 ctx_state_names[ctx->stats.util_state], 2362 spufs_acct_time(ctx, SPU_UTIL_USER), 2363 spufs_acct_time(ctx, SPU_UTIL_SYSTEM), 2364 spufs_acct_time(ctx, SPU_UTIL_IOWAIT), 2365 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), 2366 ctx->stats.vol_ctx_switch, 2367 ctx->stats.invol_ctx_switch, 2368 spufs_slb_flts(ctx), 2369 ctx->stats.hash_flt, 2370 ctx->stats.min_flt, 2371 ctx->stats.maj_flt, 2372 spufs_class2_intrs(ctx), 2373 ctx->stats.libassist); 2374 spu_release(ctx); 2375 return 0; 2376 } 2377 2378 static int spufs_stat_open(struct inode *inode, struct file *file) 2379 { 2380 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); 2381 } 2382 2383 static const struct file_operations spufs_stat_fops = { 2384 .open = spufs_stat_open, 2385 .read = seq_read, 2386 .llseek = seq_lseek, 2387 .release = single_release, 2388 }; 2389 2390 static inline int spufs_switch_log_used(struct spu_context *ctx) 2391 { 2392 return (ctx->switch_log->head - ctx->switch_log->tail) % 2393 SWITCH_LOG_BUFSIZE; 2394 } 2395 2396 static inline int spufs_switch_log_avail(struct spu_context *ctx) 2397 { 2398 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); 2399 } 2400 2401 static int spufs_switch_log_open(struct inode *inode, struct file *file) 2402 { 2403 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2404 2405 /* 2406 * We (ab-)use the mapping_lock here because it serves the similar 2407 * purpose for synchronizing open/close elsewhere. Maybe it should 2408 * be renamed eventually. 2409 */ 2410 mutex_lock(&ctx->mapping_lock); 2411 if (ctx->switch_log) { 2412 spin_lock(&ctx->switch_log->lock); 2413 ctx->switch_log->head = 0; 2414 ctx->switch_log->tail = 0; 2415 spin_unlock(&ctx->switch_log->lock); 2416 } else { 2417 /* 2418 * We allocate the switch log data structures on first open. 2419 * They will never be free because we assume a context will 2420 * be traced until it goes away. 2421 */ 2422 ctx->switch_log = kzalloc(sizeof(struct switch_log) + 2423 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), 2424 GFP_KERNEL); 2425 if (!ctx->switch_log) 2426 goto out; 2427 spin_lock_init(&ctx->switch_log->lock); 2428 init_waitqueue_head(&ctx->switch_log->wait); 2429 } 2430 mutex_unlock(&ctx->mapping_lock); 2431 2432 return 0; 2433 out: 2434 mutex_unlock(&ctx->mapping_lock); 2435 return -ENOMEM; 2436 } 2437 2438 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) 2439 { 2440 struct switch_log_entry *p; 2441 2442 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; 2443 2444 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n", 2445 (unsigned int) p->tstamp.tv_sec, 2446 (unsigned int) p->tstamp.tv_nsec, 2447 p->spu_id, 2448 (unsigned int) p->type, 2449 (unsigned int) p->val, 2450 (unsigned long long) p->timebase); 2451 } 2452 2453 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, 2454 size_t len, loff_t *ppos) 2455 { 2456 struct inode *inode = file->f_path.dentry->d_inode; 2457 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2458 int error = 0, cnt = 0; 2459 2460 if (!buf || len < 0) 2461 return -EINVAL; 2462 2463 while (cnt < len) { 2464 char tbuf[128]; 2465 int width; 2466 2467 if (file->f_flags & O_NONBLOCK) { 2468 if (spufs_switch_log_used(ctx) <= 0) 2469 return cnt ? cnt : -EAGAIN; 2470 } else { 2471 /* Wait for data in buffer */ 2472 error = wait_event_interruptible(ctx->switch_log->wait, 2473 spufs_switch_log_used(ctx) > 0); 2474 if (error) 2475 break; 2476 } 2477 2478 spin_lock(&ctx->switch_log->lock); 2479 if (ctx->switch_log->head == ctx->switch_log->tail) { 2480 /* multiple readers race? */ 2481 spin_unlock(&ctx->switch_log->lock); 2482 continue; 2483 } 2484 2485 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); 2486 if (width < len) { 2487 ctx->switch_log->tail = 2488 (ctx->switch_log->tail + 1) % 2489 SWITCH_LOG_BUFSIZE; 2490 } 2491 2492 spin_unlock(&ctx->switch_log->lock); 2493 2494 /* 2495 * If the record is greater than space available return 2496 * partial buffer (so far) 2497 */ 2498 if (width >= len) 2499 break; 2500 2501 error = copy_to_user(buf + cnt, tbuf, width); 2502 if (error) 2503 break; 2504 cnt += width; 2505 } 2506 2507 return cnt == 0 ? error : cnt; 2508 } 2509 2510 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) 2511 { 2512 struct inode *inode = file->f_path.dentry->d_inode; 2513 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2514 unsigned int mask = 0; 2515 2516 poll_wait(file, &ctx->switch_log->wait, wait); 2517 2518 if (spufs_switch_log_used(ctx) > 0) 2519 mask |= POLLIN; 2520 2521 return mask; 2522 } 2523 2524 static const struct file_operations spufs_switch_log_fops = { 2525 .owner = THIS_MODULE, 2526 .open = spufs_switch_log_open, 2527 .read = spufs_switch_log_read, 2528 .poll = spufs_switch_log_poll, 2529 }; 2530 2531 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, 2532 u32 type, u32 val) 2533 { 2534 if (!ctx->switch_log) 2535 return; 2536 2537 spin_lock(&ctx->switch_log->lock); 2538 if (spufs_switch_log_avail(ctx) > 1) { 2539 struct switch_log_entry *p; 2540 2541 p = ctx->switch_log->log + ctx->switch_log->head; 2542 ktime_get_ts(&p->tstamp); 2543 p->timebase = get_tb(); 2544 p->spu_id = spu ? spu->number : -1; 2545 p->type = type; 2546 p->val = val; 2547 2548 ctx->switch_log->head = 2549 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; 2550 } 2551 spin_unlock(&ctx->switch_log->lock); 2552 2553 wake_up(&ctx->switch_log->wait); 2554 } 2555 2556 struct tree_descr spufs_dir_contents[] = { 2557 { "capabilities", &spufs_caps_fops, 0444, }, 2558 { "mem", &spufs_mem_fops, 0666, }, 2559 { "regs", &spufs_regs_fops, 0666, }, 2560 { "mbox", &spufs_mbox_fops, 0444, }, 2561 { "ibox", &spufs_ibox_fops, 0444, }, 2562 { "wbox", &spufs_wbox_fops, 0222, }, 2563 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2564 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2565 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2566 { "signal1", &spufs_signal1_fops, 0666, }, 2567 { "signal2", &spufs_signal2_fops, 0666, }, 2568 { "signal1_type", &spufs_signal1_type, 0666, }, 2569 { "signal2_type", &spufs_signal2_type, 0666, }, 2570 { "cntl", &spufs_cntl_fops, 0666, }, 2571 { "fpcr", &spufs_fpcr_fops, 0666, }, 2572 { "lslr", &spufs_lslr_ops, 0444, }, 2573 { "mfc", &spufs_mfc_fops, 0666, }, 2574 { "mss", &spufs_mss_fops, 0666, }, 2575 { "npc", &spufs_npc_ops, 0666, }, 2576 { "srr0", &spufs_srr0_ops, 0666, }, 2577 { "decr", &spufs_decr_ops, 0666, }, 2578 { "decr_status", &spufs_decr_status_ops, 0666, }, 2579 { "event_mask", &spufs_event_mask_ops, 0666, }, 2580 { "event_status", &spufs_event_status_ops, 0444, }, 2581 { "psmap", &spufs_psmap_fops, 0666, }, 2582 { "phys-id", &spufs_id_ops, 0666, }, 2583 { "object-id", &spufs_object_id_ops, 0666, }, 2584 { "mbox_info", &spufs_mbox_info_fops, 0444, }, 2585 { "ibox_info", &spufs_ibox_info_fops, 0444, }, 2586 { "wbox_info", &spufs_wbox_info_fops, 0444, }, 2587 { "dma_info", &spufs_dma_info_fops, 0444, }, 2588 { "proxydma_info", &spufs_proxydma_info_fops, 0444, }, 2589 { "tid", &spufs_tid_fops, 0444, }, 2590 { "stat", &spufs_stat_fops, 0444, }, 2591 { "switch_log", &spufs_switch_log_fops, 0444 }, 2592 {}, 2593 }; 2594 2595 struct tree_descr spufs_dir_nosched_contents[] = { 2596 { "capabilities", &spufs_caps_fops, 0444, }, 2597 { "mem", &spufs_mem_fops, 0666, }, 2598 { "mbox", &spufs_mbox_fops, 0444, }, 2599 { "ibox", &spufs_ibox_fops, 0444, }, 2600 { "wbox", &spufs_wbox_fops, 0222, }, 2601 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2602 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2603 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2604 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2605 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2606 { "signal1_type", &spufs_signal1_type, 0666, }, 2607 { "signal2_type", &spufs_signal2_type, 0666, }, 2608 { "mss", &spufs_mss_fops, 0666, }, 2609 { "mfc", &spufs_mfc_fops, 0666, }, 2610 { "cntl", &spufs_cntl_fops, 0666, }, 2611 { "npc", &spufs_npc_ops, 0666, }, 2612 { "psmap", &spufs_psmap_fops, 0666, }, 2613 { "phys-id", &spufs_id_ops, 0666, }, 2614 { "object-id", &spufs_object_id_ops, 0666, }, 2615 { "tid", &spufs_tid_fops, 0444, }, 2616 { "stat", &spufs_stat_fops, 0444, }, 2617 {}, 2618 }; 2619 2620 struct spufs_coredump_reader spufs_coredump_read[] = { 2621 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2622 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2623 { "lslr", NULL, spufs_lslr_get, 19 }, 2624 { "decr", NULL, spufs_decr_get, 19 }, 2625 { "decr_status", NULL, spufs_decr_status_get, 19 }, 2626 { "mem", __spufs_mem_read, NULL, LS_SIZE, }, 2627 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, 2628 { "signal1_type", NULL, spufs_signal1_type_get, 19 }, 2629 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, 2630 { "signal2_type", NULL, spufs_signal2_type_get, 19 }, 2631 { "event_mask", NULL, spufs_event_mask_get, 19 }, 2632 { "event_status", NULL, spufs_event_status_get, 19 }, 2633 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, 2634 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, 2635 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, 2636 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, 2637 { "proxydma_info", __spufs_proxydma_info_read, 2638 NULL, sizeof(struct spu_proxydma_info)}, 2639 { "object-id", NULL, spufs_object_id_get, 19 }, 2640 { "npc", NULL, spufs_npc_get, 19 }, 2641 { NULL }, 2642 }; 2643