/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};
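
/*
 * Example of how the macro above is meant to be used.  A minimal sketch
 * with hypothetical getter/setter names (not part of this file); the
 * real users appear further down via DEFINE_SPUFS_ATTRIBUTE:
 *
 *	static int example_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *
 *	static int example_set(void *data, u64 val)
 *	{
 *		return 0;
 *	}
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(example_ops, example_get,
 *				      example_set, "0x%llx\n");
 *
 * Reads format the value through __fmt; writes are parsed with
 * simple_strtol(..., 0), so both decimal and "0x"-prefixed input work.
 */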

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     & ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fall back to the normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
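
/*
 * From user space, "mem" behaves like an LS_SIZE byte file over the SPU
 * local store and may also be mapped.  A minimal sketch, assuming a
 * context directory at the hypothetical path /spu/ctx:
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED (enforced by the VM_SHARED check
 * above), and the nopfn handler resolves each fault to either the saved
 * context image or the physical local store, depending on whether the
 * context is loaded on an SPU at that moment.
 */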

static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	}

	spu_release(ctx);
	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return spufs_attr_open(inode, file, spufs_cntl_get,
			       spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spufs_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = spufs_attr_read,
	.write = spufs_attr_write,
	.mmap = spufs_cntl_mmap,
};
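
/*
 * Usage sketch for the "cntl" file (path assumed, error handling
 * elided): a read returns the SPU status register formatted with
 * "0x%08lx", a write is parsed as a number and forwarded to the run
 * control register.
 *
 *	char buf[24];
 *	int fd = open("/spu/ctx/cntl", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000000"
 *	write(fd, "1", 1);		// request the SPU to run
 */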

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				       lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
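
/*
 * "regs" exposes the saved general purpose register file as raw bytes;
 * each of the 128 SPU GPRs is a 128-bit vector, so a full dump is
 * sizeof(lscsa->gprs) == 128 * 16 bytes.  Read sketch, path assumed:
 *
 *	uint8_t gprs[128][16];
 *	int fd = open("/spu/ctx/regs", O_RDONLY);
 *	read(fd, gprs, sizeof(gprs));	// context gets saved first
 *
 * spu_acquire_saved() forces a running context off the SPU before the
 * registers are copied, so this is a debugger-style path, not a fast one.
 */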

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				       &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
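
/*
 * Read semantics of "mbox" in practice: the SPU-to-PPE mailbox is a
 * stream of 32-bit words, reads shorter than 4 bytes fail with EINVAL,
 * and an empty mailbox returns EAGAIN instead of blocking.  Sketch,
 * path assumed:
 *
 *	uint32_t data;
 *	int fd = open("/spu/ctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, 4);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// SPU has not written anything yet
 */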

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
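
/*
 * Unlike "mbox", the interrupt mailbox supports blocking reads, poll()
 * and SIGIO through the fasync hook, all driven by
 * spufs_ibox_callback() above.  A poll-based sketch (path assumed):
 *
 *	struct pollfd pfd = {
 *		.fd = open("/spu/ctx/ibox", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	uint32_t data;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(pfd.fd, &data, 4);	// first word will not block now
 */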

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the inbound mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
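static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		return count;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

/*
 * Write-side counterpart to the mailbox reads above: a sketch of
 * feeding one word into the SPU's inbound mailbox through "wbox" (path
 * assumed).  Without O_NONBLOCK the first word may sleep until the SPU
 * drains a slot; POLLOUT can be used to wait for free space instead.
 *
 *	uint32_t cmd = 0x1234;		// illustrative payload
 *	int fd = open("/spu/ctx/wbox", O_WRONLY);
 *	write(fd, &cmd, 4);
 */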

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
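
/*
 * Sketch of raising a signal notification from user space (path
 * assumed): the 32-bit value lands in the SPU's signal notification 1
 * channel.  Whether it overwrites or is OR-ed into the register
 * depends on the mode configured through "signal1_type" further down.
 *
 *	uint32_t sig = 1;
 *	int fd = open("/spu/ctx/signal1", O_WRONLY);
 *	write(fd, &sig, 4);
 */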

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SPUFS_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine.  The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
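
/*
 * For instance, the invocation used later in this file,
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get,
 *			       spufs_npc_set, "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * roughly expands to the following (sketch, error handling elided):
 *
 *	static int __spufs_npc_get(void *data, u64 *val)
 *	{
 *		struct spu_context *ctx = data;
 *
 *		spu_acquire(ctx);
 *		*val = spufs_npc_get(ctx);
 *		spu_release(ctx);
 *		return 0;
 *	}
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_npc_ops, __spufs_npc_get,
 *				      spufs_npc_set, "0x%llx\n");
 */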

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if at least one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
				 spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
			 cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
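
/*
 * Putting the checks together, a command is accepted when it uses one
 * of the six plain put/get opcodes, lsa and ea agree in their low four
 * bits, the size is naturally aligned and at most 16k, the tag is
 * below 16 and the class id is zero.  An illustrative valid command:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa	= 0x0,		// local store offset
 *		.ea	= ea,		// 16-byte aligned effective address
 *		.size	= 0x4000,	// 16k, the maximum accepted
 *		.tag	= 1,		// tags >= 16 are kernel-reserved
 *		.cmd	= MFC_GET_CMD,	// .class left zero
 *	};
 */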

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		 free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
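
/*
 * Typical user-space flow for the "mfc" file (path assumed, error
 * handling elided): queue a DMA by writing a struct mfc_dma_command,
 * then reap completion by reading back the 32-bit tag status mask.
 *
 *	int fd = open("/spu/ctx/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof(cmd));	// queues the DMA, adds to tagwait
 *	uint32_t status;
 *	read(fd, &status, 4);		// blocks until a tag group completes
 *
 * With O_NONBLOCK both calls return EAGAIN instead of sleeping, and
 * poll() reports POLLOUT for queue space and POLLIN for finished tags.
 */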

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				       cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				       sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
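
/*
 * Worked example for spufs_acct_time() above, with made-up numbers: if
 * stats.times[SPU_UTIL_USER] holds 40000000ns and the context is
 * currently loaded in that same state with the clock 5000000ns past
 * stats.tstamp, the reported value is
 * (40000000 + 5000000) / NSEC_PER_MSEC = 45ms.
 */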
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}


static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};


struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, },
	{ "regs", &spufs_regs_fops, 0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
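
/*
 * Contexts created with SPU_CREATE_NOSCHED get the reduced directory
 * below instead: the files whose read paths would have to save the
 * context (regs, fpcr, lslr, srr0, decr*, event_*, and the *_info
 * snapshots) are omitted, and signal1/signal2 use the write-only
 * nosched variants.
 */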
"tid", &spufs_tid_fops, 0444, }, 2400 { "stat", &spufs_stat_fops, 0444, }, 2401 {}, 2402 }; 2403 2404 struct tree_descr spufs_dir_nosched_contents[] = { 2405 { "capabilities", &spufs_caps_fops, 0444, }, 2406 { "mem", &spufs_mem_fops, 0666, }, 2407 { "mbox", &spufs_mbox_fops, 0444, }, 2408 { "ibox", &spufs_ibox_fops, 0444, }, 2409 { "wbox", &spufs_wbox_fops, 0222, }, 2410 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2411 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2412 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2413 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2414 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2415 { "signal1_type", &spufs_signal1_type, 0666, }, 2416 { "signal2_type", &spufs_signal2_type, 0666, }, 2417 { "mss", &spufs_mss_fops, 0666, }, 2418 { "mfc", &spufs_mfc_fops, 0666, }, 2419 { "cntl", &spufs_cntl_fops, 0666, }, 2420 { "npc", &spufs_npc_ops, 0666, }, 2421 { "psmap", &spufs_psmap_fops, 0666, }, 2422 { "phys-id", &spufs_id_ops, 0666, }, 2423 { "object-id", &spufs_object_id_ops, 0666, }, 2424 { "tid", &spufs_tid_fops, 0444, }, 2425 { "stat", &spufs_stat_fops, 0444, }, 2426 {}, 2427 }; 2428 2429 struct spufs_coredump_reader spufs_coredump_read[] = { 2430 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2431 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2432 { "lslr", NULL, spufs_lslr_get, 19 }, 2433 { "decr", NULL, spufs_decr_get, 19 }, 2434 { "decr_status", NULL, spufs_decr_status_get, 19 }, 2435 { "mem", __spufs_mem_read, NULL, LS_SIZE, }, 2436 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, 2437 { "signal1_type", NULL, spufs_signal1_type_get, 19 }, 2438 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, 2439 { "signal2_type", NULL, spufs_signal2_type_get, 19 }, 2440 { "event_mask", NULL, spufs_event_mask_get, 19 }, 2441 { "event_status", NULL, spufs_event_status_get, 19 }, 2442 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, 2443 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, 2444 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, 2445 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, 2446 { "proxydma_info", __spufs_proxydma_info_read, 2447 NULL, sizeof(struct spu_proxydma_info)}, 2448 { "object-id", NULL, spufs_object_id_get, 19 }, 2449 { "npc", NULL, spufs_npc_get, 19 }, 2450 { NULL }, 2451 }; 2452