/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
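/*
 * Illustrative sketch (not built): the userspace view of one of these
 * attribute files.  The mount point and context name are hypothetical;
 * values are formatted with attr->fmt on read and parsed by
 * simple_strtol() with base 0 on write, so decimal, octal and hex
 * spellings all work.
 */
#if 0
	char buf[24];
	int fd = open("/spu/ctx0/signal1_type", O_RDWR);

	read(fd, buf, sizeof(buf));	/* e.g. "1\n" */
	write(fd, "0x1", 3);		/* parsed as 1 */
	close(fd);
#endif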
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
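/*
 * Illustrative sketch (not built): loading a program image into local
 * store through the "mem" file.  Offsets are clamped to LS_SIZE by
 * spufs_mem_write() above; the context path is hypothetical.
 */
#if 0
	int fd = open("/spu/ctx0/mem", O_RDWR);

	pwrite(fd, image, image_size, 0);	/* copy program to LS */
	pread(fd, verify, image_size, 0);	/* read it back */
#endif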
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
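/*
 * Illustrative sketch (not built): mapping local store directly.  The
 * mapping must be MAP_SHARED (enforced in spufs_mem_mmap() above), and
 * faults are served from the saved CSA copy or from the physical local
 * store depending on whether the context is currently loaded.
 */
#if 0
	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
			MAP_SHARED, mem_fd, 0);
#endif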
static int spufs_ps_fault(struct vm_area_struct *vma,
			  struct vm_fault *vmf,
			  unsigned long ps_offs,
			  unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user((char *)lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
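/*
 * Illustrative sketch (not built): fetching the saved GPR array.  Each
 * of the 128 SPU registers is a 16-byte vector slot in lscsa->gprs, so
 * a full read transfers 128 * 16 bytes and deschedules the context.
 */
#if 0
	unsigned char gprs[128][16];

	pread(regs_fd, gprs, sizeof(gprs), 0);
#endif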
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 *   - no more data available in the mailbox
 *   - end of the user provided buffer
 *   - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
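/*
 * Illustrative sketch (not built): draining the outbound mailbox.  Reads
 * must be at least 4 bytes; a partly-filled queue yields a short
 * (multiple-of-4) count and an empty queue yields -EAGAIN, as
 * implemented above.
 */
#if 0
	u32 data[8];
	ssize_t n = read(mbox_fd, data, sizeof(data));

	if (n < 0 && errno == EAGAIN)
		;	/* no mailbox data available right now */
#endif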
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 *   - no more data available in the mailbox
 *   - end of the user provided buffer
 *   - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};
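/*
 * Illustrative sketch (not built): waiting for interrupt-mailbox data
 * with poll(2) before reading, matching spufs_ibox_poll() above.
 */
#if 0
	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
	u32 data;

	poll(&pfd, 1, -1);		/* sleeps until the SPU writes */
	read(ibox_fd, &data, 4);
#endif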
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 *   - the mailbox is full
 *   - end of the user provided buffer
 *   - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
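/*
 * Illustrative sketch (not built): feeding the SPU's inbound mailbox.
 * A blocking descriptor waits for the first free slot; with O_NONBLOCK
 * a full queue yields -EAGAIN, as implemented above.
 */
#if 0
	u32 cmd = 0xdeadbeef;

	if (write(wbox_fd, &cmd, 4) < 0 && errno == EAGAIN)
		;	/* mailbox full, retry after poll(POLLOUT) */
#endif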
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
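/*
 * Illustrative sketch (not built): raising a signal notification.  A
 * 4-byte write reaches signal1_write() above; whether the new value
 * overwrites or is OR'ed into the register depends on the mode set via
 * the signal1_type attribute.
 */
#if 0
	u32 sig = 1;

	write(signal1_fd, &sig, 4);
#endif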
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
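/*
 * Illustrative sketch (not built): roughly what one DEFINE_SPUFS_ATTRIBUTE
 * invocation above expands to, shown for signal1_type.  The generated
 * getter wraps spufs_signal1_type_get() in spu_acquire()/spu_release()
 * (the other __acquire branches constant-fold away), and the final line
 * instantiates the file_operations via DEFINE_SPUFS_SIMPLE_ATTRIBUTE.
 */
#if 0
static int __spufs_signal1_type_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret = spu_acquire(ctx);

	if (ret)
		return ret;
	*val = spufs_signal1_type_get(ctx);
	spu_release(ctx);
	return 0;
}
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_signal1_type,
		__spufs_signal1_type_get, spufs_signal1_type_set, "%llu\n");
#endif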
#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MSS (multisource sync) area
 * [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};

static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
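/*
 * Illustrative sketch (not built): a DMA request that passes
 * spufs_check_valid_dma() above -- matching low 4 address bits, size a
 * multiple of 16 up to 16k, a tag below 16, and a plain GET/PUT opcode.
 * buf_ea is a hypothetical 16-byte-aligned effective address.
 */
#if 0
	struct mfc_dma_command cmd = {
		.lsa	= 0x0,		/* local store address */
		.ea	= buf_ea,	/* effective address */
		.size	= 0x4000,	/* 16k, the maximum single transfer */
		.tag	= 1,		/* tags above 15 are kernel-reserved */
		.cmd	= MFC_GET_CMD,
	};

	write(mfc_fd, &cmd, sizeof(cmd));
#endif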
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
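/*
 * Illustrative sketch (not built): waiting for a transfer queued through
 * the mfc file to finish.  poll() checks against the tag-group mask
 * recorded in ctx->tagwait, and a 4-byte read returns the completed
 * tag-group status bits.
 */
#if 0
	struct pollfd pfd = { .fd = mfc_fd, .events = POLLIN };
	u32 tagstatus;

	poll(&pfd, 1, -1);
	read(mfc_fd, &tagstatus, 4);	/* bit n set: tag group n complete */
#endif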
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
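/*
 * Illustrative sketch (not built): snapshotting the saved MFC queue state
 * through the dma_info file.  The context is descheduled for the copy; a
 * short buffer simply truncates the snapshot (unlike proxydma_info below,
 * which rejects reads shorter than the full structure).
 */
#if 0
	struct spu_dma_info info;

	read(dma_info_fd, &info, sizeof(info));
#endif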
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	/* clear the structure first so that alignment padding is not
	 * copied out to user space as uninitialized stack data */
	memset(&info, 0, sizeof info);

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
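
/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * unlike "dma_info", which allows partial reads at arbitrary offsets,
 * "proxydma_info" rejects a buffer smaller than the whole structure
 * with -EINVAL, so the reader must always supply at least
 * sizeof(struct spu_proxydma_info):
 *
 *	struct spu_proxydma_info info;
 *	int fd = open("/spu/myctx/proxydma_info", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, &info, sizeof(info)) > 0)
 *		printf("proxy tag status: %#x\n",
 *		       info.proxydma_info_status);
 */
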
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* indexed by enum spu_utilization_state; these names form the first
 * field of the "stat" file */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well-defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}


static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
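
/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * the "stat" file is a single text line whose fields appear in the
 * order printed by spufs_show_stat() above; the four time fields are
 * in milliseconds.  Only the first few fields are parsed here.
 *
 *	char state[16];
 *	unsigned long long user_ms, system_ms, iowait_ms, loaded_ms;
 *	FILE *f = fopen("/spu/myctx/stat", "r");
 *
 *	if (f && fscanf(f, "%15s %llu %llu %llu %llu", state, &user_ms,
 *			&system_ms, &iowait_ms, &loaded_ms) == 5)
 *		printf("%s: %llu ms of user time\n", state, user_ms);
 */
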
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	/* entries queued between the reader (tail) and the writer (head) */
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

 out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}

static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			size_t len, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space still
			 * available, return the partial buffer read so far */
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.owner		= THIS_MODULE,
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};

/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	/* Keep one slot free, so that head == tail always means "empty"
	 * rather than being ambiguous with "full". */
	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}
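
/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * each switch_log record is one text line in the format produced by
 * switch_log_sprint(), i.e. "<sec>.<nsec> <spu_id> <type> <val>
 * <timebase>".  The "line" buffer below is an assumption.
 *
 *	unsigned int sec, nsec, type, val;
 *	int spu_id;
 *	unsigned long long timebase;
 *
 *	if (sscanf(line, "%u.%u %d %u %u %llu", &sec, &nsec,
 *		   &spu_id, &type, &val, &timebase) == 6)
 *		printf("spu %d: event %u at %u.%09u\n",
 *		       spu_id, type, sec, nsec);
 */
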
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
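
/*
 * Illustrative sample of a ".ctx" line (field values are made up for
 * the example): the leading 'S'/'R' is the saved/running state, and
 * spu(-1) means the context is not currently loaded on a physical SPU.
 *
 *	S flgs(0) sflgs(0) pri(120) ts(100) spu(-1)   0 0 0 0 0 0
 */
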
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
			sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
			sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

/*
 * For the numeric attributes below, 19 is the worst-case length of a
 * u64 formatted as "0x%llx\n": "0x" plus 16 hex digits plus the
 * trailing newline.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};