/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <linux/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {	/* continued read */
		size = strlen(attr->get_buf);
	} else {	/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
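/*
 * Illustrative sketch (not part of the original file): from user space, a
 * file built with DEFINE_SPUFS_SIMPLE_ATTRIBUTE behaves like a tiny text
 * register -- reads return the value rendered with __fmt, writes are parsed
 * with simple_strtol(), so both "4660" and "0x1234" are accepted. The path
 * below is hypothetical.
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>
#include <unistd.h>

static void example_attr_read(const char *path)
{
	char buf[24] = "";
	int fd = open(path, O_RDONLY);	/* e.g. a hypothetical /spu/ctx/npc */

	if (fd < 0)
		return;
	read(fd, buf, sizeof(buf) - 1);	/* yields e.g. "0x1234\n" per __fmt */
	close(fd);
}
#endif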
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

static int
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
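/*
 * Illustrative sketch (not part of the original file): the "mem" file
 * exposes the SPU local store; user space maps it MAP_SHARED and the fault
 * handler above then picks cached backing pages (saved context) or the
 * physical local store (running context). The size constant mirrors
 * LS_SIZE (256kB) and is an assumption of this example.
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>
#include <sys/mman.h>

#define EXAMPLE_LS_SIZE 0x40000	/* assumed to match LS_SIZE */

static void *example_map_local_store(const char *mem_path)
{
	int fd = open(mem_path, O_RDWR);
	void *ls;

	if (fd < 0)
		return NULL;
	/* must be MAP_SHARED: spufs_mem_mmap() rejects private mappings */
	ls = mmap(NULL, EXAMPLE_LS_SIZE, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	close(fd);
	return ls == MAP_FAILED ? NULL : ls;
}
#endif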
static int spufs_ps_fault(struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
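/*
 * Illustrative sketch (not part of the original file): "regs" is a binary
 * image of the saved GPR array; a debugger reads it as below. The sizes
 * are assumptions based on the SPU having 128 GPRs of 16 bytes each
 * (sizeof(lscsa->gprs)).
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>
#include <unistd.h>

static int example_read_gprs(const char *regs_path, unsigned char *out)
{
	int fd = open(regs_path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, out, 128 * 16);	/* one 16-byte slot per GPR */
	close(fd);
	return n == 128 * 16 ? 0 : -1;
}
#endif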
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};
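/*
 * Illustrative sketch (not part of the original file): reads from "mbox"
 * never block; an empty mailbox yields -EAGAIN, matching the semantics
 * above. A simple consumer therefore retries (this file exposes no poll
 * operation, so the caller has to back off on its own).
 */
#if 0	/* example only, compiled out */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int example_mbox_read_one(int mbox_fd, uint32_t *word)
{
	for (;;) {
		ssize_t n = read(mbox_fd, word, 4);

		if (n == 4)
			return 0;		/* got one 32-bit entry */
		if (n < 0 && errno != EAGAIN)
			return -1;		/* real error */
		usleep(100);			/* empty: back off and retry */
	}
}
#endif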
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};
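/*
 * Illustrative sketch (not part of the original file): unlike "mbox", the
 * "wbox" file supports poll(); spufs_wbox_poll() raises POLLOUT when the
 * queue can take another word, so a writer can avoid busy-waiting by
 * opening the file O_NONBLOCK and polling before each write.
 */
#if 0	/* example only, compiled out */
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

static int example_wbox_write_one(int wbox_fd, uint32_t word)
{
	struct pollfd pfd = { .fd = wbox_fd, .events = POLLOUT };

	if (poll(&pfd, 1, -1) < 0)	/* wait for mailbox space */
		return -1;
	return write(wbox_fd, &word, 4) == 4 ? 0 : -1;
}
#endif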
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static int
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
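/*
 * For reference (comment added by the editor, not in the original source):
 * the DEFINE_SPUFS_ATTRIBUTE() uses above each generate a wrapper such as
 * __spufs_signal1_type_get(void *data, u64 *val) that takes state_mutex via
 * spu_acquire() (or spu_acquire_saved(), or nothing, per the __acquire
 * argument) around the plain getter, then hand the wrapper and the setter
 * to DEFINE_SPUFS_SIMPLE_ATTRIBUTE() to build the file_operations.
 */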
#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};
static int
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};


#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
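/*
 * Note (comment added by the editor, reasoning inferred from the code
 * above): cntl, mss and mfc each expose a single 4kB window of the problem
 * state area, so their mmap handlers only exist when PAGE_SIZE is 4kB
 * (SPUFS_MMAP_4K). With 64kB pages one page would span unrelated registers,
 * and mmap on these files is simply compiled out to NULL instead.
 */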
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
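/*
 * Illustrative sketch (not part of the original file): user space queues a
 * DMA by writing one struct mfc_dma_command to the "mfc" file, then reads
 * a 4-byte tag-group mask back to wait for completion. In a real program
 * the struct and MFC_GET_CMD would come from the exported SPU headers; the
 * field values here are examples only and must pass spufs_check_valid_dma().
 */
#if 0	/* example only, compiled out */
#include <stdint.h>
#include <unistd.h>

static int example_queue_get(int mfc_fd, uint32_t lsa, uint64_t ea,
			     uint16_t size, uint16_t tag)
{
	struct mfc_dma_command cmd = {
		.lsa  = lsa,		/* local store address; must share the
					   low 4 alignment bits with ea */
		.ea   = ea,		/* effective address in our mm */
		.size = size,		/* at most 16kB */
		.tag  = tag,		/* tags 0..15 are available to users */
		.cmd  = MFC_GET_CMD,	/* DMA from ea into local store */
	};
	uint32_t status;

	if (write(mfc_fd, &cmd, sizeof(cmd)) != sizeof(cmd))
		return -1;
	/* a blocking read returns once a waited-on tag group completes */
	return read(mfc_fd, &status, 4) == 4 ? 0 : -1;
}
#endif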
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (!err) {
		inode_lock(inode);
		err = spufs_mfc_flush(file, NULL);
		inode_unlock(inode);
	}
	return err;
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
static void spufs_get_proxydma_info(struct spu_context *ctx,
		struct spu_proxydma_info *info)
{
	int i;

	/* Clear padding so no uninitialized stack reaches user space. */
	memset(info, 0, sizeof(*info));
	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
		struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}
}

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;

	if (len < sizeof(info))
		return -EINVAL;

	spufs_get_proxydma_info(ctx, &info);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof(info));
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_proxydma_info info;
	int ret;

	if (len < sizeof(info))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	spufs_get_proxydma_info(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof(info));
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open = spufs_tid_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
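/*
 * Worked example for spufs_acct_time() below, with made-up numbers:
 * assume ctx->stats.times[SPU_UTIL_USER] holds 5000000 ns, and the
 * context has been sitting in SPU_UTIL_USER for a further 2000000 ns
 * since stats.tstamp.  The reported figure is then
 *
 *	(5000000 + 2000000) / NSEC_PER_MSEC = 7
 *
 * i.e. 7 ms.  The delta is only added while the context owns an SPU and
 * is still in the queried state; otherwise the recorded total stands.
 */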
static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well-defined
	 * state transitions.  However, if the context is lazily loaded, its
	 * utilization statistics are not updated, because the controlling
	 * thread is not tightly coupled with the execution of the spu
	 * context.  To compensate, calculate and apply the time delta from
	 * the last recorded state of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		time += ktime_get_ns() - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open = spufs_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}
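/*
 * A note on the switch log ring buffer used below: head and tail always
 * hold values in [0, SWITCH_LOG_BUFSIZE), both ends are serialized by
 * ctx->state_mutex, and one slot is deliberately kept empty (the writer
 * requires avail > 1), so a full buffer is distinguishable from an empty
 * one and at most SWITCH_LOG_BUFSIZE - 1 records are ever queued.
 */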
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space that is
			 * left, return the partial buffer (so far) */
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.open = spufs_switch_log_open,
	.read = spufs_switch_log_read,
	.poll = spufs_switch_log_poll,
	.release = spufs_switch_log_release,
	.llseek = no_llseek,
};
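/*
 * For reference, a single switch log record as rendered by
 * switch_log_sprint() looks like (illustrative values only):
 *
 *	1234.000567890 1 3 0 40162899712
 *
 * i.e. seconds.nanoseconds of the timestamp, the physical SPU number
 * (-1 if no SPU was attached), the event type, the event value, and the
 * timebase register at the time of the switch.
 */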
/**
 * spu_switch_log_notify - log a context switch event to a switch log reader
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}

static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open = spufs_ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
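/*
 * The tables below define the per-context files: name, file operations,
 * mode bits, and (where given) the nominal file size reported to
 * userspace.  The nosched variant leaves out the files that require the
 * context state to be saved, which a NOSCHED context never is, and the
 * debug directory exposes only the raw ".ctx" state dump.
 */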
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128]) },
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32) },
	{ "dma_info", __spufs_dma_info_read, NULL,
			sizeof(struct spu_dma_info) },
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info) },
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};
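/*
 * Note on the coredump table sizes above: entries with a read() callback
 * dump the raw, fixed-size contents of the corresponding file, while
 * entries with a get() callback dump a single 64-bit value; their
 * 19-byte size leaves room for an "0x%016llx"-style rendering of a u64
 * (18 characters plus a terminator).
 */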