/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <linux/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
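
/*
 * Illustrative only (not part of the original file): the attribute
 * protocol above means each such file holds a single number that reads
 * back as formatted text and is set by writing text in any base that
 * simple_strtol() accepts.  A hypothetical userspace sketch, with an
 * example context path:
 *
 *	int fd = open("/spu/myctx/npc", O_RDWR);
 *	char buf[24];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// e.g. "0x1234\n"
 *	write(fd, "0x2000", 6);			// hex, octal or decimal
 */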

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

static vm_fault_t
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return ret;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	/* clamp the access to the end of local store; the original
	 * compared the vma-relative offset against the absolute vm_end */
	if ((offset + len) > LS_SIZE)
		len = LS_SIZE - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
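
/*
 * Illustrative only (not part of the original file): userspace can
 * either read()/write() the "mem" file or map the whole local store;
 * the fault handler above transparently picks the saved image or the
 * live SPE memory as backing.  A hypothetical sketch ("/spu/myctx" is
 * an example path; LS_SIZE is 256kB on real hardware):
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	((volatile unsigned int *)ls)[0] = 0x1234;  // first word of LS
 */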

static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int err = 0;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait.  Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		ret = vmf_insert_pfn(vmf->vma, vmf->address,
				(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!err)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return ret;
}

#if SPUFS_MMAP_4K
static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};
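
/*
 * Illustrative only (not part of the original file): "cntl" behaves
 * like a simple attribute file, so reading it returns the SPU status
 * word and writing it feeds the run-control register.  A hypothetical
 * sketch (example path; the value written is passed straight to
 * runcntl_write above):
 *
 *	int fd = open("/spu/myctx/cntl", O_RDWR);
 *	char buf[16];
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000001"
 *	write(fd, "1", 1);		// set the run-control value to 1
 */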

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
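
/*
 * Illustrative only (not part of the original file): "regs" exposes
 * the 128 saved vector GPRs as one flat binary image (and "fpcr" the
 * floating point control register), so a debugger can fetch a single
 * register with pread().  Hypothetical sketch:
 *
 *	unsigned char gpr[16];			// one 128-bit register
 *	int fd = open("/spu/myctx/regs", O_RDONLY);
 *	pread(fd, gpr, sizeof(gpr), 3 * 16);	// read register 3
 *
 * Reading forces the context into the saved state first
 * (spu_acquire_saved), so this is for debugging, not fast paths.
 */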

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
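
/*
 * Illustrative only (not part of the original file): "mbox" never
 * blocks, so a reader either checks "mbox_stat" first or simply
 * retries on -EAGAIN, as the code above implies.  Hypothetical sketch:
 *
 *	unsigned int data;
 *	int fd = open("/spu/myctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, 4);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// mailbox empty, try again later
 */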

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->ibox_wq);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.llseek = no_llseek,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->wbox_wq);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.llseek = no_llseek,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
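
/*
 * Illustrative only (not part of the original file): a writer that
 * does not want to block can poll the "wbox" descriptor for POLLOUT
 * before writing, matching the poll mask computed above.
 * Hypothetical sketch:
 *
 *	struct pollfd pfd = { .fd = wbox_fd, .events = POLLOUT };
 *	unsigned int cmd = 0x42;
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLOUT))
 *		write(wbox_fd, &cmd, 4);	// multiples of 4 bytes
 */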

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file,
			const char __user *buf, size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static vm_fault_t
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file,
			const char __user *buf, size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static vm_fault_t
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
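
/*
 * Illustrative only (not part of the original file): writing four
 * bytes to "signal1"/"signal2" raises the corresponding SPU signal
 * notification register, while reading only returns a value that was
 * pending when the context was saved.  Hypothetical sketch:
 *
 *	unsigned int v = 1;
 *	int fd = open("/spu/myctx/signal1", O_WRONLY);
 *	write(fd, &v, 4);	// delivered via signal1_write above
 */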

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine.  The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
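
/*
 * Illustrative only (not part of the original file): a minimal use of
 * the macro, assuming a hypothetical getter that is safe without
 * acquiring the context.  The instances below (signal1_type, npc,
 * decr, ...) follow exactly this pattern with the appropriate
 * acquire mode:
 *
 *	static u64 spufs_example_get(struct spu_context *ctx)
 *	{
 *		return 0;	// hypothetical attribute value
 *	}
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_example_ops, spufs_example_get,
 *			       NULL, "0x%llx\n", SPU_ATTR_NOACQUIRE);
 */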

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};

static vm_fault_t
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
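
/*
 * Illustrative only (not part of the original file): "psmap" maps the
 * whole 128kB problem state area in one go, which saves faults when
 * userspace needs several of the register windows.  Hypothetical
 * sketch (offsets follow the comments above, e.g. signal1 at 0x14000):
 *
 *	int fd = open("/spu/myctx/psmap", O_RDWR);
 *	void *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */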

#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->mfc_wq);
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if at least one tag group has completed */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
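
/*
 * Illustrative only (not part of the original file): queueing a DMA
 * transfer from userspace means filling in struct mfc_dma_command and
 * writing it to the "mfc" file in one go.  Hypothetical sketch of a
 * 16kB get (effective address -> local store), obeying the checks in
 * spufs_check_valid_dma() above:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,		// local store address
 *		.ea   = ea,		// same 16-byte alignment as lsa
 *		.size = 0x4000,		// at most 16kB
 *		.tag  = 1,		// tags 0-15 are available to users
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));
 */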

static __poll_t spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	__poll_t mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= EPOLLIN | EPOLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (!err) {
		inode_lock(inode);
		err = spufs_mfc_flush(file, NULL);
		inode_unlock(inode);
	}
	return err;
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
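
/*
 * Illustrative only (not part of the original file): completion of the
 * tag groups recorded in ctx->tagwait is consumed by reading four
 * bytes of tag status back from "mfc"; poll() says when that read
 * would succeed.  Hypothetical sketch:
 *
 *	struct pollfd pfd = { .fd = mfc_fd, .events = POLLIN };
 *	unsigned int tagstatus;
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		read(mfc_fd, &tagstatus, 4);  // bit n set: tag group n done
 */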

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
				char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
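
/*
 * Illustrative only (not part of the original file): "dma_info" and
 * "proxydma_info" return fixed-size binary snapshots of the saved MFC
 * queues, so the natural consumer reads straight into the structure.
 * Hypothetical sketch:
 *
 *	struct spu_dma_info info;
 *	int fd = open("/spu/myctx/dma_info", O_RDONLY);
 *	if (read(fd, &info, sizeof(info)) == sizeof(info))
 *		printf("tag status %llx\n",
 *		       (unsigned long long)info.dma_info_status);
 */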
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open = spufs_tid_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		time += ktime_get_ns() - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}


static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open = spufs_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
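/*
 * Worked example of the ring arithmetic above, assuming (as in spufs.h)
 * that head and tail are unsigned and SWITCH_LOG_BUFSIZE is a power of
 * two: with head == 2 and tail == SWITCH_LOG_BUFSIZE - 2 the producer has
 * wrapped, head - tail underflows, and the modulo folds the result back
 * to 4 used entries.  Note that spu_switch_log_notify() below only
 * appends while more than one slot is free, so the ring never reaches
 * head == tail from the full side and "empty" stays unambiguous.
 */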
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}

static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
			(unsigned long long) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space that is
			 * left, return the partial buffer read so far */
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	__poll_t mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= EPOLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.open = spufs_switch_log_open,
	.read = spufs_switch_log_read,
	.poll = spufs_switch_log_poll,
	.release = spufs_switch_log_release,
	.llseek = no_llseek,
};
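/*
 * Illustrative sketch only: each record read from the switch_log file is
 * one text line in the format emitted by switch_log_sprint() above, so a
 * userspace monitor holding one such line in line[] can parse it with
 * sscanf.  handle_switch_event() is a hypothetical consumer:
 *
 *	unsigned long long sec, timebase;
 *	unsigned int nsec, type, val;
 *	int spu_id;
 *
 *	if (sscanf(line, "%llu.%u %d %u %u %llu",
 *		   &sec, &nsec, &spu_id, &type, &val, &timebase) == 6)
 *		handle_switch_event(sec, nsec, spu_id, type, val, timebase);
 */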
/**
 * spu_switch_log_notify - log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts64(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}

static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open = spufs_ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
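/*
 * The tables below describe the per-context directory trees.  Each entry
 * names a file, the file_operations backing it, its access mode, and
 * (optionally) the nominal size reported for it.  As an illustration, a
 * hypothetical new read-only u32 attribute would be added as:
 *
 *	{ "my_attr", &spufs_my_attr_fops, 0444, sizeof(u32), },
 *
 * where "my_attr" and spufs_my_attr_fops are made-up names, not part of
 * this file.
 */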
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

/*
 * The "19" sizes below are the worst case for a u64 formatted as
 * "0x%llx\n": two characters for "0x", up to 16 hex digits, and a
 * trailing newline.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};
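/*
 * Each entry above follows one of two conventions: a raw reader (second
 * field) streams up to size bytes of context state, while a value getter
 * (third field) yields a single u64 that the core dump writer formats
 * itself, hence the 19-byte worst case.  In rough pseudocode (the exact
 * signatures live in spufs.h, and the consumer is the spufs coredump
 * code, not this file):
 *
 *	for each cdr in spufs_coredump_read until cdr->name == NULL:
 *		if cdr has a raw reader:
 *			copy up to cdr->size bytes through it
 *		else:
 *			fetch the value and emit it as "0x%llx\n"
 */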