/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
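/*
 * DEFINE_SPUFS_SIMPLE_ATTRIBUTE below wraps the helpers above into a
 * complete set of file operations, in the spirit of the generic
 * DEFINE_SIMPLE_ATTRIBUTE from libfs.  A sketch of a hypothetical user
 * (the name and getter are illustrative only, not defined in this file):
 *
 *	static int spufs_foo_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      NULL, "0x%llx\n");
 */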
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
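/*
 * The "mem" file can also be mmap()ed; the fault handler below picks the
 * backing page at fault time, so the same mapping follows the context
 * between saved state (kernel memory) and the physical local store.
 * Illustrative userspace use (the path is an assumption, not defined
 * here):
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */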
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}


static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
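/*
 * spufs_ps_nopfn() resolves faults on mappings of the problem state
 * area.  ps_offs is the byte offset of the mapped sub-area within the
 * problem state window and ps_size its length; the control area, for
 * instance, passes 0x4000/0x1000.  Unlike local store, problem state
 * pages only exist while the context is loaded on a physical SPU, so
 * the handler may have to wait for the context to be scheduled in.
 */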
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
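/*
 * The "regs" file exposes the SPU general purpose registers from the
 * context save area.  Both read and write go through
 * spu_acquire_saved(), so accessing this file forces a running context
 * into the saved state first.
 */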
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
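/*
 * The mailbox files below behave like pipes: they are opened
 * non-seekable and transfer data in 32-bit words.  A minimal reader,
 * as a userspace sketch (the descriptor setup is assumed, not shown):
 *
 *	u32 data;
 *	if (read(mbox_fd, &data, 4) == 4)
 *		handle_mailbox_word(data);
 */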
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}
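/*
 * Writing the wbox file mirrors the ibox read path.  An O_NONBLOCK
 * writer might look like this (userspace sketch, error handling
 * elided):
 *
 *	if (write(wbox_fd, &word, 4) < 0 && errno == EAGAIN)
 *		poll() for POLLOUT, then retry the write
 */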
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
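/*
 * The remaining mmap-capable files all map slices of the problem state
 * window through spufs_ps_nopfn().  With 4k pages the offsets used in
 * this file are: mss at 0x0000, mfc at 0x3000, cntl at 0x4000, signal1
 * at 0x14000, signal2 at 0x1c000, and psmap covering the whole
 * 0x00000 - 0x1ffff range.
 */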
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
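/*
 * The "mfc" file lets userspace queue DMA transfers between local store
 * and effective addresses, and wait for completion by tag group.
 * Illustrative sequence (userspace sketch; the field values are
 * assumptions, not requirements):
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,
 *		.ea   = (uint64_t)(unsigned long)buf,
 *		.size = 16384,
 *		.tag  = 1,
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));	queue the transfer
 *	read(mfc_fd, &status, 4);		wait for the tag group
 */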
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
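/*
 * The *_info files below report state captured in the context save
 * area.  Readers take spu_acquire_saved() to force a context save and
 * then csa.register_lock for a consistent snapshot.
 */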
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	/* the snapshot must fit into a single read */
	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
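/*
 * A worked example of the accounting above, with made-up numbers: if a
 * loaded context has recorded 4,000,000ns in SPU_UTIL_USER and its last
 * state change was stamped 2,500,000ns ago, the reported figure is
 *
 *	(4,000,000 + 2,500,000) / NSEC_PER_MSEC = 6,500,000 / 1,000,000 = 6ms
 *
 * with the integer division truncating the half millisecond.
 */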
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
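/*
 * Not part of the kernel build: a sketch of parsing the "stat" format
 * emitted by spufs_show_stat() above.  Only the leading fields are
 * decoded, and the context path is hypothetical.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char state[16];
 *		unsigned long long user_ms, system_ms, iowait_ms, loaded_ms;
 *		FILE *f = fopen("/spu/myctx/stat", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%15s %llu %llu %llu %llu", state, &user_ms,
 *			   &system_ms, &iowait_ms, &loaded_ms) == 5)
 *			printf("%s: user %llums system %llums\n",
 *			       state, user_ms, system_ms);
 *		fclose(f);
 *		return 0;
 *	}
 */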
"tid", &spufs_tid_fops, 0444, }, 2424 { "stat", &spufs_stat_fops, 0444, }, 2425 {}, 2426 }; 2427 2428 struct tree_descr spufs_dir_nosched_contents[] = { 2429 { "capabilities", &spufs_caps_fops, 0444, }, 2430 { "mem", &spufs_mem_fops, 0666, }, 2431 { "mbox", &spufs_mbox_fops, 0444, }, 2432 { "ibox", &spufs_ibox_fops, 0444, }, 2433 { "wbox", &spufs_wbox_fops, 0222, }, 2434 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2435 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2436 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2437 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2438 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2439 { "signal1_type", &spufs_signal1_type, 0666, }, 2440 { "signal2_type", &spufs_signal2_type, 0666, }, 2441 { "mss", &spufs_mss_fops, 0666, }, 2442 { "mfc", &spufs_mfc_fops, 0666, }, 2443 { "cntl", &spufs_cntl_fops, 0666, }, 2444 { "npc", &spufs_npc_ops, 0666, }, 2445 { "psmap", &spufs_psmap_fops, 0666, }, 2446 { "phys-id", &spufs_id_ops, 0666, }, 2447 { "object-id", &spufs_object_id_ops, 0666, }, 2448 { "tid", &spufs_tid_fops, 0444, }, 2449 { "stat", &spufs_stat_fops, 0444, }, 2450 {}, 2451 }; 2452 2453 struct spufs_coredump_reader spufs_coredump_read[] = { 2454 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2455 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2456 { "lslr", NULL, spufs_lslr_get, 19 }, 2457 { "decr", NULL, spufs_decr_get, 19 }, 2458 { "decr_status", NULL, spufs_decr_status_get, 19 }, 2459 { "mem", __spufs_mem_read, NULL, LS_SIZE, }, 2460 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, 2461 { "signal1_type", NULL, spufs_signal1_type_get, 19 }, 2462 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, 2463 { "signal2_type", NULL, spufs_signal2_type_get, 19 }, 2464 { "event_mask", NULL, spufs_event_mask_get, 19 }, 2465 { "event_status", NULL, spufs_event_status_get, 19 }, 2466 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, 2467 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, 2468 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, 2469 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, 2470 { "proxydma_info", __spufs_proxydma_info_read, 2471 NULL, sizeof(struct spu_proxydma_info)}, 2472 { "object-id", NULL, spufs_object_id_get, 19 }, 2473 { "npc", NULL, spufs_npc_get, 19 }, 2474 { NULL }, 2475 }; 2476