/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
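/*
 * Illustrative sketch only (hypothetical mount point and context name,
 * not part of this file): from userspace these attribute files behave
 * like libfs simple attributes, e.g.
 *
 *	int fd = open("/spu/myctx/npc", O_RDWR);
 *	char buf[24];
 *	read(fd, buf, sizeof(buf));	returns e.g. "0x1234\n"
 *	write(fd, "0x100", 5);		parsed below via simple_strtol()
 */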
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
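/*
 * Illustrative sketch only (hypothetical path): the "mem" file gives
 * read/write access to the SPU local store, clamped to LS_SIZE as
 * implemented above, so a loader or debugger can do:
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	pwrite(fd, image, image_size, 0);	download a program image
 *	pread(fd, buf, 4096, 0);		inspect the first page
 */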
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
		 address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     & ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
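/*
 * Minimal mmap sketch for the file_operations above (illustrative
 * only): spufs_mem_mmap() rejects VMAs without VM_SHARED, because the
 * fault handler inserts raw pfns, so a mapping looks like:
 *
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */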
static int spufs_ps_fault(struct vm_area_struct *vma,
			  struct vm_fault *vmf,
			  unsigned long ps_offs,
			  unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed
	 * while we're in spu_wait.  Grab an extra reference so it isn't
	 * destroyed in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
			      (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
				spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
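/*
 * Usage note (illustrative): a read of "cntl" returns the SPU status
 * register formatted with "0x%08lx" above; a write is passed through
 * to runcntl_write(), so writing "1" (SPU_RUNCNTL_RUNNABLE, assuming
 * the usual definition) starts the SPU.
 */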
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				       lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
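/*
 * Layout note: "regs" exposes the 128 general purpose registers from
 * the local store context save area, 16 bytes each, so a debugger
 * would read register N with (sketch, hypothetical fd):
 *
 *	pread(fd, &reg, 16, N * 16);
 */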
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				       &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
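/*
 * Illustrative drain loop for "mbox" (hypothetical fd): reads never
 * block, so callers treat -EAGAIN as "mailbox empty":
 *
 *	u32 data;
 *	while (read(fd, &data, 4) == 4)
 *		handle_mbox_data(data);
 */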
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
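/*
 * Unlike "mbox", the "ibox" file supports blocking reads, poll() and
 * SIGIO via fasync, so an event loop can sleep until the SPU writes
 * to the interrupt mailbox (sketch, hypothetical fd):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	read(fd, &data, 4);
 */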
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
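/*
 * Note on the read path above: reads go through the saved context
 * image and return data only if the channel count indicates a pending
 * value; otherwise the file reads as EOF.  Writes are forwarded to
 * signal1_write() on a live or saved context.
 */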
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}
static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
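/*
 * Usage note (illustrative): "psmap" maps the whole 128kB problem
 * state area in a single call, e.g.
 *
 *	ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, fd, 0);
 *
 * with the individual register offsets then found relative to ps.
 */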
#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
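/*
 * spufs_read_mfc_tagstatus() above implements the completion half of
 * the "mfc" protocol: it reports finished tag groups out of
 * ctx->tagwait, which is populated by spufs_mfc_write() below when a
 * DMA command is queued.
 */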
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
			 cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
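/*
 * Illustrative userspace sketch of the protocol implemented above
 * (hypothetical fd and buffer; field names assumed to follow struct
 * mfc_dma_command as defined in asm/spu.h): queue a 16kB get into
 * local store offset 0 with tag 0, then block until the tag group
 * completes:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,
 *		.ea   = (uint64_t)(unsigned long)buf,
 *		.size = 0x4000,
 *		.tag  = 0,
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	u32 status;
 *	write(fd, &cmd, sizeof(cmd));
 *	read(fd, &status, 4);
 */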
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		 free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}
static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
*/ 2024 return ctx->object_id; 2025 } 2026 2027 static int spufs_object_id_set(void *data, u64 id) 2028 { 2029 struct spu_context *ctx = data; 2030 ctx->object_id = id; 2031 2032 return 0; 2033 } 2034 2035 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 2036 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); 2037 2038 static u64 spufs_lslr_get(struct spu_context *ctx) 2039 { 2040 return ctx->csa.priv2.spu_lslr_RW; 2041 } 2042 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", 2043 SPU_ATTR_ACQUIRE_SAVED); 2044 2045 static int spufs_info_open(struct inode *inode, struct file *file) 2046 { 2047 struct spufs_inode_info *i = SPUFS_I(inode); 2048 struct spu_context *ctx = i->i_ctx; 2049 file->private_data = ctx; 2050 return 0; 2051 } 2052 2053 static int spufs_caps_show(struct seq_file *s, void *private) 2054 { 2055 struct spu_context *ctx = s->private; 2056 2057 if (!(ctx->flags & SPU_CREATE_NOSCHED)) 2058 seq_puts(s, "sched\n"); 2059 if (!(ctx->flags & SPU_CREATE_ISOLATE)) 2060 seq_puts(s, "step\n"); 2061 return 0; 2062 } 2063 2064 static int spufs_caps_open(struct inode *inode, struct file *file) 2065 { 2066 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); 2067 } 2068 2069 static const struct file_operations spufs_caps_fops = { 2070 .open = spufs_caps_open, 2071 .read = seq_read, 2072 .llseek = seq_lseek, 2073 .release = single_release, 2074 }; 2075 2076 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, 2077 char __user *buf, size_t len, loff_t *pos) 2078 { 2079 u32 data; 2080 2081 /* EOF if there's no entry in the mbox */ 2082 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) 2083 return 0; 2084 2085 data = ctx->csa.prob.pu_mb_R; 2086 2087 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2088 } 2089 2090 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, 2091 size_t len, loff_t *pos) 2092 { 2093 int ret; 2094 struct spu_context *ctx = file->private_data; 2095 2096 if (!access_ok(VERIFY_WRITE, buf, len)) 2097 return -EFAULT; 2098 2099 ret = spu_acquire_saved(ctx); 2100 if (ret) 2101 return ret; 2102 spin_lock(&ctx->csa.register_lock); 2103 ret = __spufs_mbox_info_read(ctx, buf, len, pos); 2104 spin_unlock(&ctx->csa.register_lock); 2105 spu_release_saved(ctx); 2106 2107 return ret; 2108 } 2109 2110 static const struct file_operations spufs_mbox_info_fops = { 2111 .open = spufs_info_open, 2112 .read = spufs_mbox_info_read, 2113 .llseek = generic_file_llseek, 2114 }; 2115 2116 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx, 2117 char __user *buf, size_t len, loff_t *pos) 2118 { 2119 u32 data; 2120 2121 /* EOF if there's no entry in the ibox */ 2122 if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) 2123 return 0; 2124 2125 data = ctx->csa.priv2.puint_mb_R; 2126 2127 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2128 } 2129 2130 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, 2131 size_t len, loff_t *pos) 2132 { 2133 struct spu_context *ctx = file->private_data; 2134 int ret; 2135 2136 if (!access_ok(VERIFY_WRITE, buf, len)) 2137 return -EFAULT; 2138 2139 ret = spu_acquire_saved(ctx); 2140 if (ret) 2141 return ret; 2142 spin_lock(&ctx->csa.register_lock); 2143 ret = __spufs_ibox_info_read(ctx, buf, len, pos); 2144 spin_unlock(&ctx->csa.register_lock); 2145 spu_release_saved(ctx); 2146 2147 return ret; 2148 } 2149 2150 static const struct file_operations spufs_ibox_info_fops = { 2151 .open = spufs_info_open, 2152 .read = 
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well-defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
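/*
 * Worked example for the lazy-accounting path above, with illustrative
 * numbers: if ctx->stats.times[SPU_UTIL_USER] holds 5000000ns and the
 * loaded context has spent a further 2000000ns in that state since
 * ctx->stats.tstamp, spufs_acct_time() reports (5000000 + 2000000) /
 * NSEC_PER_MSEC = 7 milliseconds.
 */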
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;

	/*
	 * We (ab-)use the mapping_lock here because it serves a similar
	 * purpose for synchronizing open/close elsewhere.  Maybe it should
	 * be renamed eventually.
	 */
	mutex_lock(&ctx->mapping_lock);
	if (ctx->switch_log) {
		spin_lock(&ctx->switch_log->lock);
		ctx->switch_log->head = 0;
		ctx->switch_log->tail = 0;
		spin_unlock(&ctx->switch_log->lock);
	} else {
		/*
		 * We allocate the switch log data structures on first open.
		 * They will never be freed because we assume a context will
		 * be traced until it goes away.
		 */
		ctx->switch_log = kzalloc(sizeof(struct switch_log) +
			SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
			GFP_KERNEL);
		if (!ctx->switch_log)
			goto out;
		spin_lock_init(&ctx->switch_log->lock);
		init_waitqueue_head(&ctx->switch_log->wait);
	}
	mutex_unlock(&ctx->mapping_lock);

	return 0;
 out:
	mutex_unlock(&ctx->mapping_lock);
	return -ENOMEM;
}
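/*
 * The switch log is a ring buffer: spu_switch_log_notify() below advances
 * head, readers advance tail, and both indices stay in the range
 * [0, SWITCH_LOG_BUFSIZE).  Occupancy is (head - tail) modulo
 * SWITCH_LOG_BUFSIZE, so e.g. head == 2 and tail == SWITCH_LOG_BUFSIZE - 3
 * means five entries are pending.  Because the producer only appends while
 * spufs_switch_log_avail() > 1, at least one slot always stays free and
 * head == tail unambiguously means the log is empty.
 */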
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	/* len is a size_t and can never be negative, so only check buf */
	if (!buf)
		return -EINVAL;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (file->f_flags & O_NONBLOCK) {
			if (spufs_switch_log_used(ctx) <= 0)
				return cnt ? cnt : -EAGAIN;
		} else {
			/* Wait for data in buffer */
			error = wait_event_interruptible(ctx->switch_log->wait,
					spufs_switch_log_used(ctx) > 0);
			if (error)
				break;
		}

		spin_lock(&ctx->switch_log->lock);
		if (ctx->switch_log->head == ctx->switch_log->tail) {
			/* multiple readers race? */
			spin_unlock(&ctx->switch_log->lock);
			continue;
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len) {
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		}

		spin_unlock(&ctx->switch_log->lock);

		/*
		 * If the record is larger than the space still available,
		 * return the partial buffer we have so far.
		 */
		if (width >= len)
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;

	poll_wait(file, &ctx->switch_log->wait, wait);

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.owner	= THIS_MODULE,
	.open	= spufs_switch_log_open,
	.read	= spufs_switch_log_read,
	.poll	= spufs_switch_log_poll,
};

void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	spin_lock(&ctx->switch_log->lock);
	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}
	spin_unlock(&ctx->switch_log->lock);

	wake_up(&ctx->switch_log->wait);
}
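/*
 * Sketch of a userspace consumer, illustrative only and using the same
 * example path as above.  Each record is one text line in the format
 * produced by switch_log_sprint(): "<sec>.<nsec> <spu_id> <type> <val>
 * <timebase>".  A single read() may return several records, and blocks
 * until at least one is queued unless the file was opened O_NONBLOCK:
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/spu/myctx/switch_log", O_RDONLY);
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */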
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %lx %lx %lx %lx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
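/*
 * Contexts created with SPU_CREATE_NOSCHED are never scheduled out, so
 * the directory below leaves out the files that rely on
 * spu_acquire_saved() to inspect a saved register image (regs, fpcr,
 * lslr, srr0, decr, the *_info snapshots, ...) as well as switch_log,
 * which traces scheduling events.
 */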
"stat", &spufs_stat_fops, 0444, }, 2670 { "switch_log", &spufs_switch_log_fops, 0444 }, 2671 {}, 2672 }; 2673 2674 struct spufs_tree_descr spufs_dir_nosched_contents[] = { 2675 { "capabilities", &spufs_caps_fops, 0444, }, 2676 { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, 2677 { "mbox", &spufs_mbox_fops, 0444, }, 2678 { "ibox", &spufs_ibox_fops, 0444, }, 2679 { "wbox", &spufs_wbox_fops, 0222, }, 2680 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, 2681 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, 2682 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, 2683 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2684 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2685 { "signal1_type", &spufs_signal1_type, 0666, }, 2686 { "signal2_type", &spufs_signal2_type, 0666, }, 2687 { "mss", &spufs_mss_fops, 0666, }, 2688 { "mfc", &spufs_mfc_fops, 0666, }, 2689 { "cntl", &spufs_cntl_fops, 0666, }, 2690 { "npc", &spufs_npc_ops, 0666, }, 2691 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, 2692 { "phys-id", &spufs_id_ops, 0666, }, 2693 { "object-id", &spufs_object_id_ops, 0666, }, 2694 { "tid", &spufs_tid_fops, 0444, }, 2695 { "stat", &spufs_stat_fops, 0444, }, 2696 {}, 2697 }; 2698 2699 struct spufs_tree_descr spufs_dir_debug_contents[] = { 2700 { ".ctx", &spufs_ctx_fops, 0444, }, 2701 {}, 2702 }; 2703 2704 struct spufs_coredump_reader spufs_coredump_read[] = { 2705 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2706 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2707 { "lslr", NULL, spufs_lslr_get, 19 }, 2708 { "decr", NULL, spufs_decr_get, 19 }, 2709 { "decr_status", NULL, spufs_decr_status_get, 19 }, 2710 { "mem", __spufs_mem_read, NULL, LS_SIZE, }, 2711 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, 2712 { "signal1_type", NULL, spufs_signal1_type_get, 19 }, 2713 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, 2714 { "signal2_type", NULL, spufs_signal2_type_get, 19 }, 2715 { "event_mask", NULL, spufs_event_mask_get, 19 }, 2716 { "event_status", NULL, spufs_event_status_get, 19 }, 2717 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, 2718 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, 2719 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, 2720 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, 2721 { "proxydma_info", __spufs_proxydma_info_read, 2722 NULL, sizeof(struct spu_proxydma_info)}, 2723 { "object-id", NULL, spufs_object_id_get, 19 }, 2724 { "npc", NULL, spufs_npc_get, 19 }, 2725 { NULL }, 2726 }; 2727