1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #undef DEBUG
24 
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/export.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 
34 #include <asm/io.h>
35 #include <asm/time.h>
36 #include <asm/spu.h>
37 #include <asm/spu_info.h>
38 #include <asm/uaccess.h>
39 
40 #include "spufs.h"
41 #include "sputrace.h"
42 
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)	/* nonzero when the kernel runs with 4k pages */
44 
45 /* Simple attribute files */
/* Per-open state for a simple attribute file (see spufs_attr_open). */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read callback; NULL for write-only files */
	int (*set)(void *, u64);	/* write callback; NULL for read-only files */
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;		/* opaque argument passed to get/set */
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};
55 
56 static int spufs_attr_open(struct inode *inode, struct file *file,
57 		int (*get)(void *, u64 *), int (*set)(void *, u64),
58 		const char *fmt)
59 {
60 	struct spufs_attr *attr;
61 
62 	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63 	if (!attr)
64 		return -ENOMEM;
65 
66 	attr->get = get;
67 	attr->set = set;
68 	attr->data = inode->i_private;
69 	attr->fmt = fmt;
70 	mutex_init(&attr->mutex);
71 	file->private_data = attr;
72 
73 	return nonseekable_open(inode, file);
74 }
75 
76 static int spufs_attr_release(struct inode *inode, struct file *file)
77 {
78        kfree(file->private_data);
79 	return 0;
80 }
81 
/*
 * Read handler shared by all simple attribute files: on the first read
 * the value returned by ->get() is formatted into get_buf; continuation
 * reads are served from that cached buffer.
 */
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;	/* write-only attribute */

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
114 
/*
 * Write handler shared by all simple attribute files: copy at most
 * sizeof(set_buf)-1 bytes from user space, parse a number and hand it
 * to ->set().  The full input length is claimed even when the buffer
 * was truncated.
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;	/* read-only attribute */

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	/* NOTE(review): simple_strtol parses into a long although the
	 * attribute is u64; inputs above LONG_MAX rely on conversion
	 * wrap-around — confirm callers never need the full u64 range. */
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
144 
/*
 * Define a file_operations called __fops for a simple attribute file
 * backed by __get/__set, formatting reads with __fmt.
 * __simple_attr_check_format() type-checks __fmt against u64 at
 * compile time.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
158 
159 
160 static int
161 spufs_mem_open(struct inode *inode, struct file *file)
162 {
163 	struct spufs_inode_info *i = SPUFS_I(inode);
164 	struct spu_context *ctx = i->i_ctx;
165 
166 	mutex_lock(&ctx->mapping_lock);
167 	file->private_data = ctx;
168 	if (!i->i_openers++)
169 		ctx->local_store = inode->i_mapping;
170 	mutex_unlock(&ctx->mapping_lock);
171 	return 0;
172 }
173 
174 static int
175 spufs_mem_release(struct inode *inode, struct file *file)
176 {
177 	struct spufs_inode_info *i = SPUFS_I(inode);
178 	struct spu_context *ctx = i->i_ctx;
179 
180 	mutex_lock(&ctx->mapping_lock);
181 	if (!--i->i_openers)
182 		ctx->local_store = NULL;
183 	mutex_unlock(&ctx->mapping_lock);
184 	return 0;
185 }
186 
/* Copy out of the local store; caller must hold the context acquired. */
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}
195 
196 static ssize_t
197 spufs_mem_read(struct file *file, char __user *buffer,
198 				size_t size, loff_t *pos)
199 {
200 	struct spu_context *ctx = file->private_data;
201 	ssize_t ret;
202 
203 	ret = spu_acquire(ctx);
204 	if (ret)
205 		return ret;
206 	ret = __spufs_mem_read(ctx, buffer, size, pos);
207 	spu_release(ctx);
208 
209 	return ret;
210 }
211 
/*
 * Write to the SPU local store.  Writes beyond LS_SIZE are truncated by
 * simple_write_to_buffer; starting past the end returns -EFBIG.
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}
234 
/*
 * Page fault handler for the local-store mapping.  While the context is
 * saved, faults are backed by the vmalloc'ed CSA copy (mapped cacheable);
 * while it is loaded, the physical local store is mapped write-combining.
 * Always returns VM_FAULT_NOPAGE so the access is retried when nothing
 * could be inserted (e.g. spu_acquire was interrupted).
 */
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}
265 
266 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
267 				unsigned long address,
268 				void *buf, int len, int write)
269 {
270 	struct spu_context *ctx = vma->vm_file->private_data;
271 	unsigned long offset = address - vma->vm_start;
272 	char *local_store;
273 
274 	if (write && !(vma->vm_flags & VM_WRITE))
275 		return -EACCES;
276 	if (spu_acquire(ctx))
277 		return -EINTR;
278 	if ((offset + len) > vma->vm_end)
279 		len = vma->vm_end - offset;
280 	local_store = ctx->ops->get_ls(ctx);
281 	if (write)
282 		memcpy_toio(local_store + offset, buf, len);
283 	else
284 		memcpy_fromio(buf, local_store + offset, len);
285 	spu_release(ctx);
286 	return len;
287 }
288 
/* VM callbacks for "mem" mappings: lazy pfn insertion plus ptrace-style
 * access through ->access(). */
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};
293 
294 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
295 {
296 	if (!(vma->vm_flags & VM_SHARED))
297 		return -EINVAL;
298 
299 	vma->vm_flags |= VM_IO | VM_PFNMAP;
300 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
301 
302 	vma->vm_ops = &spufs_mem_mmap_vmops;
303 	return 0;
304 }
305 
/* File operations for "mem": read/write/mmap the SPU local store. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
314 
/*
 * Common fault handler for problem-state mappings.  ps_offs/ps_size
 * select the window of the problem-state area being mapped.  Faults on
 * a saved context wait (outside mmap_sem) until the context is loaded
 * again; faults on a running context insert the physical pfn directly.
 */
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;	/* interrupted: nothing acquired, just retry */

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/* NOTE(review): on spufs_wait failure (ret != 0) the context lock
	 * appears to have been dropped already, hence the !ret guard —
	 * confirm against the spufs_wait definition. */
	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}
370 
371 #if SPUFS_MMAP_4K
/* Fault handler for the control-area mapping at problem-state 0x4000. */
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}
377 
/* VM callbacks for the "cntl" mapping. */
static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};
381 
382 /*
383  * mmap support for problem state control area [0x4000 - 0x4fff].
384  */
/* Shared, uncached mappings only; pages are inserted lazily on fault. */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
396 #else /* SPUFS_MMAP_4K */
397 #define spufs_cntl_mmap NULL
398 #endif /* !SPUFS_MMAP_4K */
399 
400 static int spufs_cntl_get(void *data, u64 *val)
401 {
402 	struct spu_context *ctx = data;
403 	int ret;
404 
405 	ret = spu_acquire(ctx);
406 	if (ret)
407 		return ret;
408 	*val = ctx->ops->status_read(ctx);
409 	spu_release(ctx);
410 
411 	return 0;
412 }
413 
414 static int spufs_cntl_set(void *data, u64 val)
415 {
416 	struct spu_context *ctx = data;
417 	int ret;
418 
419 	ret = spu_acquire(ctx);
420 	if (ret)
421 		return ret;
422 	ctx->ops->runcntl_write(ctx, val);
423 	spu_release(ctx);
424 
425 	return 0;
426 }
427 
/*
 * Open the "cntl" file: register the mapping on first open, then defer
 * to the generic simple-attribute machinery with the cntl accessors.
 */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}
441 
442 static int
443 spufs_cntl_release(struct inode *inode, struct file *file)
444 {
445 	struct spufs_inode_info *i = SPUFS_I(inode);
446 	struct spu_context *ctx = i->i_ctx;
447 
448 	simple_attr_release(inode, file);
449 
450 	mutex_lock(&ctx->mapping_lock);
451 	if (!--i->i_openers)
452 		ctx->cntl = NULL;
453 	mutex_unlock(&ctx->mapping_lock);
454 	return 0;
455 }
456 
/* File operations for "cntl": simple attribute plus problem-state mmap. */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};
465 
466 static int
467 spufs_regs_open(struct inode *inode, struct file *file)
468 {
469 	struct spufs_inode_info *i = SPUFS_I(inode);
470 	file->private_data = i->i_ctx;
471 	return 0;
472 }
473 
/* Copy the saved GPR array out; caller must hold the context saved. */
static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}
482 
/* Read the SPU general-purpose registers from the saved context. */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
502 
/* Write the SPU general-purpose registers into the saved context. */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}
524 
/* File operations for "regs": access the saved GPR array. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
531 
/* Copy the saved FPCR out; caller must hold the context saved. */
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}
540 
/* Read the floating-point control register from the saved context. */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
555 
/* Write the floating-point control register into the saved context. */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}
577 
/* File operations for "fpcr": access the saved FP control register. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
584 
585 /* generic open function for all pipe-like files */
586 static int spufs_pipe_open(struct inode *inode, struct file *file)
587 {
588 	struct spufs_inode_info *i = SPUFS_I(inode);
589 	file->private_data = i->i_ctx;
590 
591 	return nonseekable_open(inode, file);
592 }
593 
594 /*
595  * Read as many bytes from the mailbox as possible, until
596  * one of the conditions becomes true:
597  *
598  * - no more data available in the mailbox
599  * - end of the user provided buffer
600  * - end of the mapped area
601  */
/*
 * Non-blocking drain of the outbound mailbox: copy 4-byte words to the
 * user buffer until the mailbox empties, the buffer fills or a fault
 * ends the copy.  Returns -EAGAIN when nothing was available at all.
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;	/* mailbox empty */

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}
646 
/* File operations for "mbox": read-only, non-blocking mailbox drain. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};
652 
/* Report the outbound-mailbox entry count (low byte of the status word). */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}
676 
/* File operations for "mbox_stat". */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
682 
683 /* low-level ibox access function */
/* low-level ibox access function; returns the number of words read */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
688 
689 static int spufs_ibox_fasync(int fd, struct file *file, int on)
690 {
691 	struct spu_context *ctx = file->private_data;
692 
693 	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
694 }
695 
696 /* interrupt-level ibox callback function. */
/* interrupt-level ibox callback function: wake readers/pollers and
 * deliver SIGIO to fasync subscribers. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* spu may have no context bound at interrupt time */
	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
707 
708 /*
709  * Read as many bytes from the interrupt mailbox as possible, until
710  * one of the conditions becomes true:
711  *
712  * - no more data available in the mailbox
713  * - end of the user provided buffer
714  * - end of the mapped area
715  *
716  * If the file is opened without O_NONBLOCK, we wait here until
717  * any data is available, but return when we have been able to
718  * read something.
719  */
/*
 * Read from the interrupting mailbox: blocks (unless O_NONBLOCK) until
 * the first word arrives, then drains additional words opportunistically.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		/* NOTE(review): on failure spufs_wait appears to drop the
		 * context lock itself, hence "goto out" rather than
		 * out_unlock — confirm against the spufs_wait definition. */
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
777 
/* Poll for ibox readability via the combined mailbox status word. */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
795 
/* File operations for "ibox": blocking read, poll and SIGIO support. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};
803 
/* Report the ibox entry count (bits 16-23 of the status word). */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}
825 
/* File operations for "ibox_stat". */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};
831 
832 /* low-level mailbox write */
/* low-level mailbox write; returns the number of words written */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
837 
838 static int spufs_wbox_fasync(int fd, struct file *file, int on)
839 {
840 	struct spu_context *ctx = file->private_data;
841 	int ret;
842 
843 	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
844 
845 	return ret;
846 }
847 
848 /* interrupt-level wbox callback function. */
/* interrupt-level wbox callback function: wake writers/pollers and
 * deliver SIGIO to fasync subscribers. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* spu may have no context bound at interrupt time */
	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
859 
860 /*
861  * Write as many bytes to the interrupt mailbox as possible, until
862  * one of the conditions becomes true:
863  *
864  * - the mailbox is full
865  * - end of the user provided buffer
866  * - end of the mapped area
867  *
868  * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
870  * write something.
871  */
/*
 * Write to the SPU-bound mailbox: blocks (unless O_NONBLOCK) until the
 * first word fits, then pushes additional words while space remains.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	/* fetch the first word before taking the context lock */
	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		/* NOTE(review): on failure spufs_wait appears to drop the
		 * context lock itself, hence "goto out" rather than
		 * out_unlock — confirm against the spufs_wait definition. */
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;	/* mailbox full */
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
927 
/* Poll for wbox writability via the combined mailbox status word. */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
945 
/* File operations for "wbox": blocking write, poll and SIGIO support. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};
953 
/* Report the wbox free-slot count (bits 8-15 of the status word). */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}
975 
/* File operations for "wbox_stat". */
static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
981 
982 static int spufs_signal1_open(struct inode *inode, struct file *file)
983 {
984 	struct spufs_inode_info *i = SPUFS_I(inode);
985 	struct spu_context *ctx = i->i_ctx;
986 
987 	mutex_lock(&ctx->mapping_lock);
988 	file->private_data = ctx;
989 	if (!i->i_openers++)
990 		ctx->signal1 = inode->i_mapping;
991 	mutex_unlock(&ctx->mapping_lock);
992 	return nonseekable_open(inode, file);
993 }
994 
/* Release "signal1"; the last opener clears the mapping pointer. */
static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1007 
1008 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
1009 			size_t len, loff_t *pos)
1010 {
1011 	int ret = 0;
1012 	u32 data;
1013 
1014 	if (len < 4)
1015 		return -EINVAL;
1016 
1017 	if (ctx->csa.spu_chnlcnt_RW[3]) {
1018 		data = ctx->csa.spu_chnldata_RW[3];
1019 		ret = 4;
1020 	}
1021 
1022 	if (!ret)
1023 		goto out;
1024 
1025 	if (copy_to_user(buf, &data, 4))
1026 		return -EFAULT;
1027 
1028 out:
1029 	return ret;
1030 }
1031 
/* Read the pending signal1 value with the context forced into saved state. */
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
1046 
/* Write a 32-bit value to signal notification register 1. */
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	/* always a full word, regardless of the requested length */
	return 4;
}
1070 
/*
 * Fault handler for the signal1 mapping.  With 4k pages only the
 * signal1 page at problem-state 0x14000 is mapped; with 64k pages one
 * page necessarily covers both signal areas starting at 0x10000.
 */
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}
1085 
/* VM callbacks for the "signal1" mapping. */
static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};
1089 
1090 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1091 {
1092 	if (!(vma->vm_flags & VM_SHARED))
1093 		return -EINVAL;
1094 
1095 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1096 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1097 
1098 	vma->vm_ops = &spufs_signal1_mmap_vmops;
1099 	return 0;
1100 }
1101 
/* File operations for "signal1" on scheduled contexts. */
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
1110 
/* "signal1" for nosched contexts: no read (reading would require saving). */
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
1118 
/* Open "signal2": record the mapping on first open, disallow seeking. */
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1131 
/* Release "signal2"; the last opener clears the mapping pointer. */
static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1144 
1145 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1146 			size_t len, loff_t *pos)
1147 {
1148 	int ret = 0;
1149 	u32 data;
1150 
1151 	if (len < 4)
1152 		return -EINVAL;
1153 
1154 	if (ctx->csa.spu_chnlcnt_RW[4]) {
1155 		data =  ctx->csa.spu_chnldata_RW[4];
1156 		ret = 4;
1157 	}
1158 
1159 	if (!ret)
1160 		goto out;
1161 
1162 	if (copy_to_user(buf, &data, 4))
1163 		return -EFAULT;
1164 
1165 out:
1166 	return ret;
1167 }
1168 
/* Read the pending signal2 value with the context forced into saved state. */
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
1183 
/* Write a 32-bit value to signal notification register 2. */
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	/* always a full word, regardless of the requested length */
	return 4;
}
1207 
#if SPUFS_MMAP_4K
/* Fault handler for mmap of the signal2 problem-state area. */
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

/*
 * mmap of the signal2 file: shared, uncached PFN mapping backed by the
 * fault handler above.  Only available with 4k base pages.
 */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

/* As above, but without .read: used for contexts created SPU_CREATE_NOSCHED. */
static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
1259 
1260 /*
1261  * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1262  * work of acquiring (or not) the SPU context before calling through
1263  * to the actual get routine. The set routine is called directly.
1264  */
1265 #define SPU_ATTR_NOACQUIRE	0
1266 #define SPU_ATTR_ACQUIRE	1
1267 #define SPU_ATTR_ACQUIRE_SAVED	2
1268 
1269 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
1270 static int __##__get(void *data, u64 *val)				\
1271 {									\
1272 	struct spu_context *ctx = data;					\
1273 	int ret = 0;							\
1274 									\
1275 	if (__acquire == SPU_ATTR_ACQUIRE) {				\
1276 		ret = spu_acquire(ctx);					\
1277 		if (ret)						\
1278 			return ret;					\
1279 		*val = __get(ctx);					\
1280 		spu_release(ctx);					\
1281 	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
1282 		ret = spu_acquire_saved(ctx);				\
1283 		if (ret)						\
1284 			return ret;					\
1285 		*val = __get(ctx);					\
1286 		spu_release_saved(ctx);					\
1287 	} else								\
1288 		*val = __get(ctx);					\
1289 									\
1290 	return 0;							\
1291 }									\
1292 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1293 
/* Set the signal1 notification type (OR vs overwrite mode) on a live context. */
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Read back the current signal1 notification type; locking via DEFINE below. */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


/* Same as above, for signal notification register 2. */
static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1336 
#if SPUFS_MMAP_4K
/* Fault handler for the MSS (multisource sync) problem-state mapping. */
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1365 
1366 static int spufs_mss_open(struct inode *inode, struct file *file)
1367 {
1368 	struct spufs_inode_info *i = SPUFS_I(inode);
1369 	struct spu_context *ctx = i->i_ctx;
1370 
1371 	file->private_data = i->i_ctx;
1372 
1373 	mutex_lock(&ctx->mapping_lock);
1374 	if (!i->i_openers++)
1375 		ctx->mss = inode->i_mapping;
1376 	mutex_unlock(&ctx->mapping_lock);
1377 	return nonseekable_open(inode, file);
1378 }
1379 
/* Close the MSS file; the last closer detaches the mapping pointer. */
static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};
1399 
/* Fault handler covering the entire problem-state mapping. */
static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
1424 
/* Open the psmap file; first opener records the address space in the ctx. */
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

/* Close the psmap file; last closer detaches the mapping pointer. */
static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
1457 
1458 
#if SPUFS_MMAP_4K
/* Fault handler for the MFC command-queue portion of problem state. */
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1487 
/*
 * Open the MFC file.  Only the process that owns the context may use it,
 * and only through a single inode reference at a time.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* refuse to open while other references to this inode exist */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

/* Close the MFC file; last closer detaches the mapping pointer. */
static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1520 
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* the SPU may no longer have a context attached */
	if (!ctx)
		return;

	/* wake any reader/writer sleeping on MFC queue space or tag status */
	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* translate queue space / tag completion into poll bits */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1549 
/*
 * Condition helper for spufs_wait() in spufs_mfc_read(): returns 1 (and
 * fills *status) when a waited-on tag group has completed, 0 to keep
 * sleeping after arming the tag-group completion interrupt.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1564 
/*
 * read(2) on the MFC file: return the 32-bit mask of completed tag groups
 * the caller was waiting for.  Non-blocking reads return -EAGAIN when no
 * waited-on group has completed yet.
 */
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	/* only whole-word reads are supported */
	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		/*
		 * NOTE(review): the error path below skips spu_release();
		 * this appears to rely on spufs_wait() dropping the context
		 * lock itself when it fails -- confirm against spufs.h.
		 */
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
1602 
/*
 * Validate a user-supplied MFC DMA command before queueing it:
 * opcode must be a plain put/get variant, LSA and EA must share the same
 * 16-byte sub-alignment, the size alignment must match its low nibble,
 * the total size is capped at 16k, and only the low 4 tag bits and
 * class 0 are allowed.  Returns 0 if acceptable, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/*
	 * The low nibble of the size determines the required LSA alignment;
	 * a nibble of 0 means a multiple of 16 bytes.  The error: label sits
	 * before default: on purpose, so failed alignment checks and
	 * unsupported size nibbles share the same diagnostic.
	 */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1672 
/*
 * Condition helper for spufs_wait() in spufs_mfc_write(): try to queue the
 * command, retrying once after arming the tag-group interrupt.  Returns 1
 * when done (queued, or failed with a non-EAGAIN code in *error) and 0 to
 * keep sleeping until queue space frees up.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1690 
/*
 * write(2) on the MFC file: validate and queue exactly one DMA command.
 * On success, records the command's tag in ctx->tagwait so a later read
 * or poll can report its completion, and returns sizeof(cmd).
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	/* only whole commands are accepted */
	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	/*
	 * NOTE(review): the spufs_wait() error paths below go to "out"
	 * without spu_release(); this appears to rely on spufs_wait()
	 * dropping the context lock on failure -- confirm against spufs.h.
	 */
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
1740 
/*
 * poll(2) on the MFC file: POLLOUT when the command queue has free slots,
 * POLLIN when a waited-on tag group has completed.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	/* NOTE(review): spu_release() presumably unlocks state_mutex,
	 * pairing with the manual mutex_lock above -- confirm. */
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1770 
/*
 * flush(2) on the MFC file.  The intended behavior (wait for all pending
 * tag groups to drain) is compiled out below because it hangs; the active
 * code only acquires and releases the context.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}
1796 
1797 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1798 {
1799 	struct inode *inode = file_inode(file);
1800 	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1801 	if (!err) {
1802 		inode_lock(inode);
1803 		err = spufs_mfc_flush(file, NULL);
1804 		inode_unlock(inode);
1805 	}
1806 	return err;
1807 }
1808 
/* fasync(2) on the MFC file: register for SIGIO delivery from the callback. */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
1828 
/* Set the SPU next program counter on a live context. */
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Read the SPU next program counter; locking via the DEFINE below. */
static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
1849 
/* Set the SPU decrementer value in the saved local-store context. */
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved decrementer value; saved-state locking via DEFINE below. */
static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1872 
/* Start or stop the decrementer by toggling the saved MFC control bit. */
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

/* Report SPU_DECR_STATUS_RUNNING iff the saved control bit is set. */
static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1900 
/* Set the SPU event mask in the saved local-store context. */
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved event mask; saved-state locking via the DEFINE below. */
static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1925 
1926 static u64 spufs_event_status_get(struct spu_context *ctx)
1927 {
1928 	struct spu_state *state = &ctx->csa;
1929 	u64 stat;
1930 	stat = state->spu_chnlcnt_RW[0];
1931 	if (stat)
1932 		return state->spu_chnldata_RW[0];
1933 	return 0;
1934 }
1935 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1936 		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1937 
/* Set the saved SRR0 (interrupt return address) in local-store state. */
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved SRR0; saved-state locking via the DEFINE below. */
static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1960 
1961 static u64 spufs_id_get(struct spu_context *ctx)
1962 {
1963 	u64 num;
1964 
1965 	if (ctx->state == SPU_STATE_RUNNABLE)
1966 		num = ctx->spu->number;
1967 	else
1968 		num = (unsigned int)-1;
1969 
1970 	return num;
1971 }
1972 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1973 		       SPU_ATTR_ACQUIRE)
1974 
/* Read the user-assigned object id (used by debuggers to tag contexts). */
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

/* Store a user-assigned object id; no locking (see FIXME above). */
static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

/* Read the saved local-store limit register. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1998 
/* Common open for the *_info files: stash the context for the read ops. */
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}
2006 
/* List the capabilities of this context: scheduling and single-stepping. */
static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2029 
/*
 * Copy the saved PU mailbox entry (if any) to the user buffer.  Caller
 * holds the context saved and the register lock; note the user copy can
 * fault here, so callers must not rely on staying atomic across it.
 */
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2043 
2044 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2045 				   size_t len, loff_t *pos)
2046 {
2047 	int ret;
2048 	struct spu_context *ctx = file->private_data;
2049 
2050 	if (!access_ok(VERIFY_WRITE, buf, len))
2051 		return -EFAULT;
2052 
2053 	ret = spu_acquire_saved(ctx);
2054 	if (ret)
2055 		return ret;
2056 	spin_lock(&ctx->csa.register_lock);
2057 	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
2058 	spin_unlock(&ctx->csa.register_lock);
2059 	spu_release_saved(ctx);
2060 
2061 	return ret;
2062 }
2063 
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

/*
 * Copy the saved interrupting mailbox entry (if any) to the user buffer.
 * Same caller contract and faulting caveat as __spufs_mbox_info_read().
 */
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
				char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2083 
2084 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2085 				   size_t len, loff_t *pos)
2086 {
2087 	struct spu_context *ctx = file->private_data;
2088 	int ret;
2089 
2090 	if (!access_ok(VERIFY_WRITE, buf, len))
2091 		return -EFAULT;
2092 
2093 	ret = spu_acquire_saved(ctx);
2094 	if (ret)
2095 		return ret;
2096 	spin_lock(&ctx->csa.register_lock);
2097 	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
2098 	spin_unlock(&ctx->csa.register_lock);
2099 	spu_release_saved(ctx);
2100 
2101 	return ret;
2102 }
2103 
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

/*
 * Copy the pending SPU write-mailbox entries to the user buffer.  The
 * free-slot count lives in bits 8-15 of mb_stat_R; the queue holds up to
 * four entries.  Same caller contract and faulting caveat as
 * __spufs_mbox_info_read().
 */
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}
2126 
2127 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2128 				   size_t len, loff_t *pos)
2129 {
2130 	struct spu_context *ctx = file->private_data;
2131 	int ret;
2132 
2133 	if (!access_ok(VERIFY_WRITE, buf, len))
2134 		return -EFAULT;
2135 
2136 	ret = spu_acquire_saved(ctx);
2137 	if (ret)
2138 		return ret;
2139 	spin_lock(&ctx->csa.register_lock);
2140 	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
2141 	spin_unlock(&ctx->csa.register_lock);
2142 	spu_release_saved(ctx);
2143 
2144 	return ret;
2145 }
2146 
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

/*
 * Assemble a spu_dma_info snapshot from the saved context (tag status
 * query, tag mask, channel data and all 16 SPU command-queue entries) and
 * copy it to the user buffer.  Same caller contract and faulting caveat
 * as __spufs_mbox_info_read().
 */
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
2178 
2179 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2180 			      size_t len, loff_t *pos)
2181 {
2182 	struct spu_context *ctx = file->private_data;
2183 	int ret;
2184 
2185 	if (!access_ok(VERIFY_WRITE, buf, len))
2186 		return -EFAULT;
2187 
2188 	ret = spu_acquire_saved(ctx);
2189 	if (ret)
2190 		return ret;
2191 	spin_lock(&ctx->csa.register_lock);
2192 	ret = __spufs_dma_info_read(ctx, buf, len, pos);
2193 	spin_unlock(&ctx->csa.register_lock);
2194 	spu_release_saved(ctx);
2195 
2196 	return ret;
2197 }
2198 
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};

/*
 * Assemble a spu_proxydma_info snapshot (query type/mask, tag status and
 * the 8 PU command-queue entries) from the saved context and copy it to
 * the user buffer.  Rejects buffers too small for a full record.  Same
 * caller contract and faulting caveat as __spufs_mbox_info_read().
 */
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
2235 
2236 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2237 				   size_t len, loff_t *pos)
2238 {
2239 	struct spu_context *ctx = file->private_data;
2240 	int ret;
2241 
2242 	ret = spu_acquire_saved(ctx);
2243 	if (ret)
2244 		return ret;
2245 	spin_lock(&ctx->csa.register_lock);
2246 	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2247 	spin_unlock(&ctx->csa.register_lock);
2248 	spu_release_saved(ctx);
2249 
2250 	return ret;
2251 }
2252 
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};

/* Show the thread id of the context's controlling thread. */
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Printable names for enum spu_utilization_state, indexed by state. */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
2282 
2283 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2284 		enum spu_utilization_state state)
2285 {
2286 	unsigned long long time = ctx->stats.times[state];
2287 
2288 	/*
2289 	 * In general, utilization statistics are updated by the controlling
2290 	 * thread as the spu context moves through various well defined
2291 	 * state transitions, but if the context is lazily loaded its
2292 	 * utilization statistics are not updated as the controlling thread
2293 	 * is not tightly coupled with the execution of the spu context.  We
2294 	 * calculate and apply the time delta from the last recorded state
2295 	 * of the spu context.
2296 	 */
2297 	if (ctx->spu && ctx->stats.util_state == state) {
2298 		time += ktime_get_ns() - ctx->stats.tstamp;
2299 	}
2300 
2301 	return time / NSEC_PER_MSEC;
2302 }
2303 
/* Total SLB faults: saved count plus the delta accrued on the current SPU. */
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

/* Total class-2 interrupts: saved count plus the delta on the current SPU. */
static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
2327 
2328 
/*
 * seq_file show callback for the per-context "stat" file.
 *
 * Emits one line: the current utilization state name followed by twelve
 * numeric fields.  The field order is userspace-visible ABI -- do not
 * reorder.  The context is acquired so the counters are read under
 * ctx->state_mutex.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
2356 
2357 static int spufs_stat_open(struct inode *inode, struct file *file)
2358 {
2359 	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2360 }
2361 
/* File operations for the per-context "stat" file (seq_file, read-only). */
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2368 
2369 static inline int spufs_switch_log_used(struct spu_context *ctx)
2370 {
2371 	return (ctx->switch_log->head - ctx->switch_log->tail) %
2372 		SWITCH_LOG_BUFSIZE;
2373 }
2374 
2375 static inline int spufs_switch_log_avail(struct spu_context *ctx)
2376 {
2377 	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2378 }
2379 
2380 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2381 {
2382 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2383 	int rc;
2384 
2385 	rc = spu_acquire(ctx);
2386 	if (rc)
2387 		return rc;
2388 
2389 	if (ctx->switch_log) {
2390 		rc = -EBUSY;
2391 		goto out;
2392 	}
2393 
2394 	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
2395 		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2396 		GFP_KERNEL);
2397 
2398 	if (!ctx->switch_log) {
2399 		rc = -ENOMEM;
2400 		goto out;
2401 	}
2402 
2403 	ctx->switch_log->head = ctx->switch_log->tail = 0;
2404 	init_waitqueue_head(&ctx->switch_log->wait);
2405 	rc = 0;
2406 
2407 out:
2408 	spu_release(ctx);
2409 	return rc;
2410 }
2411 
2412 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2413 {
2414 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2415 	int rc;
2416 
2417 	rc = spu_acquire(ctx);
2418 	if (rc)
2419 		return rc;
2420 
2421 	kfree(ctx->switch_log);
2422 	ctx->switch_log = NULL;
2423 	spu_release(ctx);
2424 
2425 	return 0;
2426 }
2427 
/*
 * Format the oldest unread switch log entry into @tbuf (size @n) as:
 * "<sec>.<nsec> <spu_id> <type> <val> <timebase>\n".
 *
 * Does not consume the entry -- the caller advances the tail itself.
 * Returns the snprintf() result, i.e. the length the line would have
 * had, which may exceed @n on truncation.
 *
 * NOTE(review): tv_sec is truncated to unsigned int for printing, so
 * the printed seconds wrap in 2106 -- presumably acceptable for a
 * trace log; confirm before relying on absolute timestamps.
 */
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}
2442 
2443 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2444 			     size_t len, loff_t *ppos)
2445 {
2446 	struct inode *inode = file_inode(file);
2447 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2448 	int error = 0, cnt = 0;
2449 
2450 	if (!buf)
2451 		return -EINVAL;
2452 
2453 	error = spu_acquire(ctx);
2454 	if (error)
2455 		return error;
2456 
2457 	while (cnt < len) {
2458 		char tbuf[128];
2459 		int width;
2460 
2461 		if (spufs_switch_log_used(ctx) == 0) {
2462 			if (cnt > 0) {
2463 				/* If there's data ready to go, we can
2464 				 * just return straight away */
2465 				break;
2466 
2467 			} else if (file->f_flags & O_NONBLOCK) {
2468 				error = -EAGAIN;
2469 				break;
2470 
2471 			} else {
2472 				/* spufs_wait will drop the mutex and
2473 				 * re-acquire, but since we're in read(), the
2474 				 * file cannot be _released (and so
2475 				 * ctx->switch_log is stable).
2476 				 */
2477 				error = spufs_wait(ctx->switch_log->wait,
2478 						spufs_switch_log_used(ctx) > 0);
2479 
2480 				/* On error, spufs_wait returns without the
2481 				 * state mutex held */
2482 				if (error)
2483 					return error;
2484 
2485 				/* We may have had entries read from underneath
2486 				 * us while we dropped the mutex in spufs_wait,
2487 				 * so re-check */
2488 				if (spufs_switch_log_used(ctx) == 0)
2489 					continue;
2490 			}
2491 		}
2492 
2493 		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2494 		if (width < len)
2495 			ctx->switch_log->tail =
2496 				(ctx->switch_log->tail + 1) %
2497 				 SWITCH_LOG_BUFSIZE;
2498 		else
2499 			/* If the record is greater than space available return
2500 			 * partial buffer (so far) */
2501 			break;
2502 
2503 		error = copy_to_user(buf + cnt, tbuf, width);
2504 		if (error)
2505 			break;
2506 		cnt += width;
2507 	}
2508 
2509 	spu_release(ctx);
2510 
2511 	return cnt == 0 ? error : cnt;
2512 }
2513 
2514 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2515 {
2516 	struct inode *inode = file_inode(file);
2517 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2518 	unsigned int mask = 0;
2519 	int rc;
2520 
2521 	poll_wait(file, &ctx->switch_log->wait, wait);
2522 
2523 	rc = spu_acquire(ctx);
2524 	if (rc)
2525 		return rc;
2526 
2527 	if (spufs_switch_log_used(ctx) > 0)
2528 		mask |= POLLIN;
2529 
2530 	spu_release(ctx);
2531 
2532 	return mask;
2533 }
2534 
/* File operations for the per-context "switch_log" file. */
static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
2542 
2543 /**
2544  * Log a context switch event to a switch log reader.
2545  *
2546  * Must be called with ctx->state_mutex held.
2547  */
2548 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2549 		u32 type, u32 val)
2550 {
2551 	if (!ctx->switch_log)
2552 		return;
2553 
2554 	if (spufs_switch_log_avail(ctx) > 1) {
2555 		struct switch_log_entry *p;
2556 
2557 		p = ctx->switch_log->log + ctx->switch_log->head;
2558 		ktime_get_ts(&p->tstamp);
2559 		p->timebase = get_tb();
2560 		p->spu_id = spu ? spu->number : -1;
2561 		p->type = type;
2562 		p->val = val;
2563 
2564 		ctx->switch_log->head =
2565 			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2566 	}
2567 
2568 	wake_up(&ctx->switch_log->wait);
2569 }
2570 
/*
 * seq_file show callback for the debug ".ctx" file: one line summarizing
 * the context's scheduling state and key MFC/status registers.
 *
 * If the context is loaded on an SPU, MFC_CNTL is read from the live
 * privileged-2 MMIO area under the SPU's register_lock; otherwise the
 * value saved in the context save area (csa) is used.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	/* 'S' = saved, 'R' = runnable; 'q' flags presence on the run queue;
	 * spu(-1) means the context is not loaded on any physical SPU. */
	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
2610 
2611 static int spufs_ctx_open(struct inode *inode, struct file *file)
2612 {
2613 	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2614 }
2615 
/* File operations for the debug ".ctx" file (seq_file, read-only). */
static const struct file_operations spufs_ctx_fops = {
	.open           = spufs_ctx_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
2622 
/*
 * Files created in each context directory of a normally scheduled
 * context.  Entries are { name, fops, mode, size }; a missing size
 * means 0.  Terminated by an empty entry.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
2663 
/*
 * Reduced file set for non-scheduled (SPU_CREATE_NOSCHED) contexts --
 * files that require a save/restore of the context are omitted, and the
 * signal files use the nosched variants.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2688 
/* Extra debug file added to context directories. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
2693 
/*
 * Table of context files dumped into SPU core dump notes.  Each entry
 * supplies either a raw read callback or a u64 getter, plus the note
 * size.  NOTE(review): the size 19 on the getter entries presumably
 * matches a "0x%016llx\n" rendering of the u64 (2 + 16 + 1 bytes) by
 * the coredump writer -- confirm against the coredump code.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};
2717