1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #undef DEBUG
24 
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
32 
33 #include <asm/io.h>
34 #include <asm/semaphore.h>
35 #include <asm/spu.h>
36 #include <asm/spu_info.h>
37 #include <asm/uaccess.h>
38 
39 #include "spufs.h"
40 
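/*
 * Note (editorial): several problem state areas are only 4k bytes long;
 * the files mapping them provide mmap only when the kernel page size is
 * 4k as well, which is what this macro tests.
 */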
41 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
42 
43 
44 static int
45 spufs_mem_open(struct inode *inode, struct file *file)
46 {
47 	struct spufs_inode_info *i = SPUFS_I(inode);
48 	struct spu_context *ctx = i->i_ctx;
49 
50 	mutex_lock(&ctx->mapping_lock);
51 	file->private_data = ctx;
52 	if (!i->i_openers++)
53 		ctx->local_store = inode->i_mapping;
54 	mutex_unlock(&ctx->mapping_lock);
55 	return 0;
56 }
57 
58 static int
59 spufs_mem_release(struct inode *inode, struct file *file)
60 {
61 	struct spufs_inode_info *i = SPUFS_I(inode);
62 	struct spu_context *ctx = i->i_ctx;
63 
64 	mutex_lock(&ctx->mapping_lock);
65 	if (!--i->i_openers)
66 		ctx->local_store = NULL;
67 	mutex_unlock(&ctx->mapping_lock);
68 	return 0;
69 }
70 
71 static ssize_t
72 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73 			size_t size, loff_t *pos)
74 {
75 	char *local_store = ctx->ops->get_ls(ctx);
76 	return simple_read_from_buffer(buffer, size, pos, local_store,
77 					LS_SIZE);
78 }
79 
80 static ssize_t
81 spufs_mem_read(struct file *file, char __user *buffer,
82 				size_t size, loff_t *pos)
83 {
84 	struct spu_context *ctx = file->private_data;
85 	ssize_t ret;
86 
87 	spu_acquire(ctx);
88 	ret = __spufs_mem_read(ctx, buffer, size, pos);
89 	spu_release(ctx);
90 	return ret;
91 }
92 
93 static ssize_t
94 spufs_mem_write(struct file *file, const char __user *buffer,
95 					size_t size, loff_t *ppos)
96 {
97 	struct spu_context *ctx = file->private_data;
98 	char *local_store;
99 	loff_t pos = *ppos;
100 	int ret;
101 
102 	if (pos < 0)
103 		return -EINVAL;
104 	if (pos > LS_SIZE)
105 		return -EFBIG;
106 	if (size > LS_SIZE - pos)
107 		size = LS_SIZE - pos;
108 
109 	spu_acquire(ctx);
110 	local_store = ctx->ops->get_ls(ctx);
111 	ret = copy_from_user(local_store + pos, buffer, size);
112 	spu_release(ctx);
113 
114 	if (ret)
115 		return -EFAULT;
116 	*ppos = pos + size;
117 	return size;
118 }
119 
120 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
121 					  unsigned long address)
122 {
123 	struct spu_context *ctx	= vma->vm_file->private_data;
124 	unsigned long pfn, offset, addr0 = address;
125 #ifdef CONFIG_SPU_FS_64K_LS
126 	struct spu_state *csa = &ctx->csa;
127 	int psize;
128 
129 	/* Check what page size we are using */
130 	psize = get_slice_psize(vma->vm_mm, address);
131 
132 	/* Some sanity checking */
133 	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
134 
	/* 64K pages are in use, so align the address to a 64K boundary */
136 	if (csa->use_big_pages) {
137 		BUG_ON(vma->vm_start & 0xffff);
138 		address &= ~0xfffful;
139 	}
140 #endif /* CONFIG_SPU_FS_64K_LS */
141 
142 	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
143 	if (offset >= LS_SIZE)
144 		return NOPFN_SIGBUS;
145 
146 	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
147 		 addr0, address, offset);
148 
149 	spu_acquire(ctx);
150 
151 	if (ctx->state == SPU_STATE_SAVED) {
152 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
153 							& ~_PAGE_NO_CACHE);
154 		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
155 	} else {
156 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
157 					     | _PAGE_NO_CACHE);
158 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
159 	}
160 	vm_insert_pfn(vma, address, pfn);
161 
162 	spu_release(ctx);
163 
164 	return NOPFN_REFAULT;
165 }
166 
167 
168 static struct vm_operations_struct spufs_mem_mmap_vmops = {
169 	.nopfn = spufs_mem_mmap_nopfn,
170 };
171 
172 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
173 {
174 #ifdef CONFIG_SPU_FS_64K_LS
175 	struct spu_context	*ctx = file->private_data;
176 	struct spu_state	*csa = &ctx->csa;
177 
178 	/* Sanity check VMA alignment */
179 	if (csa->use_big_pages) {
180 		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
181 			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
182 			 vma->vm_pgoff);
183 		if (vma->vm_start & 0xffff)
184 			return -EINVAL;
185 		if (vma->vm_pgoff & 0xf)
186 			return -EINVAL;
187 	}
188 #endif /* CONFIG_SPU_FS_64K_LS */
189 
190 	if (!(vma->vm_flags & VM_SHARED))
191 		return -EINVAL;
192 
193 	vma->vm_flags |= VM_IO | VM_PFNMAP;
194 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
195 				     | _PAGE_NO_CACHE);
196 
197 	vma->vm_ops = &spufs_mem_mmap_vmops;
198 	return 0;
199 }
200 
201 #ifdef CONFIG_SPU_FS_64K_LS
202 unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
203 				      unsigned long len, unsigned long pgoff,
204 				      unsigned long flags)
205 {
206 	struct spu_context	*ctx = file->private_data;
207 	struct spu_state	*csa = &ctx->csa;
208 
	/* If not using big pages, fall back to the mm's normal get_unmapped_area */
210 	if (!csa->use_big_pages)
211 		return current->mm->get_unmapped_area(file, addr, len,
212 						      pgoff, flags);
213 
	/* Otherwise, try to obtain a 64K page slice */
215 	return slice_get_unmapped_area(addr, len, flags,
216 				       MMU_PAGE_64K, 1, 0);
217 }
218 #endif /* CONFIG_SPU_FS_64K_LS */
219 
220 static const struct file_operations spufs_mem_fops = {
221 	.open			= spufs_mem_open,
222 	.release		= spufs_mem_release,
223 	.read			= spufs_mem_read,
224 	.write			= spufs_mem_write,
225 	.llseek			= generic_file_llseek,
226 	.mmap			= spufs_mem_mmap,
227 #ifdef CONFIG_SPU_FS_64K_LS
228 	.get_unmapped_area	= spufs_get_unmapped_area,
229 #endif
230 };
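
/*
 * Illustrative user-space view of the "mem" file (a sketch only; the
 * context directory "/spu/myctx" is hypothetical): local store can be
 * read, written and mmapped like a regular file of LS_SIZE (256k) bytes.
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	u32 word;
 *	pread(fd, &word, 4, 0x100);
 *	void *ls = mmap(NULL, 0x40000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */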
231 
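/*
 * Common nopfn handler for mappings of SPU problem state: translate the
 * faulting address into an offset within the area, make sure the context
 * is loaded onto a physical SPU, and insert the matching problem state
 * page into the mapping.
 */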
232 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
233 				    unsigned long address,
234 				    unsigned long ps_offs,
235 				    unsigned long ps_size)
236 {
237 	struct spu_context *ctx = vma->vm_file->private_data;
238 	unsigned long area, offset = address - vma->vm_start;
239 	int ret;
240 
241 	offset += vma->vm_pgoff << PAGE_SHIFT;
242 	if (offset >= ps_size)
243 		return NOPFN_SIGBUS;
244 
	/*
	 * An error here usually means a signal; we might want to test
	 * the error code more precisely, though.
	 */
248 	ret = spu_acquire_runnable(ctx, 0);
249 	if (ret)
250 		return NOPFN_REFAULT;
251 
252 	area = ctx->spu->problem_phys + ps_offs;
253 	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
254 	spu_release(ctx);
255 
256 	return NOPFN_REFAULT;
257 }
258 
259 #if SPUFS_MMAP_4K
260 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
261 					   unsigned long address)
262 {
263 	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
264 }
265 
266 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
267 	.nopfn = spufs_cntl_mmap_nopfn,
268 };
269 
270 /*
271  * mmap support for problem state control area [0x4000 - 0x4fff].
272  */
273 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
274 {
275 	if (!(vma->vm_flags & VM_SHARED))
276 		return -EINVAL;
277 
278 	vma->vm_flags |= VM_IO | VM_PFNMAP;
279 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
280 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
281 
282 	vma->vm_ops = &spufs_cntl_mmap_vmops;
283 	return 0;
284 }
285 #else /* SPUFS_MMAP_4K */
286 #define spufs_cntl_mmap NULL
287 #endif /* !SPUFS_MMAP_4K */
288 
289 static u64 spufs_cntl_get(void *data)
290 {
291 	struct spu_context *ctx = data;
292 	u64 val;
293 
294 	spu_acquire(ctx);
295 	val = ctx->ops->status_read(ctx);
296 	spu_release(ctx);
297 
298 	return val;
299 }
300 
301 static void spufs_cntl_set(void *data, u64 val)
302 {
303 	struct spu_context *ctx = data;
304 
305 	spu_acquire(ctx);
306 	ctx->ops->runcntl_write(ctx, val);
307 	spu_release(ctx);
308 }
309 
310 static int spufs_cntl_open(struct inode *inode, struct file *file)
311 {
312 	struct spufs_inode_info *i = SPUFS_I(inode);
313 	struct spu_context *ctx = i->i_ctx;
314 
315 	mutex_lock(&ctx->mapping_lock);
316 	file->private_data = ctx;
317 	if (!i->i_openers++)
318 		ctx->cntl = inode->i_mapping;
319 	mutex_unlock(&ctx->mapping_lock);
320 	return simple_attr_open(inode, file, spufs_cntl_get,
321 					spufs_cntl_set, "0x%08lx");
322 }
323 
324 static int
325 spufs_cntl_release(struct inode *inode, struct file *file)
326 {
327 	struct spufs_inode_info *i = SPUFS_I(inode);
328 	struct spu_context *ctx = i->i_ctx;
329 
330 	simple_attr_close(inode, file);
331 
332 	mutex_lock(&ctx->mapping_lock);
333 	if (!--i->i_openers)
334 		ctx->cntl = NULL;
335 	mutex_unlock(&ctx->mapping_lock);
336 	return 0;
337 }
338 
339 static const struct file_operations spufs_cntl_fops = {
340 	.open = spufs_cntl_open,
341 	.release = spufs_cntl_release,
342 	.read = simple_attr_read,
343 	.write = simple_attr_write,
344 	.mmap = spufs_cntl_mmap,
345 };
346 
347 static int
348 spufs_regs_open(struct inode *inode, struct file *file)
349 {
350 	struct spufs_inode_info *i = SPUFS_I(inode);
351 	file->private_data = i->i_ctx;
352 	return 0;
353 }
354 
355 static ssize_t
356 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
357 			size_t size, loff_t *pos)
358 {
359 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
360 	return simple_read_from_buffer(buffer, size, pos,
361 				      lscsa->gprs, sizeof lscsa->gprs);
362 }
363 
364 static ssize_t
365 spufs_regs_read(struct file *file, char __user *buffer,
366 		size_t size, loff_t *pos)
367 {
368 	int ret;
369 	struct spu_context *ctx = file->private_data;
370 
371 	spu_acquire_saved(ctx);
372 	ret = __spufs_regs_read(ctx, buffer, size, pos);
373 	spu_release_saved(ctx);
374 	return ret;
375 }
376 
377 static ssize_t
378 spufs_regs_write(struct file *file, const char __user *buffer,
379 		 size_t size, loff_t *pos)
380 {
381 	struct spu_context *ctx = file->private_data;
382 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
383 	int ret;
384 
385 	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
386 	if (size <= 0)
387 		return -EFBIG;
388 	*pos += size;
389 
390 	spu_acquire_saved(ctx);
391 
	ret = copy_from_user((char *)lscsa->gprs + *pos - size,
393 			     buffer, size) ? -EFAULT : size;
394 
395 	spu_release_saved(ctx);
396 	return ret;
397 }
398 
399 static const struct file_operations spufs_regs_fops = {
400 	.open	 = spufs_regs_open,
401 	.read    = spufs_regs_read,
402 	.write   = spufs_regs_write,
403 	.llseek  = generic_file_llseek,
404 };
405 
406 static ssize_t
407 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
408 			size_t size, loff_t * pos)
409 {
410 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
411 	return simple_read_from_buffer(buffer, size, pos,
412 				      &lscsa->fpcr, sizeof(lscsa->fpcr));
413 }
414 
415 static ssize_t
416 spufs_fpcr_read(struct file *file, char __user * buffer,
417 		size_t size, loff_t * pos)
418 {
419 	int ret;
420 	struct spu_context *ctx = file->private_data;
421 
422 	spu_acquire_saved(ctx);
423 	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
424 	spu_release_saved(ctx);
425 	return ret;
426 }
427 
428 static ssize_t
429 spufs_fpcr_write(struct file *file, const char __user * buffer,
430 		 size_t size, loff_t * pos)
431 {
432 	struct spu_context *ctx = file->private_data;
433 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
434 	int ret;
435 
436 	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
437 	if (size <= 0)
438 		return -EFBIG;
439 	*pos += size;
440 
441 	spu_acquire_saved(ctx);
442 
443 	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
444 			     buffer, size) ? -EFAULT : size;
445 
446 	spu_release_saved(ctx);
447 	return ret;
448 }
449 
450 static const struct file_operations spufs_fpcr_fops = {
451 	.open = spufs_regs_open,
452 	.read = spufs_fpcr_read,
453 	.write = spufs_fpcr_write,
454 	.llseek = generic_file_llseek,
455 };
456 
457 /* generic open function for all pipe-like files */
458 static int spufs_pipe_open(struct inode *inode, struct file *file)
459 {
460 	struct spufs_inode_info *i = SPUFS_I(inode);
461 	file->private_data = i->i_ctx;
462 
463 	return nonseekable_open(inode, file);
464 }
465 
466 /*
467  * Read as many bytes from the mailbox as possible, until
468  * one of the conditions becomes true:
469  *
470  * - no more data available in the mailbox
471  * - end of the user provided buffer
472  * - end of the mapped area
473  */
474 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
475 			size_t len, loff_t *pos)
476 {
477 	struct spu_context *ctx = file->private_data;
478 	u32 mbox_data, __user *udata;
479 	ssize_t count;
480 
481 	if (len < 4)
482 		return -EINVAL;
483 
484 	if (!access_ok(VERIFY_WRITE, buf, len))
485 		return -EFAULT;
486 
487 	udata = (void __user *)buf;
488 
489 	spu_acquire(ctx);
490 	for (count = 0; (count + 4) <= len; count += 4, udata++) {
491 		int ret;
492 		ret = ctx->ops->mbox_read(ctx, &mbox_data);
493 		if (ret == 0)
494 			break;
495 
496 		/*
497 		 * at the end of the mapped area, we can fault
498 		 * but still need to return the data we have
499 		 * read successfully so far.
500 		 */
501 		ret = __put_user(mbox_data, udata);
502 		if (ret) {
503 			if (!count)
504 				count = -EFAULT;
505 			break;
506 		}
507 	}
508 	spu_release(ctx);
509 
510 	if (!count)
511 		count = -EAGAIN;
512 
513 	return count;
514 }
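
/*
 * Illustrative use (a sketch; the path is hypothetical): mbox reads
 * never block and fail with EAGAIN while the mailbox is empty.
 *
 *	u32 data;
 *	int fd = open("/spu/myctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, 4);
 *	if (n == 4)
 *		... consume data ...
 *	else if (n < 0 && errno == EAGAIN)
 *		... mailbox empty, try again later ...
 */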
515 
516 static const struct file_operations spufs_mbox_fops = {
517 	.open	= spufs_pipe_open,
518 	.read	= spufs_mbox_read,
519 };
520 
521 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
522 			size_t len, loff_t *pos)
523 {
524 	struct spu_context *ctx = file->private_data;
525 	u32 mbox_stat;
526 
527 	if (len < 4)
528 		return -EINVAL;
529 
530 	spu_acquire(ctx);
531 
532 	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
533 
534 	spu_release(ctx);
535 
536 	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
537 		return -EFAULT;
538 
539 	return 4;
540 }
541 
542 static const struct file_operations spufs_mbox_stat_fops = {
543 	.open	= spufs_pipe_open,
544 	.read	= spufs_mbox_stat_read,
545 };
546 
547 /* low-level ibox access function */
548 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
549 {
550 	return ctx->ops->ibox_read(ctx, data);
551 }
552 
553 static int spufs_ibox_fasync(int fd, struct file *file, int on)
554 {
555 	struct spu_context *ctx = file->private_data;
556 
557 	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
558 }
559 
560 /* interrupt-level ibox callback function. */
561 void spufs_ibox_callback(struct spu *spu)
562 {
563 	struct spu_context *ctx = spu->ctx;
564 
565 	wake_up_all(&ctx->ibox_wq);
566 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
567 }
568 
569 /*
570  * Read as many bytes from the interrupt mailbox as possible, until
571  * one of the conditions becomes true:
572  *
573  * - no more data available in the mailbox
574  * - end of the user provided buffer
575  * - end of the mapped area
576  *
577  * If the file is opened without O_NONBLOCK, we wait here until
578  * any data is available, but return when we have been able to
579  * read something.
580  */
581 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
582 			size_t len, loff_t *pos)
583 {
584 	struct spu_context *ctx = file->private_data;
585 	u32 ibox_data, __user *udata;
586 	ssize_t count;
587 
588 	if (len < 4)
589 		return -EINVAL;
590 
591 	if (!access_ok(VERIFY_WRITE, buf, len))
592 		return -EFAULT;
593 
594 	udata = (void __user *)buf;
595 
596 	spu_acquire(ctx);
597 
598 	/* wait only for the first element */
599 	count = 0;
600 	if (file->f_flags & O_NONBLOCK) {
601 		if (!spu_ibox_read(ctx, &ibox_data))
602 			count = -EAGAIN;
603 	} else {
604 		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
605 	}
606 	if (count)
607 		goto out;
608 
609 	/* if we can't write at all, return -EFAULT */
610 	count = __put_user(ibox_data, udata);
611 	if (count)
612 		goto out;
613 
614 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
615 		int ret;
616 		ret = ctx->ops->ibox_read(ctx, &ibox_data);
617 		if (ret == 0)
618 			break;
619 		/*
620 		 * at the end of the mapped area, we can fault
621 		 * but still need to return the data we have
622 		 * read successfully so far.
623 		 */
624 		ret = __put_user(ibox_data, udata);
625 		if (ret)
626 			break;
627 	}
628 
629 out:
630 	spu_release(ctx);
631 
632 	return count;
633 }
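
/*
 * Illustrative use of the semantics described above (a sketch; the path
 * is hypothetical): without O_NONBLOCK the read blocks until at least
 * one word arrives, and any further words are copied only if they are
 * already available.
 *
 *	u32 buf[8];
 *	int fd = open("/spu/myctx/ibox", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	... on success, n / 4 words were received ...
 */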
634 
635 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
636 {
637 	struct spu_context *ctx = file->private_data;
638 	unsigned int mask;
639 
640 	poll_wait(file, &ctx->ibox_wq, wait);
641 
642 	spu_acquire(ctx);
643 	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
644 	spu_release(ctx);
645 
646 	return mask;
647 }
648 
649 static const struct file_operations spufs_ibox_fops = {
650 	.open	= spufs_pipe_open,
651 	.read	= spufs_ibox_read,
652 	.poll	= spufs_ibox_poll,
653 	.fasync	= spufs_ibox_fasync,
654 };
655 
656 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
657 			size_t len, loff_t *pos)
658 {
659 	struct spu_context *ctx = file->private_data;
660 	u32 ibox_stat;
661 
662 	if (len < 4)
663 		return -EINVAL;
664 
665 	spu_acquire(ctx);
666 	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
667 	spu_release(ctx);
668 
669 	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
670 		return -EFAULT;
671 
672 	return 4;
673 }
674 
675 static const struct file_operations spufs_ibox_stat_fops = {
676 	.open	= spufs_pipe_open,
677 	.read	= spufs_ibox_stat_read,
678 };
679 
680 /* low-level mailbox write */
681 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
682 {
683 	return ctx->ops->wbox_write(ctx, data);
684 }
685 
686 static int spufs_wbox_fasync(int fd, struct file *file, int on)
687 {
688 	struct spu_context *ctx = file->private_data;
689 	int ret;
690 
691 	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
692 
693 	return ret;
694 }
695 
696 /* interrupt-level wbox callback function. */
697 void spufs_wbox_callback(struct spu *spu)
698 {
699 	struct spu_context *ctx = spu->ctx;
700 
701 	wake_up_all(&ctx->wbox_wq);
702 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
703 }
704 
705 /*
706  * Write as many bytes to the interrupt mailbox as possible, until
707  * one of the conditions becomes true:
708  *
709  * - the mailbox is full
710  * - end of the user provided buffer
711  * - end of the mapped area
712  *
713  * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
715  * write something.
716  */
717 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
718 			size_t len, loff_t *pos)
719 {
720 	struct spu_context *ctx = file->private_data;
721 	u32 wbox_data, __user *udata;
722 	ssize_t count;
723 
724 	if (len < 4)
725 		return -EINVAL;
726 
727 	udata = (void __user *)buf;
728 	if (!access_ok(VERIFY_READ, buf, len))
729 		return -EFAULT;
730 
731 	if (__get_user(wbox_data, udata))
732 		return -EFAULT;
733 
734 	spu_acquire(ctx);
735 
736 	/*
737 	 * make sure we can at least write one element, by waiting
738 	 * in case of !O_NONBLOCK
739 	 */
740 	count = 0;
741 	if (file->f_flags & O_NONBLOCK) {
742 		if (!spu_wbox_write(ctx, wbox_data))
743 			count = -EAGAIN;
744 	} else {
745 		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
746 	}
747 
748 	if (count)
749 		goto out;
750 
	/* write as much as possible */
752 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
753 		int ret;
754 		ret = __get_user(wbox_data, udata);
755 		if (ret)
756 			break;
757 
758 		ret = spu_wbox_write(ctx, wbox_data);
759 		if (ret == 0)
760 			break;
761 	}
762 
763 out:
764 	spu_release(ctx);
765 	return count;
766 }
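
/*
 * Illustrative counterpart for writing (a sketch; the path is
 * hypothetical): the first word may block unless O_NONBLOCK is set,
 * further words are written only while the mailbox has space.
 *
 *	u32 cmds[2] = { 0x10, 0x20 };
 *	int fd = open("/spu/myctx/wbox", O_WRONLY);
 *	ssize_t n = write(fd, cmds, sizeof(cmds));
 */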
767 
768 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
769 {
770 	struct spu_context *ctx = file->private_data;
771 	unsigned int mask;
772 
773 	poll_wait(file, &ctx->wbox_wq, wait);
774 
775 	spu_acquire(ctx);
776 	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
777 	spu_release(ctx);
778 
779 	return mask;
780 }
781 
782 static const struct file_operations spufs_wbox_fops = {
783 	.open	= spufs_pipe_open,
784 	.write	= spufs_wbox_write,
785 	.poll	= spufs_wbox_poll,
786 	.fasync	= spufs_wbox_fasync,
787 };
788 
789 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
790 			size_t len, loff_t *pos)
791 {
792 	struct spu_context *ctx = file->private_data;
793 	u32 wbox_stat;
794 
795 	if (len < 4)
796 		return -EINVAL;
797 
798 	spu_acquire(ctx);
799 	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
800 	spu_release(ctx);
801 
802 	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
803 		return -EFAULT;
804 
805 	return 4;
806 }
807 
808 static const struct file_operations spufs_wbox_stat_fops = {
809 	.open	= spufs_pipe_open,
810 	.read	= spufs_wbox_stat_read,
811 };
812 
813 static int spufs_signal1_open(struct inode *inode, struct file *file)
814 {
815 	struct spufs_inode_info *i = SPUFS_I(inode);
816 	struct spu_context *ctx = i->i_ctx;
817 
818 	mutex_lock(&ctx->mapping_lock);
819 	file->private_data = ctx;
820 	if (!i->i_openers++)
821 		ctx->signal1 = inode->i_mapping;
822 	mutex_unlock(&ctx->mapping_lock);
823 	return nonseekable_open(inode, file);
824 }
825 
826 static int
827 spufs_signal1_release(struct inode *inode, struct file *file)
828 {
829 	struct spufs_inode_info *i = SPUFS_I(inode);
830 	struct spu_context *ctx = i->i_ctx;
831 
832 	mutex_lock(&ctx->mapping_lock);
833 	if (!--i->i_openers)
834 		ctx->signal1 = NULL;
835 	mutex_unlock(&ctx->mapping_lock);
836 	return 0;
837 }
838 
839 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
840 			size_t len, loff_t *pos)
841 {
842 	int ret = 0;
843 	u32 data;
844 
845 	if (len < 4)
846 		return -EINVAL;
847 
848 	if (ctx->csa.spu_chnlcnt_RW[3]) {
849 		data = ctx->csa.spu_chnldata_RW[3];
850 		ret = 4;
851 	}
852 
853 	if (!ret)
854 		goto out;
855 
856 	if (copy_to_user(buf, &data, 4))
857 		return -EFAULT;
858 
859 out:
860 	return ret;
861 }
862 
863 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
864 			size_t len, loff_t *pos)
865 {
866 	int ret;
867 	struct spu_context *ctx = file->private_data;
868 
869 	spu_acquire_saved(ctx);
870 	ret = __spufs_signal1_read(ctx, buf, len, pos);
871 	spu_release_saved(ctx);
872 
873 	return ret;
874 }
875 
876 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
877 			size_t len, loff_t *pos)
878 {
879 	struct spu_context *ctx;
880 	u32 data;
881 
882 	ctx = file->private_data;
883 
884 	if (len < 4)
885 		return -EINVAL;
886 
887 	if (copy_from_user(&data, buf, 4))
888 		return -EFAULT;
889 
890 	spu_acquire(ctx);
891 	ctx->ops->signal1_write(ctx, data);
892 	spu_release(ctx);
893 
894 	return 4;
895 }
896 
897 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
898 					      unsigned long address)
899 {
900 #if PAGE_SIZE == 0x1000
901 	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
902 #elif PAGE_SIZE == 0x10000
	/*
	 * For 64k pages, both signal1 and signal2 can be used to mmap
	 * the whole signal 1 and 2 area.
	 */
906 	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
907 #else
908 #error unsupported page size
909 #endif
910 }
911 
912 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
913 	.nopfn = spufs_signal1_mmap_nopfn,
914 };
915 
916 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
917 {
918 	if (!(vma->vm_flags & VM_SHARED))
919 		return -EINVAL;
920 
921 	vma->vm_flags |= VM_IO | VM_PFNMAP;
922 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
923 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
924 
925 	vma->vm_ops = &spufs_signal1_mmap_vmops;
926 	return 0;
927 }
928 
929 static const struct file_operations spufs_signal1_fops = {
930 	.open = spufs_signal1_open,
931 	.release = spufs_signal1_release,
932 	.read = spufs_signal1_read,
933 	.write = spufs_signal1_write,
934 	.mmap = spufs_signal1_mmap,
935 };
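
/*
 * Illustrative use (a sketch; the path is hypothetical): writing a
 * 32-bit value raises signal notification 1. Whether the value is ORed
 * into or replaces the current register contents depends on the mode
 * configured through signal1_type.
 *
 *	u32 sig = 1;
 *	int fd = open("/spu/myctx/signal1", O_WRONLY);
 *	write(fd, &sig, 4);
 */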
936 
937 static const struct file_operations spufs_signal1_nosched_fops = {
938 	.open = spufs_signal1_open,
939 	.release = spufs_signal1_release,
940 	.write = spufs_signal1_write,
941 	.mmap = spufs_signal1_mmap,
942 };
943 
944 static int spufs_signal2_open(struct inode *inode, struct file *file)
945 {
946 	struct spufs_inode_info *i = SPUFS_I(inode);
947 	struct spu_context *ctx = i->i_ctx;
948 
949 	mutex_lock(&ctx->mapping_lock);
950 	file->private_data = ctx;
951 	if (!i->i_openers++)
952 		ctx->signal2 = inode->i_mapping;
953 	mutex_unlock(&ctx->mapping_lock);
954 	return nonseekable_open(inode, file);
955 }
956 
957 static int
958 spufs_signal2_release(struct inode *inode, struct file *file)
959 {
960 	struct spufs_inode_info *i = SPUFS_I(inode);
961 	struct spu_context *ctx = i->i_ctx;
962 
963 	mutex_lock(&ctx->mapping_lock);
964 	if (!--i->i_openers)
965 		ctx->signal2 = NULL;
966 	mutex_unlock(&ctx->mapping_lock);
967 	return 0;
968 }
969 
970 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
971 			size_t len, loff_t *pos)
972 {
973 	int ret = 0;
974 	u32 data;
975 
976 	if (len < 4)
977 		return -EINVAL;
978 
979 	if (ctx->csa.spu_chnlcnt_RW[4]) {
980 		data =  ctx->csa.spu_chnldata_RW[4];
981 		ret = 4;
982 	}
983 
984 	if (!ret)
985 		goto out;
986 
987 	if (copy_to_user(buf, &data, 4))
988 		return -EFAULT;
989 
990 out:
991 	return ret;
992 }
993 
994 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
995 			size_t len, loff_t *pos)
996 {
997 	struct spu_context *ctx = file->private_data;
998 	int ret;
999 
1000 	spu_acquire_saved(ctx);
1001 	ret = __spufs_signal2_read(ctx, buf, len, pos);
1002 	spu_release_saved(ctx);
1003 
1004 	return ret;
1005 }
1006 
1007 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1008 			size_t len, loff_t *pos)
1009 {
1010 	struct spu_context *ctx;
1011 	u32 data;
1012 
1013 	ctx = file->private_data;
1014 
1015 	if (len < 4)
1016 		return -EINVAL;
1017 
1018 	if (copy_from_user(&data, buf, 4))
1019 		return -EFAULT;
1020 
1021 	spu_acquire(ctx);
1022 	ctx->ops->signal2_write(ctx, data);
1023 	spu_release(ctx);
1024 
1025 	return 4;
1026 }
1027 
1028 #if SPUFS_MMAP_4K
1029 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
1030 					      unsigned long address)
1031 {
1032 #if PAGE_SIZE == 0x1000
1033 	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
1034 #elif PAGE_SIZE == 0x10000
	/*
	 * For 64k pages, both signal1 and signal2 can be used to mmap
	 * the whole signal 1 and 2 area.
	 */
1038 	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
1039 #else
1040 #error unsupported page size
1041 #endif
1042 }
1043 
1044 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1045 	.nopfn = spufs_signal2_mmap_nopfn,
1046 };
1047 
1048 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1049 {
1050 	if (!(vma->vm_flags & VM_SHARED))
1051 		return -EINVAL;
1052 
1053 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1054 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1055 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1056 
1057 	vma->vm_ops = &spufs_signal2_mmap_vmops;
1058 	return 0;
1059 }
1060 #else /* SPUFS_MMAP_4K */
1061 #define spufs_signal2_mmap NULL
1062 #endif /* !SPUFS_MMAP_4K */
1063 
1064 static const struct file_operations spufs_signal2_fops = {
1065 	.open = spufs_signal2_open,
1066 	.release = spufs_signal2_release,
1067 	.read = spufs_signal2_read,
1068 	.write = spufs_signal2_write,
1069 	.mmap = spufs_signal2_mmap,
1070 };
1071 
1072 static const struct file_operations spufs_signal2_nosched_fops = {
1073 	.open = spufs_signal2_open,
1074 	.release = spufs_signal2_release,
1075 	.write = spufs_signal2_write,
1076 	.mmap = spufs_signal2_mmap,
1077 };
1078 
1079 static void spufs_signal1_type_set(void *data, u64 val)
1080 {
1081 	struct spu_context *ctx = data;
1082 
1083 	spu_acquire(ctx);
1084 	ctx->ops->signal1_type_set(ctx, val);
1085 	spu_release(ctx);
1086 }
1087 
1088 static u64 __spufs_signal1_type_get(void *data)
1089 {
1090 	struct spu_context *ctx = data;
1091 	return ctx->ops->signal1_type_get(ctx);
1092 }
1093 
1094 static u64 spufs_signal1_type_get(void *data)
1095 {
1096 	struct spu_context *ctx = data;
1097 	u64 ret;
1098 
1099 	spu_acquire(ctx);
1100 	ret = __spufs_signal1_type_get(data);
1101 	spu_release(ctx);
1102 
1103 	return ret;
1104 }
1105 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1106 					spufs_signal1_type_set, "%llu");
1107 
1108 static void spufs_signal2_type_set(void *data, u64 val)
1109 {
1110 	struct spu_context *ctx = data;
1111 
1112 	spu_acquire(ctx);
1113 	ctx->ops->signal2_type_set(ctx, val);
1114 	spu_release(ctx);
1115 }
1116 
1117 static u64 __spufs_signal2_type_get(void *data)
1118 {
1119 	struct spu_context *ctx = data;
1120 	return ctx->ops->signal2_type_get(ctx);
1121 }
1122 
1123 static u64 spufs_signal2_type_get(void *data)
1124 {
1125 	struct spu_context *ctx = data;
1126 	u64 ret;
1127 
1128 	spu_acquire(ctx);
1129 	ret = __spufs_signal2_type_get(data);
1130 	spu_release(ctx);
1131 
1132 	return ret;
1133 }
1134 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1135 					spufs_signal2_type_set, "%llu");
1136 
1137 #if SPUFS_MMAP_4K
1138 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
1139 					  unsigned long address)
1140 {
1141 	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
1142 }
1143 
1144 static struct vm_operations_struct spufs_mss_mmap_vmops = {
1145 	.nopfn = spufs_mss_mmap_nopfn,
1146 };
1147 
/*
 * mmap support for problem state MSS (multisource synchronization)
 * area [0x0000 - 0x0fff].
 */
1151 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1152 {
1153 	if (!(vma->vm_flags & VM_SHARED))
1154 		return -EINVAL;
1155 
1156 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1157 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1158 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1159 
1160 	vma->vm_ops = &spufs_mss_mmap_vmops;
1161 	return 0;
1162 }
1163 #else /* SPUFS_MMAP_4K */
1164 #define spufs_mss_mmap NULL
1165 #endif /* !SPUFS_MMAP_4K */
1166 
1167 static int spufs_mss_open(struct inode *inode, struct file *file)
1168 {
1169 	struct spufs_inode_info *i = SPUFS_I(inode);
1170 	struct spu_context *ctx = i->i_ctx;
1171 
	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
1175 	if (!i->i_openers++)
1176 		ctx->mss = inode->i_mapping;
1177 	mutex_unlock(&ctx->mapping_lock);
1178 	return nonseekable_open(inode, file);
1179 }
1180 
1181 static int
1182 spufs_mss_release(struct inode *inode, struct file *file)
1183 {
1184 	struct spufs_inode_info *i = SPUFS_I(inode);
1185 	struct spu_context *ctx = i->i_ctx;
1186 
1187 	mutex_lock(&ctx->mapping_lock);
1188 	if (!--i->i_openers)
1189 		ctx->mss = NULL;
1190 	mutex_unlock(&ctx->mapping_lock);
1191 	return 0;
1192 }
1193 
1194 static const struct file_operations spufs_mss_fops = {
1195 	.open	 = spufs_mss_open,
1196 	.release = spufs_mss_release,
1197 	.mmap	 = spufs_mss_mmap,
1198 };
1199 
1200 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1201 					    unsigned long address)
1202 {
1203 	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
1204 }
1205 
1206 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1207 	.nopfn = spufs_psmap_mmap_nopfn,
1208 };
1209 
1210 /*
1211  * mmap support for full problem state area [0x00000 - 0x1ffff].
1212  */
1213 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1214 {
1215 	if (!(vma->vm_flags & VM_SHARED))
1216 		return -EINVAL;
1217 
1218 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1219 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1220 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1221 
1222 	vma->vm_ops = &spufs_psmap_mmap_vmops;
1223 	return 0;
1224 }
1225 
1226 static int spufs_psmap_open(struct inode *inode, struct file *file)
1227 {
1228 	struct spufs_inode_info *i = SPUFS_I(inode);
1229 	struct spu_context *ctx = i->i_ctx;
1230 
1231 	mutex_lock(&ctx->mapping_lock);
1232 	file->private_data = i->i_ctx;
1233 	if (!i->i_openers++)
1234 		ctx->psmap = inode->i_mapping;
1235 	mutex_unlock(&ctx->mapping_lock);
1236 	return nonseekable_open(inode, file);
1237 }
1238 
1239 static int
1240 spufs_psmap_release(struct inode *inode, struct file *file)
1241 {
1242 	struct spufs_inode_info *i = SPUFS_I(inode);
1243 	struct spu_context *ctx = i->i_ctx;
1244 
1245 	mutex_lock(&ctx->mapping_lock);
1246 	if (!--i->i_openers)
1247 		ctx->psmap = NULL;
1248 	mutex_unlock(&ctx->mapping_lock);
1249 	return 0;
1250 }
1251 
1252 static const struct file_operations spufs_psmap_fops = {
1253 	.open	 = spufs_psmap_open,
1254 	.release = spufs_psmap_release,
1255 	.mmap	 = spufs_psmap_mmap,
1256 };
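
/*
 * Illustrative mapping of the full 128k problem state area (a sketch;
 * the path is hypothetical); pages are faulted in through
 * spufs_ps_nopfn above.
 *
 *	int fd = open("/spu/myctx/psmap", O_RDWR);
 *	void *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */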
1257 
1258 
1259 #if SPUFS_MMAP_4K
1260 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1261 					  unsigned long address)
1262 {
1263 	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1264 }
1265 
1266 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1267 	.nopfn = spufs_mfc_mmap_nopfn,
1268 };
1269 
/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
1273 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1274 {
1275 	if (!(vma->vm_flags & VM_SHARED))
1276 		return -EINVAL;
1277 
1278 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1279 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1280 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1281 
1282 	vma->vm_ops = &spufs_mfc_mmap_vmops;
1283 	return 0;
1284 }
1285 #else /* SPUFS_MMAP_4K */
1286 #define spufs_mfc_mmap NULL
1287 #endif /* !SPUFS_MMAP_4K */
1288 
1289 static int spufs_mfc_open(struct inode *inode, struct file *file)
1290 {
1291 	struct spufs_inode_info *i = SPUFS_I(inode);
1292 	struct spu_context *ctx = i->i_ctx;
1293 
1294 	/* we don't want to deal with DMA into other processes */
1295 	if (ctx->owner != current->mm)
1296 		return -EINVAL;
1297 
1298 	if (atomic_read(&inode->i_count) != 1)
1299 		return -EBUSY;
1300 
1301 	mutex_lock(&ctx->mapping_lock);
1302 	file->private_data = ctx;
1303 	if (!i->i_openers++)
1304 		ctx->mfc = inode->i_mapping;
1305 	mutex_unlock(&ctx->mapping_lock);
1306 	return nonseekable_open(inode, file);
1307 }
1308 
1309 static int
1310 spufs_mfc_release(struct inode *inode, struct file *file)
1311 {
1312 	struct spufs_inode_info *i = SPUFS_I(inode);
1313 	struct spu_context *ctx = i->i_ctx;
1314 
1315 	mutex_lock(&ctx->mapping_lock);
1316 	if (!--i->i_openers)
1317 		ctx->mfc = NULL;
1318 	mutex_unlock(&ctx->mapping_lock);
1319 	return 0;
1320 }
1321 
1322 /* interrupt-level mfc callback function. */
1323 void spufs_mfc_callback(struct spu *spu)
1324 {
1325 	struct spu_context *ctx = spu->ctx;
1326 
1327 	wake_up_all(&ctx->mfc_wq);
1328 
1329 	pr_debug("%s %s\n", __FUNCTION__, spu->name);
1330 	if (ctx->mfc_fasync) {
1331 		u32 free_elements, tagstatus;
1332 		unsigned int mask;
1333 
1334 		/* no need for spu_acquire in interrupt context */
1335 		free_elements = ctx->ops->get_mfc_free_elements(ctx);
1336 		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1337 
1338 		mask = 0;
1339 		if (free_elements & 0xffff)
1340 			mask |= POLLOUT;
1341 		if (tagstatus & ctx->tagwait)
1342 			mask |= POLLIN;
1343 
1344 		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1345 	}
1346 }
1347 
1348 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1349 {
	/* See if any tag group has completed */
1351 	/* FIXME we need locking around tagwait */
1352 	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1353 	ctx->tagwait &= ~*status;
1354 	if (*status)
1355 		return 1;
1356 
	/*
	 * Enable interrupt waiting for any tag group; this may silently
	 * fail if interrupts are already enabled.
	 */
1359 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1360 	return 0;
1361 }
1362 
1363 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1364 			size_t size, loff_t *pos)
1365 {
1366 	struct spu_context *ctx = file->private_data;
1367 	int ret = -EINVAL;
1368 	u32 status;
1369 
1370 	if (size != 4)
1371 		goto out;
1372 
1373 	spu_acquire(ctx);
1374 	if (file->f_flags & O_NONBLOCK) {
1375 		status = ctx->ops->read_mfc_tagstatus(ctx);
1376 		if (!(status & ctx->tagwait))
1377 			ret = -EAGAIN;
1378 		else
1379 			ctx->tagwait &= ~status;
1380 	} else {
1381 		ret = spufs_wait(ctx->mfc_wq,
1382 			   spufs_read_mfc_tagstatus(ctx, &status));
1383 	}
1384 	spu_release(ctx);
1385 
1386 	if (ret)
1387 		goto out;
1388 
1389 	ret = 4;
1390 	if (copy_to_user(buffer, &status, 4))
1391 		ret = -EFAULT;
1392 
1393 out:
1394 	return ret;
1395 }
1396 
1397 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1398 {
1399 	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1400 		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1401 
1402 	switch (cmd->cmd) {
1403 	case MFC_PUT_CMD:
1404 	case MFC_PUTF_CMD:
1405 	case MFC_PUTB_CMD:
1406 	case MFC_GET_CMD:
1407 	case MFC_GETF_CMD:
1408 	case MFC_GETB_CMD:
1409 		break;
1410 	default:
1411 		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1412 		return -EIO;
1413 	}
1414 
	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1416 		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1417 				cmd->ea, cmd->lsa);
1418 		return -EIO;
1419 	}
1420 
1421 	switch (cmd->size & 0xf) {
1422 	case 1:
1423 		break;
1424 	case 2:
1425 		if (cmd->lsa & 1)
1426 			goto error;
1427 		break;
1428 	case 4:
1429 		if (cmd->lsa & 3)
1430 			goto error;
1431 		break;
1432 	case 8:
1433 		if (cmd->lsa & 7)
1434 			goto error;
1435 		break;
1436 	case 0:
1437 		if (cmd->lsa & 15)
1438 			goto error;
1439 		break;
1440 	error:
1441 	default:
1442 		pr_debug("invalid DMA alignment %x for size %x\n",
1443 			cmd->lsa & 0xf, cmd->size);
1444 		return -EIO;
1445 	}
1446 
1447 	if (cmd->size > 16 * 1024) {
1448 		pr_debug("invalid DMA size %x\n", cmd->size);
1449 		return -EIO;
1450 	}
1451 
1452 	if (cmd->tag & 0xfff0) {
1453 		/* we reserve the higher tag numbers for kernel use */
1454 		pr_debug("invalid DMA tag\n");
1455 		return -EIO;
1456 	}
1457 
1458 	if (cmd->class) {
1459 		/* not supported in this version */
1460 		pr_debug("invalid DMA class\n");
1461 		return -EIO;
1462 	}
1463 
1464 	return 0;
1465 }
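
/*
 * Example of a command that passes the checks above (all values are
 * illustrative): lsa and ea agree in their low four bits, the size is
 * 16-byte aligned and within the 16k limit, the tag is below 16 (higher
 * tags are reserved for the kernel) and the class is zero.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x1000,
 *		.ea   = ea,		low 4 bits must equal lsa's
 *		.size = 0x4000,		the maximum allowed transfer
 *		.tag  = 5,
 *		.cmd  = MFC_GET_CMD,
 *	};
 */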
1466 
1467 static int spu_send_mfc_command(struct spu_context *ctx,
1468 				struct mfc_dma_command cmd,
1469 				int *error)
1470 {
1471 	*error = ctx->ops->send_mfc_command(ctx, &cmd);
1472 	if (*error == -EAGAIN) {
1473 		/* wait for any tag group to complete
1474 		   so we have space for the new command */
1475 		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1476 		/* try again, because the queue might be
1477 		   empty again */
1478 		*error = ctx->ops->send_mfc_command(ctx, &cmd);
1479 		if (*error == -EAGAIN)
1480 			return 0;
1481 	}
1482 	return 1;
1483 }
1484 
1485 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1486 			size_t size, loff_t *pos)
1487 {
1488 	struct spu_context *ctx = file->private_data;
1489 	struct mfc_dma_command cmd;
1490 	int ret = -EINVAL;
1491 
1492 	if (size != sizeof cmd)
1493 		goto out;
1494 
1495 	ret = -EFAULT;
1496 	if (copy_from_user(&cmd, buffer, sizeof cmd))
1497 		goto out;
1498 
1499 	ret = spufs_check_valid_dma(&cmd);
1500 	if (ret)
1501 		goto out;
1502 
1503 	ret = spu_acquire_runnable(ctx, 0);
1504 	if (ret)
1505 		goto out;
1506 
1507 	if (file->f_flags & O_NONBLOCK) {
1508 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
1509 	} else {
1510 		int status;
1511 		ret = spufs_wait(ctx->mfc_wq,
1512 				 spu_send_mfc_command(ctx, cmd, &status));
1513 		if (status)
1514 			ret = status;
1515 	}
1516 
1517 	if (ret)
1518 		goto out_unlock;
1519 
1520 	ctx->tagwait |= 1 << cmd.tag;
1521 	ret = size;
1522 
1523 out_unlock:
1524 	spu_release(ctx);
1525 out:
1526 	return ret;
1527 }
1528 
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1530 {
1531 	struct spu_context *ctx = file->private_data;
1532 	u32 free_elements, tagstatus;
1533 	unsigned int mask;
1534 
1535 	poll_wait(file, &ctx->mfc_wq, wait);
1536 
1537 	spu_acquire(ctx);
1538 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1539 	free_elements = ctx->ops->get_mfc_free_elements(ctx);
1540 	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1541 	spu_release(ctx);
1542 
1543 	mask = 0;
1544 	if (free_elements & 0xffff)
1545 		mask |= POLLOUT | POLLWRNORM;
1546 	if (tagstatus & ctx->tagwait)
1547 		mask |= POLLIN | POLLRDNORM;
1548 
1549 	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1550 		free_elements, tagstatus, ctx->tagwait);
1551 
1552 	return mask;
1553 }
1554 
1555 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1556 {
1557 	struct spu_context *ctx = file->private_data;
1558 	int ret;
1559 
1560 	spu_acquire(ctx);
1561 #if 0
1562 /* this currently hangs */
1563 	ret = spufs_wait(ctx->mfc_wq,
1564 			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1565 	if (ret)
1566 		goto out;
1567 	ret = spufs_wait(ctx->mfc_wq,
1568 			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1569 out:
1570 #else
1571 	ret = 0;
1572 #endif
1573 	spu_release(ctx);
1574 
1575 	return ret;
1576 }
1577 
1578 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1579 			   int datasync)
1580 {
1581 	return spufs_mfc_flush(file, NULL);
1582 }
1583 
1584 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1585 {
1586 	struct spu_context *ctx = file->private_data;
1587 
1588 	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1589 }
1590 
1591 static const struct file_operations spufs_mfc_fops = {
1592 	.open	 = spufs_mfc_open,
1593 	.release = spufs_mfc_release,
1594 	.read	 = spufs_mfc_read,
1595 	.write	 = spufs_mfc_write,
1596 	.poll	 = spufs_mfc_poll,
1597 	.flush	 = spufs_mfc_flush,
1598 	.fsync	 = spufs_mfc_fsync,
1599 	.fasync	 = spufs_mfc_fasync,
1600 	.mmap	 = spufs_mfc_mmap,
1601 };
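
/*
 * Illustrative flow for the mfc file (a sketch; the path is
 * hypothetical): queue a DMA command by writing it, then wait for one
 * of the awaited tag groups by reading the 32-bit tag status word.
 *
 *	int fd = open("/spu/myctx/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof(cmd));
 *	u32 status;
 *	read(fd, &status, 4);	blocks until a waited-on group completes
 */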
1602 
1603 static void spufs_npc_set(void *data, u64 val)
1604 {
1605 	struct spu_context *ctx = data;
1606 	spu_acquire(ctx);
1607 	ctx->ops->npc_write(ctx, val);
1608 	spu_release(ctx);
1609 }
1610 
1611 static u64 spufs_npc_get(void *data)
1612 {
1613 	struct spu_context *ctx = data;
1614 	u64 ret;
1615 	spu_acquire(ctx);
1616 	ret = ctx->ops->npc_read(ctx);
1617 	spu_release(ctx);
1618 	return ret;
1619 }
1620 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1621 			"0x%llx\n")
1622 
1623 static void spufs_decr_set(void *data, u64 val)
1624 {
1625 	struct spu_context *ctx = data;
1626 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1627 	spu_acquire_saved(ctx);
1628 	lscsa->decr.slot[0] = (u32) val;
1629 	spu_release_saved(ctx);
1630 }
1631 
1632 static u64 __spufs_decr_get(void *data)
1633 {
1634 	struct spu_context *ctx = data;
1635 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1636 	return lscsa->decr.slot[0];
1637 }
1638 
1639 static u64 spufs_decr_get(void *data)
1640 {
1641 	struct spu_context *ctx = data;
1642 	u64 ret;
1643 	spu_acquire_saved(ctx);
1644 	ret = __spufs_decr_get(data);
1645 	spu_release_saved(ctx);
1646 	return ret;
1647 }
1648 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1649 			"0x%llx\n")
1650 
1651 static void spufs_decr_status_set(void *data, u64 val)
1652 {
1653 	struct spu_context *ctx = data;
1654 	spu_acquire_saved(ctx);
1655 	if (val)
1656 		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1657 	else
1658 		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1659 	spu_release_saved(ctx);
1660 }
1661 
1662 static u64 __spufs_decr_status_get(void *data)
1663 {
1664 	struct spu_context *ctx = data;
1665 	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1666 		return SPU_DECR_STATUS_RUNNING;
1667 	else
1668 		return 0;
1669 }
1670 
1671 static u64 spufs_decr_status_get(void *data)
1672 {
1673 	struct spu_context *ctx = data;
1674 	u64 ret;
1675 	spu_acquire_saved(ctx);
1676 	ret = __spufs_decr_status_get(data);
1677 	spu_release_saved(ctx);
1678 	return ret;
1679 }
1680 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1681 			spufs_decr_status_set, "0x%llx\n")
1682 
1683 static void spufs_event_mask_set(void *data, u64 val)
1684 {
1685 	struct spu_context *ctx = data;
1686 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1687 	spu_acquire_saved(ctx);
1688 	lscsa->event_mask.slot[0] = (u32) val;
1689 	spu_release_saved(ctx);
1690 }
1691 
1692 static u64 __spufs_event_mask_get(void *data)
1693 {
1694 	struct spu_context *ctx = data;
1695 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1696 	return lscsa->event_mask.slot[0];
1697 }
1698 
1699 static u64 spufs_event_mask_get(void *data)
1700 {
1701 	struct spu_context *ctx = data;
1702 	u64 ret;
1703 	spu_acquire_saved(ctx);
1704 	ret = __spufs_event_mask_get(data);
1705 	spu_release_saved(ctx);
1706 	return ret;
1707 }
1708 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1709 			spufs_event_mask_set, "0x%llx\n")
1710 
1711 static u64 __spufs_event_status_get(void *data)
1712 {
1713 	struct spu_context *ctx = data;
1714 	struct spu_state *state = &ctx->csa;
1715 	u64 stat;
1716 	stat = state->spu_chnlcnt_RW[0];
1717 	if (stat)
1718 		return state->spu_chnldata_RW[0];
1719 	return 0;
1720 }
1721 
1722 static u64 spufs_event_status_get(void *data)
1723 {
1724 	struct spu_context *ctx = data;
1725 	u64 ret = 0;
1726 
1727 	spu_acquire_saved(ctx);
1728 	ret = __spufs_event_status_get(data);
1729 	spu_release_saved(ctx);
1730 	return ret;
1731 }
1732 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1733 			NULL, "0x%llx\n")
1734 
1735 static void spufs_srr0_set(void *data, u64 val)
1736 {
1737 	struct spu_context *ctx = data;
1738 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1739 	spu_acquire_saved(ctx);
1740 	lscsa->srr0.slot[0] = (u32) val;
1741 	spu_release_saved(ctx);
1742 }
1743 
1744 static u64 spufs_srr0_get(void *data)
1745 {
1746 	struct spu_context *ctx = data;
1747 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1748 	u64 ret;
1749 	spu_acquire_saved(ctx);
1750 	ret = lscsa->srr0.slot[0];
1751 	spu_release_saved(ctx);
1752 	return ret;
1753 }
1754 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1755 			"0x%llx\n")
1756 
1757 static u64 spufs_id_get(void *data)
1758 {
1759 	struct spu_context *ctx = data;
1760 	u64 num;
1761 
1762 	spu_acquire(ctx);
1763 	if (ctx->state == SPU_STATE_RUNNABLE)
1764 		num = ctx->spu->number;
1765 	else
1766 		num = (unsigned int)-1;
1767 	spu_release(ctx);
1768 
1769 	return num;
1770 }
1771 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1772 
1773 static u64 __spufs_object_id_get(void *data)
1774 {
1775 	struct spu_context *ctx = data;
1776 	return ctx->object_id;
1777 }
1778 
1779 static u64 spufs_object_id_get(void *data)
1780 {
1781 	/* FIXME: Should there really be no locking here? */
1782 	return __spufs_object_id_get(data);
1783 }
1784 
1785 static void spufs_object_id_set(void *data, u64 id)
1786 {
1787 	struct spu_context *ctx = data;
1788 	ctx->object_id = id;
1789 }
1790 
1791 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1792 		spufs_object_id_set, "0x%llx\n");
1793 
1794 static u64 __spufs_lslr_get(void *data)
1795 {
1796 	struct spu_context *ctx = data;
1797 	return ctx->csa.priv2.spu_lslr_RW;
1798 }
1799 
1800 static u64 spufs_lslr_get(void *data)
1801 {
1802 	struct spu_context *ctx = data;
1803 	u64 ret;
1804 
1805 	spu_acquire_saved(ctx);
1806 	ret = __spufs_lslr_get(data);
1807 	spu_release_saved(ctx);
1808 
1809 	return ret;
1810 }
1811 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1812 
1813 static int spufs_info_open(struct inode *inode, struct file *file)
1814 {
1815 	struct spufs_inode_info *i = SPUFS_I(inode);
1816 	struct spu_context *ctx = i->i_ctx;
1817 	file->private_data = ctx;
1818 	return 0;
1819 }
1820 
1821 static int spufs_caps_show(struct seq_file *s, void *private)
1822 {
1823 	struct spu_context *ctx = s->private;
1824 
1825 	if (!(ctx->flags & SPU_CREATE_NOSCHED))
1826 		seq_puts(s, "sched\n");
1827 	if (!(ctx->flags & SPU_CREATE_ISOLATE))
1828 		seq_puts(s, "step\n");
1829 	return 0;
1830 }
1831 
1832 static int spufs_caps_open(struct inode *inode, struct file *file)
1833 {
1834 	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
1835 }
1836 
1837 static const struct file_operations spufs_caps_fops = {
1838 	.open		= spufs_caps_open,
1839 	.read		= seq_read,
1840 	.llseek		= seq_lseek,
1841 	.release	= single_release,
1842 };
1843 
1844 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1845 			char __user *buf, size_t len, loff_t *pos)
1846 {
1847 	u32 mbox_stat;
	u32 data = 0;	/* in case the mailbox is empty */
1849 
1850 	mbox_stat = ctx->csa.prob.mb_stat_R;
1851 	if (mbox_stat & 0x0000ff) {
1852 		data = ctx->csa.prob.pu_mb_R;
1853 	}
1854 
1855 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1856 }
1857 
1858 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1859 				   size_t len, loff_t *pos)
1860 {
1861 	int ret;
1862 	struct spu_context *ctx = file->private_data;
1863 
1864 	if (!access_ok(VERIFY_WRITE, buf, len))
1865 		return -EFAULT;
1866 
1867 	spu_acquire_saved(ctx);
1868 	spin_lock(&ctx->csa.register_lock);
1869 	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1870 	spin_unlock(&ctx->csa.register_lock);
1871 	spu_release_saved(ctx);
1872 
1873 	return ret;
1874 }
1875 
1876 static const struct file_operations spufs_mbox_info_fops = {
1877 	.open = spufs_info_open,
1878 	.read = spufs_mbox_info_read,
1879 	.llseek  = generic_file_llseek,
1880 };
1881 
1882 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1883 				char __user *buf, size_t len, loff_t *pos)
1884 {
1885 	u32 ibox_stat;
	u32 data = 0;	/* in case the mailbox is empty */
1887 
1888 	ibox_stat = ctx->csa.prob.mb_stat_R;
1889 	if (ibox_stat & 0xff0000) {
1890 		data = ctx->csa.priv2.puint_mb_R;
1891 	}
1892 
1893 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1894 }
1895 
1896 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1897 				   size_t len, loff_t *pos)
1898 {
1899 	struct spu_context *ctx = file->private_data;
1900 	int ret;
1901 
1902 	if (!access_ok(VERIFY_WRITE, buf, len))
1903 		return -EFAULT;
1904 
1905 	spu_acquire_saved(ctx);
1906 	spin_lock(&ctx->csa.register_lock);
1907 	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1908 	spin_unlock(&ctx->csa.register_lock);
1909 	spu_release_saved(ctx);
1910 
1911 	return ret;
1912 }
1913 
1914 static const struct file_operations spufs_ibox_info_fops = {
1915 	.open = spufs_info_open,
1916 	.read = spufs_ibox_info_read,
1917 	.llseek  = generic_file_llseek,
1918 };
1919 
1920 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1921 			char __user *buf, size_t len, loff_t *pos)
1922 {
1923 	int i, cnt;
1924 	u32 data[4];
1925 	u32 wbox_stat;
1926 
1927 	wbox_stat = ctx->csa.prob.mb_stat_R;
1928 	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1929 	for (i = 0; i < cnt; i++) {
1930 		data[i] = ctx->csa.spu_mailbox_data[i];
1931 	}
1932 
1933 	return simple_read_from_buffer(buf, len, pos, &data,
1934 				cnt * sizeof(u32));
1935 }
1936 
1937 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1938 				   size_t len, loff_t *pos)
1939 {
1940 	struct spu_context *ctx = file->private_data;
1941 	int ret;
1942 
1943 	if (!access_ok(VERIFY_WRITE, buf, len))
1944 		return -EFAULT;
1945 
1946 	spu_acquire_saved(ctx);
1947 	spin_lock(&ctx->csa.register_lock);
1948 	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1949 	spin_unlock(&ctx->csa.register_lock);
1950 	spu_release_saved(ctx);
1951 
1952 	return ret;
1953 }
1954 
1955 static const struct file_operations spufs_wbox_info_fops = {
1956 	.open = spufs_info_open,
1957 	.read = spufs_wbox_info_read,
1958 	.llseek  = generic_file_llseek,
1959 };
1960 
1961 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1962 			char __user *buf, size_t len, loff_t *pos)
1963 {
1964 	struct spu_dma_info info;
1965 	struct mfc_cq_sr *qp, *spuqp;
1966 	int i;
1967 
1968 	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1969 	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1970 	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1971 	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1972 	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1973 	for (i = 0; i < 16; i++) {
1974 		qp = &info.dma_info_command_data[i];
1975 		spuqp = &ctx->csa.priv2.spuq[i];
1976 
1977 		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1978 		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1979 		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1980 		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1981 	}
1982 
1983 	return simple_read_from_buffer(buf, len, pos, &info,
1984 				sizeof info);
1985 }
1986 
1987 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1988 			      size_t len, loff_t *pos)
1989 {
1990 	struct spu_context *ctx = file->private_data;
1991 	int ret;
1992 
1993 	if (!access_ok(VERIFY_WRITE, buf, len))
1994 		return -EFAULT;
1995 
1996 	spu_acquire_saved(ctx);
1997 	spin_lock(&ctx->csa.register_lock);
1998 	ret = __spufs_dma_info_read(ctx, buf, len, pos);
1999 	spin_unlock(&ctx->csa.register_lock);
2000 	spu_release_saved(ctx);
2001 
2002 	return ret;
2003 }
2004 
2005 static const struct file_operations spufs_dma_info_fops = {
2006 	.open = spufs_info_open,
2007 	.read = spufs_dma_info_read,
2008 };
2009 
2010 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2011 			char __user *buf, size_t len, loff_t *pos)
2012 {
2013 	struct spu_proxydma_info info;
2014 	struct mfc_cq_sr *qp, *puqp;
2015 	int ret = sizeof info;
2016 	int i;
2017 
2018 	if (len < ret)
2019 		return -EINVAL;
2020 
2021 	if (!access_ok(VERIFY_WRITE, buf, len))
2022 		return -EFAULT;
2023 
2024 	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2025 	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2026 	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2027 	for (i = 0; i < 8; i++) {
2028 		qp = &info.proxydma_info_command_data[i];
2029 		puqp = &ctx->csa.priv2.puq[i];
2030 
2031 		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2032 		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2033 		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2034 		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2035 	}
2036 
2037 	return simple_read_from_buffer(buf, len, pos, &info,
2038 				sizeof info);
2039 }
2040 
2041 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2042 				   size_t len, loff_t *pos)
2043 {
2044 	struct spu_context *ctx = file->private_data;
2045 	int ret;
2046 
2047 	spu_acquire_saved(ctx);
2048 	spin_lock(&ctx->csa.register_lock);
2049 	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2050 	spin_unlock(&ctx->csa.register_lock);
2051 	spu_release_saved(ctx);
2052 
2053 	return ret;
2054 }
2055 
2056 static const struct file_operations spufs_proxydma_info_fops = {
2057 	.open = spufs_info_open,
2058 	.read = spufs_proxydma_info_read,
2059 };
2060 
2061 static int spufs_show_tid(struct seq_file *s, void *private)
2062 {
2063 	struct spu_context *ctx = s->private;
2064 
2065 	seq_printf(s, "%d\n", ctx->tid);
2066 	return 0;
2067 }
2068 
2069 static int spufs_tid_open(struct inode *inode, struct file *file)
2070 {
2071 	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2072 }
2073 
2074 static const struct file_operations spufs_tid_fops = {
2075 	.open		= spufs_tid_open,
2076 	.read		= seq_read,
2077 	.llseek		= seq_lseek,
2078 	.release	= single_release,
2079 };
2080 
2081 static const char *ctx_state_names[] = {
2082 	"user", "system", "iowait", "loaded"
2083 };
2084 
2085 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2086 		enum spu_utilization_state state)
2087 {
2088 	struct timespec ts;
2089 	unsigned long long time = ctx->stats.times[state];
2090 
2091 	/*
2092 	 * In general, utilization statistics are updated by the controlling
2093 	 * thread as the spu context moves through various well defined
2094 	 * state transitions, but if the context is lazily loaded its
2095 	 * utilization statistics are not updated as the controlling thread
2096 	 * is not tightly coupled with the execution of the spu context.  We
2097 	 * calculate and apply the time delta from the last recorded state
2098 	 * of the spu context.
2099 	 */
2100 	if (ctx->spu && ctx->stats.util_state == state) {
2101 		ktime_get_ts(&ts);
2102 		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2103 	}
2104 
2105 	return time / NSEC_PER_MSEC;
2106 }
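
/*
 * Worked example (made-up numbers): with 40,000,000 ns accumulated in
 * SPU_UTIL_USER, the context still loaded in that state, and
 * 10,000,000 ns elapsed since stats.tstamp, the reported value is
 * (40,000,000 + 10,000,000) / NSEC_PER_MSEC = 50 milliseconds.
 */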
2107 
2108 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2109 {
2110 	unsigned long long slb_flts = ctx->stats.slb_flt;
2111 
2112 	if (ctx->state == SPU_STATE_RUNNABLE) {
2113 		slb_flts += (ctx->spu->stats.slb_flt -
2114 			     ctx->stats.slb_flt_base);
2115 	}
2116 
2117 	return slb_flts;
2118 }
2119 
2120 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2121 {
2122 	unsigned long long class2_intrs = ctx->stats.class2_intr;
2123 
2124 	if (ctx->state == SPU_STATE_RUNNABLE) {
2125 		class2_intrs += (ctx->spu->stats.class2_intr -
2126 				 ctx->stats.class2_intr_base);
2127 	}
2128 
2129 	return class2_intrs;
2130 }
2131 
2132 
2133 static int spufs_show_stat(struct seq_file *s, void *private)
2134 {
2135 	struct spu_context *ctx = s->private;
2136 
2137 	spu_acquire(ctx);
2138 	seq_printf(s, "%s %llu %llu %llu %llu "
2139 		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2140 		ctx_state_names[ctx->stats.util_state],
2141 		spufs_acct_time(ctx, SPU_UTIL_USER),
2142 		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2143 		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2144 		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2145 		ctx->stats.vol_ctx_switch,
2146 		ctx->stats.invol_ctx_switch,
2147 		spufs_slb_flts(ctx),
2148 		ctx->stats.hash_flt,
2149 		ctx->stats.min_flt,
2150 		ctx->stats.maj_flt,
2151 		spufs_class2_intrs(ctx),
2152 		ctx->stats.libassist);
2153 	spu_release(ctx);
2154 	return 0;
2155 }
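
/*
 * Sample "stat" output (made-up numbers), matching the format above:
 *
 *	user 40 12 3 0 17 2 0 0 25 1 4 0
 *
 * i.e. utilization state, the four utilization times in milliseconds,
 * voluntary and involuntary context switches, SLB faults, hash faults,
 * minor and major faults, class 2 interrupts and libassist calls.
 */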
2156 
2157 static int spufs_stat_open(struct inode *inode, struct file *file)
2158 {
2159 	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2160 }
2161 
2162 static const struct file_operations spufs_stat_fops = {
2163 	.open		= spufs_stat_open,
2164 	.read		= seq_read,
2165 	.llseek		= seq_lseek,
2166 	.release	= single_release,
2167 };
2168 
2169 
2170 struct tree_descr spufs_dir_contents[] = {
2171 	{ "capabilities", &spufs_caps_fops, 0444, },
2172 	{ "mem",  &spufs_mem_fops,  0666, },
2173 	{ "regs", &spufs_regs_fops,  0666, },
2174 	{ "mbox", &spufs_mbox_fops, 0444, },
2175 	{ "ibox", &spufs_ibox_fops, 0444, },
2176 	{ "wbox", &spufs_wbox_fops, 0222, },
2177 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2178 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2179 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2180 	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
2181 	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
2182 	{ "signal1_type", &spufs_signal1_type, 0666, },
2183 	{ "signal2_type", &spufs_signal2_type, 0666, },
2184 	{ "cntl", &spufs_cntl_fops,  0666, },
2185 	{ "fpcr", &spufs_fpcr_fops, 0666, },
2186 	{ "lslr", &spufs_lslr_ops, 0444, },
2187 	{ "mfc", &spufs_mfc_fops, 0666, },
2188 	{ "mss", &spufs_mss_fops, 0666, },
2189 	{ "npc", &spufs_npc_ops, 0666, },
2190 	{ "srr0", &spufs_srr0_ops, 0666, },
2191 	{ "decr", &spufs_decr_ops, 0666, },
2192 	{ "decr_status", &spufs_decr_status_ops, 0666, },
2193 	{ "event_mask", &spufs_event_mask_ops, 0666, },
2194 	{ "event_status", &spufs_event_status_ops, 0444, },
2195 	{ "psmap", &spufs_psmap_fops, 0666, },
2196 	{ "phys-id", &spufs_id_ops, 0666, },
2197 	{ "object-id", &spufs_object_id_ops, 0666, },
2198 	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
2199 	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
2200 	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
2201 	{ "dma_info", &spufs_dma_info_fops, 0444, },
2202 	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
2203 	{ "tid", &spufs_tid_fops, 0444, },
2204 	{ "stat", &spufs_stat_fops, 0444, },
2205 	{},
2206 };
2207 
2208 struct tree_descr spufs_dir_nosched_contents[] = {
2209 	{ "capabilities", &spufs_caps_fops, 0444, },
2210 	{ "mem",  &spufs_mem_fops,  0666, },
2211 	{ "mbox", &spufs_mbox_fops, 0444, },
2212 	{ "ibox", &spufs_ibox_fops, 0444, },
2213 	{ "wbox", &spufs_wbox_fops, 0222, },
2214 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2215 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2216 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2217 	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
2218 	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
2219 	{ "signal1_type", &spufs_signal1_type, 0666, },
2220 	{ "signal2_type", &spufs_signal2_type, 0666, },
2221 	{ "mss", &spufs_mss_fops, 0666, },
2222 	{ "mfc", &spufs_mfc_fops, 0666, },
2223 	{ "cntl", &spufs_cntl_fops,  0666, },
2224 	{ "npc", &spufs_npc_ops, 0666, },
2225 	{ "psmap", &spufs_psmap_fops, 0666, },
2226 	{ "phys-id", &spufs_id_ops, 0666, },
2227 	{ "object-id", &spufs_object_id_ops, 0666, },
2228 	{ "tid", &spufs_tid_fops, 0444, },
2229 	{ "stat", &spufs_stat_fops, 0444, },
2230 	{},
2231 };
2232 
2233 struct spufs_coredump_reader spufs_coredump_read[] = {
2234 	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
2235 	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
2236 	{ "lslr", NULL, __spufs_lslr_get, 11 },
2237 	{ "decr", NULL, __spufs_decr_get, 11 },
2238 	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
2239 	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
2240 	{ "signal1", __spufs_signal1_read, NULL, 4 },
2241 	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
2242 	{ "signal2", __spufs_signal2_read, NULL, 4 },
2243 	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
2244 	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
2245 	{ "event_status", NULL, __spufs_event_status_get, 8 },
2246 	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
2247 	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
2248 	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
2249 	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
2250 	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
2251 	{ "object-id", NULL, __spufs_object_id_get, 19 },
2252 	{ },
2253 };
2254 int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
2255 
2256