xref: /openbmc/linux/arch/powerpc/platforms/cell/spufs/file.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #undef DEBUG
24 
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
32 
33 #include <asm/io.h>
34 #include <asm/semaphore.h>
35 #include <asm/spu.h>
36 #include <asm/spu_info.h>
37 #include <asm/uaccess.h>
38 
39 #include "spufs.h"
40 
41 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
42 
43 
44 static int
45 spufs_mem_open(struct inode *inode, struct file *file)
46 {
47 	struct spufs_inode_info *i = SPUFS_I(inode);
48 	struct spu_context *ctx = i->i_ctx;
49 
50 	mutex_lock(&ctx->mapping_lock);
51 	file->private_data = ctx;
52 	if (!i->i_openers++)
53 		ctx->local_store = inode->i_mapping;
54 	mutex_unlock(&ctx->mapping_lock);
55 	return 0;
56 }
57 
58 static int
59 spufs_mem_release(struct inode *inode, struct file *file)
60 {
61 	struct spufs_inode_info *i = SPUFS_I(inode);
62 	struct spu_context *ctx = i->i_ctx;
63 
64 	mutex_lock(&ctx->mapping_lock);
65 	if (!--i->i_openers)
66 		ctx->local_store = NULL;
67 	mutex_unlock(&ctx->mapping_lock);
68 	return 0;
69 }
70 
71 static ssize_t
72 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73 			size_t size, loff_t *pos)
74 {
75 	char *local_store = ctx->ops->get_ls(ctx);
76 	return simple_read_from_buffer(buffer, size, pos, local_store,
77 					LS_SIZE);
78 }
79 
80 static ssize_t
81 spufs_mem_read(struct file *file, char __user *buffer,
82 				size_t size, loff_t *pos)
83 {
84 	struct spu_context *ctx = file->private_data;
85 	ssize_t ret;
86 
87 	spu_acquire(ctx);
88 	ret = __spufs_mem_read(ctx, buffer, size, pos);
89 	spu_release(ctx);
90 	return ret;
91 }
92 
93 static ssize_t
94 spufs_mem_write(struct file *file, const char __user *buffer,
95 					size_t size, loff_t *ppos)
96 {
97 	struct spu_context *ctx = file->private_data;
98 	char *local_store;
99 	loff_t pos = *ppos;
100 	int ret;
101 
102 	if (pos < 0)
103 		return -EINVAL;
104 	if (pos > LS_SIZE)
105 		return -EFBIG;
106 	if (size > LS_SIZE - pos)
107 		size = LS_SIZE - pos;
108 
109 	spu_acquire(ctx);
110 	local_store = ctx->ops->get_ls(ctx);
111 	ret = copy_from_user(local_store + pos, buffer, size);
112 	spu_release(ctx);
113 
114 	if (ret)
115 		return -EFAULT;
116 	*ppos = pos + size;
117 	return size;
118 }
119 
120 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
121 					  unsigned long address)
122 {
123 	struct spu_context *ctx	= vma->vm_file->private_data;
124 	unsigned long pfn, offset, addr0 = address;
125 #ifdef CONFIG_SPU_FS_64K_LS
126 	struct spu_state *csa = &ctx->csa;
127 	int psize;
128 
129 	/* Check what page size we are using */
130 	psize = get_slice_psize(vma->vm_mm, address);
131 
132 	/* Some sanity checking */
133 	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
134 
135 	/* Wow, 64K, cool, we need to align the address though */
136 	if (csa->use_big_pages) {
137 		BUG_ON(vma->vm_start & 0xffff);
138 		address &= ~0xfffful;
139 	}
140 #endif /* CONFIG_SPU_FS_64K_LS */
141 
142 	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
143 	if (offset >= LS_SIZE)
144 		return NOPFN_SIGBUS;
145 
146 	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
147 		 addr0, address, offset);
148 
149 	spu_acquire(ctx);
150 
151 	if (ctx->state == SPU_STATE_SAVED) {
152 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
153 							& ~_PAGE_NO_CACHE);
154 		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
155 	} else {
156 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
157 					     | _PAGE_NO_CACHE);
158 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
159 	}
160 	vm_insert_pfn(vma, address, pfn);
161 
162 	spu_release(ctx);
163 
164 	return NOPFN_REFAULT;
165 }
166 
167 
168 static struct vm_operations_struct spufs_mem_mmap_vmops = {
169 	.nopfn = spufs_mem_mmap_nopfn,
170 };
171 
172 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
173 {
174 #ifdef CONFIG_SPU_FS_64K_LS
175 	struct spu_context	*ctx = file->private_data;
176 	struct spu_state	*csa = &ctx->csa;
177 
178 	/* Sanity check VMA alignment */
179 	if (csa->use_big_pages) {
180 		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
181 			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
182 			 vma->vm_pgoff);
183 		if (vma->vm_start & 0xffff)
184 			return -EINVAL;
185 		if (vma->vm_pgoff & 0xf)
186 			return -EINVAL;
187 	}
188 #endif /* CONFIG_SPU_FS_64K_LS */
189 
190 	if (!(vma->vm_flags & VM_SHARED))
191 		return -EINVAL;
192 
193 	vma->vm_flags |= VM_IO | VM_PFNMAP;
194 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
195 				     | _PAGE_NO_CACHE);
196 
197 	vma->vm_ops = &spufs_mem_mmap_vmops;
198 	return 0;
199 }
200 
201 #ifdef CONFIG_SPU_FS_64K_LS
202 static unsigned long spufs_get_unmapped_area(struct file *file,
203 		unsigned long addr, unsigned long len, unsigned long pgoff,
204 		unsigned long flags)
205 {
206 	struct spu_context	*ctx = file->private_data;
207 	struct spu_state	*csa = &ctx->csa;
208 
209 	/* If not using big pages, fall back to the mm's get_unmapped_area */
210 	if (!csa->use_big_pages)
211 		return current->mm->get_unmapped_area(file, addr, len,
212 						      pgoff, flags);
213 
214 	/* Otherwise, try to obtain a 64K-page slice */
215 	return slice_get_unmapped_area(addr, len, flags,
216 				       MMU_PAGE_64K, 1, 0);
217 }
218 #endif /* CONFIG_SPU_FS_64K_LS */
219 
220 static const struct file_operations spufs_mem_fops = {
221 	.open			= spufs_mem_open,
222 	.release		= spufs_mem_release,
223 	.read			= spufs_mem_read,
224 	.write			= spufs_mem_write,
225 	.llseek			= generic_file_llseek,
226 	.mmap			= spufs_mem_mmap,
227 #ifdef CONFIG_SPU_FS_64K_LS
228 	.get_unmapped_area	= spufs_get_unmapped_area,
229 #endif
230 };
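/*
 * Illustrative userspace sketch, not part of this file: mapping the
 * per-context "mem" file gives direct access to SPU local store.  The
 * path below and the 256 KiB LS_SIZE value are assumptions for the
 * example; the mapping must be MAP_SHARED to pass the VM_SHARED check
 * in spufs_mem_mmap() above.
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	if (ls != MAP_FAILED)
 *		((volatile char *)ls)[0] = 0x42;	-- store into local store
 */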
231 
232 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
233 				    unsigned long address,
234 				    unsigned long ps_offs,
235 				    unsigned long ps_size)
236 {
237 	struct spu_context *ctx = vma->vm_file->private_data;
238 	unsigned long area, offset = address - vma->vm_start;
239 	int ret;
240 
241 	offset += vma->vm_pgoff << PAGE_SHIFT;
242 	if (offset >= ps_size)
243 		return NOPFN_SIGBUS;
244 
245 	/* an error here usually means a signal; we might want to test
246 	 * the error code more precisely though
247 	 */
248 	ret = spu_acquire_runnable(ctx, 0);
249 	if (ret)
250 		return NOPFN_REFAULT;
251 
252 	area = ctx->spu->problem_phys + ps_offs;
253 	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
254 	spu_release(ctx);
255 
256 	return NOPFN_REFAULT;
257 }
258 
259 #if SPUFS_MMAP_4K
260 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
261 					   unsigned long address)
262 {
263 	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
264 }
265 
266 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
267 	.nopfn = spufs_cntl_mmap_nopfn,
268 };
269 
270 /*
271  * mmap support for problem state control area [0x4000 - 0x4fff].
272  */
273 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
274 {
275 	if (!(vma->vm_flags & VM_SHARED))
276 		return -EINVAL;
277 
278 	vma->vm_flags |= VM_IO | VM_PFNMAP;
279 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
280 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
281 
282 	vma->vm_ops = &spufs_cntl_mmap_vmops;
283 	return 0;
284 }
285 #else /* SPUFS_MMAP_4K */
286 #define spufs_cntl_mmap NULL
287 #endif /* !SPUFS_MMAP_4K */
288 
289 static u64 spufs_cntl_get(void *data)
290 {
291 	struct spu_context *ctx = data;
292 	u64 val;
293 
294 	spu_acquire(ctx);
295 	val = ctx->ops->status_read(ctx);
296 	spu_release(ctx);
297 
298 	return val;
299 }
300 
301 static void spufs_cntl_set(void *data, u64 val)
302 {
303 	struct spu_context *ctx = data;
304 
305 	spu_acquire(ctx);
306 	ctx->ops->runcntl_write(ctx, val);
307 	spu_release(ctx);
308 }
309 
310 static int spufs_cntl_open(struct inode *inode, struct file *file)
311 {
312 	struct spufs_inode_info *i = SPUFS_I(inode);
313 	struct spu_context *ctx = i->i_ctx;
314 
315 	mutex_lock(&ctx->mapping_lock);
316 	file->private_data = ctx;
317 	if (!i->i_openers++)
318 		ctx->cntl = inode->i_mapping;
319 	mutex_unlock(&ctx->mapping_lock);
320 	return simple_attr_open(inode, file, spufs_cntl_get,
321 					spufs_cntl_set, "0x%08lx");
322 }
323 
324 static int
325 spufs_cntl_release(struct inode *inode, struct file *file)
326 {
327 	struct spufs_inode_info *i = SPUFS_I(inode);
328 	struct spu_context *ctx = i->i_ctx;
329 
330 	simple_attr_close(inode, file);
331 
332 	mutex_lock(&ctx->mapping_lock);
333 	if (!--i->i_openers)
334 		ctx->cntl = NULL;
335 	mutex_unlock(&ctx->mapping_lock);
336 	return 0;
337 }
338 
339 static const struct file_operations spufs_cntl_fops = {
340 	.open = spufs_cntl_open,
341 	.release = spufs_cntl_release,
342 	.read = simple_attr_read,
343 	.write = simple_attr_write,
344 	.mmap = spufs_cntl_mmap,
345 };
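/*
 * Illustrative userspace sketch, not part of this file (cntl_fd is an
 * assumed descriptor for the "cntl" file): the simple_attr helpers
 * used above make this an ASCII interface, so the SPU status register
 * reads back as hex text and the run-control value is written as
 * text.  The meaning of the value is defined by the SPU run-control
 * register.
 *
 *	char buf[20];
 *	read(cntl_fd, buf, sizeof(buf) - 1);	-- e.g. "0x00000000"
 *	write(cntl_fd, "1", 1);			-- set run control to 1
 */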
346 
347 static int
348 spufs_regs_open(struct inode *inode, struct file *file)
349 {
350 	struct spufs_inode_info *i = SPUFS_I(inode);
351 	file->private_data = i->i_ctx;
352 	return 0;
353 }
354 
355 static ssize_t
356 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
357 			size_t size, loff_t *pos)
358 {
359 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
360 	return simple_read_from_buffer(buffer, size, pos,
361 				      lscsa->gprs, sizeof lscsa->gprs);
362 }
363 
364 static ssize_t
365 spufs_regs_read(struct file *file, char __user *buffer,
366 		size_t size, loff_t *pos)
367 {
368 	int ret;
369 	struct spu_context *ctx = file->private_data;
370 
371 	spu_acquire_saved(ctx);
372 	ret = __spufs_regs_read(ctx, buffer, size, pos);
373 	spu_release_saved(ctx);
374 	return ret;
375 }
376 
377 static ssize_t
378 spufs_regs_write(struct file *file, const char __user *buffer,
379 		 size_t size, loff_t *pos)
380 {
381 	struct spu_context *ctx = file->private_data;
382 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
383 	int ret;
384 
385 	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
386 	if (size <= 0)
387 		return -EFBIG;
388 	*pos += size;
389 
390 	spu_acquire_saved(ctx);
391 
392 	ret = copy_from_user(lscsa->gprs + *pos - size,
393 			     buffer, size) ? -EFAULT : size;
394 
395 	spu_release_saved(ctx);
396 	return ret;
397 }
398 
399 static const struct file_operations spufs_regs_fops = {
400 	.open	 = spufs_regs_open,
401 	.read    = spufs_regs_read,
402 	.write   = spufs_regs_write,
403 	.llseek  = generic_file_llseek,
404 };
405 
406 static ssize_t
407 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
408 			size_t size, loff_t * pos)
409 {
410 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
411 	return simple_read_from_buffer(buffer, size, pos,
412 				      &lscsa->fpcr, sizeof(lscsa->fpcr));
413 }
414 
415 static ssize_t
416 spufs_fpcr_read(struct file *file, char __user * buffer,
417 		size_t size, loff_t * pos)
418 {
419 	int ret;
420 	struct spu_context *ctx = file->private_data;
421 
422 	spu_acquire_saved(ctx);
423 	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
424 	spu_release_saved(ctx);
425 	return ret;
426 }
427 
428 static ssize_t
429 spufs_fpcr_write(struct file *file, const char __user * buffer,
430 		 size_t size, loff_t * pos)
431 {
432 	struct spu_context *ctx = file->private_data;
433 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
434 	int ret;
435 
436 	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
437 	if (size <= 0)
438 		return -EFBIG;
439 	*pos += size;
440 
441 	spu_acquire_saved(ctx);
442 
443 	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
444 			     buffer, size) ? -EFAULT : size;
445 
446 	spu_release_saved(ctx);
447 	return ret;
448 }
449 
450 static const struct file_operations spufs_fpcr_fops = {
451 	.open = spufs_regs_open,
452 	.read = spufs_fpcr_read,
453 	.write = spufs_fpcr_write,
454 	.llseek = generic_file_llseek,
455 };
456 
457 /* generic open function for all pipe-like files */
458 static int spufs_pipe_open(struct inode *inode, struct file *file)
459 {
460 	struct spufs_inode_info *i = SPUFS_I(inode);
461 	file->private_data = i->i_ctx;
462 
463 	return nonseekable_open(inode, file);
464 }
465 
466 /*
467  * Read as many bytes from the mailbox as possible, until
468  * one of the conditions becomes true:
469  *
470  * - no more data available in the mailbox
471  * - end of the user provided buffer
472  * - end of the mapped area
473  */
474 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
475 			size_t len, loff_t *pos)
476 {
477 	struct spu_context *ctx = file->private_data;
478 	u32 mbox_data, __user *udata;
479 	ssize_t count;
480 
481 	if (len < 4)
482 		return -EINVAL;
483 
484 	if (!access_ok(VERIFY_WRITE, buf, len))
485 		return -EFAULT;
486 
487 	udata = (void __user *)buf;
488 
489 	spu_acquire(ctx);
490 	for (count = 0; (count + 4) <= len; count += 4, udata++) {
491 		int ret;
492 		ret = ctx->ops->mbox_read(ctx, &mbox_data);
493 		if (ret == 0)
494 			break;
495 
496 		/*
497 		 * at the end of the mapped area, we can fault
498 		 * but still need to return the data we have
499 		 * read successfully so far.
500 		 */
501 		ret = __put_user(mbox_data, udata);
502 		if (ret) {
503 			if (!count)
504 				count = -EFAULT;
505 			break;
506 		}
507 	}
508 	spu_release(ctx);
509 
510 	if (!count)
511 		count = -EAGAIN;
512 
513 	return count;
514 }
515 
516 static const struct file_operations spufs_mbox_fops = {
517 	.open	= spufs_pipe_open,
518 	.read	= spufs_mbox_read,
519 };
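/*
 * Illustrative userspace sketch, not part of this file (mbox_fd is an
 * assumed descriptor for the "mbox" file): reads return whole 32-bit
 * mailbox entries and fail with EAGAIN when the SPU-to-PPE mailbox is
 * empty, matching spufs_mbox_read() above.
 *
 *	uint32_t data;
 *	ssize_t n = read(mbox_fd, &data, sizeof(data));
 *	if (n == 4)
 *		handle_word(data);		-- hypothetical consumer
 *	else if (n < 0 && errno == EAGAIN)
 *		;				-- nothing queued right now
 */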
520 
521 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
522 			size_t len, loff_t *pos)
523 {
524 	struct spu_context *ctx = file->private_data;
525 	u32 mbox_stat;
526 
527 	if (len < 4)
528 		return -EINVAL;
529 
530 	spu_acquire(ctx);
531 
532 	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
533 
534 	spu_release(ctx);
535 
536 	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
537 		return -EFAULT;
538 
539 	return 4;
540 }
541 
542 static const struct file_operations spufs_mbox_stat_fops = {
543 	.open	= spufs_pipe_open,
544 	.read	= spufs_mbox_stat_read,
545 };
546 
547 /* low-level ibox access function */
548 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
549 {
550 	return ctx->ops->ibox_read(ctx, data);
551 }
552 
553 static int spufs_ibox_fasync(int fd, struct file *file, int on)
554 {
555 	struct spu_context *ctx = file->private_data;
556 
557 	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
558 }
559 
560 /* interrupt-level ibox callback function. */
561 void spufs_ibox_callback(struct spu *spu)
562 {
563 	struct spu_context *ctx = spu->ctx;
564 
565 	wake_up_all(&ctx->ibox_wq);
566 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
567 }
568 
569 /*
570  * Read as many bytes from the interrupt mailbox as possible, until
571  * one of the conditions becomes true:
572  *
573  * - no more data available in the mailbox
574  * - end of the user provided buffer
575  * - end of the mapped area
576  *
577  * If the file is opened without O_NONBLOCK, we wait here until
578  * any data is available, but return when we have been able to
579  * read something.
580  */
581 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
582 			size_t len, loff_t *pos)
583 {
584 	struct spu_context *ctx = file->private_data;
585 	u32 ibox_data, __user *udata;
586 	ssize_t count;
587 
588 	if (len < 4)
589 		return -EINVAL;
590 
591 	if (!access_ok(VERIFY_WRITE, buf, len))
592 		return -EFAULT;
593 
594 	udata = (void __user *)buf;
595 
596 	spu_acquire(ctx);
597 
598 	/* wait only for the first element */
599 	count = 0;
600 	if (file->f_flags & O_NONBLOCK) {
601 		if (!spu_ibox_read(ctx, &ibox_data))
602 			count = -EAGAIN;
603 	} else {
604 		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
605 	}
606 	if (count)
607 		goto out;
608 
609 	/* if we can't write at all, return -EFAULT */
610 	count = __put_user(ibox_data, udata);
611 	if (count)
612 		goto out;
613 
614 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
615 		int ret;
616 		ret = ctx->ops->ibox_read(ctx, &ibox_data);
617 		if (ret == 0)
618 			break;
619 		/*
620 		 * at the end of the mapped area, we can fault
621 		 * but still need to return the data we have
622 		 * read successfully so far.
623 		 */
624 		ret = __put_user(ibox_data, udata);
625 		if (ret)
626 			break;
627 	}
628 
629 out:
630 	spu_release(ctx);
631 
632 	return count;
633 }
634 
635 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
636 {
637 	struct spu_context *ctx = file->private_data;
638 	unsigned int mask;
639 
640 	poll_wait(file, &ctx->ibox_wq, wait);
641 
642 	spu_acquire(ctx);
643 	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
644 	spu_release(ctx);
645 
646 	return mask;
647 }
648 
649 static const struct file_operations spufs_ibox_fops = {
650 	.open	= spufs_pipe_open,
651 	.read	= spufs_ibox_read,
652 	.poll	= spufs_ibox_poll,
653 	.fasync	= spufs_ibox_fasync,
654 };
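/*
 * Illustrative userspace sketch, not part of this file (ibox_fd is an
 * assumed descriptor for the "ibox" file): without O_NONBLOCK the read
 * blocks until the SPU has posted at least one interrupt-mailbox
 * entry; with O_NONBLOCK it returns EAGAIN instead, and poll() can be
 * used to wait for POLLIN, matching the code above.
 *
 *	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
 *	uint32_t data;
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		read(ibox_fd, &data, sizeof(data));
 */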
655 
656 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
657 			size_t len, loff_t *pos)
658 {
659 	struct spu_context *ctx = file->private_data;
660 	u32 ibox_stat;
661 
662 	if (len < 4)
663 		return -EINVAL;
664 
665 	spu_acquire(ctx);
666 	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
667 	spu_release(ctx);
668 
669 	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
670 		return -EFAULT;
671 
672 	return 4;
673 }
674 
675 static const struct file_operations spufs_ibox_stat_fops = {
676 	.open	= spufs_pipe_open,
677 	.read	= spufs_ibox_stat_read,
678 };
679 
680 /* low-level mailbox write */
681 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
682 {
683 	return ctx->ops->wbox_write(ctx, data);
684 }
685 
686 static int spufs_wbox_fasync(int fd, struct file *file, int on)
687 {
688 	struct spu_context *ctx = file->private_data;
689 	int ret;
690 
691 	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
692 
693 	return ret;
694 }
695 
696 /* interrupt-level wbox callback function. */
697 void spufs_wbox_callback(struct spu *spu)
698 {
699 	struct spu_context *ctx = spu->ctx;
700 
701 	wake_up_all(&ctx->wbox_wq);
702 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
703 }
704 
705 /*
706  * Write as many bytes to the SPU inbound mailbox as possible, until
707  * one of the conditions becomes true:
708  *
709  * - the mailbox is full
710  * - end of the user provided buffer
711  * - end of the mapped area
712  *
713  * If the file is opened without O_NONBLOCK, we wait here until
714  * space is available, but return when we have been able to
715  * write something.
716  */
717 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
718 			size_t len, loff_t *pos)
719 {
720 	struct spu_context *ctx = file->private_data;
721 	u32 wbox_data, __user *udata;
722 	ssize_t count;
723 
724 	if (len < 4)
725 		return -EINVAL;
726 
727 	udata = (void __user *)buf;
728 	if (!access_ok(VERIFY_READ, buf, len))
729 		return -EFAULT;
730 
731 	if (__get_user(wbox_data, udata))
732 		return -EFAULT;
733 
734 	spu_acquire(ctx);
735 
736 	/*
737 	 * make sure we can at least write one element, by waiting
738 	 * in case of !O_NONBLOCK
739 	 */
740 	count = 0;
741 	if (file->f_flags & O_NONBLOCK) {
742 		if (!spu_wbox_write(ctx, wbox_data))
743 			count = -EAGAIN;
744 	} else {
745 		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
746 	}
747 
748 	if (count)
749 		goto out;
750 
751 	/* write as much as possible */
752 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
753 		int ret;
754 		ret = __get_user(wbox_data, udata);
755 		if (ret)
756 			break;
757 
758 		ret = spu_wbox_write(ctx, wbox_data);
759 		if (ret == 0)
760 			break;
761 	}
762 
763 out:
764 	spu_release(ctx);
765 	return count;
766 }
767 
768 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
769 {
770 	struct spu_context *ctx = file->private_data;
771 	unsigned int mask;
772 
773 	poll_wait(file, &ctx->wbox_wq, wait);
774 
775 	spu_acquire(ctx);
776 	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
777 	spu_release(ctx);
778 
779 	return mask;
780 }
781 
782 static const struct file_operations spufs_wbox_fops = {
783 	.open	= spufs_pipe_open,
784 	.write	= spufs_wbox_write,
785 	.poll	= spufs_wbox_poll,
786 	.fasync	= spufs_wbox_fasync,
787 };
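/*
 * Illustrative userspace sketch, not part of this file (wbox_fd is an
 * assumed descriptor for the "wbox" file): data is consumed in 32-bit
 * units and a write must be at least four bytes; with O_NONBLOCK a
 * full PPE-to-SPU mailbox yields EAGAIN, otherwise the write blocks
 * until at least one entry fits, matching spufs_wbox_write() above.
 *
 *	uint32_t cmd = 0x12345678;
 *	struct pollfd pfd = { .fd = wbox_fd, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLOUT))
 *		write(wbox_fd, &cmd, sizeof(cmd));
 */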
788 
789 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
790 			size_t len, loff_t *pos)
791 {
792 	struct spu_context *ctx = file->private_data;
793 	u32 wbox_stat;
794 
795 	if (len < 4)
796 		return -EINVAL;
797 
798 	spu_acquire(ctx);
799 	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
800 	spu_release(ctx);
801 
802 	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
803 		return -EFAULT;
804 
805 	return 4;
806 }
807 
808 static const struct file_operations spufs_wbox_stat_fops = {
809 	.open	= spufs_pipe_open,
810 	.read	= spufs_wbox_stat_read,
811 };
812 
813 static int spufs_signal1_open(struct inode *inode, struct file *file)
814 {
815 	struct spufs_inode_info *i = SPUFS_I(inode);
816 	struct spu_context *ctx = i->i_ctx;
817 
818 	mutex_lock(&ctx->mapping_lock);
819 	file->private_data = ctx;
820 	if (!i->i_openers++)
821 		ctx->signal1 = inode->i_mapping;
822 	mutex_unlock(&ctx->mapping_lock);
823 	return nonseekable_open(inode, file);
824 }
825 
826 static int
827 spufs_signal1_release(struct inode *inode, struct file *file)
828 {
829 	struct spufs_inode_info *i = SPUFS_I(inode);
830 	struct spu_context *ctx = i->i_ctx;
831 
832 	mutex_lock(&ctx->mapping_lock);
833 	if (!--i->i_openers)
834 		ctx->signal1 = NULL;
835 	mutex_unlock(&ctx->mapping_lock);
836 	return 0;
837 }
838 
839 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
840 			size_t len, loff_t *pos)
841 {
842 	int ret = 0;
843 	u32 data;
844 
845 	if (len < 4)
846 		return -EINVAL;
847 
848 	if (ctx->csa.spu_chnlcnt_RW[3]) {
849 		data = ctx->csa.spu_chnldata_RW[3];
850 		ret = 4;
851 	}
852 
853 	if (!ret)
854 		goto out;
855 
856 	if (copy_to_user(buf, &data, 4))
857 		return -EFAULT;
858 
859 out:
860 	return ret;
861 }
862 
863 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
864 			size_t len, loff_t *pos)
865 {
866 	int ret;
867 	struct spu_context *ctx = file->private_data;
868 
869 	spu_acquire_saved(ctx);
870 	ret = __spufs_signal1_read(ctx, buf, len, pos);
871 	spu_release_saved(ctx);
872 
873 	return ret;
874 }
875 
876 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
877 			size_t len, loff_t *pos)
878 {
879 	struct spu_context *ctx;
880 	u32 data;
881 
882 	ctx = file->private_data;
883 
884 	if (len < 4)
885 		return -EINVAL;
886 
887 	if (copy_from_user(&data, buf, 4))
888 		return -EFAULT;
889 
890 	spu_acquire(ctx);
891 	ctx->ops->signal1_write(ctx, data);
892 	spu_release(ctx);
893 
894 	return 4;
895 }
896 
897 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
898 					      unsigned long address)
899 {
900 #if PAGE_SIZE == 0x1000
901 	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
902 #elif PAGE_SIZE == 0x10000
903 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
904 	 * signal 1 and 2 area
905 	 */
906 	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
907 #else
908 #error unsupported page size
909 #endif
910 }
911 
912 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
913 	.nopfn = spufs_signal1_mmap_nopfn,
914 };
915 
916 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
917 {
918 	if (!(vma->vm_flags & VM_SHARED))
919 		return -EINVAL;
920 
921 	vma->vm_flags |= VM_IO | VM_PFNMAP;
922 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
923 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
924 
925 	vma->vm_ops = &spufs_signal1_mmap_vmops;
926 	return 0;
927 }
928 
929 static const struct file_operations spufs_signal1_fops = {
930 	.open = spufs_signal1_open,
931 	.release = spufs_signal1_release,
932 	.read = spufs_signal1_read,
933 	.write = spufs_signal1_write,
934 	.mmap = spufs_signal1_mmap,
935 };
936 
937 static const struct file_operations spufs_signal1_nosched_fops = {
938 	.open = spufs_signal1_open,
939 	.release = spufs_signal1_release,
940 	.write = spufs_signal1_write,
941 	.mmap = spufs_signal1_mmap,
942 };
943 
944 static int spufs_signal2_open(struct inode *inode, struct file *file)
945 {
946 	struct spufs_inode_info *i = SPUFS_I(inode);
947 	struct spu_context *ctx = i->i_ctx;
948 
949 	mutex_lock(&ctx->mapping_lock);
950 	file->private_data = ctx;
951 	if (!i->i_openers++)
952 		ctx->signal2 = inode->i_mapping;
953 	mutex_unlock(&ctx->mapping_lock);
954 	return nonseekable_open(inode, file);
955 }
956 
957 static int
958 spufs_signal2_release(struct inode *inode, struct file *file)
959 {
960 	struct spufs_inode_info *i = SPUFS_I(inode);
961 	struct spu_context *ctx = i->i_ctx;
962 
963 	mutex_lock(&ctx->mapping_lock);
964 	if (!--i->i_openers)
965 		ctx->signal2 = NULL;
966 	mutex_unlock(&ctx->mapping_lock);
967 	return 0;
968 }
969 
970 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
971 			size_t len, loff_t *pos)
972 {
973 	int ret = 0;
974 	u32 data;
975 
976 	if (len < 4)
977 		return -EINVAL;
978 
979 	if (ctx->csa.spu_chnlcnt_RW[4]) {
980 		data =  ctx->csa.spu_chnldata_RW[4];
981 		ret = 4;
982 	}
983 
984 	if (!ret)
985 		goto out;
986 
987 	if (copy_to_user(buf, &data, 4))
988 		return -EFAULT;
989 
990 out:
991 	return ret;
992 }
993 
994 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
995 			size_t len, loff_t *pos)
996 {
997 	struct spu_context *ctx = file->private_data;
998 	int ret;
999 
1000 	spu_acquire_saved(ctx);
1001 	ret = __spufs_signal2_read(ctx, buf, len, pos);
1002 	spu_release_saved(ctx);
1003 
1004 	return ret;
1005 }
1006 
1007 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1008 			size_t len, loff_t *pos)
1009 {
1010 	struct spu_context *ctx;
1011 	u32 data;
1012 
1013 	ctx = file->private_data;
1014 
1015 	if (len < 4)
1016 		return -EINVAL;
1017 
1018 	if (copy_from_user(&data, buf, 4))
1019 		return -EFAULT;
1020 
1021 	spu_acquire(ctx);
1022 	ctx->ops->signal2_write(ctx, data);
1023 	spu_release(ctx);
1024 
1025 	return 4;
1026 }
1027 
1028 #if SPUFS_MMAP_4K
1029 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
1030 					      unsigned long address)
1031 {
1032 #if PAGE_SIZE == 0x1000
1033 	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
1034 #elif PAGE_SIZE == 0x10000
1035 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1036 	 * signal 1 and 2 area
1037 	 */
1038 	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
1039 #else
1040 #error unsupported page size
1041 #endif
1042 }
1043 
1044 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1045 	.nopfn = spufs_signal2_mmap_nopfn,
1046 };
1047 
1048 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1049 {
1050 	if (!(vma->vm_flags & VM_SHARED))
1051 		return -EINVAL;
1052 
1053 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1054 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1055 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1056 
1057 	vma->vm_ops = &spufs_signal2_mmap_vmops;
1058 	return 0;
1059 }
1060 #else /* SPUFS_MMAP_4K */
1061 #define spufs_signal2_mmap NULL
1062 #endif /* !SPUFS_MMAP_4K */
1063 
1064 static const struct file_operations spufs_signal2_fops = {
1065 	.open = spufs_signal2_open,
1066 	.release = spufs_signal2_release,
1067 	.read = spufs_signal2_read,
1068 	.write = spufs_signal2_write,
1069 	.mmap = spufs_signal2_mmap,
1070 };
1071 
1072 static const struct file_operations spufs_signal2_nosched_fops = {
1073 	.open = spufs_signal2_open,
1074 	.release = spufs_signal2_release,
1075 	.write = spufs_signal2_write,
1076 	.mmap = spufs_signal2_mmap,
1077 };
1078 
1079 /*
1080  * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1081  * work of acquiring (or not) the SPU context before calling through
1082  * to the actual get routine. The set routine is called directly.
1083  */
1084 #define SPU_ATTR_NOACQUIRE	0
1085 #define SPU_ATTR_ACQUIRE	1
1086 #define SPU_ATTR_ACQUIRE_SAVED	2
1087 
1088 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
1089 static u64 __##__get(void *data)					\
1090 {									\
1091 	struct spu_context *ctx = data;					\
1092 	u64 ret;							\
1093 									\
1094 	if (__acquire == SPU_ATTR_ACQUIRE) {				\
1095 		spu_acquire(ctx);					\
1096 		ret = __get(ctx);					\
1097 		spu_release(ctx);					\
1098 	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
1099 		spu_acquire_saved(ctx);					\
1100 		ret = __get(ctx);					\
1101 		spu_release_saved(ctx);					\
1102 	} else								\
1103 		ret = __get(ctx);					\
1104 									\
1105 	return ret;							\
1106 }									\
1107 DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
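/*
 * For illustration only (spufs_foo_* is a hypothetical attribute, not
 * defined in this file):
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_foo_ops, spufs_foo_get, spufs_foo_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * generates a __spufs_foo_get() wrapper that brackets spufs_foo_get()
 * with spu_acquire()/spu_release() and then passes that wrapper to
 * DEFINE_SIMPLE_ATTRIBUTE() to build the spufs_foo_ops file operations.
 */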
1108 
1109 static void spufs_signal1_type_set(void *data, u64 val)
1110 {
1111 	struct spu_context *ctx = data;
1112 
1113 	spu_acquire(ctx);
1114 	ctx->ops->signal1_type_set(ctx, val);
1115 	spu_release(ctx);
1116 }
1117 
1118 static u64 spufs_signal1_type_get(struct spu_context *ctx)
1119 {
1120 	return ctx->ops->signal1_type_get(ctx);
1121 }
1122 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1123 		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
1124 
1125 
1126 static void spufs_signal2_type_set(void *data, u64 val)
1127 {
1128 	struct spu_context *ctx = data;
1129 
1130 	spu_acquire(ctx);
1131 	ctx->ops->signal2_type_set(ctx, val);
1132 	spu_release(ctx);
1133 }
1134 
1135 static u64 spufs_signal2_type_get(struct spu_context *ctx)
1136 {
1137 	return ctx->ops->signal2_type_get(ctx);
1138 }
1139 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1140 		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
1141 
1142 #if SPUFS_MMAP_4K
1143 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
1144 					  unsigned long address)
1145 {
1146 	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
1147 }
1148 
1149 static struct vm_operations_struct spufs_mss_mmap_vmops = {
1150 	.nopfn = spufs_mss_mmap_nopfn,
1151 };
1152 
1153 /*
1154  * mmap support for problem state MSS (multisource sync) area [0x0000 - 0x0fff].
1155  */
1156 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1157 {
1158 	if (!(vma->vm_flags & VM_SHARED))
1159 		return -EINVAL;
1160 
1161 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1162 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1163 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1164 
1165 	vma->vm_ops = &spufs_mss_mmap_vmops;
1166 	return 0;
1167 }
1168 #else /* SPUFS_MMAP_4K */
1169 #define spufs_mss_mmap NULL
1170 #endif /* !SPUFS_MMAP_4K */
1171 
1172 static int spufs_mss_open(struct inode *inode, struct file *file)
1173 {
1174 	struct spufs_inode_info *i = SPUFS_I(inode);
1175 	struct spu_context *ctx = i->i_ctx;
1176 
1177 	file->private_data = i->i_ctx;
1178 
1179 	mutex_lock(&ctx->mapping_lock);
1180 	if (!i->i_openers++)
1181 		ctx->mss = inode->i_mapping;
1182 	mutex_unlock(&ctx->mapping_lock);
1183 	return nonseekable_open(inode, file);
1184 }
1185 
1186 static int
1187 spufs_mss_release(struct inode *inode, struct file *file)
1188 {
1189 	struct spufs_inode_info *i = SPUFS_I(inode);
1190 	struct spu_context *ctx = i->i_ctx;
1191 
1192 	mutex_lock(&ctx->mapping_lock);
1193 	if (!--i->i_openers)
1194 		ctx->mss = NULL;
1195 	mutex_unlock(&ctx->mapping_lock);
1196 	return 0;
1197 }
1198 
1199 static const struct file_operations spufs_mss_fops = {
1200 	.open	 = spufs_mss_open,
1201 	.release = spufs_mss_release,
1202 	.mmap	 = spufs_mss_mmap,
1203 };
1204 
1205 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1206 					    unsigned long address)
1207 {
1208 	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
1209 }
1210 
1211 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1212 	.nopfn = spufs_psmap_mmap_nopfn,
1213 };
1214 
1215 /*
1216  * mmap support for full problem state area [0x00000 - 0x1ffff].
1217  */
1218 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1219 {
1220 	if (!(vma->vm_flags & VM_SHARED))
1221 		return -EINVAL;
1222 
1223 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1224 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1225 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1226 
1227 	vma->vm_ops = &spufs_psmap_mmap_vmops;
1228 	return 0;
1229 }
1230 
1231 static int spufs_psmap_open(struct inode *inode, struct file *file)
1232 {
1233 	struct spufs_inode_info *i = SPUFS_I(inode);
1234 	struct spu_context *ctx = i->i_ctx;
1235 
1236 	mutex_lock(&ctx->mapping_lock);
1237 	file->private_data = i->i_ctx;
1238 	if (!i->i_openers++)
1239 		ctx->psmap = inode->i_mapping;
1240 	mutex_unlock(&ctx->mapping_lock);
1241 	return nonseekable_open(inode, file);
1242 }
1243 
1244 static int
1245 spufs_psmap_release(struct inode *inode, struct file *file)
1246 {
1247 	struct spufs_inode_info *i = SPUFS_I(inode);
1248 	struct spu_context *ctx = i->i_ctx;
1249 
1250 	mutex_lock(&ctx->mapping_lock);
1251 	if (!--i->i_openers)
1252 		ctx->psmap = NULL;
1253 	mutex_unlock(&ctx->mapping_lock);
1254 	return 0;
1255 }
1256 
1257 static const struct file_operations spufs_psmap_fops = {
1258 	.open	 = spufs_psmap_open,
1259 	.release = spufs_psmap_release,
1260 	.mmap	 = spufs_psmap_mmap,
1261 };
1262 
1263 
1264 #if SPUFS_MMAP_4K
1265 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1266 					  unsigned long address)
1267 {
1268 	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1269 }
1270 
1271 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1272 	.nopfn = spufs_mfc_mmap_nopfn,
1273 };
1274 
1275 /*
1276  * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
1277  */
1278 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1279 {
1280 	if (!(vma->vm_flags & VM_SHARED))
1281 		return -EINVAL;
1282 
1283 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1284 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1285 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1286 
1287 	vma->vm_ops = &spufs_mfc_mmap_vmops;
1288 	return 0;
1289 }
1290 #else /* SPUFS_MMAP_4K */
1291 #define spufs_mfc_mmap NULL
1292 #endif /* !SPUFS_MMAP_4K */
1293 
1294 static int spufs_mfc_open(struct inode *inode, struct file *file)
1295 {
1296 	struct spufs_inode_info *i = SPUFS_I(inode);
1297 	struct spu_context *ctx = i->i_ctx;
1298 
1299 	/* we don't want to deal with DMA into other processes */
1300 	if (ctx->owner != current->mm)
1301 		return -EINVAL;
1302 
1303 	if (atomic_read(&inode->i_count) != 1)
1304 		return -EBUSY;
1305 
1306 	mutex_lock(&ctx->mapping_lock);
1307 	file->private_data = ctx;
1308 	if (!i->i_openers++)
1309 		ctx->mfc = inode->i_mapping;
1310 	mutex_unlock(&ctx->mapping_lock);
1311 	return nonseekable_open(inode, file);
1312 }
1313 
1314 static int
1315 spufs_mfc_release(struct inode *inode, struct file *file)
1316 {
1317 	struct spufs_inode_info *i = SPUFS_I(inode);
1318 	struct spu_context *ctx = i->i_ctx;
1319 
1320 	mutex_lock(&ctx->mapping_lock);
1321 	if (!--i->i_openers)
1322 		ctx->mfc = NULL;
1323 	mutex_unlock(&ctx->mapping_lock);
1324 	return 0;
1325 }
1326 
1327 /* interrupt-level mfc callback function. */
1328 void spufs_mfc_callback(struct spu *spu)
1329 {
1330 	struct spu_context *ctx = spu->ctx;
1331 
1332 	wake_up_all(&ctx->mfc_wq);
1333 
1334 	pr_debug("%s %s\n", __FUNCTION__, spu->name);
1335 	if (ctx->mfc_fasync) {
1336 		u32 free_elements, tagstatus;
1337 		unsigned int mask;
1338 
1339 		/* no need for spu_acquire in interrupt context */
1340 		free_elements = ctx->ops->get_mfc_free_elements(ctx);
1341 		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1342 
1343 		mask = 0;
1344 		if (free_elements & 0xffff)
1345 			mask |= POLLOUT;
1346 		if (tagstatus & ctx->tagwait)
1347 			mask |= POLLIN;
1348 
1349 		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1350 	}
1351 }
1352 
1353 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1354 {
1355 	/* See if any tag group we are waiting for has completed */
1356 	/* FIXME we need locking around tagwait */
1357 	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1358 	ctx->tagwait &= ~*status;
1359 	if (*status)
1360 		return 1;
1361 
1362 	/* enable interrupt waiting for any tag group,
1363 	   may silently fail if interrupts are already enabled */
1364 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1365 	return 0;
1366 }
1367 
1368 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1369 			size_t size, loff_t *pos)
1370 {
1371 	struct spu_context *ctx = file->private_data;
1372 	int ret = -EINVAL;
1373 	u32 status;
1374 
1375 	if (size != 4)
1376 		goto out;
1377 
1378 	spu_acquire(ctx);
1379 	if (file->f_flags & O_NONBLOCK) {
1380 		status = ctx->ops->read_mfc_tagstatus(ctx);
1381 		if (!(status & ctx->tagwait))
1382 			ret = -EAGAIN;
1383 		else
1384 			ctx->tagwait &= ~status;
1385 	} else {
1386 		ret = spufs_wait(ctx->mfc_wq,
1387 			   spufs_read_mfc_tagstatus(ctx, &status));
1388 	}
1389 	spu_release(ctx);
1390 
1391 	if (ret)
1392 		goto out;
1393 
1394 	ret = 4;
1395 	if (copy_to_user(buffer, &status, 4))
1396 		ret = -EFAULT;
1397 
1398 out:
1399 	return ret;
1400 }
1401 
1402 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1403 {
1404 	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1405 		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1406 
1407 	switch (cmd->cmd) {
1408 	case MFC_PUT_CMD:
1409 	case MFC_PUTF_CMD:
1410 	case MFC_PUTB_CMD:
1411 	case MFC_GET_CMD:
1412 	case MFC_GETF_CMD:
1413 	case MFC_GETB_CMD:
1414 		break;
1415 	default:
1416 		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1417 		return -EIO;
1418 	}
1419 
1420 	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1421 		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1422 				cmd->ea, cmd->lsa);
1423 		return -EIO;
1424 	}
1425 
1426 	switch (cmd->size & 0xf) {
1427 	case 1:
1428 		break;
1429 	case 2:
1430 		if (cmd->lsa & 1)
1431 			goto error;
1432 		break;
1433 	case 4:
1434 		if (cmd->lsa & 3)
1435 			goto error;
1436 		break;
1437 	case 8:
1438 		if (cmd->lsa & 7)
1439 			goto error;
1440 		break;
1441 	case 0:
1442 		if (cmd->lsa & 15)
1443 			goto error;
1444 		break;
1445 	error:
1446 	default:
1447 		pr_debug("invalid DMA alignment %x for size %x\n",
1448 			cmd->lsa & 0xf, cmd->size);
1449 		return -EIO;
1450 	}
1451 
1452 	if (cmd->size > 16 * 1024) {
1453 		pr_debug("invalid DMA size %x\n", cmd->size);
1454 		return -EIO;
1455 	}
1456 
1457 	if (cmd->tag & 0xfff0) {
1458 		/* we reserve the higher tag numbers for kernel use */
1459 		pr_debug("invalid DMA tag\n");
1460 		return -EIO;
1461 	}
1462 
1463 	if (cmd->class) {
1464 		/* not supported in this version */
1465 		pr_debug("invalid DMA class\n");
1466 		return -EIO;
1467 	}
1468 
1469 	return 0;
1470 }
1471 
1472 static int spu_send_mfc_command(struct spu_context *ctx,
1473 				struct mfc_dma_command cmd,
1474 				int *error)
1475 {
1476 	*error = ctx->ops->send_mfc_command(ctx, &cmd);
1477 	if (*error == -EAGAIN) {
1478 		/* wait for any tag group to complete
1479 		   so we have space for the new command */
1480 		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1481 		/* try again, because the queue might
1482 		   have room again */
1483 		*error = ctx->ops->send_mfc_command(ctx, &cmd);
1484 		if (*error == -EAGAIN)
1485 			return 0;
1486 	}
1487 	return 1;
1488 }
1489 
1490 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1491 			size_t size, loff_t *pos)
1492 {
1493 	struct spu_context *ctx = file->private_data;
1494 	struct mfc_dma_command cmd;
1495 	int ret = -EINVAL;
1496 
1497 	if (size != sizeof cmd)
1498 		goto out;
1499 
1500 	ret = -EFAULT;
1501 	if (copy_from_user(&cmd, buffer, sizeof cmd))
1502 		goto out;
1503 
1504 	ret = spufs_check_valid_dma(&cmd);
1505 	if (ret)
1506 		goto out;
1507 
1508 	ret = spu_acquire_runnable(ctx, 0);
1509 	if (ret)
1510 		goto out;
1511 
1512 	if (file->f_flags & O_NONBLOCK) {
1513 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
1514 	} else {
1515 		int status;
1516 		ret = spufs_wait(ctx->mfc_wq,
1517 				 spu_send_mfc_command(ctx, cmd, &status));
1518 		if (status)
1519 			ret = status;
1520 	}
1521 
1522 	if (ret)
1523 		goto out_unlock;
1524 
1525 	ctx->tagwait |= 1 << cmd.tag;
1526 	ret = size;
1527 
1528 out_unlock:
1529 	spu_release(ctx);
1530 out:
1531 	return ret;
1532 }
1533 
1534 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1535 {
1536 	struct spu_context *ctx = file->private_data;
1537 	u32 free_elements, tagstatus;
1538 	unsigned int mask;
1539 
1540 	poll_wait(file, &ctx->mfc_wq, wait);
1541 
1542 	spu_acquire(ctx);
1543 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1544 	free_elements = ctx->ops->get_mfc_free_elements(ctx);
1545 	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1546 	spu_release(ctx);
1547 
1548 	mask = 0;
1549 	if (free_elements & 0xffff)
1550 		mask |= POLLOUT | POLLWRNORM;
1551 	if (tagstatus & ctx->tagwait)
1552 		mask |= POLLIN | POLLRDNORM;
1553 
1554 	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1555 		free_elements, tagstatus, ctx->tagwait);
1556 
1557 	return mask;
1558 }
1559 
1560 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1561 {
1562 	struct spu_context *ctx = file->private_data;
1563 	int ret;
1564 
1565 	spu_acquire(ctx);
1566 #if 0
1567 /* this currently hangs */
1568 	ret = spufs_wait(ctx->mfc_wq,
1569 			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1570 	if (ret)
1571 		goto out;
1572 	ret = spufs_wait(ctx->mfc_wq,
1573 			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1574 out:
1575 #else
1576 	ret = 0;
1577 #endif
1578 	spu_release(ctx);
1579 
1580 	return ret;
1581 }
1582 
1583 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1584 			   int datasync)
1585 {
1586 	return spufs_mfc_flush(file, NULL);
1587 }
1588 
1589 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1590 {
1591 	struct spu_context *ctx = file->private_data;
1592 
1593 	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1594 }
1595 
1596 static const struct file_operations spufs_mfc_fops = {
1597 	.open	 = spufs_mfc_open,
1598 	.release = spufs_mfc_release,
1599 	.read	 = spufs_mfc_read,
1600 	.write	 = spufs_mfc_write,
1601 	.poll	 = spufs_mfc_poll,
1602 	.flush	 = spufs_mfc_flush,
1603 	.fsync	 = spufs_mfc_fsync,
1604 	.fasync	 = spufs_mfc_fasync,
1605 	.mmap	 = spufs_mfc_mmap,
1606 };
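/*
 * Illustrative userspace sketch, not part of this file (mfc_fd and
 * buf_ea are assumptions; the struct mfc_dma_command layout comes from
 * the SPU headers): a DMA is queued by writing one complete command
 * and its completion is collected by reading back the 32-bit tag
 * status, matching spufs_mfc_write()/spufs_mfc_read() above.  The
 * values below obey the checks in spufs_check_valid_dma(): lsa and
 * buf_ea share their low four address bits (here both are 16-byte
 * aligned), the transfer is at most 16 KiB, the tag is below 16 and
 * the class is 0.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,
 *		.ea   = buf_ea,
 *		.size = 16384,
 *		.tag  = 1,
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	uint32_t tagstatus;
 *
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &tagstatus, sizeof(tagstatus));
 */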
1607 
1608 static void spufs_npc_set(void *data, u64 val)
1609 {
1610 	struct spu_context *ctx = data;
1611 	spu_acquire(ctx);
1612 	ctx->ops->npc_write(ctx, val);
1613 	spu_release(ctx);
1614 }
1615 
1616 static u64 spufs_npc_get(struct spu_context *ctx)
1617 {
1618 	return ctx->ops->npc_read(ctx);
1619 }
1620 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1621 		       "0x%llx\n", SPU_ATTR_ACQUIRE);
1622 
1623 static void spufs_decr_set(void *data, u64 val)
1624 {
1625 	struct spu_context *ctx = data;
1626 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1627 	spu_acquire_saved(ctx);
1628 	lscsa->decr.slot[0] = (u32) val;
1629 	spu_release_saved(ctx);
1630 }
1631 
1632 static u64 spufs_decr_get(struct spu_context *ctx)
1633 {
1634 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1635 	return lscsa->decr.slot[0];
1636 }
1637 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1638 		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1639 
1640 static void spufs_decr_status_set(void *data, u64 val)
1641 {
1642 	struct spu_context *ctx = data;
1643 	spu_acquire_saved(ctx);
1644 	if (val)
1645 		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1646 	else
1647 		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1648 	spu_release_saved(ctx);
1649 }
1650 
1651 static u64 spufs_decr_status_get(struct spu_context *ctx)
1652 {
1653 	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1654 		return SPU_DECR_STATUS_RUNNING;
1655 	else
1656 		return 0;
1657 }
1658 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1659 		       spufs_decr_status_set, "0x%llx\n",
1660 		       SPU_ATTR_ACQUIRE_SAVED);
1661 
1662 static void spufs_event_mask_set(void *data, u64 val)
1663 {
1664 	struct spu_context *ctx = data;
1665 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1666 	spu_acquire_saved(ctx);
1667 	lscsa->event_mask.slot[0] = (u32) val;
1668 	spu_release_saved(ctx);
1669 }
1670 
1671 static u64 spufs_event_mask_get(struct spu_context *ctx)
1672 {
1673 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1674 	return lscsa->event_mask.slot[0];
1675 }
1676 
1677 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1678 		       spufs_event_mask_set, "0x%llx\n",
1679 		       SPU_ATTR_ACQUIRE_SAVED);
1680 
1681 static u64 spufs_event_status_get(struct spu_context *ctx)
1682 {
1683 	struct spu_state *state = &ctx->csa;
1684 	u64 stat;
1685 	stat = state->spu_chnlcnt_RW[0];
1686 	if (stat)
1687 		return state->spu_chnldata_RW[0];
1688 	return 0;
1689 }
1690 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1691 		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1692 
1693 static void spufs_srr0_set(void *data, u64 val)
1694 {
1695 	struct spu_context *ctx = data;
1696 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1697 	spu_acquire_saved(ctx);
1698 	lscsa->srr0.slot[0] = (u32) val;
1699 	spu_release_saved(ctx);
1700 }
1701 
1702 static u64 spufs_srr0_get(struct spu_context *ctx)
1703 {
1704 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1705 	return lscsa->srr0.slot[0];
1706 }
1707 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1708 		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1709 
1710 static u64 spufs_id_get(struct spu_context *ctx)
1711 {
1712 	u64 num;
1713 
1714 	if (ctx->state == SPU_STATE_RUNNABLE)
1715 		num = ctx->spu->number;
1716 	else
1717 		num = (unsigned int)-1;
1718 
1719 	return num;
1720 }
1721 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1722 		       SPU_ATTR_ACQUIRE)
1723 
1724 static u64 spufs_object_id_get(struct spu_context *ctx)
1725 {
1726 	/* FIXME: Should there really be no locking here? */
1727 	return ctx->object_id;
1728 }
1729 
1730 static void spufs_object_id_set(void *data, u64 id)
1731 {
1732 	struct spu_context *ctx = data;
1733 	ctx->object_id = id;
1734 }
1735 
1736 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1737 		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1738 
1739 static u64 spufs_lslr_get(struct spu_context *ctx)
1740 {
1741 	return ctx->csa.priv2.spu_lslr_RW;
1742 }
1743 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1744 		       SPU_ATTR_ACQUIRE_SAVED);
1745 
1746 static int spufs_info_open(struct inode *inode, struct file *file)
1747 {
1748 	struct spufs_inode_info *i = SPUFS_I(inode);
1749 	struct spu_context *ctx = i->i_ctx;
1750 	file->private_data = ctx;
1751 	return 0;
1752 }
1753 
1754 static int spufs_caps_show(struct seq_file *s, void *private)
1755 {
1756 	struct spu_context *ctx = s->private;
1757 
1758 	if (!(ctx->flags & SPU_CREATE_NOSCHED))
1759 		seq_puts(s, "sched\n");
1760 	if (!(ctx->flags & SPU_CREATE_ISOLATE))
1761 		seq_puts(s, "step\n");
1762 	return 0;
1763 }
1764 
1765 static int spufs_caps_open(struct inode *inode, struct file *file)
1766 {
1767 	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
1768 }
1769 
1770 static const struct file_operations spufs_caps_fops = {
1771 	.open		= spufs_caps_open,
1772 	.read		= seq_read,
1773 	.llseek		= seq_lseek,
1774 	.release	= single_release,
1775 };
1776 
1777 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1778 			char __user *buf, size_t len, loff_t *pos)
1779 {
1780 	u32 mbox_stat;
1781 	u32 data;
1782 
1783 	mbox_stat = ctx->csa.prob.mb_stat_R;
1784 	if (mbox_stat & 0x0000ff) {
1785 		data = ctx->csa.prob.pu_mb_R;
1786 	}
1787 
1788 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1789 }
1790 
1791 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1792 				   size_t len, loff_t *pos)
1793 {
1794 	int ret;
1795 	struct spu_context *ctx = file->private_data;
1796 
1797 	if (!access_ok(VERIFY_WRITE, buf, len))
1798 		return -EFAULT;
1799 
1800 	spu_acquire_saved(ctx);
1801 	spin_lock(&ctx->csa.register_lock);
1802 	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1803 	spin_unlock(&ctx->csa.register_lock);
1804 	spu_release_saved(ctx);
1805 
1806 	return ret;
1807 }
1808 
1809 static const struct file_operations spufs_mbox_info_fops = {
1810 	.open = spufs_info_open,
1811 	.read = spufs_mbox_info_read,
1812 	.llseek  = generic_file_llseek,
1813 };
1814 
1815 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1816 				char __user *buf, size_t len, loff_t *pos)
1817 {
1818 	u32 ibox_stat;
1819 	u32 data;
1820 
1821 	ibox_stat = ctx->csa.prob.mb_stat_R;
1822 	if (ibox_stat & 0xff0000) {
1823 		data = ctx->csa.priv2.puint_mb_R;
1824 	}
1825 
1826 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1827 }
1828 
1829 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1830 				   size_t len, loff_t *pos)
1831 {
1832 	struct spu_context *ctx = file->private_data;
1833 	int ret;
1834 
1835 	if (!access_ok(VERIFY_WRITE, buf, len))
1836 		return -EFAULT;
1837 
1838 	spu_acquire_saved(ctx);
1839 	spin_lock(&ctx->csa.register_lock);
1840 	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1841 	spin_unlock(&ctx->csa.register_lock);
1842 	spu_release_saved(ctx);
1843 
1844 	return ret;
1845 }
1846 
1847 static const struct file_operations spufs_ibox_info_fops = {
1848 	.open = spufs_info_open,
1849 	.read = spufs_ibox_info_read,
1850 	.llseek  = generic_file_llseek,
1851 };
1852 
1853 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1854 			char __user *buf, size_t len, loff_t *pos)
1855 {
1856 	int i, cnt;
1857 	u32 data[4];
1858 	u32 wbox_stat;
1859 
1860 	wbox_stat = ctx->csa.prob.mb_stat_R;
1861 	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1862 	for (i = 0; i < cnt; i++) {
1863 		data[i] = ctx->csa.spu_mailbox_data[i];
1864 	}
1865 
1866 	return simple_read_from_buffer(buf, len, pos, &data,
1867 				cnt * sizeof(u32));
1868 }
1869 
1870 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1871 				   size_t len, loff_t *pos)
1872 {
1873 	struct spu_context *ctx = file->private_data;
1874 	int ret;
1875 
1876 	if (!access_ok(VERIFY_WRITE, buf, len))
1877 		return -EFAULT;
1878 
1879 	spu_acquire_saved(ctx);
1880 	spin_lock(&ctx->csa.register_lock);
1881 	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1882 	spin_unlock(&ctx->csa.register_lock);
1883 	spu_release_saved(ctx);
1884 
1885 	return ret;
1886 }
1887 
1888 static const struct file_operations spufs_wbox_info_fops = {
1889 	.open = spufs_info_open,
1890 	.read = spufs_wbox_info_read,
1891 	.llseek  = generic_file_llseek,
1892 };
1893 
1894 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1895 			char __user *buf, size_t len, loff_t *pos)
1896 {
1897 	struct spu_dma_info info;
1898 	struct mfc_cq_sr *qp, *spuqp;
1899 	int i;
1900 
1901 	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1902 	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1903 	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1904 	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1905 	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1906 	for (i = 0; i < 16; i++) {
1907 		qp = &info.dma_info_command_data[i];
1908 		spuqp = &ctx->csa.priv2.spuq[i];
1909 
1910 		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1911 		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1912 		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1913 		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1914 	}
1915 
1916 	return simple_read_from_buffer(buf, len, pos, &info,
1917 				sizeof info);
1918 }
1919 
1920 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1921 			      size_t len, loff_t *pos)
1922 {
1923 	struct spu_context *ctx = file->private_data;
1924 	int ret;
1925 
1926 	if (!access_ok(VERIFY_WRITE, buf, len))
1927 		return -EFAULT;
1928 
1929 	spu_acquire_saved(ctx);
1930 	spin_lock(&ctx->csa.register_lock);
1931 	ret = __spufs_dma_info_read(ctx, buf, len, pos);
1932 	spin_unlock(&ctx->csa.register_lock);
1933 	spu_release_saved(ctx);
1934 
1935 	return ret;
1936 }
1937 
1938 static const struct file_operations spufs_dma_info_fops = {
1939 	.open = spufs_info_open,
1940 	.read = spufs_dma_info_read,
1941 };
1942 
1943 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1944 			char __user *buf, size_t len, loff_t *pos)
1945 {
1946 	struct spu_proxydma_info info;
1947 	struct mfc_cq_sr *qp, *puqp;
1948 	int ret = sizeof info;
1949 	int i;
1950 
1951 	if (len < ret)
1952 		return -EINVAL;
1953 
1954 	if (!access_ok(VERIFY_WRITE, buf, len))
1955 		return -EFAULT;
1956 
1957 	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1958 	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1959 	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1960 	for (i = 0; i < 8; i++) {
1961 		qp = &info.proxydma_info_command_data[i];
1962 		puqp = &ctx->csa.priv2.puq[i];
1963 
1964 		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1965 		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1966 		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1967 		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1968 	}
1969 
1970 	return simple_read_from_buffer(buf, len, pos, &info,
1971 				sizeof info);
1972 }
1973 
1974 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1975 				   size_t len, loff_t *pos)
1976 {
1977 	struct spu_context *ctx = file->private_data;
1978 	int ret;
1979 
1980 	spu_acquire_saved(ctx);
1981 	spin_lock(&ctx->csa.register_lock);
1982 	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1983 	spin_unlock(&ctx->csa.register_lock);
1984 	spu_release_saved(ctx);
1985 
1986 	return ret;
1987 }
1988 
1989 static const struct file_operations spufs_proxydma_info_fops = {
1990 	.open = spufs_info_open,
1991 	.read = spufs_proxydma_info_read,
1992 };
1993 
1994 static int spufs_show_tid(struct seq_file *s, void *private)
1995 {
1996 	struct spu_context *ctx = s->private;
1997 
1998 	seq_printf(s, "%d\n", ctx->tid);
1999 	return 0;
2000 }
2001 
2002 static int spufs_tid_open(struct inode *inode, struct file *file)
2003 {
2004 	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2005 }
2006 
2007 static const struct file_operations spufs_tid_fops = {
2008 	.open		= spufs_tid_open,
2009 	.read		= seq_read,
2010 	.llseek		= seq_lseek,
2011 	.release	= single_release,
2012 };
2013 
2014 static const char *ctx_state_names[] = {
2015 	"user", "system", "iowait", "loaded"
2016 };
2017 
2018 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2019 		enum spu_utilization_state state)
2020 {
2021 	struct timespec ts;
2022 	unsigned long long time = ctx->stats.times[state];
2023 
2024 	/*
2025 	 * In general, utilization statistics are updated by the controlling
2026 	 * thread as the spu context moves through various well defined
2027 	 * state transitions, but if the context is lazily loaded its
2028 	 * utilization statistics are not updated as the controlling thread
2029 	 * is not tightly coupled with the execution of the spu context.  We
2030 	 * calculate and apply the time delta from the last recorded state
2031 	 * of the spu context.
2032 	 */
2033 	if (ctx->spu && ctx->stats.util_state == state) {
2034 		ktime_get_ts(&ts);
2035 		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2036 	}
2037 
2038 	return time / NSEC_PER_MSEC;
2039 }
2040 
2041 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2042 {
2043 	unsigned long long slb_flts = ctx->stats.slb_flt;
2044 
2045 	if (ctx->state == SPU_STATE_RUNNABLE) {
2046 		slb_flts += (ctx->spu->stats.slb_flt -
2047 			     ctx->stats.slb_flt_base);
2048 	}
2049 
2050 	return slb_flts;
2051 }
2052 
2053 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2054 {
2055 	unsigned long long class2_intrs = ctx->stats.class2_intr;
2056 
2057 	if (ctx->state == SPU_STATE_RUNNABLE) {
2058 		class2_intrs += (ctx->spu->stats.class2_intr -
2059 				 ctx->stats.class2_intr_base);
2060 	}
2061 
2062 	return class2_intrs;
2063 }
2064 
2065 
2066 static int spufs_show_stat(struct seq_file *s, void *private)
2067 {
2068 	struct spu_context *ctx = s->private;
2069 
2070 	spu_acquire(ctx);
2071 	seq_printf(s, "%s %llu %llu %llu %llu "
2072 		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2073 		ctx_state_names[ctx->stats.util_state],
2074 		spufs_acct_time(ctx, SPU_UTIL_USER),
2075 		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2076 		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2077 		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2078 		ctx->stats.vol_ctx_switch,
2079 		ctx->stats.invol_ctx_switch,
2080 		spufs_slb_flts(ctx),
2081 		ctx->stats.hash_flt,
2082 		ctx->stats.min_flt,
2083 		ctx->stats.maj_flt,
2084 		spufs_class2_intrs(ctx),
2085 		ctx->stats.libassist);
2086 	spu_release(ctx);
2087 	return 0;
2088 }
2089 
2090 static int spufs_stat_open(struct inode *inode, struct file *file)
2091 {
2092 	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2093 }
2094 
2095 static const struct file_operations spufs_stat_fops = {
2096 	.open		= spufs_stat_open,
2097 	.read		= seq_read,
2098 	.llseek		= seq_lseek,
2099 	.release	= single_release,
2100 };
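/*
 * For illustration, one read of the "stat" file yields a single line
 * in the order printed by spufs_show_stat() above, e.g. (made-up
 * numbers):
 *
 *	user 4321 102 37 0 15 2 8 0 123 4 1 0
 *
 * i.e. utilization state, the four accumulated times in milliseconds,
 * voluntary/involuntary context switches, SLB faults, hash faults,
 * minor and major faults, class 2 interrupts and the libassist count.
 */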
2101 
2102 
2103 struct tree_descr spufs_dir_contents[] = {
2104 	{ "capabilities", &spufs_caps_fops, 0444, },
2105 	{ "mem",  &spufs_mem_fops,  0666, },
2106 	{ "regs", &spufs_regs_fops,  0666, },
2107 	{ "mbox", &spufs_mbox_fops, 0444, },
2108 	{ "ibox", &spufs_ibox_fops, 0444, },
2109 	{ "wbox", &spufs_wbox_fops, 0222, },
2110 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2111 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2112 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2113 	{ "signal1", &spufs_signal1_fops, 0666, },
2114 	{ "signal2", &spufs_signal2_fops, 0666, },
2115 	{ "signal1_type", &spufs_signal1_type, 0666, },
2116 	{ "signal2_type", &spufs_signal2_type, 0666, },
2117 	{ "cntl", &spufs_cntl_fops,  0666, },
2118 	{ "fpcr", &spufs_fpcr_fops, 0666, },
2119 	{ "lslr", &spufs_lslr_ops, 0444, },
2120 	{ "mfc", &spufs_mfc_fops, 0666, },
2121 	{ "mss", &spufs_mss_fops, 0666, },
2122 	{ "npc", &spufs_npc_ops, 0666, },
2123 	{ "srr0", &spufs_srr0_ops, 0666, },
2124 	{ "decr", &spufs_decr_ops, 0666, },
2125 	{ "decr_status", &spufs_decr_status_ops, 0666, },
2126 	{ "event_mask", &spufs_event_mask_ops, 0666, },
2127 	{ "event_status", &spufs_event_status_ops, 0444, },
2128 	{ "psmap", &spufs_psmap_fops, 0666, },
2129 	{ "phys-id", &spufs_id_ops, 0666, },
2130 	{ "object-id", &spufs_object_id_ops, 0666, },
2131 	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
2132 	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
2133 	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
2134 	{ "dma_info", &spufs_dma_info_fops, 0444, },
2135 	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
2136 	{ "tid", &spufs_tid_fops, 0444, },
2137 	{ "stat", &spufs_stat_fops, 0444, },
2138 	{},
2139 };
2140 
2141 struct tree_descr spufs_dir_nosched_contents[] = {
2142 	{ "capabilities", &spufs_caps_fops, 0444, },
2143 	{ "mem",  &spufs_mem_fops,  0666, },
2144 	{ "mbox", &spufs_mbox_fops, 0444, },
2145 	{ "ibox", &spufs_ibox_fops, 0444, },
2146 	{ "wbox", &spufs_wbox_fops, 0222, },
2147 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2148 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2149 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2150 	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
2151 	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
2152 	{ "signal1_type", &spufs_signal1_type, 0666, },
2153 	{ "signal2_type", &spufs_signal2_type, 0666, },
2154 	{ "mss", &spufs_mss_fops, 0666, },
2155 	{ "mfc", &spufs_mfc_fops, 0666, },
2156 	{ "cntl", &spufs_cntl_fops,  0666, },
2157 	{ "npc", &spufs_npc_ops, 0666, },
2158 	{ "psmap", &spufs_psmap_fops, 0666, },
2159 	{ "phys-id", &spufs_id_ops, 0666, },
2160 	{ "object-id", &spufs_object_id_ops, 0666, },
2161 	{ "tid", &spufs_tid_fops, 0444, },
2162 	{ "stat", &spufs_stat_fops, 0444, },
2163 	{},
2164 };
2165 
2166 struct spufs_coredump_reader spufs_coredump_read[] = {
2167 	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2168 	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2169 	{ "lslr", NULL, spufs_lslr_get, 19 },
2170 	{ "decr", NULL, spufs_decr_get, 19 },
2171 	{ "decr_status", NULL, spufs_decr_status_get, 19 },
2172 	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
2173 	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2174 	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
2175 	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2176 	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
2177 	{ "event_mask", NULL, spufs_event_mask_get, 19 },
2178 	{ "event_status", NULL, spufs_event_status_get, 19 },
2179 	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
2180 	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
2181 	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
2182 	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
2183 	{ "proxydma_info", __spufs_proxydma_info_read,
2184 			   NULL, sizeof(struct spu_proxydma_info)},
2185 	{ "object-id", NULL, spufs_object_id_get, 19 },
2186 	{ "npc", NULL, spufs_npc_get, 19 },
2187 	{ NULL },
2188 };
2189