#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

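/*
 * Wakeup condition for spufs_wait() in spufs_run_spu(): returns nonzero
 * once the SPU has stopped or an exception is pending that the
 * controlling thread needs to handle.
 */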
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	*stat = ctx->ops->status_read(ctx);

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
		return 1;

	dsisr = ctx->csa.class_0_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

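/*
 * Bring the SPE into isolated mode: purge the MFC DMA queue, temporarily
 * switch the SPE to privileged (kernel) mode so the loader is accessible,
 * hand the loader address to the SPU and wait for the isolation loader
 * to finish.
 */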
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader: pass the address of the isolation loader to the
	 * SPU through the signal notification registers, high 32 bits via
	 * signal 1 and low 32 bits via signal 2 */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If the isolated LOAD has failed, run the SPU anyway; we
		 * will get a stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

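/*
 * Prepare the context for running: activate a saved context, apply
 * isolated-mode setup when required, and start the SPU by writing the
 * run-control register (and, for scheduled contexts, the NPC and
 * privilege-control registers as well).
 */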
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g., to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->runcntl_write(ctx, runcntl);

		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

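/*
 * Tear-down counterpart of spu_run_init(): collect the final status and
 * NPC, release the context, and report -ERESTARTSYS if a signal is
 * pending.
 */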
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler runs on the interrupted thread.
 * Here, the handler instead runs in PowerPC user-space code, while the
 * system call was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart blocks are too hard to handle for now, so just
		 * return -EINTR to the SPU.
		 * ERESTARTNOHAND comes from sys_pause(); we return -EINTR
		 * from there as well.
		 * Assume that sys_spu_run itself still needs to be
		 * restarted, though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

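/*
 * Handle a system call issued by the SPU program (stop-and-signal code
 * 0x2104): the 32-bit word at the NPC is a local-store offset to a
 * struct spu_syscall_block describing the call; the return value is
 * written back to that block and the SPU is restarted with the NPC
 * advanced past the pointer word.
 */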
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

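/*
 * Back end of the spu_run system call: start the context and loop,
 * waiting for the SPU to stop and handling SPU-issued system calls and
 * class 0/1 exceptions along the way, until the SPU has genuinely
 * stopped, halted or single-stepped, or a signal is pending.
 */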
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

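		/* A stop-and-signal with code 0x2104 is an SPU-issued system
		 * call; handle it and clear the stop status so the loop can
		 * continue. */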
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				       SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);

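	/* Stop codes in the 0x2100-0x21ff range mark library-assisted calls;
	 * account for them in the context statistics. */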
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}