/* backing_ops.c - query/set operations on a saved SPU context.
2  *
3  * Copyright (C) IBM 2005
4  * Author: Mark Nutter <mnutter@us.ibm.com>
5  *
6  * These register operations allow SPUFS to operate on saved
7  * SPU contexts rather than hardware.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/sched.h>
27 #include <linux/kernel.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/smp.h>
31 #include <linux/stddef.h>
32 #include <linux/unistd.h>
33 #include <linux/poll.h>
34 
35 #include <asm/io.h>
36 #include <asm/spu.h>
37 #include <asm/spu_csa.h>
38 #include <asm/spu_info.h>
39 #include <asm/mmu_context.h>
40 #include "spufs.h"
41 
42 /*
43  * Reads/writes to various problem and priv2 registers require
44  * state changes, i.e.  generate SPU events, modify channel
45  * counts, etc.
46  */
47 
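/*
 * Record 'event' in the saved SPU event status (channel 0 data).  If
 * the event was not already pending, is enabled in the saved event
 * mask (channel 1 data) and the channel count was zero, raise the
 * channel 0 count to 1 so that a read of SPU_RdEventStat completes
 * once the context is loaded again.
 */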
48 static void gen_spu_event(struct spu_context *ctx, u32 event)
49 {
50 	u64 ch0_cnt;
51 	u64 ch0_data;
52 	u64 ch1_data;
53 
54 	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
55 	ch0_data = ctx->csa.spu_chnldata_RW[0];
56 	ch1_data = ctx->csa.spu_chnldata_RW[1];
57 	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event))
		ctx->csa.spu_chnlcnt_RW[0] = 1;
61 }
62 
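/*
 * Layout of the saved SPU_Mbox_Stat register, as used by the mailbox
 * operations below: bits 0-7 hold the outbound (SPU-to-PU) mailbox
 * count, bits 8-15 the free space in the inbound (PU-to-SPU) mailbox,
 * and bits 16-23 the outbound interrupt mailbox count.
 */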
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
64 {
65 	u32 mbox_stat;
66 	int ret = 0;
67 
68 	spin_lock(&ctx->csa.register_lock);
69 	mbox_stat = ctx->csa.prob.mb_stat_R;
70 	if (mbox_stat & 0x0000ff) {
71 		/* Read the first available word.
72 		 * Implementation note: the depth
73 		 * of pu_mb_R is currently 1.
74 		 */
75 		*data = ctx->csa.prob.pu_mb_R;
76 		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
77 		ctx->csa.spu_chnlcnt_RW[28] = 1;
78 		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
79 		ret = 4;
80 	}
81 	spin_unlock(&ctx->csa.register_lock);
82 	return ret;
83 }
84 
85 static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
86 {
87 	return ctx->csa.prob.mb_stat_R;
88 }
89 
90 static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
91 					  unsigned int events)
92 {
93 	int ret;
94 	u32 stat;
95 
96 	ret = 0;
97 	spin_lock_irq(&ctx->csa.register_lock);
98 	stat = ctx->csa.prob.mb_stat_R;
99 
	/*
	 * If the requested event is there, return the poll mask;
	 * otherwise enable the interrupt to get notified, but first
	 * mark any pending interrupts as done so we don't get woken
	 * up unnecessarily.
	 */
104 
105 	if (events & (POLLIN | POLLRDNORM)) {
106 		if (stat & 0xff0000)
107 			ret |= POLLIN | POLLRDNORM;
108 		else {
109 			ctx->csa.priv1.int_stat_class2_RW &=
110 				~CLASS2_MAILBOX_INTR;
111 			ctx->csa.priv1.int_mask_class2_RW |=
112 				CLASS2_ENABLE_MAILBOX_INTR;
113 		}
114 	}
115 	if (events & (POLLOUT | POLLWRNORM)) {
116 		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
118 		else {
119 			ctx->csa.priv1.int_stat_class2_RW &=
120 				~CLASS2_MAILBOX_THRESHOLD_INTR;
121 			ctx->csa.priv1.int_mask_class2_RW |=
122 				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
123 		}
124 	}
125 	spin_unlock_irq(&ctx->csa.register_lock);
126 	return ret;
127 }
128 
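/*
 * Read one word from the saved interrupt mailbox.  Returns 4 on
 * success; if the mailbox is empty, the class 2 mailbox interrupt is
 * enabled so that the sleeping reader is woken once data arrives, and
 * 0 is returned.
 */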
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
130 {
131 	int ret;
132 
133 	spin_lock(&ctx->csa.register_lock);
134 	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
135 		/* Read the first available word.
136 		 * Implementation note: the depth
137 		 * of puint_mb_R is currently 1.
138 		 */
139 		*data = ctx->csa.priv2.puint_mb_R;
140 		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
141 		ctx->csa.spu_chnlcnt_RW[30] = 1;
142 		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
143 		ret = 4;
144 	} else {
145 		/* make sure we get woken up by the interrupt */
146 		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
147 		ret = 0;
148 	}
149 	spin_unlock(&ctx->csa.register_lock);
150 	return ret;
151 }
152 
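/*
 * Queue one word into the saved inbound (PU-to-SPU) mailbox.  The
 * saved count of channel 29 (SPU_RdInMbox) tracks how many words are
 * queued, so the free-slot field of mb_stat_R must always equal 4
 * minus that count; the BUG_ON below asserts this invariant.  Returns
 * 4 on success and 0, with the threshold interrupt armed, when the
 * mailbox is full.
 */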
153 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
154 {
155 	int ret;
156 
157 	spin_lock(&ctx->csa.register_lock);
158 	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
159 		int slot = ctx->csa.spu_chnlcnt_RW[29];
160 		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
161 
162 		/* We have space to write wbox_data.
163 		 * Implementation note: the depth
164 		 * of spu_mb_W is currently 4.
165 		 */
166 		BUG_ON(avail != (4 - slot));
167 		ctx->csa.spu_mailbox_data[slot] = data;
168 		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
169 		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
170 		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
171 		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
172 		ret = 4;
173 	} else {
		/*
		 * Make sure we get woken up by the interrupt when
		 * space becomes available.
		 */
176 		ctx->csa.priv1.int_mask_class2_RW |=
177 			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
178 		ret = 0;
179 	}
180 	spin_unlock(&ctx->csa.register_lock);
181 	return ret;
182 }
183 
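/*
 * Signal notification, channels 3 (SPU_RdSigNotify1) and 4
 * (SPU_RdSigNotify2).  Bits 0 and 1 of the saved SPU_Cfg register
 * select logical-OR mode for signal 1 and signal 2 respectively: in
 * OR mode a write accumulates into the pending signal word, otherwise
 * it overwrites it.  Either way the write makes the channel readable
 * (count = 1).
 */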
184 static u32 spu_backing_signal1_read(struct spu_context *ctx)
185 {
186 	return ctx->csa.spu_chnldata_RW[3];
187 }
188 
189 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
190 {
191 	spin_lock(&ctx->csa.register_lock);
192 	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
193 		ctx->csa.spu_chnldata_RW[3] |= data;
194 	else
195 		ctx->csa.spu_chnldata_RW[3] = data;
196 	ctx->csa.spu_chnlcnt_RW[3] = 1;
197 	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
198 	spin_unlock(&ctx->csa.register_lock);
199 }
200 
201 static u32 spu_backing_signal2_read(struct spu_context *ctx)
202 {
203 	return ctx->csa.spu_chnldata_RW[4];
204 }
205 
206 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
207 {
208 	spin_lock(&ctx->csa.register_lock);
209 	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
210 		ctx->csa.spu_chnldata_RW[4] |= data;
211 	else
212 		ctx->csa.spu_chnldata_RW[4] = data;
213 	ctx->csa.spu_chnlcnt_RW[4] = 1;
214 	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
215 	spin_unlock(&ctx->csa.register_lock);
216 }
217 
218 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
219 {
220 	u64 tmp;
221 
222 	spin_lock(&ctx->csa.register_lock);
223 	tmp = ctx->csa.priv2.spu_cfg_RW;
224 	if (val)
225 		tmp |= 1;
226 	else
227 		tmp &= ~1;
228 	ctx->csa.priv2.spu_cfg_RW = tmp;
229 	spin_unlock(&ctx->csa.register_lock);
230 }
231 
232 static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
233 {
234 	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
235 }
236 
237 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
238 {
239 	u64 tmp;
240 
241 	spin_lock(&ctx->csa.register_lock);
242 	tmp = ctx->csa.priv2.spu_cfg_RW;
243 	if (val)
244 		tmp |= 2;
245 	else
246 		tmp &= ~2;
247 	ctx->csa.priv2.spu_cfg_RW = tmp;
248 	spin_unlock(&ctx->csa.register_lock);
249 }
250 
251 static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
252 {
253 	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
254 }
255 
256 static u32 spu_backing_npc_read(struct spu_context *ctx)
257 {
258 	return ctx->csa.prob.spu_npc_RW;
259 }
260 
261 static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
262 {
263 	ctx->csa.prob.spu_npc_RW = val;
264 }
265 
266 static u32 spu_backing_status_read(struct spu_context *ctx)
267 {
268 	return ctx->csa.prob.spu_status_R;
269 }
270 
271 static char *spu_backing_get_ls(struct spu_context *ctx)
272 {
273 	return ctx->csa.lscsa->ls;
274 }
275 
276 static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
277 {
278 	ctx->csa.priv2.spu_privcntl_RW = val;
279 }
280 
281 static u32 spu_backing_runcntl_read(struct spu_context *ctx)
282 {
283 	return ctx->csa.prob.spu_runcntl_RW;
284 }
285 
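/*
 * Emulate the side effects of a SPU_RunCntl write on saved state:
 * making the context runnable clears any recorded stop reason and
 * sets the running bit in the saved status register; stopping clears
 * the running bit.
 */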
286 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
287 {
288 	spin_lock(&ctx->csa.register_lock);
289 	ctx->csa.prob.spu_runcntl_RW = val;
290 	if (val & SPU_RUNCNTL_RUNNABLE) {
291 		ctx->csa.prob.spu_status_R &=
292 			~SPU_STATUS_STOPPED_BY_STOP &
293 			~SPU_STATUS_STOPPED_BY_HALT &
294 			~SPU_STATUS_SINGLE_STEP &
295 			~SPU_STATUS_INVALID_INSTR &
296 			~SPU_STATUS_INVALID_CH;
297 		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
298 	} else {
299 		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
300 	}
301 	spin_unlock(&ctx->csa.register_lock);
302 }
303 
304 static void spu_backing_runcntl_stop(struct spu_context *ctx)
305 {
306 	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
307 }
308 
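/*
 * The master run control bit in the saved MFC_SR1 register gates
 * whether the SPU may run at all, independently of the SPU_RunCntl
 * setting handled above.
 */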
309 static void spu_backing_master_start(struct spu_context *ctx)
310 {
311 	struct spu_state *csa = &ctx->csa;
312 	u64 sr1;
313 
314 	spin_lock(&csa->register_lock);
315 	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
316 	csa->priv1.mfc_sr1_RW = sr1;
317 	spin_unlock(&csa->register_lock);
318 }
319 
320 static void spu_backing_master_stop(struct spu_context *ctx)
321 {
322 	struct spu_state *csa = &ctx->csa;
323 	u64 sr1;
324 
325 	spin_lock(&csa->register_lock);
326 	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
327 	csa->priv1.mfc_sr1_RW = sr1;
328 	spin_unlock(&csa->register_lock);
329 }
330 
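/*
 * Set up a saved MFC tag-group status query.  Fails with -EAGAIN if a
 * query is already pending, i.e. dma_querytype_RW is non-zero.
 */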
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
332 					u32 mode)
333 {
334 	struct spu_problem_collapsed *prob = &ctx->csa.prob;
335 	int ret;
336 
337 	spin_lock(&ctx->csa.register_lock);
338 	ret = -EAGAIN;
339 	if (prob->dma_querytype_RW)
340 		goto out;
341 	ret = 0;
342 	/* FIXME: what are the side-effects of this? */
343 	prob->dma_querymask_RW = mask;
344 	prob->dma_querytype_RW = mode;
345 	/* In the current implementation, the SPU context is always
346 	 * acquired in runnable state when new bits are added to the
347 	 * mask (tagwait), so it's sufficient just to mask
348 	 * dma_tagstatus_R with the 'mask' parameter here.
349 	 */
350 	ctx->csa.prob.dma_tagstatus_R &= mask;
351 out:
352 	spin_unlock(&ctx->csa.register_lock);
353 
354 	return ret;
355 }
356 
static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
358 {
359 	return ctx->csa.prob.dma_tagstatus_R;
360 }
361 
362 static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
363 {
364 	return ctx->csa.prob.dma_qstatus_R;
365 }
366 
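/*
 * Queueing a DMA command against saved state is not implemented yet
 * (see the FIXME below), so this currently always fails with -EAGAIN.
 */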
367 static int spu_backing_send_mfc_command(struct spu_context *ctx,
368 					struct mfc_dma_command *cmd)
369 {
370 	int ret;
371 
372 	spin_lock(&ctx->csa.register_lock);
373 	ret = -EAGAIN;
374 	/* FIXME: set up priv2->puq */
375 	spin_unlock(&ctx->csa.register_lock);
376 
377 	return ret;
378 }
379 
380 static void spu_backing_restart_dma(struct spu_context *ctx)
381 {
382 	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
383 }
384 
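/*
 * Context operations used while a context is in the saved state; the
 * hardware-backed counterpart is spu_hw_ops in hw_ops.c.
 */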
385 struct spu_context_ops spu_backing_ops = {
386 	.mbox_read = spu_backing_mbox_read,
387 	.mbox_stat_read = spu_backing_mbox_stat_read,
388 	.mbox_stat_poll = spu_backing_mbox_stat_poll,
389 	.ibox_read = spu_backing_ibox_read,
390 	.wbox_write = spu_backing_wbox_write,
391 	.signal1_read = spu_backing_signal1_read,
392 	.signal1_write = spu_backing_signal1_write,
393 	.signal2_read = spu_backing_signal2_read,
394 	.signal2_write = spu_backing_signal2_write,
395 	.signal1_type_set = spu_backing_signal1_type_set,
396 	.signal1_type_get = spu_backing_signal1_type_get,
397 	.signal2_type_set = spu_backing_signal2_type_set,
398 	.signal2_type_get = spu_backing_signal2_type_get,
399 	.npc_read = spu_backing_npc_read,
400 	.npc_write = spu_backing_npc_write,
401 	.status_read = spu_backing_status_read,
402 	.get_ls = spu_backing_get_ls,
403 	.privcntl_write = spu_backing_privcntl_write,
404 	.runcntl_read = spu_backing_runcntl_read,
405 	.runcntl_write = spu_backing_runcntl_write,
406 	.runcntl_stop = spu_backing_runcntl_stop,
407 	.master_start = spu_backing_master_start,
408 	.master_stop = spu_backing_master_stop,
409 	.set_mfc_query = spu_backing_set_mfc_query,
410 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
411 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
412 	.send_mfc_command = spu_backing_send_mfc_command,
413 	.restart_dma = spu_backing_restart_dma,
414 };
415