1 /* backing_ops.c - query/set operations on saved SPU context.
2  *
3  * Copyright (C) IBM 2005
4  * Author: Mark Nutter <mnutter@us.ibm.com>
5  *
6  * These register operations allow SPUFS to operate on saved
7  * SPU contexts rather than hardware.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/sched.h>
27 #include <linux/kernel.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/smp.h>
31 #include <linux/stddef.h>
32 #include <linux/unistd.h>
33 #include <linux/poll.h>
34 
35 #include <asm/io.h>
36 #include <asm/spu.h>
37 #include <asm/spu_csa.h>
38 #include <asm/spu_info.h>
39 #include <asm/mmu_context.h>
40 #include "spufs.h"
41 
42 /*
43  * Reads/writes to various problem and priv2 registers require
44  * state changes, i.e.  generate SPU events, modify channel
45  * counts, etc.
46  */
47 
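/*
 * Raise 'event' in the saved SPU event-status word.  Channel 0
 * holds the pending event bits (SPU_RdEventStat) and channel 1
 * the event mask (SPU_WrEventMask).  The channel 0 count is set
 * to 1 only when the event is newly raised, enabled by the mask,
 * and no event is already pending, which is meant to mirror the
 * hardware event logic.
 */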
48 static void gen_spu_event(struct spu_context *ctx, u32 event)
49 {
50 	u64 ch0_cnt;
51 	u64 ch0_data;
52 	u64 ch1_data;
53 
54 	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
55 	ch0_data = ctx->csa.spu_chnldata_RW[0];
56 	ch1_data = ctx->csa.spu_chnldata_RW[1];
57 	ctx->csa.spu_chnldata_RW[0] |= event;
58 	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
59 		ctx->csa.spu_chnlcnt_RW[0] = 1;
60 	}
61 }
62 
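/*
 * Emulate a PU-side read of the SPU outbound mailbox: return the
 * oldest entry and 4 (bytes transferred), or 0 when the mailbox
 * is empty.  Draining it frees the single slot on the SPU side,
 * so the count of channel 28 (SPU_WrOutMbox) goes back to 1 and
 * a mailbox-available event is raised.
 */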
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
64 {
65 	u32 mbox_stat;
66 	int ret = 0;
67 
68 	spin_lock(&ctx->csa.register_lock);
69 	mbox_stat = ctx->csa.prob.mb_stat_R;
70 	if (mbox_stat & 0x0000ff) {
71 		/* Read the first available word.
72 		 * Implementation note: the depth
73 		 * of pu_mb_R is currently 1.
74 		 */
75 		*data = ctx->csa.prob.pu_mb_R;
76 		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
77 		ctx->csa.spu_chnlcnt_RW[28] = 1;
78 		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
79 		ret = 4;
80 	}
81 	spin_unlock(&ctx->csa.register_lock);
82 	return ret;
83 }
84 
85 static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
86 {
87 	return ctx->csa.prob.mb_stat_R;
88 }
89 
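/*
 * Poll backend for mailbox status: report events that are already
 * satisfied by the saved state and, for those that are not, prime
 * the saved class 2 interrupt mask so the restored SPU raises the
 * interrupt we are waiting on.
 */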
90 static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
91 					  unsigned int events)
92 {
93 	int ret;
94 	u32 stat;
95 
96 	ret = 0;
97 	spin_lock_irq(&ctx->csa.register_lock);
98 	stat = ctx->csa.prob.mb_stat_R;
99 
	/*
	 * If the requested event is there, return the poll mask;
	 * otherwise enable the interrupt to get notified, but first
	 * mark any pending interrupts as done so we don't get woken
	 * up unnecessarily.
	 */
104 
105 	if (events & (POLLIN | POLLRDNORM)) {
106 		if (stat & 0xff0000)
107 			ret |= POLLIN | POLLRDNORM;
108 		else {
109 			ctx->csa.priv1.int_stat_class2_RW &=
110 				~CLASS2_MAILBOX_INTR;
111 			ctx->csa.priv1.int_mask_class2_RW |=
112 				CLASS2_ENABLE_MAILBOX_INTR;
113 		}
114 	}
115 	if (events & (POLLOUT | POLLWRNORM)) {
116 		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
118 		else {
119 			ctx->csa.priv1.int_stat_class2_RW &=
120 				~CLASS2_MAILBOX_THRESHOLD_INTR;
121 			ctx->csa.priv1.int_mask_class2_RW |=
122 				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
123 		}
124 	}
125 	spin_unlock_irq(&ctx->csa.register_lock);
126 	return ret;
127 }
128 
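/*
 * Emulate a PU-side read of the SPU outbound interrupt mailbox
 * (channel 30, SPU_WrOutIntrMbox).  Returns the number of bytes
 * read (4), or 0 if the mailbox is empty, in which case the saved
 * class 2 interrupt mask is armed so the reader gets woken up.
 */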
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
130 {
131 	int ret;
132 
133 	spin_lock(&ctx->csa.register_lock);
134 	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
135 		/* Read the first available word.
136 		 * Implementation note: the depth
137 		 * of puint_mb_R is currently 1.
138 		 */
139 		*data = ctx->csa.priv2.puint_mb_R;
140 		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
141 		ctx->csa.spu_chnlcnt_RW[30] = 1;
142 		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
143 		ret = 4;
144 	} else {
145 		/* make sure we get woken up by the interrupt */
146 		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
147 		ret = 0;
148 	}
149 	spin_unlock(&ctx->csa.register_lock);
150 	return ret;
151 }
152 
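/*
 * Emulate a PU-side write to the SPU inbound mailbox.  The saved
 * count of channel 29 (SPU_RdInMbox) doubles as the index of the
 * next free slot in spu_mailbox_data[]; the BUG_ON() cross-checks
 * it against the free-slot count kept in mb_stat_R.
 */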
153 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
154 {
155 	int ret;
156 
157 	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0x00ff00) {
159 		int slot = ctx->csa.spu_chnlcnt_RW[29];
160 		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
161 
162 		/* We have space to write wbox_data.
163 		 * Implementation note: the depth
164 		 * of spu_mb_W is currently 4.
165 		 */
166 		BUG_ON(avail != (4 - slot));
167 		ctx->csa.spu_mailbox_data[slot] = data;
168 		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
169 		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
170 		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
171 		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
172 		ret = 4;
173 	} else {
		/*
		 * Make sure we get woken up by the interrupt when
		 * space becomes available.
		 */
176 		ctx->csa.priv1.int_mask_class2_RW |=
177 			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
178 		ret = 0;
179 	}
180 	spin_unlock(&ctx->csa.register_lock);
181 	return ret;
182 }
183 
184 static u32 spu_backing_signal1_read(struct spu_context *ctx)
185 {
186 	return ctx->csa.spu_chnldata_RW[3];
187 }
188 
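/*
 * Bits 0 and 1 of the saved SPU configuration register select
 * logical-OR mode for signal notification 1 and 2 respectively:
 * in OR mode incoming writes are OR'ed into the pending value
 * (channels 3 and 4, SPU_RdSigNotify1/2) instead of overwriting
 * it.
 */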
189 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
190 {
191 	spin_lock(&ctx->csa.register_lock);
192 	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
193 		ctx->csa.spu_chnldata_RW[3] |= data;
194 	else
195 		ctx->csa.spu_chnldata_RW[3] = data;
196 	ctx->csa.spu_chnlcnt_RW[3] = 1;
197 	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
198 	spin_unlock(&ctx->csa.register_lock);
199 }
200 
201 static u32 spu_backing_signal2_read(struct spu_context *ctx)
202 {
203 	return ctx->csa.spu_chnldata_RW[4];
204 }
205 
206 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
207 {
208 	spin_lock(&ctx->csa.register_lock);
209 	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
210 		ctx->csa.spu_chnldata_RW[4] |= data;
211 	else
212 		ctx->csa.spu_chnldata_RW[4] = data;
213 	ctx->csa.spu_chnlcnt_RW[4] = 1;
214 	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
215 	spin_unlock(&ctx->csa.register_lock);
216 }
217 
218 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
219 {
220 	u64 tmp;
221 
222 	spin_lock(&ctx->csa.register_lock);
223 	tmp = ctx->csa.priv2.spu_cfg_RW;
224 	if (val)
225 		tmp |= 1;
226 	else
227 		tmp &= ~1;
228 	ctx->csa.priv2.spu_cfg_RW = tmp;
229 	spin_unlock(&ctx->csa.register_lock);
230 }
231 
232 static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
233 {
234 	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
235 }
236 
237 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
238 {
239 	u64 tmp;
240 
241 	spin_lock(&ctx->csa.register_lock);
242 	tmp = ctx->csa.priv2.spu_cfg_RW;
243 	if (val)
244 		tmp |= 2;
245 	else
246 		tmp &= ~2;
247 	ctx->csa.priv2.spu_cfg_RW = tmp;
248 	spin_unlock(&ctx->csa.register_lock);
249 }
250 
251 static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
252 {
253 	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
254 }
255 
256 static u32 spu_backing_npc_read(struct spu_context *ctx)
257 {
258 	return ctx->csa.prob.spu_npc_RW;
259 }
260 
261 static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
262 {
263 	ctx->csa.prob.spu_npc_RW = val;
264 }
265 
266 static u32 spu_backing_status_read(struct spu_context *ctx)
267 {
268 	return ctx->csa.prob.spu_status_R;
269 }
270 
271 static char *spu_backing_get_ls(struct spu_context *ctx)
272 {
273 	return ctx->csa.lscsa->ls;
274 }
275 
276 static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
277 {
278 	ctx->csa.priv2.spu_privcntl_RW = val;
279 }
280 
281 static u32 spu_backing_runcntl_read(struct spu_context *ctx)
282 {
283 	return ctx->csa.prob.spu_runcntl_RW;
284 }
285 
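/*
 * Writes to the saved run control register also shadow the
 * RUNNING bit in the saved status register, keeping status reads
 * from the backing store consistent with what hardware would
 * report.
 */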
286 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
287 {
288 	spin_lock(&ctx->csa.register_lock);
289 	ctx->csa.prob.spu_runcntl_RW = val;
290 	if (val & SPU_RUNCNTL_RUNNABLE) {
291 		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
292 	} else {
293 		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
294 	}
295 	spin_unlock(&ctx->csa.register_lock);
296 }
297 
298 static void spu_backing_runcntl_stop(struct spu_context *ctx)
299 {
300 	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
301 }
302 
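/*
 * MFC_STATE1_MASTER_RUN_CONTROL_MASK is the master run control
 * bit in MFC state register 1; while it is clear the SPU cannot
 * run regardless of the run control register.
 */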
303 static void spu_backing_master_start(struct spu_context *ctx)
304 {
305 	struct spu_state *csa = &ctx->csa;
306 	u64 sr1;
307 
308 	spin_lock(&csa->register_lock);
309 	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
310 	csa->priv1.mfc_sr1_RW = sr1;
311 	spin_unlock(&csa->register_lock);
312 }
313 
314 static void spu_backing_master_stop(struct spu_context *ctx)
315 {
316 	struct spu_state *csa = &ctx->csa;
317 	u64 sr1;
318 
319 	spin_lock(&csa->register_lock);
320 	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
321 	csa->priv1.mfc_sr1_RW = sr1;
322 	spin_unlock(&csa->register_lock);
323 }
324 
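/*
 * Arm a DMA tag-group query against the saved state.  A previous
 * query that has not completed yet is reported as -EAGAIN.
 */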
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
326 					u32 mode)
327 {
328 	struct spu_problem_collapsed *prob = &ctx->csa.prob;
329 	int ret;
330 
331 	spin_lock(&ctx->csa.register_lock);
332 	ret = -EAGAIN;
333 	if (prob->dma_querytype_RW)
334 		goto out;
335 	ret = 0;
336 	/* FIXME: what are the side-effects of this? */
337 	prob->dma_querymask_RW = mask;
338 	prob->dma_querytype_RW = mode;
339 	/* In the current implementation, the SPU context is always
340 	 * acquired in runnable state when new bits are added to the
341 	 * mask (tagwait), so it's sufficient just to mask
342 	 * dma_tagstatus_R with the 'mask' parameter here.
343 	 */
344 	ctx->csa.prob.dma_tagstatus_R &= mask;
345 out:
346 	spin_unlock(&ctx->csa.register_lock);
347 
348 	return ret;
349 }
350 
static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
352 {
353 	return ctx->csa.prob.dma_tagstatus_R;
354 }
355 
356 static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
357 {
358 	return ctx->csa.prob.dma_qstatus_R;
359 }
360 
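/*
 * Queueing a DMA command into the saved priv2 queue is not
 * implemented yet (see the FIXME below), so callers always see
 * -EAGAIN here.
 */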
361 static int spu_backing_send_mfc_command(struct spu_context *ctx,
362 					struct mfc_dma_command *cmd)
363 {
364 	int ret;
365 
366 	spin_lock(&ctx->csa.register_lock);
367 	ret = -EAGAIN;
368 	/* FIXME: set up priv2->puq */
369 	spin_unlock(&ctx->csa.register_lock);
370 
371 	return ret;
372 }
373 
374 static void spu_backing_restart_dma(struct spu_context *ctx)
375 {
376 	/* nothing to do here */
377 }
378 
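/*
 * Method table used while a context lives in the backing store;
 * the hardware-backed equivalents live in spu_hw_ops (hw_ops.c).
 */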
379 struct spu_context_ops spu_backing_ops = {
380 	.mbox_read = spu_backing_mbox_read,
381 	.mbox_stat_read = spu_backing_mbox_stat_read,
382 	.mbox_stat_poll = spu_backing_mbox_stat_poll,
383 	.ibox_read = spu_backing_ibox_read,
384 	.wbox_write = spu_backing_wbox_write,
385 	.signal1_read = spu_backing_signal1_read,
386 	.signal1_write = spu_backing_signal1_write,
387 	.signal2_read = spu_backing_signal2_read,
388 	.signal2_write = spu_backing_signal2_write,
389 	.signal1_type_set = spu_backing_signal1_type_set,
390 	.signal1_type_get = spu_backing_signal1_type_get,
391 	.signal2_type_set = spu_backing_signal2_type_set,
392 	.signal2_type_get = spu_backing_signal2_type_get,
393 	.npc_read = spu_backing_npc_read,
394 	.npc_write = spu_backing_npc_write,
395 	.status_read = spu_backing_status_read,
396 	.get_ls = spu_backing_get_ls,
397 	.privcntl_write = spu_backing_privcntl_write,
398 	.runcntl_read = spu_backing_runcntl_read,
399 	.runcntl_write = spu_backing_runcntl_write,
400 	.runcntl_stop = spu_backing_runcntl_stop,
401 	.master_start = spu_backing_master_start,
402 	.master_stop = spu_backing_master_stop,
403 	.set_mfc_query = spu_backing_set_mfc_query,
404 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
405 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
406 	.send_mfc_command = spu_backing_send_mfc_command,
407 	.restart_dma = spu_backing_restart_dma,
408 };
409