/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. generate SPU events, modify channel
 * counts, etc.
 */

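/* gen_spu_event() - reflect an MFC event in the saved channel state.
 *
 * Sets @event in the saved SPU_RdEventStat data (channel 0).  If the
 * event was not already pending, is enabled in the saved event mask
 * (channel 1) and the event-status channel count is currently zero, the
 * count is raised to one so the saved context sees the event become
 * readable, mirroring what the hardware would do.
 */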
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

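/* Read one word from the saved SPU-to-PU (mbox) mailbox.  The low byte
 * of mb_stat_R holds the number of valid entries; when one is available
 * it is consumed, the mailbox is marked empty, the SPU-side channel 28
 * count is restored and MFC_PU_MAILBOX_AVAILABLE_EVENT is raised.
 * Returns the number of bytes read (4), or 0 if the mailbox is empty.
 */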
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
					  unsigned int events)
{
	int ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/* if the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
			ctx->csa.priv1.int_mask_class2_RW |= 0x1;
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
			ctx->csa.priv1.int_mask_class2_RW |= 0x10;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}

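/* Read one word from the saved interrupting (ibox) mailbox.  Bits
 * 16-23 of mb_stat_R give the entry count for puint_mb_R; on success
 * the entry is consumed, the channel 30 count is restored and
 * MFC_PU_INT_MAILBOX_AVAILABLE_EVENT is raised, returning 4.  If the
 * mailbox is empty, the class 2 mailbox interrupt is unmasked so the
 * reader is woken when data arrives, and 0 is returned.
 */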
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

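/* Write one word to the saved PU-to-SPU (wbox) mailbox.  Bits 8-15 of
 * mb_stat_R count the free slots of the four-deep spu_mb_W queue, while
 * the saved channel 29 count tracks how many entries are queued.  On
 * success the word is stored, the free-slot count is recomputed and
 * MFC_SPU_MAILBOX_WRITTEN_EVENT is raised; if the queue is full, the
 * class 2 interrupt is unmasked so the writer is woken once space
 * becomes available, and 0 is returned.
 */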
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

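/* Writes to a signal notification register either overwrite or OR into
 * the saved channel data, depending on the signal type configured in
 * spu_cfg_RW (bit 0 for signal 1, bit 1 for signal 2; OR mode when the
 * bit is set).  Either way the channel becomes readable and the
 * corresponding MFC signal event is raised.
 */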
static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}

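/* Update the saved run-control register.  On hardware the SPU status
 * register changes as a side effect of starting or stopping the SPU;
 * for a saved context this is mirrored by setting or clearing
 * SPU_STATUS_RUNNING directly, so later status reads stay consistent.
 */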
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

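/* The master run control bit in MFC_SR1 gates whether the SPU is
 * allowed to run at all; master_start and master_stop simply set or
 * clear MFC_STATE1_MASTER_RUN_CONTROL_MASK in the saved register image.
 */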
static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

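/* Set up an MFC tag-group query (as used by tagwait) on the saved
 * problem state.  If a query is already pending (dma_querytype_RW is
 * non-zero), -EAGAIN is returned; otherwise the mask and mode are
 * stored and the saved tag status is restricted to the requested tag
 * groups.
 */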
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
					u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;
	/* FIXME: what are the side-effects of this? */
	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	/* In the current implementation, the SPU context is always
	 * acquired in runnable state when new bits are added to the
	 * mask (tagwait), so it's sufficient just to mask
	 * dma_tagstatus_R with the 'mask' parameter here.
	 */
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}

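/* Queueing an MFC DMA command into the saved priv2 state is not
 * implemented (see the FIXME below), so this always returns -EAGAIN.
 */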
static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
	/* nothing to do here */
}

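/* Backing-store implementation of the spu_context_ops vector.  These
 * operations act purely on the saved context image (csa) and are used
 * while a context is not loaded on a physical SPU; the direct-to-
 * hardware counterpart is expected to live in hw_ops.c.  Callers in
 * spufs reach them through the per-context ops pointer, roughly:
 *
 *	u32 data;
 *	int ret = ctx->ops->mbox_read(ctx, &data);
 *
 * where ret is 4 on success and 0 if no mailbox data was available.
 */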
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};