1 /* backing_ops.c - query/set operations on saved SPU context.
2  *
3  * Copyright (C) IBM 2005
4  * Author: Mark Nutter <mnutter@us.ibm.com>
5  *
6  * These register operations allow SPUFS to operate on saved
7  * SPU contexts rather than hardware.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/sched.h>
27 #include <linux/kernel.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/smp.h>
31 #include <linux/smp_lock.h>
32 #include <linux/stddef.h>
33 #include <linux/unistd.h>
34 #include <linux/poll.h>
35 
36 #include <asm/io.h>
37 #include <asm/spu.h>
38 #include <asm/spu_csa.h>
39 #include <asm/spu_info.h>
40 #include <asm/mmu_context.h>
41 #include "spufs.h"
42 
43 /*
44  * Reads/writes to various problem and priv2 registers require
45  * state changes, i.e.  generate SPU events, modify channel
46  * counts, etc.
47  */
48 
49 static void gen_spu_event(struct spu_context *ctx, u32 event)
50 {
51 	u64 ch0_cnt;
52 	u64 ch0_data;
53 	u64 ch1_data;
54 
55 	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
56 	ch0_data = ctx->csa.spu_chnldata_RW[0];
57 	ch1_data = ctx->csa.spu_chnldata_RW[1];
58 	ctx->csa.spu_chnldata_RW[0] |= event;
59 	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
60 		ctx->csa.spu_chnlcnt_RW[0] = 1;
61 	}
62 }
63 
64 static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
65 {
66 	u32 mbox_stat;
67 	int ret = 0;
68 
69 	spin_lock(&ctx->csa.register_lock);
70 	mbox_stat = ctx->csa.prob.mb_stat_R;
71 	if (mbox_stat & 0x0000ff) {
72 		/* Read the first available word.
73 		 * Implementation note: the depth
74 		 * of pu_mb_R is currently 1.
75 		 */
76 		*data = ctx->csa.prob.pu_mb_R;
77 		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
78 		ctx->csa.spu_chnlcnt_RW[28] = 1;
79 		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
80 		ret = 4;
81 	}
82 	spin_unlock(&ctx->csa.register_lock);
83 	return ret;
84 }
85 
/* Return the saved mailbox status register (mb_stat_R) unmodified. */
static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}
90 
91 static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
92 					  unsigned int events)
93 {
94 	int ret;
95 	u32 stat;
96 
97 	ret = 0;
98 	spin_lock_irq(&ctx->csa.register_lock);
99 	stat = ctx->csa.prob.mb_stat_R;
100 
101 	/* if the requested event is there, return the poll
102 	   mask, otherwise enable the interrupt to get notified,
103 	   but first mark any pending interrupts as done so
104 	   we don't get woken up unnecessarily */
105 
106 	if (events & (POLLIN | POLLRDNORM)) {
107 		if (stat & 0xff0000)
108 			ret |= POLLIN | POLLRDNORM;
109 		else {
110 			ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
111 			ctx->csa.priv1.int_mask_class2_RW |= 0x1;
112 		}
113 	}
114 	if (events & (POLLOUT | POLLWRNORM)) {
115 		if (stat & 0x00ff00)
116 			ret = POLLOUT | POLLWRNORM;
117 		else {
118 			ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
119 			ctx->csa.priv1.int_mask_class2_RW |= 0x10;
120 		}
121 	}
122 	spin_unlock_irq(&ctx->csa.register_lock);
123 	return ret;
124 }
125 
126 static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
127 {
128 	int ret;
129 
130 	spin_lock(&ctx->csa.register_lock);
131 	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
132 		/* Read the first available word.
133 		 * Implementation note: the depth
134 		 * of puint_mb_R is currently 1.
135 		 */
136 		*data = ctx->csa.priv2.puint_mb_R;
137 		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
138 		ctx->csa.spu_chnlcnt_RW[30] = 1;
139 		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
140 		ret = 4;
141 	} else {
142 		/* make sure we get woken up by the interrupt */
143 		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
144 		ret = 0;
145 	}
146 	spin_unlock(&ctx->csa.register_lock);
147 	return ret;
148 }
149 
150 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
151 {
152 	int ret;
153 
154 	spin_lock(&ctx->csa.register_lock);
155 	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
156 		int slot = ctx->csa.spu_chnlcnt_RW[29];
157 		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
158 
159 		/* We have space to write wbox_data.
160 		 * Implementation note: the depth
161 		 * of spu_mb_W is currently 4.
162 		 */
163 		BUG_ON(avail != (4 - slot));
164 		ctx->csa.spu_mailbox_data[slot] = data;
165 		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
166 		ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
167 		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
168 		ret = 4;
169 	} else {
170 		/* make sure we get woken up by the interrupt when space
171 		   becomes available */
172 		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
173 		ret = 0;
174 	}
175 	spin_unlock(&ctx->csa.register_lock);
176 	return ret;
177 }
178 
/* Return the saved signal notification 1 value (channel 3 data). */
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}
183 
184 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
185 {
186 	spin_lock(&ctx->csa.register_lock);
187 	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
188 		ctx->csa.spu_chnldata_RW[3] |= data;
189 	else
190 		ctx->csa.spu_chnldata_RW[3] = data;
191 	ctx->csa.spu_chnlcnt_RW[3] = 1;
192 	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
193 	spin_unlock(&ctx->csa.register_lock);
194 }
195 
/* Return the saved signal notification 2 value (channel 4 data). */
static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}
200 
201 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
202 {
203 	spin_lock(&ctx->csa.register_lock);
204 	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
205 		ctx->csa.spu_chnldata_RW[4] |= data;
206 	else
207 		ctx->csa.spu_chnldata_RW[4] = data;
208 	ctx->csa.spu_chnlcnt_RW[4] = 1;
209 	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
210 	spin_unlock(&ctx->csa.register_lock);
211 }
212 
213 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
214 {
215 	u64 tmp;
216 
217 	spin_lock(&ctx->csa.register_lock);
218 	tmp = ctx->csa.priv2.spu_cfg_RW;
219 	if (val)
220 		tmp |= 1;
221 	else
222 		tmp &= ~1;
223 	ctx->csa.priv2.spu_cfg_RW = tmp;
224 	spin_unlock(&ctx->csa.register_lock);
225 }
226 
227 static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
228 {
229 	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
230 }
231 
232 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
233 {
234 	u64 tmp;
235 
236 	spin_lock(&ctx->csa.register_lock);
237 	tmp = ctx->csa.priv2.spu_cfg_RW;
238 	if (val)
239 		tmp |= 2;
240 	else
241 		tmp &= ~2;
242 	ctx->csa.priv2.spu_cfg_RW = tmp;
243 	spin_unlock(&ctx->csa.register_lock);
244 }
245 
246 static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
247 {
248 	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
249 }
250 
/* Return the saved SPU next program counter. */
static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}
255 
/* Set the saved SPU next program counter (no locking in original). */
static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}
260 
/* Return the saved SPU status register. */
static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}
265 
/* Return a pointer to the saved copy of the SPU local store. */
static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}
270 
/* Return the saved SPU run control register. */
static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}
275 
276 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
277 {
278 	spin_lock(&ctx->csa.register_lock);
279 	ctx->csa.prob.spu_runcntl_RW = val;
280 	if (val & SPU_RUNCNTL_RUNNABLE) {
281 		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
282 	} else {
283 		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
284 	}
285 	spin_unlock(&ctx->csa.register_lock);
286 }
287 
288 static void spu_backing_master_start(struct spu_context *ctx)
289 {
290 	struct spu_state *csa = &ctx->csa;
291 	u64 sr1;
292 
293 	spin_lock(&csa->register_lock);
294 	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
295 	csa->priv1.mfc_sr1_RW = sr1;
296 	spin_unlock(&csa->register_lock);
297 }
298 
299 static void spu_backing_master_stop(struct spu_context *ctx)
300 {
301 	struct spu_state *csa = &ctx->csa;
302 	u64 sr1;
303 
304 	spin_lock(&csa->register_lock);
305 	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
306 	csa->priv1.mfc_sr1_RW = sr1;
307 	spin_unlock(&csa->register_lock);
308 }
309 
310 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
311 					u32 mode)
312 {
313 	struct spu_problem_collapsed *prob = &ctx->csa.prob;
314 	int ret;
315 
316 	spin_lock(&ctx->csa.register_lock);
317 	ret = -EAGAIN;
318 	if (prob->dma_querytype_RW)
319 		goto out;
320 	ret = 0;
321 	/* FIXME: what are the side-effects of this? */
322 	prob->dma_querymask_RW = mask;
323 	prob->dma_querytype_RW = mode;
324 out:
325 	spin_unlock(&ctx->csa.register_lock);
326 
327 	return ret;
328 }
329 
/* Return the saved MFC tag status register. */
static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}
334 
/* Return the saved MFC queue status register (free queue elements). */
static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}
339 
/*
 * Queue an MFC DMA command into the saved context.
 *
 * Not implemented for the backing store: always returns -EAGAIN so the
 * caller retries once the context is loaded onto hardware.  The lock is
 * still taken/released to keep the locking pattern of the other ops.
 */
static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}
352 
/*
 * Restart stalled DMA: intentionally a no-op for a saved context, as
 * DMA can only be restarted once the context is back on hardware.
 */
static void spu_backing_restart_dma(struct spu_context *ctx)
{
	/* nothing to do here */
}
357 
/*
 * Ops vector used by SPUFS when a context is saved in memory rather
 * than loaded on an SPU; mirrors the hardware ops interface.
 */
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};
386