1 /* backing_ops.c - query/set operations on saved SPU context.
2  *
3  * Copyright (C) IBM 2005
4  * Author: Mark Nutter <mnutter@us.ibm.com>
5  *
6  * These register operations allow SPUFS to operate on saved
7  * SPU contexts rather than hardware.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/errno.h>
26 #include <linux/sched.h>
27 #include <linux/kernel.h>
28 #include <linux/mm.h>
29 #include <linux/vmalloc.h>
30 #include <linux/smp.h>
31 #include <linux/smp_lock.h>
32 #include <linux/stddef.h>
33 #include <linux/unistd.h>
34 #include <linux/poll.h>
35 
36 #include <asm/io.h>
37 #include <asm/spu.h>
38 #include <asm/spu_csa.h>
39 #include <asm/spu_info.h>
40 #include <asm/mmu_context.h>
41 #include "spufs.h"
42 
43 /*
44  * Reads/writes to various problem and priv2 registers require
45  * state changes, i.e.  generate SPU events, modify channel
46  * counts, etc.
47  */
48 
49 static void gen_spu_event(struct spu_context *ctx, u32 event)
50 {
51 	u64 ch0_cnt;
52 	u64 ch0_data;
53 	u64 ch1_data;
54 
55 	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
56 	ch0_data = ctx->csa.spu_chnldata_RW[0];
57 	ch1_data = ctx->csa.spu_chnldata_RW[1];
58 	ctx->csa.spu_chnldata_RW[0] |= event;
59 	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
60 		ctx->csa.spu_chnlcnt_RW[0] = 1;
61 	}
62 }
63 
64 static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
65 {
66 	u32 mbox_stat;
67 	int ret = 0;
68 
69 	spin_lock(&ctx->csa.register_lock);
70 	mbox_stat = ctx->csa.prob.mb_stat_R;
71 	if (mbox_stat & 0x0000ff) {
72 		/* Read the first available word.
73 		 * Implementation note: the depth
74 		 * of pu_mb_R is currently 1.
75 		 */
76 		*data = ctx->csa.prob.pu_mb_R;
77 		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
78 		ctx->csa.spu_chnlcnt_RW[28] = 1;
79 		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
80 		ret = 4;
81 	}
82 	spin_unlock(&ctx->csa.register_lock);
83 	return ret;
84 }
85 
86 static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
87 {
88 	return ctx->csa.prob.mb_stat_R;
89 }
90 
91 static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
92 					  unsigned int events)
93 {
94 	int ret;
95 	u32 stat;
96 
97 	ret = 0;
98 	spin_lock_irq(&ctx->csa.register_lock);
99 	stat = ctx->csa.prob.mb_stat_R;
100 
101 	/* if the requested event is there, return the poll
102 	   mask, otherwise enable the interrupt to get notified,
103 	   but first mark any pending interrupts as done so
104 	   we don't get woken up unnecessarily */
105 
106 	if (events & (POLLIN | POLLRDNORM)) {
107 		if (stat & 0xff0000)
108 			ret |= POLLIN | POLLRDNORM;
109 		else {
110 			ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
111 			ctx->csa.priv1.int_mask_class2_RW |= 0x1;
112 		}
113 	}
114 	if (events & (POLLOUT | POLLWRNORM)) {
115 		if (stat & 0x00ff00)
116 			ret = POLLOUT | POLLWRNORM;
117 		else {
118 			ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
119 			ctx->csa.priv1.int_mask_class2_RW |= 0x10;
120 		}
121 	}
122 	spin_unlock_irq(&ctx->csa.register_lock);
123 	return ret;
124 }
125 
126 static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
127 {
128 	int ret;
129 
130 	spin_lock(&ctx->csa.register_lock);
131 	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
132 		/* Read the first available word.
133 		 * Implementation note: the depth
134 		 * of puint_mb_R is currently 1.
135 		 */
136 		*data = ctx->csa.priv2.puint_mb_R;
137 		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
138 		ctx->csa.spu_chnlcnt_RW[30] = 1;
139 		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
140 		ret = 4;
141 	} else {
142 		/* make sure we get woken up by the interrupt */
143 		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
144 		ret = 0;
145 	}
146 	spin_unlock(&ctx->csa.register_lock);
147 	return ret;
148 }
149 
150 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
151 {
152 	int ret;
153 
154 	spin_lock(&ctx->csa.register_lock);
155 	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
156 		int slot = ctx->csa.spu_chnlcnt_RW[29];
157 		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
158 
159 		/* We have space to write wbox_data.
160 		 * Implementation note: the depth
161 		 * of spu_mb_W is currently 4.
162 		 */
163 		BUG_ON(avail != (4 - slot));
164 		ctx->csa.spu_mailbox_data[slot] = data;
165 		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
166 		ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
167 		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
168 		ret = 4;
169 	} else {
170 		/* make sure we get woken up by the interrupt when space
171 		   becomes available */
172 		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
173 		ret = 0;
174 	}
175 	spin_unlock(&ctx->csa.register_lock);
176 	return ret;
177 }
178 
179 static u32 spu_backing_signal1_read(struct spu_context *ctx)
180 {
181 	return ctx->csa.spu_chnldata_RW[3];
182 }
183 
184 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
185 {
186 	spin_lock(&ctx->csa.register_lock);
187 	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
188 		ctx->csa.spu_chnldata_RW[3] |= data;
189 	else
190 		ctx->csa.spu_chnldata_RW[3] = data;
191 	ctx->csa.spu_chnlcnt_RW[3] = 1;
192 	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
193 	spin_unlock(&ctx->csa.register_lock);
194 }
195 
196 static u32 spu_backing_signal2_read(struct spu_context *ctx)
197 {
198 	return ctx->csa.spu_chnldata_RW[4];
199 }
200 
201 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
202 {
203 	spin_lock(&ctx->csa.register_lock);
204 	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
205 		ctx->csa.spu_chnldata_RW[4] |= data;
206 	else
207 		ctx->csa.spu_chnldata_RW[4] = data;
208 	ctx->csa.spu_chnlcnt_RW[4] = 1;
209 	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
210 	spin_unlock(&ctx->csa.register_lock);
211 }
212 
213 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
214 {
215 	u64 tmp;
216 
217 	spin_lock(&ctx->csa.register_lock);
218 	tmp = ctx->csa.priv2.spu_cfg_RW;
219 	if (val)
220 		tmp |= 1;
221 	else
222 		tmp &= ~1;
223 	ctx->csa.priv2.spu_cfg_RW = tmp;
224 	spin_unlock(&ctx->csa.register_lock);
225 }
226 
227 static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
228 {
229 	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
230 }
231 
232 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
233 {
234 	u64 tmp;
235 
236 	spin_lock(&ctx->csa.register_lock);
237 	tmp = ctx->csa.priv2.spu_cfg_RW;
238 	if (val)
239 		tmp |= 2;
240 	else
241 		tmp &= ~2;
242 	ctx->csa.priv2.spu_cfg_RW = tmp;
243 	spin_unlock(&ctx->csa.register_lock);
244 }
245 
246 static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
247 {
248 	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
249 }
250 
251 static u32 spu_backing_npc_read(struct spu_context *ctx)
252 {
253 	return ctx->csa.prob.spu_npc_RW;
254 }
255 
256 static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
257 {
258 	ctx->csa.prob.spu_npc_RW = val;
259 }
260 
261 static u32 spu_backing_status_read(struct spu_context *ctx)
262 {
263 	return ctx->csa.prob.spu_status_R;
264 }
265 
266 static char *spu_backing_get_ls(struct spu_context *ctx)
267 {
268 	return ctx->csa.lscsa->ls;
269 }
270 
271 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
272 {
273 	spin_lock(&ctx->csa.register_lock);
274 	ctx->csa.prob.spu_runcntl_RW = val;
275 	if (val & SPU_RUNCNTL_RUNNABLE) {
276 		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
277 	} else {
278 		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
279 	}
280 	spin_unlock(&ctx->csa.register_lock);
281 }
282 
283 static void spu_backing_master_start(struct spu_context *ctx)
284 {
285 	struct spu_state *csa = &ctx->csa;
286 	u64 sr1;
287 
288 	spin_lock(&csa->register_lock);
289 	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
290 	csa->priv1.mfc_sr1_RW = sr1;
291 	spin_unlock(&csa->register_lock);
292 }
293 
294 static void spu_backing_master_stop(struct spu_context *ctx)
295 {
296 	struct spu_state *csa = &ctx->csa;
297 	u64 sr1;
298 
299 	spin_lock(&csa->register_lock);
300 	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
301 	csa->priv1.mfc_sr1_RW = sr1;
302 	spin_unlock(&csa->register_lock);
303 }
304 
305 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
306 					u32 mode)
307 {
308 	struct spu_problem_collapsed *prob = &ctx->csa.prob;
309 	int ret;
310 
311 	spin_lock(&ctx->csa.register_lock);
312 	ret = -EAGAIN;
313 	if (prob->dma_querytype_RW)
314 		goto out;
315 	ret = 0;
316 	/* FIXME: what are the side-effects of this? */
317 	prob->dma_querymask_RW = mask;
318 	prob->dma_querytype_RW = mode;
319 out:
320 	spin_unlock(&ctx->csa.register_lock);
321 
322 	return ret;
323 }
324 
325 static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx)
326 {
327 	return ctx->csa.prob.dma_tagstatus_R;
328 }
329 
330 static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
331 {
332 	return ctx->csa.prob.dma_qstatus_R;
333 }
334 
335 static int spu_backing_send_mfc_command(struct spu_context *ctx,
336 					struct mfc_dma_command *cmd)
337 {
338 	int ret;
339 
340 	spin_lock(&ctx->csa.register_lock);
341 	ret = -EAGAIN;
342 	/* FIXME: set up priv2->puq */
343 	spin_unlock(&ctx->csa.register_lock);
344 
345 	return ret;
346 }
347 
/*
 * Operations vector used by SPUFS when a context is saved (backed by
 * memory) rather than loaded on a physical SPU.  Counterpart to the
 * hardware ops table; each entry mirrors a hardware operation but acts
 * on the context-save area instead.
 */
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.runcntl_write = spu_backing_runcntl_write,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
};
374