1 /* hw_ops.c - query/set operations on active SPU context.
2  *
3  * Copyright (C) IBM 2005
4  * Author: Mark Nutter <mnutter@us.ibm.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2, or (at your option)
9  * any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include <linux/config.h>
22 #include <linux/module.h>
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/poll.h>
28 #include <linux/smp.h>
29 #include <linux/smp_lock.h>
30 #include <linux/stddef.h>
31 #include <linux/unistd.h>
32 
33 #include <asm/io.h>
34 #include <asm/spu.h>
35 #include <asm/spu_priv1.h>
36 #include <asm/spu_csa.h>
37 #include <asm/mmu_context.h>
38 #include "spufs.h"
39 
40 static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data)
41 {
42 	struct spu *spu = ctx->spu;
43 	struct spu_problem __iomem *prob = spu->problem;
44 	u32 mbox_stat;
45 	int ret = 0;
46 
47 	spin_lock_irq(&spu->register_lock);
48 	mbox_stat = in_be32(&prob->mb_stat_R);
49 	if (mbox_stat & 0x0000ff) {
50 		*data = in_be32(&prob->pu_mb_R);
51 		ret = 4;
52 	}
53 	spin_unlock_irq(&spu->register_lock);
54 	return ret;
55 }
56 
57 static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
58 {
59 	return in_be32(&ctx->spu->problem->mb_stat_R);
60 }
61 
62 static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
63 					  unsigned int events)
64 {
65 	struct spu *spu = ctx->spu;
66 	int ret = 0;
67 	u32 stat;
68 
69 	spin_lock_irq(&spu->register_lock);
70 	stat = in_be32(&spu->problem->mb_stat_R);
71 
72 	/* if the requested event is there, return the poll
73 	   mask, otherwise enable the interrupt to get notified,
74 	   but first mark any pending interrupts as done so
75 	   we don't get woken up unnecessarily */
76 
77 	if (events & (POLLIN | POLLRDNORM)) {
78 		if (stat & 0xff0000)
79 			ret |= POLLIN | POLLRDNORM;
80 		else {
81 			spu_int_stat_clear(spu, 2, 0x1);
82 			spu_int_mask_or(spu, 2, 0x1);
83 		}
84 	}
85 	if (events & (POLLOUT | POLLWRNORM)) {
86 		if (stat & 0x00ff00)
87 			ret = POLLOUT | POLLWRNORM;
88 		else {
89 			spu_int_stat_clear(spu, 2, 0x10);
90 			spu_int_mask_or(spu, 2, 0x10);
91 		}
92 	}
93 	spin_unlock_irq(&spu->register_lock);
94 	return ret;
95 }
96 
/* Read one word from the interrupting (PU interrupt) mailbox.
 * Returns 4 when a word was read, 0 when the mailbox was empty
 * (in which case the class-2 interrupt is unmasked so the caller
 * can sleep until data arrives).
 */
static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	/* bits 16-23 of mb_stat_R count valid interrupt-mailbox entries */
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		/* NOTE(review): puint_mb_R is read as 64 bits but only the
		   low 32 bits are kept — presumably the mailbox word lives
		   there; confirm against the SPU privileged-register spec. */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		spu_int_mask_or(spu, 2, 0x1);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}
117 
118 static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
119 {
120 	struct spu *spu = ctx->spu;
121 	struct spu_problem __iomem *prob = spu->problem;
122 	int ret;
123 
124 	spin_lock_irq(&spu->register_lock);
125 	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
126 		/* we have space to write wbox_data to */
127 		out_be32(&prob->spu_mb_W, data);
128 		ret = 4;
129 	} else {
130 		/* make sure we get woken up by the interrupt when space
131 		   becomes available */
132 		spu_int_mask_or(spu, 2, 0x10);
133 		ret = 0;
134 	}
135 	spin_unlock_irq(&spu->register_lock);
136 	return ret;
137 }
138 
139 static u32 spu_hw_signal1_read(struct spu_context *ctx)
140 {
141 	return in_be32(&ctx->spu->problem->signal_notify1);
142 }
143 
144 static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
145 {
146 	out_be32(&ctx->spu->problem->signal_notify1, data);
147 }
148 
149 static u32 spu_hw_signal2_read(struct spu_context *ctx)
150 {
151 	return in_be32(&ctx->spu->problem->signal_notify1);
152 }
153 
154 static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
155 {
156 	out_be32(&ctx->spu->problem->signal_notify2, data);
157 }
158 
159 static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
160 {
161 	struct spu *spu = ctx->spu;
162 	struct spu_priv2 __iomem *priv2 = spu->priv2;
163 	u64 tmp;
164 
165 	spin_lock_irq(&spu->register_lock);
166 	tmp = in_be64(&priv2->spu_cfg_RW);
167 	if (val)
168 		tmp |= 1;
169 	else
170 		tmp &= ~1;
171 	out_be64(&priv2->spu_cfg_RW, tmp);
172 	spin_unlock_irq(&spu->register_lock);
173 }
174 
175 static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
176 {
177 	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
178 }
179 
180 static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
181 {
182 	struct spu *spu = ctx->spu;
183 	struct spu_priv2 __iomem *priv2 = spu->priv2;
184 	u64 tmp;
185 
186 	spin_lock_irq(&spu->register_lock);
187 	tmp = in_be64(&priv2->spu_cfg_RW);
188 	if (val)
189 		tmp |= 2;
190 	else
191 		tmp &= ~2;
192 	out_be64(&priv2->spu_cfg_RW, tmp);
193 	spin_unlock_irq(&spu->register_lock);
194 }
195 
196 static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
197 {
198 	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
199 }
200 
201 static u32 spu_hw_npc_read(struct spu_context *ctx)
202 {
203 	return in_be32(&ctx->spu->problem->spu_npc_RW);
204 }
205 
206 static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
207 {
208 	out_be32(&ctx->spu->problem->spu_npc_RW, val);
209 }
210 
211 static u32 spu_hw_status_read(struct spu_context *ctx)
212 {
213 	return in_be32(&ctx->spu->problem->spu_status_R);
214 }
215 
216 static char *spu_hw_get_ls(struct spu_context *ctx)
217 {
218 	return ctx->spu->local_store;
219 }
220 
/* Write the SPU run-control register (start/stop the SPU).
 * The eieio barrier orders all earlier memory-mapped stores (e.g.
 * local store or npc setup) ahead of the run-control write, so the
 * SPU never starts before its state is fully written out.
 */
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	eieio();
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
}
226 
/* Request the SPU to stop and busy-wait until it is no longer running.
 * NOTE(review): the wait loop spins with the register lock held and
 * interrupts disabled; this is presumably bounded by how fast the SPU
 * reacts to SPU_RUNCNTL_STOP — there is no timeout.
 */
static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	/* wait for the RUNNING bit to clear before returning */
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}
235 
236 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
237 {
238 	struct spu_problem *prob = ctx->spu->problem;
239 	int ret;
240 
241 	spin_lock_irq(&ctx->spu->register_lock);
242 	ret = -EAGAIN;
243 	if (in_be32(&prob->dma_querytype_RW))
244 		goto out;
245 	ret = 0;
246 	out_be32(&prob->dma_querymask_RW, mask);
247 	out_be32(&prob->dma_querytype_RW, mode);
248 out:
249 	spin_unlock_irq(&ctx->spu->register_lock);
250 	return ret;
251 }
252 
253 static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx)
254 {
255 	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
256 }
257 
258 static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
259 {
260 	return in_be32(&ctx->spu->problem->dma_qstatus_R);
261 }
262 
/* Enqueue one DMA command on the MFC via the problem-state registers.
 * The registers must be written in this order (LSA, EA, size/tag,
 * class/cmd); reading back the class/cmd register returns the command
 * status. Returns 0 on success, -EAGAIN when the command queue is
 * full, -EINVAL for any other status.
 */
static int spu_hw_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	u32 status;
	struct spu_problem *prob = ctx->spu->problem;

	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&prob->mfc_lsa_W, cmd->lsa);
	out_be64(&prob->mfc_ea_W, cmd->ea);
	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
				cmd->size << 16 | cmd->tag);
	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
				cmd->class << 16 | cmd->cmd);
	/* reading the command register back yields the enqueue status */
	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
	spin_unlock_irq(&ctx->spu->register_lock);

	/* low 16 bits of status: 0 = accepted, 2 = queue full */
	switch (status & 0xffff) {
	case 0:
		return 0;
	case 2:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}
288 
/* Operations for a context that is currently bound to a physical SPU;
 * each entry accesses the hardware registers directly (contrast with
 * the backing-store variant used when the context is scheduled out).
 */
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_read = spu_hw_signal1_read,
	.signal1_write = spu_hw_signal1_write,
	.signal2_read = spu_hw_signal2_read,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
	.set_mfc_query = spu_hw_set_mfc_query,
	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
	.send_mfc_command = spu_hw_send_mfc_command,
};
314