/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

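/*
 * Read one word from the SPU's outbound mailbox, if the status register
 * reports that data is available.  Returns the number of bytes read (4)
 * on success, or 0 when the mailbox is empty.
 */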
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mbox_stat;
	int ret = 0;

	spin_lock_irq(&spu->register_lock);
	mbox_stat = in_be32(&prob->mb_stat_R);
	if (mbox_stat & 0x0000ff) {
		*data = in_be32(&prob->pu_mb_R);
		ret = 4;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->mb_stat_R);
}

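/*
 * Poll the mailbox status register.  POLLIN is reported when the
 * interrupting (outbound) mailbox holds data, POLLOUT when the inbound
 * mailbox has free space.  If a requested event is not pending yet, the
 * matching class 2 interrupt is acknowledged and unmasked so the caller
 * gets woken up once the condition becomes true.
 */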
static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
					  unsigned int events)
{
	struct spu *spu = ctx->spu;
	int ret = 0;
	u32 stat;

	spin_lock_irq(&spu->register_lock);
	stat = in_be32(&spu->problem->mb_stat_R);

	/*
	 * If the requested event is there, return the poll mask;
	 * otherwise enable the interrupt to get notified, but first
	 * mark any pending interrupts as done so we don't get woken
	 * up unnecessarily.
	 */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			spu_int_stat_clear(spu, 2, 0x1);
			spu_int_mask_or(spu, 2, 0x1);
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			spu_int_stat_clear(spu, 2, 0x10);
			spu_int_mask_or(spu, 2, 0x10);
		}
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

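/*
 * Read one word from the interrupting (outbound interrupt) mailbox if
 * data is available; otherwise unmask the class 2 mailbox interrupt so
 * the caller is notified when data arrives.  Returns 4 or 0.
 */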
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		spu_int_mask_or(spu, 2, 0x1);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

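/*
 * Write one word into the SPU's inbound mailbox if there is space for
 * it; otherwise unmask the mailbox-threshold interrupt so the caller is
 * woken up once space becomes available.  Returns 4 or 0.
 */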
static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
		/* we have space to write wbox_data to */
		out_be32(&prob->spu_mb_W, data);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		spu_int_mask_or(spu, 2, 0x10);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

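/* Direct access to the signal notification 1 and 2 registers. */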
static u32 spu_hw_signal1_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->signal_notify1);
}

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify1, data);
}

static u32 spu_hw_signal2_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->signal_notify2);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify2, data);
}

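/*
 * The signal notification type of each channel is kept in the two low
 * bits of the privileged SPU configuration register: bit 0 for signal 1,
 * bit 1 for signal 2 (a set bit selects logical-OR accumulation).
 */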
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

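/* Simple accessors for the next program counter, status and local store. */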
static u32 spu_hw_npc_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
	out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
	return ctx->spu->local_store;
}

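/*
 * Write the run-control register to start the SPU.  For an isolated-mode
 * start, the privileged control register is set up first, before the
 * run-control write that actually starts execution.
 */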
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock_irq(&ctx->spu->register_lock);
	if (val & SPU_RUNCNTL_ISOLATE)
		out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
	spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}

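/*
 * Set up an MFC tag-group query.  Fails with -EAGAIN if a previous query
 * is still outstanding, otherwise programs the query mask and type.
 */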
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
	struct spu_problem __iomem *prob = ctx->spu->problem;
	int ret;

	spin_lock_irq(&ctx->spu->register_lock);
	ret = -EAGAIN;
	if (in_be32(&prob->dma_querytype_RW))
		goto out;
	ret = 0;
	out_be32(&prob->dma_querymask_RW, mask);
	out_be32(&prob->dma_querytype_RW, mode);
out:
	spin_unlock_irq(&ctx->spu->register_lock);
	return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

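/*
 * Queue one DMA command on the MFC proxy command queue.  The size/tag
 * and class/command pairs are packed into the 32-bit halves of the
 * command union; the command status read back afterwards decides whether
 * the command was accepted (0), the queue was full (-EAGAIN) or the
 * command was rejected (-EINVAL).
 */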
static int spu_hw_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	u32 status;
	struct spu_problem __iomem *prob = ctx->spu->problem;

	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&prob->mfc_lsa_W, cmd->lsa);
	out_be64(&prob->mfc_ea_W, cmd->ea);
	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
				cmd->size << 16 | cmd->tag);
	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
				cmd->class << 16 | cmd->cmd);
	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
	spin_unlock_irq(&ctx->spu->register_lock);

	switch (status & 0xffff) {
	case 0:
		return 0;
	case 2:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

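/*
 * Context operations used while the context is loaded on a physical SPU;
 * all of them act directly on the mapped problem-state and privileged
 * registers of that SPU.
 */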
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_read = spu_hw_signal1_read,
	.signal1_write = spu_hw_signal1_write,
	.signal2_read = spu_hw_signal2_read,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
	.set_mfc_query = spu_hw_set_mfc_query,
	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
	.send_mfc_command = spu_hw_send_mfc_command,
};