/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

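/* Read one word from the SPU outbound mailbox, if available.  The low
   byte of mb_stat_R (mask 0x0000ff) is the outbound entry count; the
   function returns the number of bytes read (4) or 0 when the mailbox
   is empty. */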
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mbox_stat;
	int ret = 0;

	spin_lock_irq(&spu->register_lock);
	mbox_stat = in_be32(&prob->mb_stat_R);
	if (mbox_stat & 0x0000ff) {
		*data = in_be32(&prob->pu_mb_R);
		ret = 4;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->mb_stat_R);
}

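/* Poll support for the mailbox files: POLLIN is reported when the
   interrupting outbound mailbox has data (stat & 0xff0000), POLLOUT
   when the inbound mailbox has free space (stat & 0x00ff00).  If the
   requested condition does not hold yet, the matching class 2
   interrupt is unmasked so the sleeper is woken once it does. */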
static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
					  unsigned int events)
{
	struct spu *spu = ctx->spu;
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	int ret = 0;
	u32 stat;

	spin_lock_irq(&spu->register_lock);
	stat = in_be32(&spu->problem->mb_stat_R);

	/* If the requested event is already pending, return the poll
	   mask.  Otherwise enable the corresponding interrupt so we get
	   notified, but first acknowledge any pending interrupt status
	   so we are not woken up spuriously. */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			out_be64(&priv1->int_stat_class2_RW, 0x1);
			out_be64(&priv1->int_mask_class2_RW,
				 in_be64(&priv1->int_mask_class2_RW) | 0x1);
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			out_be64(&priv1->int_stat_class2_RW, 0x10);
			out_be64(&priv1->int_mask_class2_RW,
				 in_be64(&priv1->int_mask_class2_RW) | 0x10);
		}
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

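/* Read one word from the SPU outbound interrupting mailbox via the
   privileged puint_mb_R register; the 64-bit register value is
   truncated to the 32 bits the caller expects.  On an empty mailbox
   the class 2 interrupt bit 0x1 is unmasked so the reader can sleep
   until data arrives.  Returns 4 or 0. */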
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		out_be64(&priv1->int_mask_class2_RW,
			 in_be64(&priv1->int_mask_class2_RW) | 0x1);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

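/* Write one word to the SPU inbound mailbox if mb_stat_R reports free
   space (mask 0x00ff00).  When the mailbox is full, the class 2
   interrupt bit 0x10 is unmasked so the writer is woken once space
   becomes available.  Returns 4 or 0. */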
static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
		/* we have space to write wbox_data to */
		out_be32(&prob->spu_mb_W, data);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		out_be64(&priv1->int_mask_class2_RW,
			 in_be64(&priv1->int_mask_class2_RW) | 0x10);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

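/* Plain accessors for the two signal notification registers in the
   problem state area. */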
static u32 spu_hw_signal1_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->signal_notify1);
}

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify1, data);
}

static u32 spu_hw_signal2_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->signal_notify2);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify2, data);
}

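/* The signalX_type operations toggle bits 0 and 1 of spu_cfg_RW,
   which select how signal notification 1 and 2 accumulate incoming
   writes (logical-OR mode when set, overwrite mode when clear). */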
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
	out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
	return ctx->spu->local_store;
}

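/* Write the run control register.  The eieio() barrier orders any
   preceding MMIO stores (such as an npc update) before the run
   control write reaches the SPU. */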
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	eieio();
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
}

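/* Request a stop through the run control register and busy-wait until
   the status register no longer reports SPU_STATUS_RUNNING, with the
   register lock held so the stop cannot race with other register
   accesses. */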
static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}

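/* Context operations used while the context is loaded on a physical
   SPU; spufs switches to the saved-state (backing store) variant when
   the context gets scheduled out. */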
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_read = spu_hw_signal1_read,
	.signal1_write = spu_hw_signal1_write,
	.signal2_read = spu_hw_signal2_read,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
};