1 /* backing_ops.c - query/set operations on saved SPU context. 2 * 3 * Copyright (C) IBM 2005 4 * Author: Mark Nutter <mnutter@us.ibm.com> 5 * 6 * These register operations allow SPUFS to operate on saved 7 * SPU contexts rather than hardware. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2, or (at your option) 12 * any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 */ 23 24 #include <linux/config.h> 25 #include <linux/module.h> 26 #include <linux/errno.h> 27 #include <linux/sched.h> 28 #include <linux/kernel.h> 29 #include <linux/mm.h> 30 #include <linux/vmalloc.h> 31 #include <linux/smp.h> 32 #include <linux/smp_lock.h> 33 #include <linux/stddef.h> 34 #include <linux/unistd.h> 35 36 #include <asm/io.h> 37 #include <asm/spu.h> 38 #include <asm/spu_csa.h> 39 #include <asm/mmu_context.h> 40 #include "spufs.h" 41 42 /* 43 * Reads/writes to various problem and priv2 registers require 44 * state changes, i.e. generate SPU events, modify channel 45 * counts, etc. 
 */

/*
 * Record a pending SPU event in the saved context state.
 *
 * ORs @event into the saved channel 0 data word.  The channel 0 count
 * is raised to 1 (making the event readable by the SPU program once
 * restored) only when the count was zero, the event bit was not
 * already pending, and the event is enabled in the channel 1 data
 * word.
 *
 * NOTE(review): channels 0/1 appear to be the SPU event status and
 * event mask channels -- confirm against the CBEA channel map.
 *
 * Called with ctx->csa.register_lock held (all callers in this file
 * take it before calling here).
 */
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

/*
 * Read one word from the saved SPU-to-PU (outbound) mailbox.
 *
 * Returns 4 (bytes read) and stores the word in *data when the low
 * byte of the saved mb_stat_R indicates data is available; returns 0
 * when the mailbox is empty.  A successful read clears the saved
 * status count, restores the SPU-side channel 28 count to 1
 * (presumably the SPU write-outbound-mailbox channel, so the SPU can
 * write again -- confirm), and raises the corresponding SPU event.
 */
static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

/*
 * Return the saved mailbox status register.  Lockless snapshot of a
 * single field; callers tolerate a possibly stale value.
 */
static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

/*
 * Read one word from the saved SPU-to-PU interrupting mailbox.
 *
 * Returns 4 and stores the word in *data when bits 16-23 of the saved
 * mb_stat_R show data available; on success the status count is
 * cleared, the SPU-side channel 30 count is restored to 1, and the
 * matching SPU event is raised.  When empty, returns 0 after enabling
 * bit 0 of the saved class 2 interrupt mask so the reader is woken by
 * the mailbox interrupt once the context runs again.
 */
static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

/*
 * Write one word into the saved PU-to-SPU (inbound) mailbox.
 *
 * Bits 8-15 of the saved mb_stat_R hold the free-slot count and the
 * saved channel 29 count tracks the fill level; the BUG_ON asserts
 * the two stay consistent (avail + used == depth of 4).  Returns 4 on
 * success after queueing the word, updating both counters, and
 * raising the written event.  When full, returns 0 after enabling bit
 * 4 (0x10) of the saved class 2 interrupt mask so the writer is woken
 * when space becomes available.
 */
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

/*
 * Return the saved signal notification 1 value (channel 3 data).
 * Lockless read of a single word.
 */
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

/*
 * Deliver a signal notification 1 value to the saved context.
 *
 * Bit 0 of the saved spu_cfg_RW selects logical-OR mode (accumulate
 * into channel 3 data) versus overwrite mode.  The channel count is
 * set to 1 so the value is readable, and the signal 1 event is
 * raised.
 */
static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

/*
 * Return the saved signal notification 2 value (channel 4 data).
 * Lockless read of a single word.
 */
static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

/*
 * Deliver a signal notification 2 value to the saved context.
 * Mirrors signal1_write, keyed on bit 1 of spu_cfg_RW and channel 4.
 */
static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

/*
 * Set (val non-zero) or clear bit 0 of the saved spu_cfg_RW: the
 * logical-OR mode flag for signal notification 1.  Read-modify-write
 * is done under the register lock.
 */
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

/* Return 1 if signal 1 is in logical-OR mode, else 0. */
static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

/*
 * Set (val non-zero) or clear bit 1 of the saved spu_cfg_RW: the
 * logical-OR mode flag for signal notification 2.
 */
static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

/* Return 1 if signal 2 is in logical-OR mode, else 0. */
static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

/* Return the saved next program counter.  Unlocked single-word read. */
static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

/*
 * Set the saved next program counter.  Unlocked single-word store;
 * presumably only called while the context cannot be scheduled in --
 * confirm against callers.
 */
static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

/* Return the saved SPU status register.  Unlocked single-word read. */
static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

/* Return the in-memory copy of the context's local store. */
static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

/*
 * Store @val into the saved run control register and keep the saved
 * status register's RUNNING bit in sync with it, so status reads on
 * the saved context reflect the requested run state.
 */
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

/* Convenience wrapper: request a stop of the saved context. */
static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

/*
 * Operation table used by spufs when a context's state lives in the
 * in-memory save area rather than on a physical SPU.
 */
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
};