/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef SPUFS_H
#define SPUFS_H

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cpumask.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>

#define SPUFS_PS_MAP_SIZE	0x20000
#define SPUFS_MFC_MAP_SIZE	0x1000
#define SPUFS_CNTL_MAP_SIZE	0x1000
#define SPUFS_SIGNAL_MAP_SIZE	PAGE_SIZE
#define SPUFS_MSS_MAP_SIZE	0x1000

/* The magic number for our file system */
enum {
	SPUFS_MAGIC = 0x23c9b64e,
};

struct spu_context_ops;
struct spu_gang;

/* ctx->sched_flags */
enum {
	SPU_SCHED_NOTIFY_ACTIVE,
	SPU_SCHED_WAS_ACTIVE,	/* was active upon spu_acquire_saved() */
	SPU_SCHED_SPU_RUN,	/* context is within spu_run */
};

enum {
	SWITCH_LOG_BUFSIZE = 4096,
};

enum {
	SWITCH_LOG_START,
	SWITCH_LOG_STOP,
	SWITCH_LOG_EXIT,
};

struct switch_log {
	wait_queue_head_t	wait;
	unsigned long		head;
	unsigned long		tail;
	struct switch_log_entry {
		struct timespec	tstamp;
		s32		spu_id;
		u32		type;
		u32		val;
		u64		timebase;
	} log[];
};
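
/*
 * Illustrative sketch, not part of the spufs interface: the switch log is a
 * ring buffer of SWITCH_LOG_BUFSIZE entries, with 'head' advanced by the
 * producer and 'tail' by the consumer.  Assuming both indices wrap modulo
 * SWITCH_LOG_BUFSIZE, the number of entries waiting to be read can be
 * derived as below; the helper name is hypothetical.
 */
static inline unsigned long switch_log_example_used(struct switch_log *log)
{
	/* entries written by the scheduler but not yet consumed */
	return (log->head - log->tail) % SWITCH_LOG_BUFSIZE;
}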

struct spu_context {
	struct spu *spu;		  /* pointer to a physical SPU */
	struct spu_state csa;		  /* SPU context save area. */
	spinlock_t mmio_lock;		  /* protects mmio access */
	struct address_space *local_store; /* local store mapping.  */
	struct address_space *mfc;	   /* 'mfc' area mappings. */
	struct address_space *cntl;	   /* 'control' area mappings. */
	struct address_space *signal1;	   /* 'signal1' area mappings. */
	struct address_space *signal2;	   /* 'signal2' area mappings. */
	struct address_space *mss;	   /* 'mss' area mappings. */
	struct address_space *psmap;	   /* 'psmap' area mappings. */
	struct mutex mapping_lock;
	u64 object_id;			  /* user space pointer for oprofile */

	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
	struct mutex state_mutex;
	struct mutex run_mutex;

	struct mm_struct *owner;

	struct kref kref;
	wait_queue_head_t ibox_wq;
	wait_queue_head_t wbox_wq;
	wait_queue_head_t stop_wq;
	wait_queue_head_t mfc_wq;
	wait_queue_head_t run_wq;
	struct fasync_struct *ibox_fasync;
	struct fasync_struct *wbox_fasync;
	struct fasync_struct *mfc_fasync;
	u32 tagwait;
	struct spu_context_ops *ops;
	struct work_struct reap_work;
	unsigned long flags;
	unsigned long event_return;

	struct list_head gang_list;
	struct spu_gang *gang;
	struct kref *prof_priv_kref;
	void (*prof_priv_release)(struct kref *kref);

	/* owner thread */
	pid_t tid;

	/* scheduler fields */
	struct list_head rq;
	unsigned int time_slice;
	unsigned long sched_flags;
	cpumask_t cpus_allowed;
	int policy;
	int prio;
	int last_ran;

	/* statistics */
	struct {
		/* updates protected by ctx->state_mutex */
		enum spu_utilization_state util_state;
		unsigned long long tstamp;	/* time of last state switch */
		unsigned long long times[SPU_UTIL_MAX];
		unsigned long long vol_ctx_switch;
		unsigned long long invol_ctx_switch;
		unsigned long long min_flt;
		unsigned long long maj_flt;
		unsigned long long hash_flt;
		unsigned long long slb_flt;
		unsigned long long slb_flt_base;     /* # at last ctx switch */
		unsigned long long class2_intr;
		unsigned long long class2_intr_base; /* # at last ctx switch */
		unsigned long long libassist;
	} stats;

	/* context switch log */
	struct switch_log *switch_log;

	struct list_head aff_list;
	int aff_head;
	int aff_offset;
};

struct spu_gang {
	struct list_head list;
	struct mutex mutex;
	struct kref kref;
	int contexts;

	struct spu_context *aff_ref_ctx;
	struct list_head aff_list_head;
	struct mutex aff_mutex;
	int aff_flags;
	struct spu *aff_ref_spu;
	atomic_t aff_sched_count;
};

/* Flag bits for spu_gang aff_flags */
#define AFF_OFFSETS_SET		1
#define AFF_MERGED		2

struct mfc_dma_command {
	int32_t pad;	/* reserved */
	uint32_t lsa;	/* local storage address */
	uint64_t ea;	/* effective address */
	uint16_t size;	/* transfer size */
	uint16_t tag;	/* command tag */
	uint16_t class;	/* class ID */
	uint16_t cmd;	/* command opcode */
};
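
/*
 * Illustrative sketch, not part of the spufs interface: how a plain 'put'
 * transfer might be described before being handed to ops->send_mfc_command().
 * The MFC_PUT_CMD opcode is assumed to be available from <asm/spu.h>; the
 * helper name is hypothetical.
 */
static inline void mfc_dma_example_put(struct mfc_dma_command *cmd,
				       uint32_t lsa, uint64_t ea,
				       uint16_t size, uint16_t tag)
{
	cmd->pad   = 0;
	cmd->lsa   = lsa;		/* source address in SPU local store */
	cmd->ea    = ea;		/* destination effective address */
	cmd->size  = size;		/* bytes, subject to MFC size/alignment rules */
	cmd->tag   = tag;		/* tag group used for completion queries */
	cmd->class = 0;			/* default transfer/replacement class */
	cmd->cmd   = MFC_PUT_CMD;	/* assumed opcode constant from <asm/spu.h> */
}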

/* SPU context query/set operations. */
struct spu_context_ops {
	int (*mbox_read) (struct spu_context * ctx, u32 * data);
	u32(*mbox_stat_read) (struct spu_context * ctx);
	unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
					unsigned int events);
	int (*ibox_read) (struct spu_context * ctx, u32 * data);
	int (*wbox_write) (struct spu_context * ctx, u32 data);
	u32(*signal1_read) (struct spu_context * ctx);
	void (*signal1_write) (struct spu_context * ctx, u32 data);
	u32(*signal2_read) (struct spu_context * ctx);
	void (*signal2_write) (struct spu_context * ctx, u32 data);
	void (*signal1_type_set) (struct spu_context * ctx, u64 val);
	u64(*signal1_type_get) (struct spu_context * ctx);
	void (*signal2_type_set) (struct spu_context * ctx, u64 val);
	u64(*signal2_type_get) (struct spu_context * ctx);
	u32(*npc_read) (struct spu_context * ctx);
	void (*npc_write) (struct spu_context * ctx, u32 data);
	u32(*status_read) (struct spu_context * ctx);
	char*(*get_ls) (struct spu_context * ctx);
	void (*privcntl_write) (struct spu_context *ctx, u64 data);
	u32 (*runcntl_read) (struct spu_context * ctx);
	void (*runcntl_write) (struct spu_context * ctx, u32 data);
	void (*runcntl_stop) (struct spu_context * ctx);
	void (*master_start) (struct spu_context * ctx);
	void (*master_stop) (struct spu_context * ctx);
	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
	u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
	u32 (*get_mfc_free_elements)(struct spu_context *ctx);
	int (*send_mfc_command)(struct spu_context * ctx,
				struct mfc_dma_command * cmd);
	void (*dma_info_read) (struct spu_context * ctx,
			       struct spu_dma_info * info);
	void (*proxydma_info_read) (struct spu_context * ctx,
				    struct spu_proxydma_info * info);
	void (*restart_dma)(struct spu_context *ctx);
};

extern struct spu_context_ops spu_hw_ops;
extern struct spu_context_ops spu_backing_ops;

struct spufs_inode_info {
	struct spu_context *i_ctx;
	struct spu_gang *i_gang;
	struct inode vfs_inode;
	int i_openers;
};
#define SPUFS_I(inode) \
	container_of(inode, struct spufs_inode_info, vfs_inode)

struct spufs_tree_descr {
	const char *name;
	const struct file_operations *ops;
	int mode;
	size_t size;
};

extern struct spufs_tree_descr spufs_dir_contents[];
extern struct spufs_tree_descr spufs_dir_nosched_contents[];
extern struct spufs_tree_descr spufs_dir_debug_contents[];

/* system call implementation */
extern struct spufs_calls spufs_calls;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
long spufs_create(struct nameidata *nd, unsigned int flags,
			mode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);

extern const struct file_operations spufs_context_fops;

/* gang management */
struct spu_gang *alloc_spu_gang(void);
struct spu_gang *get_spu_gang(struct spu_gang *gang);
int put_spu_gang(struct spu_gang *gang);
void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);

/* fault handling */
int spufs_handle_class1(struct spu_context *ctx);
int spufs_handle_class0(struct spu_context *ctx);

/* affinity */
struct spu *affinity_check(struct spu_context *ctx);

/* context management */
extern atomic_t nr_spu_contexts;
static inline int __must_check spu_acquire(struct spu_context *ctx)
{
	return mutex_lock_interruptible(&ctx->state_mutex);
}

static inline void spu_release(struct spu_context *ctx)
{
	mutex_unlock(&ctx->state_mutex);
}
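
/*
 * Illustrative sketch, not part of the spufs interface: the usual pattern is
 * to take ctx->state_mutex via spu_acquire(), bail out on -ERESTARTSYS, and
 * drop it again with spu_release() once done.  The helper name is
 * hypothetical.
 */
static inline int spu_acquire_example(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire(ctx);	/* interruptible; may return -ERESTARTSYS */
	if (ret)
		return ret;

	/* ... operate on ctx while state_mutex is held ... */

	spu_release(ctx);
	return 0;
}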

struct spu_context * alloc_spu_context(struct spu_gang *gang);
void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx);
int put_spu_context(struct spu_context *ctx);
void spu_unmap_mappings(struct spu_context *ctx);

void spu_forget(struct spu_context *ctx);
int __must_check spu_acquire_saved(struct spu_context *ctx);
void spu_release_saved(struct spu_context *ctx);

int spu_stopped(struct spu_context *ctx, u32 * stat);
void spu_del_from_rq(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
void spu_update_sched_info(struct spu_context *ctx);
void __spu_update_sched_info(struct spu_context *ctx);
int __init spu_sched_init(void);
void spu_sched_exit(void);

extern char *isolated_loader;

/*
 * spufs_wait
 *	Same as wait_event_interruptible(), except that here
 *	we need to call spu_release(ctx) before sleeping, and
 *	then spu_acquire(ctx) when awoken.
 *
 *	Returns with state_mutex re-acquired when successful or
 *	with -ERESTARTSYS and the state_mutex dropped when interrupted.
 */

#define spufs_wait(wq, condition)					\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spu_release(ctx);					\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		schedule();						\
		__ret = spu_acquire(ctx);				\
		if (__ret)						\
			break;						\
	}								\
	finish_wait(&(wq), &__wait);					\
	__ret;								\
})

size_t spu_wbox_write(struct spu_context *ctx, u32 data);
size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
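
/*
 * Illustrative sketch, not part of the spufs interface: spufs_wait() expects
 * a variable named 'ctx' to be in scope and state_mutex to be held on entry,
 * and it assumes the includer also pulls in <linux/sched.h>, as the spufs .c
 * files do.  A caller typically sleeps on one of the context wait queues
 * until a condition such as spu_ibox_read() succeeding holds; on
 * -ERESTARTSYS the state_mutex has already been dropped.  The helper name is
 * hypothetical.
 */
static inline int spufs_wait_example(struct spu_context *ctx, u32 *data)
{
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	/* sleeps with state_mutex released, returns with it re-acquired */
	ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, data));
	if (ret == 0)
		spu_release(ctx);
	return ret;
}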

/* irq callback funcs. */
void spufs_ibox_callback(struct spu *spu);
void spufs_wbox_callback(struct spu *spu);
void spufs_stop_callback(struct spu *spu, int irq);
void spufs_mfc_callback(struct spu *spu);
void spufs_dma_callback(struct spu *spu, int type);

extern struct spu_coredump_calls spufs_coredump_calls;
struct spufs_coredump_reader {
	char *name;
	ssize_t (*read)(struct spu_context *ctx,
			char __user *buffer, size_t size, loff_t *pos);
	u64 (*get)(struct spu_context *ctx);
	size_t size;
};
extern struct spufs_coredump_reader spufs_coredump_read[];
extern int spufs_coredump_num_notes;

extern int spu_init_csa(struct spu_state *csa);
extern void spu_fini_csa(struct spu_state *csa);
extern int spu_save(struct spu_state *prev, struct spu *spu);
extern int spu_restore(struct spu_state *new, struct spu *spu);
extern int spu_switch(struct spu_state *prev, struct spu_state *new,
		      struct spu *spu);
extern int spu_alloc_lscsa(struct spu_state *csa);
extern void spu_free_lscsa(struct spu_state *csa);

extern void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state);

#define spu_context_trace(name, ctx, spu) \
	trace_mark(name, "ctx %p spu %p", ctx, spu);
#define spu_context_nospu_trace(name, ctx) \
	trace_mark(name, "ctx %p", ctx);

#endif