/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"


atomic_t nr_spu_contexts = ATOMIC_INIT(0);

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;

	atomic_inc(&nr_spu_contexts);
	goto out;

out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

/* Final kref release: called from put_spu_context() when the last
 * reference to the context is dropped. */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;

	ctx = container_of(kref, struct spu_context, kref);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx);
}

struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}
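/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): a context starts with a kref of one from alloc_spu_context() and
 * is freed through destroy_spu_context() once the final put_spu_context()
 * drops that reference; spu_forget() gives up the mm reference first so
 * the context can no longer become runnable.
 *
 *	struct spu_context *ctx;
 *
 *	ctx = alloc_spu_context(NULL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...
 *	spu_forget(ctx);
 *	put_spu_context(ctx);
 */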
/* Tear down all existing user space mappings of the context's local store
 * and problem state areas. */
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_runnable - lock spu context and make sure it is in runnable state
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 with the context locked on success.
 * Returns a negative error with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		/*
		 * Context is about to be freed, so we can't acquire it anymore.
		 */
		if (!ctx->owner)
			goto out_unlock;
		ret = spu_activate(ctx, flags);
		if (ret)
			goto out_unlock;
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 */
void spu_acquire_saved(struct spu_context *ctx)
{
	spu_acquire(ctx);
	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}
}

/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx: context to unlock
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}

void spu_set_profile_private_kref(struct spu_context *ctx,
				  struct kref *prof_info_kref,
				  void (*prof_info_release)(struct kref *kref))
{
	ctx->prof_priv_kref = prof_info_kref;
	ctx->prof_priv_release = prof_info_release;
}
EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);

void *spu_get_profile_private_kref(struct spu_context *ctx)
{
	return ctx->prof_priv_kref;
}
EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
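/*
 * Illustrative locking sketch (hypothetical helper, not part of this
 * file): spu_acquire_runnable() returns with the context locked on
 * success and unlocked on failure, so only the success path is paired
 * with spu_release().  The runcntl_write() call here merely stands in
 * for whatever work a real caller does on the runnable context.
 *
 *	static int spufs_run_example(struct spu_context *ctx)
 *	{
 *		int ret;
 *
 *		ret = spu_acquire_runnable(ctx, 0);
 *		if (ret)
 *			return ret;
 *
 *		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 *
 *		spu_release(ctx);
 *		return 0;
 *	}
 */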