/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
#include "sputrace.h"


atomic_t nr_spu_contexts = ATOMIC_INIT(0);

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ctx->stats.tstamp = ktime_get_ns();

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx->switch_log);
	kfree(ctx);
}

struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}

void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx:	spu context to lock
 */
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}

/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx:	context to unlock
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
			test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}
