/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"


/*
 * Count of live SPU contexts: incremented in alloc_spu_context(),
 * decremented in destroy_spu_context().  Non-static, so presumably
 * read by other spufs files — not visible here.
 */
atomic_t nr_spu_contexts = ATOMIC_INIT(0);

/**
 * alloc_spu_context - allocate and initialize a new SPU context
 * @gang:	gang to add the new context to, or NULL for none
 *
 * Returns the new context with a single kref held, or NULL on
 * allocation failure (either of the context itself or of its CSA).
 *
 * The context starts out in SPU_STATE_SAVED using the backing-store
 * ops, and takes a reference on the current task's mm (dropped later
 * in spu_forget()).
 */
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	/* NOTE(review): get_task_mm() may return NULL for kernel threads;
	 * spu_forget() appears to rely on this being non-NULL — presumably
	 * contexts are only created from user tasks.  Confirm against callers.
	 */
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

/**
 * destroy_spu_context - kref release callback freeing an SPU context
 * @kref:	embedded kref of the context to destroy
 *
 * Invoked via kref_put() from put_spu_context() once the last
 * reference is dropped.  Deactivates the context under state_mutex,
 * tears down its CSA, detaches it from its gang (if any), releases
 * the profiling private data reference, and frees the context.
 */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	/* A context on the runqueue at this point would be a refcount bug. */
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx);
}

/**
 * get_spu_context - take an additional reference on a context
 * @ctx:	context to reference
 *
 * Returns @ctx for caller convenience.
 */
struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

/**
 * put_spu_context - drop a reference on a context
 * @ctx:	context to unreference
 *
 * Returns nonzero if this was the last reference and the context
 * was destroyed (see kref_put()).
 */
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	/* Drop the mm reference taken by get_task_mm() at allocation. */
	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}

/**
 * spu_unmap_mappings - zap all user-space mappings of a context's files
 * @ctx:	context whose mappings to remove
 *
 * For each spufs file region the context has mapped (local store,
 * MFC, control, signal notification, MSS, problem state), unmap the
 * whole range so that subsequent user accesses fault and can be
 * redirected.  mapping_lock serializes against the mappings being
 * set up or torn down concurrently.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx:	spu context to lock
 *
 * Returns 0 on success, or the (negative) error from spu_acquire()
 * if the state mutex could not be taken.  If the context was active,
 * it is deactivated and SPU_SCHED_WAS_ACTIVE is recorded so that
 * spu_release_saved() can reactivate it.
 */
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}

/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx:	context to unlock
 *
 * Counterpart to spu_acquire_saved(): if the context was active before
 * being saved, ask the scheduler to activate it again, then drop the
 * state mutex.
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}