/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

/**
 * alloc_spu_context - allocate and initialize a new SPU context
 * @gang: gang to add the new context to, or NULL for a standalone context
 *
 * The new context starts out in SPU_STATE_SAVED using the backing
 * (saved-state) ops; binding to a physical SPU is deferred until
 * spu_activate().  A reference on the current task's mm is taken here
 * and given up again in spu_forget().
 *
 * Returns the new context, or NULL if the context or its checkpoint
 * save area (csa) could not be allocated.
 */
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa) {
		/* csa local-store save area missing: allocation failed */
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	spin_lock_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);	/* mm ref dropped in spu_forget() */
	if (gang)
		spu_gang_add_ctx(gang, ctx);
	/* inherit the creating task's scheduling parameters */
	ctx->rt_priority = current->rt_priority;
	ctx->policy = current->policy;
	ctx->prio = current->prio;
	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

/**
 * destroy_spu_context - kref release function for an SPU context
 * @kref: embedded kref of the context whose last reference was dropped
 *
 * Deactivates the context under its state mutex, tears down the
 * checkpoint save area, unlinks the context from its gang (if any)
 * and frees it.  Invoked through put_spu_context(); not meant to be
 * called directly.
 */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	kfree(ctx);
}

/* take an additional reference on the context; returns ctx for convenience */
struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

/*
 * Drop a reference on the context; the last put frees it through
 * destroy_spu_context().  Returns the (nonzero) kref_put result when
 * the context was released.
 */
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/*
 * give up the mm reference when the context is about to be destroyed.
 *
 * The context is brought into saved state first, and is returned to the
 * caller unlocked (spu_release() is called here).
 */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;
	spu_acquire_saved(ctx);
	mm = ctx->owner;
	ctx->owner = NULL;	/* !owner marks the context as dying */
	mmput(mm);
	spu_release(ctx);
}

/**
 * spu_unmap_mappings - zap all user-space mappings of a context
 * @ctx: context whose spufs file mappings should be invalidated
 *
 * Removes the ptes for every mmap()ed spufs file of this context so
 * that subsequent user accesses must go through the page fault
 * handlers again.
 *
 * NOTE(review): the per-area lengths (LS_SIZE, 0x1000, PAGE_SIZE,
 * 0x20000) are assumed to cover the full mappable size of each file --
 * confirm against the corresponding spufs file definitions.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}

/**
 * spu_acquire_exclusive - lock spu context and protect against userspace access
 * @ctx: spu context to lock
 *
 * Note:
 * Returns 0 and with the context locked on success
 * Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_exclusive(struct spu_context *ctx)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	/*
	 * Context is about to be freed, so we can't acquire it anymore.
	 */
	if (!ctx->owner)
		goto out_unlock;

	if (ctx->state == SPU_STATE_SAVED) {
		/* not mapped while saved; just get it onto an SPU */
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * We need to exclude userspace access to the context.
		 *
		 * To protect against memory access we invalidate all ptes
		 * and make sure the pagefault handlers block on the mutex.
		 */
		spu_unmap_mappings(ctx);
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_runnable - lock spu context and make sure it is in runnable state
 * @ctx: spu context to lock
 * @flags: flags passed through to spu_activate()
 *
 * Note:
 * Returns 0 and with the context locked on success
 * Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
	int ret = -EINVAL;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		/*
		 * Context is about to be freed, so we can't acquire it anymore.
		 */
		if (!ctx->owner)
			goto out_unlock;
		ret = spu_activate(ctx, flags);
		if (ret)
			goto out_unlock;
	}

	return 0;

out_unlock:
	spu_release(ctx);
	return ret;
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 *
 * Unlike the _runnable/_exclusive variants this cannot fail; the
 * context is always returned locked, to be unlocked with spu_release().
 */
void spu_acquire_saved(struct spu_context *ctx)
{
	spu_acquire(ctx);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);
}