// SPDX-License-Identifier: GPL-2.0-or-later
/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads and writes to various problem and priv2 registers require
 * state changes, i.e. they generate SPU events, modify channel
 * counts, etc.
 */

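/*
 * Reflect an MFC event into the saved channel state.  Per the CBEA
 * channel numbering, channel 0 is the SPU event status channel and
 * channel 1 holds the event mask.  The event bit is always recorded
 * in the channel 0 data word, but the channel 0 count is raised to 1
 * only when the event is newly pending and enabled in the mask, so
 * that a blocked SPU-side read of the event status would wake up.
 */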
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

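/*
 * Layout of mb_stat_R as used throughout this file: bits 0-7 hold
 * the SPU outbound mailbox count, bits 8-15 the inbound (wbox)
 * free-slot count, and bits 16-23 the outbound interrupt mailbox
 * count.  The mailbox ops below return the number of bytes
 * transferred (4) on success and 0 when the caller would need to
 * block.
 */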
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
					  __poll_t events)
{
	__poll_t ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/* if the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily */

	if (events & (EPOLLIN | EPOLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= EPOLLIN | EPOLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_INTR;
		}
	}
	if (events & (EPOLLOUT | EPOLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= EPOLLOUT | EPOLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_THRESHOLD_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}

static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

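/*
 * The saved count of channel 29 (SPU_RdInMbox, the SPU-side read
 * channel of this mailbox) equals the number of valid entries and so
 * doubles as the next write slot in spu_mailbox_data[].  The BUG_ON
 * below asserts the invariant that the free-slot count advertised in
 * mb_stat_R is always 4 minus that index, matching the 4-deep queue.
 */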
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |=
			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

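/*
 * Signal notification 1 and 2 are backed by saved channels 3 and 4
 * (SPU_RdSigNotify1/2 in the CBEA).  Bits 0 and 1 of spu_cfg_RW
 * select logical-OR mode for the respective register: when set, new
 * writes are OR-ed into the pending value instead of replacing it.
 */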
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

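/*
 * The type_set/type_get pairs below toggle and report those same
 * spu_cfg_RW mode bits individually, taking the register lock for
 * the read-modify-write.
 */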
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

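/*
 * For a saved context the local store is the vmalloc'ed image kept
 * in the CSA rather than a hardware mapping; callers use the
 * returned pointer exactly as they would the real local store.
 */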
static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
	ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}

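/*
 * Emulate the side effects of a run-control write on the saved
 * status word: starting the SPU clears the stop-reason bits and sets
 * SPU_STATUS_RUNNING, just as the hardware would report.
 */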
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

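/*
 * The master run control bit in MFC_SR1 gates whether the SPU may
 * run at all; master_start and master_stop just set or clear it in
 * the saved privileged state.
 */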
static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

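/*
 * A non-zero dma_querytype_RW means a tag-group query is already in
 * flight, so a new one cannot be posted until it completes and the
 * caller gets -EAGAIN.
 */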
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
					u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;
	/* FIXME: what are the side-effects of this? */
	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	/* In the current implementation, the SPU context is always
	 * acquired in runnable state when new bits are added to the
	 * mask (tagwait), so it's sufficient just to mask
	 * dma_tagstatus_R with the 'mask' parameter here.
	 */
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}

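/*
 * Queueing an MFC command into the saved proxy queue is not
 * implemented (see the FIXME below), so the op always returns
 * -EAGAIN and the caller must retry, presumably once the context is
 * loaded onto real hardware.
 */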
static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

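/*
 * Method table for contexts in backing (saved) state; its hardware
 * counterpart is spu_hw_ops.  spufs file operations dispatch through
 * ctx->ops, so they need not know whether the context is currently
 * loaded on an SPU.
 */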
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.privcntl_write = spu_backing_privcntl_write,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};