xref: /openbmc/linux/arch/powerpc/include/asm/sstep.h (revision e7bae9bb)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
4  */
5 #include <asm/inst.h>
6 
7 struct pt_regs;
8 
/*
 * We don't allow single-stepping an mtmsrd that would clear
 * MSR_RI, since that would make the exception unrecoverable.
 * Since we need to single-step to proceed from a breakpoint,
 * we don't allow putting a breakpoint on an mtmsrd instruction.
 * Similarly we don't allow breakpoints on rfid instructions.
 * These macros tell us if an instruction is a mtmsrd or rfid.
 * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
 * and an mtmsrd (64-bit).
 *
 * Each mask keeps the opcode bits of the instruction word and drops the
 * operand (register) fields, so the comparison matches the mnemonic
 * regardless of which registers the instruction names.
 */
#define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
#define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
#define IS_RFI(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
22 
/*
 * Instruction classes, returned in the low bits of the type word
 * (see INSTR_TYPE_MASK below).
 *
 * NOTE: the relative order of the LOAD..STCX enumerators matters —
 * the OP_IS_LOAD/OP_IS_STORE/OP_IS_LOAD_STORE range checks below
 * depend on them being contiguous and in this order.
 */
enum instruction_type {
	COMPUTE,		/* arith/logical/CR op, etc. */
	LOAD,			/* load and store types need to be contiguous */
	LOAD_MULTI,		/* load multiple */
	LOAD_FP,		/* floating-point load */
	LOAD_VMX,		/* VMX (Altivec) load */
	LOAD_VSX,		/* VSX load */
	STORE,
	STORE_MULTI,		/* store multiple */
	STORE_FP,		/* floating-point store */
	STORE_VMX,		/* VMX (Altivec) store */
	STORE_VSX,		/* VSX store */
	LARX,			/* load and reserve (l*arx) */
	STCX,			/* store conditional (st*cx.) */
	BRANCH,
	MFSPR,
	MTSPR,
	CACHEOP,		/* which op is in the CACHEOP_MASK bits */
	BARRIER,		/* which barrier is in the BARRIER_MASK bits */
	SYSCALL,
	SYSCALL_VECTORED_0,
	MFMSR,
	MTMSR,
	RFI,
	INTERRUPT,
	UNKNOWN			/* instruction could not be decoded */
};
50 
/* The instruction class occupies the low 5 bits of the type word. */
#define INSTR_TYPE_MASK	0x1f

/* These range checks rely on the LOAD..STCX enumerators being contiguous. */
#define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
#define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
56 
/*
 * Flag bits ORed into the type word above the class bits.  Note that
 * the 0x20/0x40/0x80 values are reused by several groups below; which
 * meaning applies depends on the instruction class in the low bits.
 */

/* Compute flags, ORed in with type */
#define SETREG		0x20	/* result goes to a GPR */
#define SETCC		0x40	/* sets a CR field (see ccval) */
#define SETXER		0x80	/* sets XER (see xerval) */

/* Branch flags, ORed in with type */
#define SETLK		0x20	/* sets the link register */
#define BRTAKEN		0x40	/* branch is taken */
#define DECCTR		0x80	/* decrements CTR */

/* Load/store flags, ORed in with type */
#define SIGNEXT		0x20	/* sign-extend the loaded value */
#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
#define BYTEREV		0x80	/* byte-reversed (load/store-reversed forms) */
#define FPCONV		0x100	/* FP single<->double conversion */

/* Barrier type field, ORed in with type */
#define BARRIER_MASK	0xe0
#define BARRIER_SYNC	0x00
#define BARRIER_ISYNC	0x20
#define BARRIER_EIEIO	0x40
#define BARRIER_LWSYNC	0x60
#define BARRIER_PTESYNC	0x80
80 
/* Cacheop values, ORed in with type (selects which cache op to emulate) */
#define CACHEOP_MASK	0x700
#define DCBST		0
#define DCBF		0x100
#define DCBTST		0x200
#define DCBT		0x300
#define ICBI		0x400
#define DCBZ		0x500

/* VSX flags values (carried in instruction_op.vsx_flags, not the type word) */
#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
#define VSX_SPLAT	2	/* store loaded value into all elements */
#define VSX_LDLEFT	4	/* load VSX register from left */
#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */

/* Prefixed flag, ORed in with type (instruction is 8 bytes, see GETLENGTH) */
#define PREFIXED       0x800
98 
/* Size field in type word (access size; encoded above the PREFIXED bit) */
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)

#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)		/* instruction class */
#define GETLENGTH(t)   (((t) & PREFIXED) ? 8 : 4)	/* instruction length in bytes */

/* Build a type word from class t, flags f and size s. */
#define MKOP(t, f, s)	((t) | (f) | SIZE(s))

/* Prefix instruction operands */
#define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)	/* RA field (bits 16-20) */
#define GET_PREFIX_R(i)		((i) & (1ul << 20))	/* R bit (bit 20) of the prefix */
111 
/*
 * Code-patching site label, defined elsewhere (not visible in this header);
 * NOTE(review): presumably marks the out-of-line instruction execution slot —
 * confirm against the file that defines it.
 */
extern s32 patch__exec_instr;
113 
/*
 * Decoded form of one instruction, filled in by analyse_instr().
 */
struct instruction_op {
	int type;		/* class + flag + size bits (see MKOP/GETTYPE) */
	int reg;		/* target/source register number */
	unsigned long val;	/* computed value, where applicable */
	/* For LOAD/STORE/LARX/STCX */
	unsigned long ea;	/* effective address of the access */
	int update_reg;		/* base register to update (UPDATE forms) */
	/* For MFSPR */
	int spr;		/* special-purpose register number */
	u32 ccval;		/* new CR value (SETCC) */
	u32 xerval;		/* new XER value (SETXER) */
	u8 element_size;	/* for VSX/VMX loads/stores */
	u8 vsx_flags;		/* VSX_* flags defined above */
};
128 
/*
 * A 128-bit VSX/VMX register value, viewable at several element widths.
 */
union vsx_reg {
	u8	b[16];		/* 16 bytes */
	u16	h[8];		/* 8 halfwords */
	u32	w[4];		/* 4 words */
	unsigned long d[2];	/* 2 doublewords — assumes 64-bit unsigned long */
	float	fp[4];		/* 4 single-precision values */
	double	dp[2];		/* 2 double-precision values */
	__vector128 v;		/* raw vector representation */
};
138 
/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 *
 * op->type is encoded as described by the enum and flag definitions
 * above (class in the low 5 bits, flags and size above them).
 */
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
			 struct ppc_inst instr);
150 
/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs.
 *
 * @reg: register set to update
 * @op:  decoded operation, as produced by analyse_instr()
 */
void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
156 
/*
 * Emulate instructions that cause a transfer of control,
 * arithmetic/logical instructions, loads and stores,
 * cache operations and barriers.
 *
 * @regs:  register state to emulate against
 * @instr: the instruction to emulate
 *
 * Returns 1 if the instruction was emulated successfully,
 * 0 if it could not be emulated, or -1 for an instruction that
 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
 */
extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);
167 
/*
 * Emulate a load or store instruction by reading/writing the
 * memory of the current process.  FP/VMX/VSX registers are assumed
 * to hold live values if the appropriate enable bit in regs->msr is
 * set; otherwise this will use the saved values in the thread struct
 * for user-mode accesses.
 *
 * NOTE(review): return convention not visible in this header —
 * confirm against the definition in sstep.c.
 */
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
176 
/*
 * Transfer a VSX/VMX register value to or from memory for an emulated
 * access: emulate_vsx_load() fills *reg from mem, emulate_vsx_store()
 * writes *reg out to mem.  cross_endian indicates the access uses the
 * opposite byte order from the kernel — presumably requiring a swap;
 * confirm against the implementation.
 */
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
			     const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
			      const union vsx_reg *reg, void *mem,
			      bool cross_endian);

/* Emulate a dcbz (data cache block zero) at effective address ea. */
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
183