xref: /openbmc/linux/arch/arm64/lib/insn.c (revision 6c9f86d3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitops.h>
9 #include <linux/bug.h>
10 #include <linux/printk.h>
11 #include <linux/sizes.h>
12 #include <linux/types.h>
13 
14 #include <asm/debug-monitors.h>
15 #include <asm/errno.h>
16 #include <asm/insn.h>
17 #include <asm/kprobes.h>
18 
19 #define AARCH64_INSN_SF_BIT	BIT(31)
20 #define AARCH64_INSN_N_BIT	BIT(22)
21 #define AARCH64_INSN_LSL_12	BIT(22)
22 
23 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
24 						u32 *maskp, int *shiftp)
25 {
26 	u32 mask;
27 	int shift;
28 
29 	switch (type) {
30 	case AARCH64_INSN_IMM_26:
31 		mask = BIT(26) - 1;
32 		shift = 0;
33 		break;
34 	case AARCH64_INSN_IMM_19:
35 		mask = BIT(19) - 1;
36 		shift = 5;
37 		break;
38 	case AARCH64_INSN_IMM_16:
39 		mask = BIT(16) - 1;
40 		shift = 5;
41 		break;
42 	case AARCH64_INSN_IMM_14:
43 		mask = BIT(14) - 1;
44 		shift = 5;
45 		break;
46 	case AARCH64_INSN_IMM_12:
47 		mask = BIT(12) - 1;
48 		shift = 10;
49 		break;
50 	case AARCH64_INSN_IMM_9:
51 		mask = BIT(9) - 1;
52 		shift = 12;
53 		break;
54 	case AARCH64_INSN_IMM_7:
55 		mask = BIT(7) - 1;
56 		shift = 15;
57 		break;
58 	case AARCH64_INSN_IMM_6:
59 	case AARCH64_INSN_IMM_S:
60 		mask = BIT(6) - 1;
61 		shift = 10;
62 		break;
63 	case AARCH64_INSN_IMM_R:
64 		mask = BIT(6) - 1;
65 		shift = 16;
66 		break;
67 	case AARCH64_INSN_IMM_N:
68 		mask = 1;
69 		shift = 22;
70 		break;
71 	default:
72 		return -EINVAL;
73 	}
74 
75 	*maskp = mask;
76 	*shiftp = shift;
77 
78 	return 0;
79 }
80 
81 #define ADR_IMM_HILOSPLIT	2
82 #define ADR_IMM_SIZE		SZ_2M
83 #define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
84 #define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
85 #define ADR_IMM_LOSHIFT		29
86 #define ADR_IMM_HISHIFT		5
87 
88 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
89 {
90 	u32 immlo, immhi, mask;
91 	int shift;
92 
93 	switch (type) {
94 	case AARCH64_INSN_IMM_ADR:
95 		shift = 0;
96 		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
97 		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
98 		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
99 		mask = ADR_IMM_SIZE - 1;
100 		break;
101 	default:
102 		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
103 			pr_err("%s: unknown immediate encoding %d\n", __func__,
104 			       type);
105 			return 0;
106 		}
107 	}
108 
109 	return (insn >> shift) & mask;
110 }
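
/*
 * Worked decode sketch (illustrative instruction word, not taken from a
 * caller in this file): "adr x0, #0x10" encodes as 0x10000080, with
 * immlo (bits [30:29]) = 0 and immhi (bits [23:5]) = 4. Calling
 *
 *	aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, 0x10000080)
 *
 * re-joins the two halves as (immhi << ADR_IMM_HILOSPLIT) | immlo and
 * returns 0x10, the byte offset of the ADR target.
 */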
111 
112 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
113 				  u32 insn, u64 imm)
114 {
115 	u32 immlo, immhi, mask;
116 	int shift;
117 
118 	if (insn == AARCH64_BREAK_FAULT)
119 		return AARCH64_BREAK_FAULT;
120 
121 	switch (type) {
122 	case AARCH64_INSN_IMM_ADR:
123 		shift = 0;
124 		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
125 		imm >>= ADR_IMM_HILOSPLIT;
126 		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
127 		imm = immlo | immhi;
128 		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
129 			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
130 		break;
131 	default:
132 		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
133 			pr_err("%s: unknown immediate encoding %d\n", __func__,
134 			       type);
135 			return AARCH64_BREAK_FAULT;
136 		}
137 	}
138 
139 	/* Update the immediate field. */
140 	insn &= ~(mask << shift);
141 	insn |= (imm & mask) << shift;
142 
143 	return insn;
144 }
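
/*
 * Sketch of a typical use (hypothetical insn/imm values): for
 * AARCH64_INSN_IMM_12 the helper above reports mask = 0xfff and
 * shift = 10, so
 *
 *	aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 5)
 *
 * clears bits [21:10] of insn and ORs in 5 << 10. Note that imm is
 * masked rather than range-checked, so callers are expected to pass a
 * value that already fits the field.
 */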
145 
146 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
147 					u32 insn)
148 {
149 	int shift;
150 
151 	switch (type) {
152 	case AARCH64_INSN_REGTYPE_RT:
153 	case AARCH64_INSN_REGTYPE_RD:
154 		shift = 0;
155 		break;
156 	case AARCH64_INSN_REGTYPE_RN:
157 		shift = 5;
158 		break;
159 	case AARCH64_INSN_REGTYPE_RT2:
160 	case AARCH64_INSN_REGTYPE_RA:
161 		shift = 10;
162 		break;
163 	case AARCH64_INSN_REGTYPE_RM:
164 		shift = 16;
165 		break;
166 	default:
167 		pr_err("%s: unknown register type encoding %d\n", __func__,
168 		       type);
169 		return 0;
170 	}
171 
172 	return (insn >> shift) & GENMASK(4, 0);
173 }
174 
175 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
176 					u32 insn,
177 					enum aarch64_insn_register reg)
178 {
179 	int shift;
180 
181 	if (insn == AARCH64_BREAK_FAULT)
182 		return AARCH64_BREAK_FAULT;
183 
184 	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
185 		pr_err("%s: unknown register encoding %d\n", __func__, reg);
186 		return AARCH64_BREAK_FAULT;
187 	}
188 
189 	switch (type) {
190 	case AARCH64_INSN_REGTYPE_RT:
191 	case AARCH64_INSN_REGTYPE_RD:
192 		shift = 0;
193 		break;
194 	case AARCH64_INSN_REGTYPE_RN:
195 		shift = 5;
196 		break;
197 	case AARCH64_INSN_REGTYPE_RT2:
198 	case AARCH64_INSN_REGTYPE_RA:
199 		shift = 10;
200 		break;
201 	case AARCH64_INSN_REGTYPE_RM:
202 	case AARCH64_INSN_REGTYPE_RS:
203 		shift = 16;
204 		break;
205 	default:
206 		pr_err("%s: unknown register type encoding %d\n", __func__,
207 		       type);
208 		return AARCH64_BREAK_FAULT;
209 	}
210 
211 	insn &= ~(GENMASK(4, 0) << shift);
212 	insn |= reg << shift;
213 
214 	return insn;
215 }
216 
217 static const u32 aarch64_insn_ldst_size[] = {
218 	[AARCH64_INSN_SIZE_8] = 0,
219 	[AARCH64_INSN_SIZE_16] = 1,
220 	[AARCH64_INSN_SIZE_32] = 2,
221 	[AARCH64_INSN_SIZE_64] = 3,
222 };
223 
224 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
225 					 u32 insn)
226 {
227 	u32 size;
228 
229 	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
230 		pr_err("%s: unknown size encoding %d\n", __func__, type);
231 		return AARCH64_BREAK_FAULT;
232 	}
233 
234 	size = aarch64_insn_ldst_size[type];
235 	insn &= ~GENMASK(31, 30);
236 	insn |= size << 30;
237 
238 	return insn;
239 }
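
/*
 * The size field sits in bits [31:30] of the load/store encodings used
 * here: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword. For example
 * (illustrative), AARCH64_INSN_SIZE_32 makes this helper clear bits
 * [31:30] and OR in 2 << 30.
 */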
240 
241 static inline long label_imm_common(unsigned long pc, unsigned long addr,
242 				     long range)
243 {
244 	long offset;
245 
246 	if ((pc & 0x3) || (addr & 0x3)) {
247 		pr_err("%s: A64 instructions must be word aligned\n", __func__);
248 		return range;
249 	}
250 
251 	offset = ((long)addr - (long)pc);
252 
253 	if (offset < -range || offset >= range) {
254 		pr_err("%s: offset out of range\n", __func__);
255 		return range;
256 	}
257 
258 	return offset;
259 }
260 
261 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
262 					  enum aarch64_insn_branch_type type)
263 {
264 	u32 insn;
265 	long offset;
266 
267 	/*
268 	 * B/BL support a [-128M, 128M) offset.
269 	 * The arm64 virtual address arrangement guarantees that all kernel
270 	 * and module text is within +/-128M.
271 	 */
272 	offset = label_imm_common(pc, addr, SZ_128M);
273 	if (offset >= SZ_128M)
274 		return AARCH64_BREAK_FAULT;
275 
276 	switch (type) {
277 	case AARCH64_INSN_BRANCH_LINK:
278 		insn = aarch64_insn_get_bl_value();
279 		break;
280 	case AARCH64_INSN_BRANCH_NOLINK:
281 		insn = aarch64_insn_get_b_value();
282 		break;
283 	default:
284 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
285 		return AARCH64_BREAK_FAULT;
286 	}
287 
288 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
289 					     offset >> 2);
290 }
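
/*
 * Usage sketch with made-up addresses, assuming aarch64_insn_get_b_value()
 * is the A64 "b" opcode 0x14000000:
 *
 *	aarch64_insn_gen_branch_imm(0xffff800008000000, 0xffff800008001000,
 *				    AARCH64_INSN_BRANCH_NOLINK)
 *
 * computes offset = +0x1000, encodes imm26 = 0x1000 >> 2 = 0x400 and
 * returns 0x14000400, i.e. "b #0x1000".
 */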
291 
292 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
293 				     enum aarch64_insn_register reg,
294 				     enum aarch64_insn_variant variant,
295 				     enum aarch64_insn_branch_type type)
296 {
297 	u32 insn;
298 	long offset;
299 
300 	offset = label_imm_common(pc, addr, SZ_1M);
301 	if (offset >= SZ_1M)
302 		return AARCH64_BREAK_FAULT;
303 
304 	switch (type) {
305 	case AARCH64_INSN_BRANCH_COMP_ZERO:
306 		insn = aarch64_insn_get_cbz_value();
307 		break;
308 	case AARCH64_INSN_BRANCH_COMP_NONZERO:
309 		insn = aarch64_insn_get_cbnz_value();
310 		break;
311 	default:
312 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
313 		return AARCH64_BREAK_FAULT;
314 	}
315 
316 	switch (variant) {
317 	case AARCH64_INSN_VARIANT_32BIT:
318 		break;
319 	case AARCH64_INSN_VARIANT_64BIT:
320 		insn |= AARCH64_INSN_SF_BIT;
321 		break;
322 	default:
323 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
324 		return AARCH64_BREAK_FAULT;
325 	}
326 
327 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
328 
329 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
330 					     offset >> 2);
331 }
332 
333 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
334 				     enum aarch64_insn_condition cond)
335 {
336 	u32 insn;
337 	long offset;
338 
339 	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;
340 
341 	insn = aarch64_insn_get_bcond_value();
342 
343 	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
344 		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
345 		return AARCH64_BREAK_FAULT;
346 	}
347 	insn |= cond;
348 
349 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
350 					     offset >> 2);
351 }
352 
353 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
354 				enum aarch64_insn_branch_type type)
355 {
356 	u32 insn;
357 
358 	switch (type) {
359 	case AARCH64_INSN_BRANCH_NOLINK:
360 		insn = aarch64_insn_get_br_value();
361 		break;
362 	case AARCH64_INSN_BRANCH_LINK:
363 		insn = aarch64_insn_get_blr_value();
364 		break;
365 	case AARCH64_INSN_BRANCH_RETURN:
366 		insn = aarch64_insn_get_ret_value();
367 		break;
368 	default:
369 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
370 		return AARCH64_BREAK_FAULT;
371 	}
372 
373 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
374 }
375 
376 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
377 				    enum aarch64_insn_register base,
378 				    enum aarch64_insn_register offset,
379 				    enum aarch64_insn_size_type size,
380 				    enum aarch64_insn_ldst_type type)
381 {
382 	u32 insn;
383 
384 	switch (type) {
385 	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
386 		insn = aarch64_insn_get_ldr_reg_value();
387 		break;
388 	case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
389 		insn = aarch64_insn_get_signed_ldr_reg_value();
390 		break;
391 	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
392 		insn = aarch64_insn_get_str_reg_value();
393 		break;
394 	default:
395 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
396 		return AARCH64_BREAK_FAULT;
397 	}
398 
399 	insn = aarch64_insn_encode_ldst_size(size, insn);
400 
401 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
402 
403 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
404 					    base);
405 
406 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
407 					    offset);
408 }
409 
410 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
411 				    enum aarch64_insn_register base,
412 				    unsigned int imm,
413 				    enum aarch64_insn_size_type size,
414 				    enum aarch64_insn_ldst_type type)
415 {
416 	u32 insn;
417 	u32 shift;
418 
419 	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
420 		pr_err("%s: unknown size encoding %d\n", __func__, size);
421 		return AARCH64_BREAK_FAULT;
422 	}
423 
424 	shift = aarch64_insn_ldst_size[size];
425 	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
426 		pr_err("%s: invalid imm: %d\n", __func__, imm);
427 		return AARCH64_BREAK_FAULT;
428 	}
429 
430 	imm >>= shift;
431 
432 	switch (type) {
433 	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
434 		insn = aarch64_insn_get_ldr_imm_value();
435 		break;
436 	case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
437 		insn = aarch64_insn_get_signed_load_imm_value();
438 		break;
439 	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
440 		insn = aarch64_insn_get_str_imm_value();
441 		break;
442 	default:
443 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
444 		return AARCH64_BREAK_FAULT;
445 	}
446 
447 	insn = aarch64_insn_encode_ldst_size(size, insn);
448 
449 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
450 
451 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
452 					    base);
453 
454 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
455 }
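
/*
 * The 12-bit immediate here is the scaled "unsigned offset" form: it is
 * in units of the access size. A sketch with hypothetical operands:
 * loading a 64-bit value from [base + 16] uses imm = 16, which passes
 * the alignment check (16 is a multiple of 8) and is stored as
 * imm12 = 16 >> 3 = 2, i.e. the "ldr <Xt>, [<Xn>, #16]" form. Offsets
 * must therefore be size-aligned and below 4096 * (access size).
 */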
456 
457 u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
458 				  enum aarch64_insn_register reg,
459 				  bool is64bit)
460 {
461 	u32 insn;
462 	long offset;
463 
464 	offset = label_imm_common(pc, addr, SZ_1M);
465 	if (offset >= SZ_1M)
466 		return AARCH64_BREAK_FAULT;
467 
468 	insn = aarch64_insn_get_ldr_lit_value();
469 
470 	if (is64bit)
471 		insn |= BIT(30);
472 
473 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
474 
475 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
476 					     offset >> 2);
477 }
478 
479 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
480 				     enum aarch64_insn_register reg2,
481 				     enum aarch64_insn_register base,
482 				     int offset,
483 				     enum aarch64_insn_variant variant,
484 				     enum aarch64_insn_ldst_type type)
485 {
486 	u32 insn;
487 	int shift;
488 
489 	switch (type) {
490 	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
491 		insn = aarch64_insn_get_ldp_pre_value();
492 		break;
493 	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
494 		insn = aarch64_insn_get_stp_pre_value();
495 		break;
496 	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
497 		insn = aarch64_insn_get_ldp_post_value();
498 		break;
499 	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
500 		insn = aarch64_insn_get_stp_post_value();
501 		break;
502 	default:
503 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
504 		return AARCH64_BREAK_FAULT;
505 	}
506 
507 	switch (variant) {
508 	case AARCH64_INSN_VARIANT_32BIT:
509 		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
510 			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252]: %d\n",
511 			       __func__, offset);
512 			return AARCH64_BREAK_FAULT;
513 		}
514 		shift = 2;
515 		break;
516 	case AARCH64_INSN_VARIANT_64BIT:
517 		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
518 			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504]: %d\n",
519 			       __func__, offset);
520 			return AARCH64_BREAK_FAULT;
521 		}
522 		shift = 3;
523 		insn |= AARCH64_INSN_SF_BIT;
524 		break;
525 	default:
526 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
527 		return AARCH64_BREAK_FAULT;
528 	}
529 
530 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
531 					    reg1);
532 
533 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
534 					    reg2);
535 
536 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
537 					    base);
538 
539 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
540 					     offset >> shift);
541 }
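
/*
 * Usage sketch (the kind of frame push a JIT might emit; hypothetical
 * call, not from this file): "stp x29, x30, [sp, #-16]!" corresponds to
 *
 *	aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *					 AARCH64_INSN_REG_LR,
 *					 AARCH64_INSN_REG_SP, -16,
 *					 AARCH64_INSN_VARIANT_64BIT,
 *					 AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 *
 * where -16 passes the 8-byte alignment/range check and is encoded as
 * imm7 = -16 >> 3 = -2.
 */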
542 
543 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
544 				   enum aarch64_insn_register base,
545 				   enum aarch64_insn_register state,
546 				   enum aarch64_insn_size_type size,
547 				   enum aarch64_insn_ldst_type type)
548 {
549 	u32 insn;
550 
551 	switch (type) {
552 	case AARCH64_INSN_LDST_LOAD_EX:
553 	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
554 		insn = aarch64_insn_get_load_ex_value();
555 		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
556 			insn |= BIT(15);
557 		break;
558 	case AARCH64_INSN_LDST_STORE_EX:
559 	case AARCH64_INSN_LDST_STORE_REL_EX:
560 		insn = aarch64_insn_get_store_ex_value();
561 		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
562 			insn |= BIT(15);
563 		break;
564 	default:
565 		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
566 		return AARCH64_BREAK_FAULT;
567 	}
568 
569 	insn = aarch64_insn_encode_ldst_size(size, insn);
570 
571 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
572 					    reg);
573 
574 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
575 					    base);
576 
577 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
578 					    AARCH64_INSN_REG_ZR);
579 
580 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
581 					    state);
582 }
583 
584 #ifdef CONFIG_ARM64_LSE_ATOMICS
585 static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
586 					  u32 insn)
587 {
588 	u32 order;
589 
590 	switch (type) {
591 	case AARCH64_INSN_MEM_ORDER_NONE:
592 		order = 0;
593 		break;
594 	case AARCH64_INSN_MEM_ORDER_ACQ:
595 		order = 2;
596 		break;
597 	case AARCH64_INSN_MEM_ORDER_REL:
598 		order = 1;
599 		break;
600 	case AARCH64_INSN_MEM_ORDER_ACQREL:
601 		order = 3;
602 		break;
603 	default:
604 		pr_err("%s: unknown mem order %d\n", __func__, type);
605 		return AARCH64_BREAK_FAULT;
606 	}
607 
608 	insn &= ~GENMASK(23, 22);
609 	insn |= order << 22;
610 
611 	return insn;
612 }
613 
614 u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
615 				  enum aarch64_insn_register address,
616 				  enum aarch64_insn_register value,
617 				  enum aarch64_insn_size_type size,
618 				  enum aarch64_insn_mem_atomic_op op,
619 				  enum aarch64_insn_mem_order_type order)
620 {
621 	u32 insn;
622 
623 	switch (op) {
624 	case AARCH64_INSN_MEM_ATOMIC_ADD:
625 		insn = aarch64_insn_get_ldadd_value();
626 		break;
627 	case AARCH64_INSN_MEM_ATOMIC_CLR:
628 		insn = aarch64_insn_get_ldclr_value();
629 		break;
630 	case AARCH64_INSN_MEM_ATOMIC_EOR:
631 		insn = aarch64_insn_get_ldeor_value();
632 		break;
633 	case AARCH64_INSN_MEM_ATOMIC_SET:
634 		insn = aarch64_insn_get_ldset_value();
635 		break;
636 	case AARCH64_INSN_MEM_ATOMIC_SWP:
637 		insn = aarch64_insn_get_swp_value();
638 		break;
639 	default:
640 		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
641 		return AARCH64_BREAK_FAULT;
642 	}
643 
644 	switch (size) {
645 	case AARCH64_INSN_SIZE_32:
646 	case AARCH64_INSN_SIZE_64:
647 		break;
648 	default:
649 		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
650 		return AARCH64_BREAK_FAULT;
651 	}
652 
653 	insn = aarch64_insn_encode_ldst_size(size, insn);
654 
655 	insn = aarch64_insn_encode_ldst_order(order, insn);
656 
657 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
658 					    result);
659 
660 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
661 					    address);
662 
663 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
664 					    value);
665 }
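
/*
 * Usage note (sketch): the store-only LSE forms are the same encodings
 * with the zero register as the loaded result, e.g. "stadd ws, [xn]" is
 * "ldadd ws, wzr, [xn]". A caller can get that here by passing
 * result = AARCH64_INSN_REG_ZR along with the usual address/value
 * registers and AARCH64_INSN_MEM_ORDER_NONE.
 */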
666 
667 static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
668 					 u32 insn)
669 {
670 	u32 order;
671 
672 	switch (type) {
673 	case AARCH64_INSN_MEM_ORDER_NONE:
674 		order = 0;
675 		break;
676 	case AARCH64_INSN_MEM_ORDER_ACQ:
677 		order = BIT(22);
678 		break;
679 	case AARCH64_INSN_MEM_ORDER_REL:
680 		order = BIT(15);
681 		break;
682 	case AARCH64_INSN_MEM_ORDER_ACQREL:
683 		order = BIT(15) | BIT(22);
684 		break;
685 	default:
686 		pr_err("%s: unknown mem order %d\n", __func__, type);
687 		return AARCH64_BREAK_FAULT;
688 	}
689 
690 	insn &= ~(BIT(15) | BIT(22));
691 	insn |= order;
692 
693 	return insn;
694 }
695 
696 u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
697 			 enum aarch64_insn_register address,
698 			 enum aarch64_insn_register value,
699 			 enum aarch64_insn_size_type size,
700 			 enum aarch64_insn_mem_order_type order)
701 {
702 	u32 insn;
703 
704 	switch (size) {
705 	case AARCH64_INSN_SIZE_32:
706 	case AARCH64_INSN_SIZE_64:
707 		break;
708 	default:
709 		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
710 		return AARCH64_BREAK_FAULT;
711 	}
712 
713 	insn = aarch64_insn_get_cas_value();
714 
715 	insn = aarch64_insn_encode_ldst_size(size, insn);
716 
717 	insn = aarch64_insn_encode_cas_order(order, insn);
718 
719 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
720 					    result);
721 
722 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
723 					    address);
724 
725 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
726 					    value);
727 }
728 #endif
729 
730 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
731 				 enum aarch64_insn_register src,
732 				 int imm, enum aarch64_insn_variant variant,
733 				 enum aarch64_insn_adsb_type type)
734 {
735 	u32 insn;
736 
737 	switch (type) {
738 	case AARCH64_INSN_ADSB_ADD:
739 		insn = aarch64_insn_get_add_imm_value();
740 		break;
741 	case AARCH64_INSN_ADSB_SUB:
742 		insn = aarch64_insn_get_sub_imm_value();
743 		break;
744 	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
745 		insn = aarch64_insn_get_adds_imm_value();
746 		break;
747 	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
748 		insn = aarch64_insn_get_subs_imm_value();
749 		break;
750 	default:
751 		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
752 		return AARCH64_BREAK_FAULT;
753 	}
754 
755 	switch (variant) {
756 	case AARCH64_INSN_VARIANT_32BIT:
757 		break;
758 	case AARCH64_INSN_VARIANT_64BIT:
759 		insn |= AARCH64_INSN_SF_BIT;
760 		break;
761 	default:
762 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
763 		return AARCH64_BREAK_FAULT;
764 	}
765 
766 	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
767 	if (imm & ~(BIT(24) - 1))
768 		goto out;
769 
770 	/* If we have something in the top 12 bits... */
771 	if (imm & ~(SZ_4K - 1)) {
772 		/* ... and in the low 12 bits -> error */
773 		if (imm & (SZ_4K - 1))
774 			goto out;
775 
776 		imm >>= 12;
777 		insn |= AARCH64_INSN_LSL_12;
778 	}
779 
780 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
781 
782 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
783 
784 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
785 
786 out:
787 	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
788 	return AARCH64_BREAK_FAULT;
789 }
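
/*
 * Worked example of the shifted-immediate path (values are
 * illustrative): imm = 0x3000 only has bits set above bit 11, so it is
 * rewritten as imm = 3 with AARCH64_INSN_LSL_12 set, producing the
 * "add/sub <Rd>, <Rn>, #3, lsl #12" form. Something like imm = 0x1001,
 * with bits in both halves, cannot be encoded and is rejected.
 */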
790 
791 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
792 			      enum aarch64_insn_register src,
793 			      int immr, int imms,
794 			      enum aarch64_insn_variant variant,
795 			      enum aarch64_insn_bitfield_type type)
796 {
797 	u32 insn;
798 	u32 mask;
799 
800 	switch (type) {
801 	case AARCH64_INSN_BITFIELD_MOVE:
802 		insn = aarch64_insn_get_bfm_value();
803 		break;
804 	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
805 		insn = aarch64_insn_get_ubfm_value();
806 		break;
807 	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
808 		insn = aarch64_insn_get_sbfm_value();
809 		break;
810 	default:
811 		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
812 		return AARCH64_BREAK_FAULT;
813 	}
814 
815 	switch (variant) {
816 	case AARCH64_INSN_VARIANT_32BIT:
817 		mask = GENMASK(4, 0);
818 		break;
819 	case AARCH64_INSN_VARIANT_64BIT:
820 		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
821 		mask = GENMASK(5, 0);
822 		break;
823 	default:
824 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
825 		return AARCH64_BREAK_FAULT;
826 	}
827 
828 	if (immr & ~mask) {
829 		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
830 		return AARCH64_BREAK_FAULT;
831 	}
832 	if (imms & ~mask) {
833 		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
834 		return AARCH64_BREAK_FAULT;
835 	}
836 
837 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
838 
839 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
840 
841 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
842 
843 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
844 }
845 
846 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
847 			      int imm, int shift,
848 			      enum aarch64_insn_variant variant,
849 			      enum aarch64_insn_movewide_type type)
850 {
851 	u32 insn;
852 
853 	switch (type) {
854 	case AARCH64_INSN_MOVEWIDE_ZERO:
855 		insn = aarch64_insn_get_movz_value();
856 		break;
857 	case AARCH64_INSN_MOVEWIDE_KEEP:
858 		insn = aarch64_insn_get_movk_value();
859 		break;
860 	case AARCH64_INSN_MOVEWIDE_INVERSE:
861 		insn = aarch64_insn_get_movn_value();
862 		break;
863 	default:
864 		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
865 		return AARCH64_BREAK_FAULT;
866 	}
867 
868 	if (imm & ~(SZ_64K - 1)) {
869 		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
870 		return AARCH64_BREAK_FAULT;
871 	}
872 
873 	switch (variant) {
874 	case AARCH64_INSN_VARIANT_32BIT:
875 		if (shift != 0 && shift != 16) {
876 			pr_err("%s: invalid shift encoding %d\n", __func__,
877 			       shift);
878 			return AARCH64_BREAK_FAULT;
879 		}
880 		break;
881 	case AARCH64_INSN_VARIANT_64BIT:
882 		insn |= AARCH64_INSN_SF_BIT;
883 		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
884 			pr_err("%s: invalid shift encoding %d\n", __func__,
885 			       shift);
886 			return AARCH64_BREAK_FAULT;
887 		}
888 		break;
889 	default:
890 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
891 		return AARCH64_BREAK_FAULT;
892 	}
893 
894 	insn |= (shift >> 4) << 21;
895 
896 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
897 
898 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
899 }
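
/*
 * Sketch of building a wide constant (hypothetical register/values): a
 * caller can materialise 0x12340000 in a 64-bit register with
 *
 *	aarch64_insn_gen_movewide(reg, 0x1234, 16,
 *				  AARCH64_INSN_VARIANT_64BIT,
 *				  AARCH64_INSN_MOVEWIDE_ZERO);
 *
 * ("movz <Xd>, #0x1234, lsl #16"), then patch further 16-bit chunks in
 * with AARCH64_INSN_MOVEWIDE_KEEP (movk). The shift is stored as
 * hw = shift / 16 in bits [22:21].
 */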
900 
901 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
902 					 enum aarch64_insn_register src,
903 					 enum aarch64_insn_register reg,
904 					 int shift,
905 					 enum aarch64_insn_variant variant,
906 					 enum aarch64_insn_adsb_type type)
907 {
908 	u32 insn;
909 
910 	switch (type) {
911 	case AARCH64_INSN_ADSB_ADD:
912 		insn = aarch64_insn_get_add_value();
913 		break;
914 	case AARCH64_INSN_ADSB_SUB:
915 		insn = aarch64_insn_get_sub_value();
916 		break;
917 	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
918 		insn = aarch64_insn_get_adds_value();
919 		break;
920 	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
921 		insn = aarch64_insn_get_subs_value();
922 		break;
923 	default:
924 		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
925 		return AARCH64_BREAK_FAULT;
926 	}
927 
928 	switch (variant) {
929 	case AARCH64_INSN_VARIANT_32BIT:
930 		if (shift & ~(SZ_32 - 1)) {
931 			pr_err("%s: invalid shift encoding %d\n", __func__,
932 			       shift);
933 			return AARCH64_BREAK_FAULT;
934 		}
935 		break;
936 	case AARCH64_INSN_VARIANT_64BIT:
937 		insn |= AARCH64_INSN_SF_BIT;
938 		if (shift & ~(SZ_64 - 1)) {
939 			pr_err("%s: invalid shift encoding %d\n", __func__,
940 			       shift);
941 			return AARCH64_BREAK_FAULT;
942 		}
943 		break;
944 	default:
945 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
946 		return AARCH64_BREAK_FAULT;
947 	}
948 
949 
950 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
951 
952 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
953 
954 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
955 
956 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
957 }
958 
959 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
960 			   enum aarch64_insn_register src,
961 			   enum aarch64_insn_variant variant,
962 			   enum aarch64_insn_data1_type type)
963 {
964 	u32 insn;
965 
966 	switch (type) {
967 	case AARCH64_INSN_DATA1_REVERSE_16:
968 		insn = aarch64_insn_get_rev16_value();
969 		break;
970 	case AARCH64_INSN_DATA1_REVERSE_32:
971 		insn = aarch64_insn_get_rev32_value();
972 		break;
973 	case AARCH64_INSN_DATA1_REVERSE_64:
974 		if (variant != AARCH64_INSN_VARIANT_64BIT) {
975 			pr_err("%s: invalid variant for reverse64 %d\n",
976 			       __func__, variant);
977 			return AARCH64_BREAK_FAULT;
978 		}
979 		insn = aarch64_insn_get_rev64_value();
980 		break;
981 	default:
982 		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
983 		return AARCH64_BREAK_FAULT;
984 	}
985 
986 	switch (variant) {
987 	case AARCH64_INSN_VARIANT_32BIT:
988 		break;
989 	case AARCH64_INSN_VARIANT_64BIT:
990 		insn |= AARCH64_INSN_SF_BIT;
991 		break;
992 	default:
993 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
994 		return AARCH64_BREAK_FAULT;
995 	}
996 
997 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
998 
999 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1000 }
1001 
1002 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1003 			   enum aarch64_insn_register src,
1004 			   enum aarch64_insn_register reg,
1005 			   enum aarch64_insn_variant variant,
1006 			   enum aarch64_insn_data2_type type)
1007 {
1008 	u32 insn;
1009 
1010 	switch (type) {
1011 	case AARCH64_INSN_DATA2_UDIV:
1012 		insn = aarch64_insn_get_udiv_value();
1013 		break;
1014 	case AARCH64_INSN_DATA2_SDIV:
1015 		insn = aarch64_insn_get_sdiv_value();
1016 		break;
1017 	case AARCH64_INSN_DATA2_LSLV:
1018 		insn = aarch64_insn_get_lslv_value();
1019 		break;
1020 	case AARCH64_INSN_DATA2_LSRV:
1021 		insn = aarch64_insn_get_lsrv_value();
1022 		break;
1023 	case AARCH64_INSN_DATA2_ASRV:
1024 		insn = aarch64_insn_get_asrv_value();
1025 		break;
1026 	case AARCH64_INSN_DATA2_RORV:
1027 		insn = aarch64_insn_get_rorv_value();
1028 		break;
1029 	default:
1030 		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1031 		return AARCH64_BREAK_FAULT;
1032 	}
1033 
1034 	switch (variant) {
1035 	case AARCH64_INSN_VARIANT_32BIT:
1036 		break;
1037 	case AARCH64_INSN_VARIANT_64BIT:
1038 		insn |= AARCH64_INSN_SF_BIT;
1039 		break;
1040 	default:
1041 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1042 		return AARCH64_BREAK_FAULT;
1043 	}
1044 
1045 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1046 
1047 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1048 
1049 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1050 }
1051 
1052 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1053 			   enum aarch64_insn_register src,
1054 			   enum aarch64_insn_register reg1,
1055 			   enum aarch64_insn_register reg2,
1056 			   enum aarch64_insn_variant variant,
1057 			   enum aarch64_insn_data3_type type)
1058 {
1059 	u32 insn;
1060 
1061 	switch (type) {
1062 	case AARCH64_INSN_DATA3_MADD:
1063 		insn = aarch64_insn_get_madd_value();
1064 		break;
1065 	case AARCH64_INSN_DATA3_MSUB:
1066 		insn = aarch64_insn_get_msub_value();
1067 		break;
1068 	default:
1069 		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1070 		return AARCH64_BREAK_FAULT;
1071 	}
1072 
1073 	switch (variant) {
1074 	case AARCH64_INSN_VARIANT_32BIT:
1075 		break;
1076 	case AARCH64_INSN_VARIANT_64BIT:
1077 		insn |= AARCH64_INSN_SF_BIT;
1078 		break;
1079 	default:
1080 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1081 		return AARCH64_BREAK_FAULT;
1082 	}
1083 
1084 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1085 
1086 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1087 
1088 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1089 					    reg1);
1090 
1091 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1092 					    reg2);
1093 }
1094 
1095 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1096 					 enum aarch64_insn_register src,
1097 					 enum aarch64_insn_register reg,
1098 					 int shift,
1099 					 enum aarch64_insn_variant variant,
1100 					 enum aarch64_insn_logic_type type)
1101 {
1102 	u32 insn;
1103 
1104 	switch (type) {
1105 	case AARCH64_INSN_LOGIC_AND:
1106 		insn = aarch64_insn_get_and_value();
1107 		break;
1108 	case AARCH64_INSN_LOGIC_BIC:
1109 		insn = aarch64_insn_get_bic_value();
1110 		break;
1111 	case AARCH64_INSN_LOGIC_ORR:
1112 		insn = aarch64_insn_get_orr_value();
1113 		break;
1114 	case AARCH64_INSN_LOGIC_ORN:
1115 		insn = aarch64_insn_get_orn_value();
1116 		break;
1117 	case AARCH64_INSN_LOGIC_EOR:
1118 		insn = aarch64_insn_get_eor_value();
1119 		break;
1120 	case AARCH64_INSN_LOGIC_EON:
1121 		insn = aarch64_insn_get_eon_value();
1122 		break;
1123 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1124 		insn = aarch64_insn_get_ands_value();
1125 		break;
1126 	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1127 		insn = aarch64_insn_get_bics_value();
1128 		break;
1129 	default:
1130 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1131 		return AARCH64_BREAK_FAULT;
1132 	}
1133 
1134 	switch (variant) {
1135 	case AARCH64_INSN_VARIANT_32BIT:
1136 		if (shift & ~(SZ_32 - 1)) {
1137 			pr_err("%s: invalid shift encoding %d\n", __func__,
1138 			       shift);
1139 			return AARCH64_BREAK_FAULT;
1140 		}
1141 		break;
1142 	case AARCH64_INSN_VARIANT_64BIT:
1143 		insn |= AARCH64_INSN_SF_BIT;
1144 		if (shift & ~(SZ_64 - 1)) {
1145 			pr_err("%s: invalid shift encoding %d\n", __func__,
1146 			       shift);
1147 			return AARCH64_BREAK_FAULT;
1148 		}
1149 		break;
1150 	default:
1151 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1152 		return AARCH64_BREAK_FAULT;
1153 	}
1154 
1155 
1156 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1157 
1158 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1159 
1160 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1161 
1162 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1163 }
1164 
1165 /*
1166  * MOV (register) is architecturally an alias of ORR (shifted register) where
1167  * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1168  */
1169 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1170 			      enum aarch64_insn_register src,
1171 			      enum aarch64_insn_variant variant)
1172 {
1173 	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1174 						    src, 0, variant,
1175 						    AARCH64_INSN_LOGIC_ORR);
1176 }
1177 
1178 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1179 			 enum aarch64_insn_register reg,
1180 			 enum aarch64_insn_adr_type type)
1181 {
1182 	u32 insn;
1183 	s32 offset;
1184 
1185 	switch (type) {
1186 	case AARCH64_INSN_ADR_TYPE_ADR:
1187 		insn = aarch64_insn_get_adr_value();
1188 		offset = addr - pc;
1189 		break;
1190 	case AARCH64_INSN_ADR_TYPE_ADRP:
1191 		insn = aarch64_insn_get_adrp_value();
1192 		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1193 		break;
1194 	default:
1195 		pr_err("%s: unknown adr encoding %d\n", __func__, type);
1196 		return AARCH64_BREAK_FAULT;
1197 	}
1198 
1199 	if (offset < -SZ_1M || offset >= SZ_1M)
1200 		return AARCH64_BREAK_FAULT;
1201 
1202 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1203 
1204 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1205 }
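
/*
 * Worked ADRP example (made-up addresses): with pc = 0xffff800008001234
 * and addr = 0xffff800008005678, the page delta is
 * (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12 = 0x4678 >> 12 = 4, so the
 * generated "adrp <Xd>, addr" carries imm = 4 pages and the low 12 bits
 * of addr must be supplied by a following add/ldr. The offset check
 * above (+/-1M pages) corresponds to ADRP's +/-4GB reach.
 */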
1206 
1207 /*
1208  * Decode the imm field of a branch, and return the byte offset as a
1209  * signed value (so it can be used when computing a new branch
1210  * target).
1211  */
1212 s32 aarch64_get_branch_offset(u32 insn)
1213 {
1214 	s32 imm;
1215 
1216 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1217 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1218 		return (imm << 6) >> 4;
1219 	}
1220 
1221 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1222 	    aarch64_insn_is_bcond(insn)) {
1223 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1224 		return (imm << 13) >> 11;
1225 	}
1226 
1227 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1228 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1229 		return (imm << 18) >> 16;
1230 	}
1231 
1232 	/* Unhandled instruction */
1233 	BUG();
1234 }
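
/*
 * The shift pairs above are a sign-extension trick: imm26 is a signed
 * word offset, so "(imm << 6) >> 4" first parks the sign bit of the
 * 26-bit field at bit 31 and then arithmetically shifts back, leaving a
 * net "<< 2" (words to bytes) with correct sign extension. For example
 * (illustrative), imm26 = 0x3ffffff (-1 words) becomes 0xffffffc0 after
 * "<< 6", i.e. -64, and -64 >> 4 = -4 bytes. The imm19 and imm14 cases
 * use "<< 13 >> 11" and "<< 18 >> 16" for the same net "<< 2".
 */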
1235 
1236 /*
1237  * Encode the displacement of a branch in the imm field and return the
1238  * updated instruction.
1239  */
1240 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1241 {
1242 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1243 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1244 						     offset >> 2);
1245 
1246 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1247 	    aarch64_insn_is_bcond(insn))
1248 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1249 						     offset >> 2);
1250 
1251 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1252 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1253 						     offset >> 2);
1254 
1255 	/* Unhandled instruction */
1256 	BUG();
1257 }
1258 
1259 s32 aarch64_insn_adrp_get_offset(u32 insn)
1260 {
1261 	BUG_ON(!aarch64_insn_is_adrp(insn));
1262 	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1263 }
1264 
1265 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1266 {
1267 	BUG_ON(!aarch64_insn_is_adrp(insn));
1268 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1269 						offset >> 12);
1270 }
1271 
1272 /*
1273  * Extract the Op/CR data from a msr/mrs instruction.
1274  */
1275 u32 aarch64_insn_extract_system_reg(u32 insn)
1276 {
1277 	return (insn & 0x1FFFE0) >> 5;
1278 }
1279 
1280 bool aarch32_insn_is_wide(u32 insn)
1281 {
1282 	return insn >= 0xe800;
1283 }
1284 
1285 /*
1286  * Extract a 4-bit register number from an A32 instruction at the given bit offset.
1287  */
1288 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1289 {
1290 	return (insn & (0xf << offset)) >> offset;
1291 }
1292 
1293 #define OPC2_MASK	0x7
1294 #define OPC2_OFFSET	5
1295 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1296 {
1297 	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1298 }
1299 
1300 #define CRM_MASK	0xf
1301 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1302 {
1303 	return insn & CRM_MASK;
1304 }
1305 
1306 static bool range_of_ones(u64 val)
1307 {
1308 	/* Doesn't handle full ones or full zeroes */
1309 	u64 sval = val >> __ffs64(val);
1310 
1311 	/* One of Sean Eron Anderson's bithack tricks */
1312 	return ((sval + 1) & (sval)) == 0;
1313 }
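
/*
 * Example (illustrative values): val = 0x0ff0 gives sval = 0xff, and
 * (0xff + 1) & 0xff == 0, so it is a single contiguous run of ones;
 * val = 0x5 gives sval = 0x5, and 0x6 & 0x5 != 0, so it is rejected.
 */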
1314 
1315 static u32 aarch64_encode_immediate(u64 imm,
1316 				    enum aarch64_insn_variant variant,
1317 				    u32 insn)
1318 {
1319 	unsigned int immr, imms, n, ones, ror, esz, tmp;
1320 	u64 mask;
1321 
1322 	switch (variant) {
1323 	case AARCH64_INSN_VARIANT_32BIT:
1324 		esz = 32;
1325 		break;
1326 	case AARCH64_INSN_VARIANT_64BIT:
1327 		insn |= AARCH64_INSN_SF_BIT;
1328 		esz = 64;
1329 		break;
1330 	default:
1331 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1332 		return AARCH64_BREAK_FAULT;
1333 	}
1334 
1335 	mask = GENMASK(esz - 1, 0);
1336 
1337 	/* Can't encode full zeroes, full ones, or value wider than the mask */
1338 	if (!imm || imm == mask || imm & ~mask)
1339 		return AARCH64_BREAK_FAULT;
1340 
1341 	/*
1342 	 * Inverse of Replicate(). Try to spot a repeating pattern
1343 	 * with a pow2 stride.
1344 	 */
1345 	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1346 		u64 emask = BIT(tmp) - 1;
1347 
1348 		if ((imm & emask) != ((imm >> tmp) & emask))
1349 			break;
1350 
1351 		esz = tmp;
1352 		mask = emask;
1353 	}
1354 
1355 	/* N is only set if we're encoding a 64bit value */
1356 	n = esz == 64;
1357 
1358 	/* Trim imm to the element size */
1359 	imm &= mask;
1360 
1361 	/* That's how many ones we need to encode */
1362 	ones = hweight64(imm);
1363 
1364 	/*
1365 	 * imms is set to (ones - 1), prefixed with a string of ones
1366 	 * and a zero if they fit. Cap it to 6 bits.
1367 	 */
1368 	imms  = ones - 1;
1369 	imms |= 0xf << ffs(esz);
1370 	imms &= BIT(6) - 1;
1371 
1372 	/* Compute the rotation */
1373 	if (range_of_ones(imm)) {
1374 		/*
1375 		 * Pattern: 0..01..10..0
1376 		 *
1377 		 * Compute how much rotation we need to right-align it
1378 		 */
1379 		ror = __ffs64(imm);
1380 	} else {
1381 		/*
1382 		 * Pattern: 0..01..10..01..1
1383 		 *
1384 		 * Fill the unused top bits with ones, and check if
1385 		 * the result is a valid immediate (all ones with a single
1386 		 * contiguous range of zeroes).
1387 		 */
1388 		imm |= ~mask;
1389 		if (!range_of_ones(~imm))
1390 			return AARCH64_BREAK_FAULT;
1391 
1392 		/*
1393 		 * Compute the rotation to get a continuous set of
1394 		 * ones, with the first bit set at position 0
1395 		 */
1396 		ror = fls64(~imm);
1397 	}
1398 
1399 	/*
1400 	 * immr is the number of bits we need to rotate back to the
1401 	 * original set of ones. Note that this is relative to the
1402 	 * element size...
1403 	 */
1404 	immr = (esz - ror) % esz;
1405 
1406 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1407 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1408 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1409 }
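
/*
 * Worked encoding sketch (illustrative): imm = 0xff with the 64-bit
 * variant finds no repeating sub-pattern, so esz = 64 and N = 1; there
 * are 8 ones, so imms capped to 6 bits is (8 - 1) = 0b000111; the value
 * is already a right-aligned run of ones, so ror = 0 and immr = 0.
 * Those are exactly the N:immr:imms fields of the A64 bitmask immediate
 * for 0xff.
 */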
1410 
1411 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1412 				       enum aarch64_insn_variant variant,
1413 				       enum aarch64_insn_register Rn,
1414 				       enum aarch64_insn_register Rd,
1415 				       u64 imm)
1416 {
1417 	u32 insn;
1418 
1419 	switch (type) {
1420 	case AARCH64_INSN_LOGIC_AND:
1421 		insn = aarch64_insn_get_and_imm_value();
1422 		break;
1423 	case AARCH64_INSN_LOGIC_ORR:
1424 		insn = aarch64_insn_get_orr_imm_value();
1425 		break;
1426 	case AARCH64_INSN_LOGIC_EOR:
1427 		insn = aarch64_insn_get_eor_imm_value();
1428 		break;
1429 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1430 		insn = aarch64_insn_get_ands_imm_value();
1431 		break;
1432 	default:
1433 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1434 		return AARCH64_BREAK_FAULT;
1435 	}
1436 
1437 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1438 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1439 	return aarch64_encode_immediate(imm, variant, insn);
1440 }
1441 
1442 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1443 			  enum aarch64_insn_register Rm,
1444 			  enum aarch64_insn_register Rn,
1445 			  enum aarch64_insn_register Rd,
1446 			  u8 lsb)
1447 {
1448 	u32 insn;
1449 
1450 	insn = aarch64_insn_get_extr_value();
1451 
1452 	switch (variant) {
1453 	case AARCH64_INSN_VARIANT_32BIT:
1454 		if (lsb > 31)
1455 			return AARCH64_BREAK_FAULT;
1456 		break;
1457 	case AARCH64_INSN_VARIANT_64BIT:
1458 		if (lsb > 63)
1459 			return AARCH64_BREAK_FAULT;
1460 		insn |= AARCH64_INSN_SF_BIT;
1461 		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1462 		break;
1463 	default:
1464 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1465 		return AARCH64_BREAK_FAULT;
1466 	}
1467 
1468 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1469 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1470 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1471 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1472 }
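
/*
 * Usage note (sketch): "ror <Xd>, <Xs>, #lsb" is the EXTR form with
 * Rn == Rm, so a rotate-right by constant can be generated as
 * aarch64_insn_gen_extr(variant, src, src, dst, lsb), passing the same
 * source register for both Rm and Rn.
 */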
1473 
1474 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
1475 {
1476 	u32 opt;
1477 	u32 insn;
1478 
1479 	switch (type) {
1480 	case AARCH64_INSN_MB_SY:
1481 		opt = 0xf;
1482 		break;
1483 	case AARCH64_INSN_MB_ST:
1484 		opt = 0xe;
1485 		break;
1486 	case AARCH64_INSN_MB_LD:
1487 		opt = 0xd;
1488 		break;
1489 	case AARCH64_INSN_MB_ISH:
1490 		opt = 0xb;
1491 		break;
1492 	case AARCH64_INSN_MB_ISHST:
1493 		opt = 0xa;
1494 		break;
1495 	case AARCH64_INSN_MB_ISHLD:
1496 		opt = 0x9;
1497 		break;
1498 	case AARCH64_INSN_MB_NSH:
1499 		opt = 0x7;
1500 		break;
1501 	case AARCH64_INSN_MB_NSHST:
1502 		opt = 0x6;
1503 		break;
1504 	case AARCH64_INSN_MB_NSHLD:
1505 		opt = 0x5;
1506 		break;
1507 	default:
1508 		pr_err("%s: unknown dmb type %d\n", __func__, type);
1509 		return AARCH64_BREAK_FAULT;
1510 	}
1511 
1512 	insn = aarch64_insn_get_dmb_value();
1513 	insn &= ~GENMASK(11, 8);
1514 	insn |= (opt << 8);
1515 
1516 	return insn;
1517 }
1518