// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to Control Flow Integrity.
 */
#include "lkdtm.h"
#include <asm/page.h>

static int called_count;

/* Function taking one argument, without a return value. */
static noinline void lkdtm_increment_void(int *counter)
{
	(*counter)++;
}

/* Function taking one argument, returning int. */
static noinline int lkdtm_increment_int(int *counter)
{
	(*counter)++;

	return *counter;
}

/* Don't allow the compiler to inline the calls. */
static noinline void lkdtm_indirect_call(void (*func)(int *))
{
	func(&called_count);
}

/*
 * This tries to call an indirect function with a mismatched prototype.
 */
static void lkdtm_CFI_FORWARD_PROTO(void)
{
	/*
	 * Matches lkdtm_increment_void()'s prototype, but not
	 * lkdtm_increment_int()'s prototype.
	 */
	pr_info("Calling matched prototype ...\n");
	lkdtm_indirect_call(lkdtm_increment_void);

	pr_info("Calling mismatched prototype ...\n");
	lkdtm_indirect_call((void *)lkdtm_increment_int);

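	/*
	 * With CONFIG_CFI_CLANG, the mismatched indirect call above is
	 * expected to trap before control reaches the pr_err() below;
	 * surviving it means the forward-edge CFI check did not fire.
	 */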
	pr_err("FAIL: survived mismatched prototype function call!\n");
	pr_expected_config(CONFIG_CFI_CLANG);
}

/*
 * This can stay local to LKDTM, as there should not be a production reason
 * to disable PAC && SCS.
 */
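/*
 * A note on the macros below: __no_ret_protection builds a function with
 * no return-address protection at all. __noscs drops the shadow call
 * stack instrumentation, and the target attribute drops PAC
 * return-address signing ("branch-protection=bti" keeps BTI while
 * dropping pac-ret; compilers without the branch-protection option fall
 * back to "sign-return-address=none").
 */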
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
# ifdef CONFIG_ARM64_BTI_KERNEL
#  define __no_pac             "branch-protection=bti"
# else
#  ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET
#   define __no_pac            "branch-protection=none"
#  else
#   define __no_pac            "sign-return-address=none"
#  endif
# endif
# define __no_ret_protection   __noscs __attribute__((__target__(__no_pac)))
#else
# define __no_ret_protection   __noscs
#endif

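/*
 * With arm64 pointer authentication, the PAC is stored in the upper bits
 * of a kernel pointer. OR-ing in PAGE_OFFSET forces those bits back to
 * the regular all-ones kernel-address pattern, so a signed return address
 * can be compared against a plain label address. Without PAC, this is a
 * no-op for kernel addresses.
 */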
#define no_pac_addr(addr)      \
	((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))

/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
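	/*
	 * This assumes the saved return address lives one word above the
	 * frame pointer, as in arm64 frame records and x86-64 frames built
	 * with frame pointers; the comparison below bails out if that does
	 * not hold.
	 */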
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}

static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
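	/*
	 * Same body as set_return_addr_unchecked(), but compiled with the
	 * normal return-address protections, so PAC and/or the shadow call
	 * stack are expected to catch the overwrite on return.
	 */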
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}

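/*
 * Never written at runtime, so it always reads as zero, but being
 * volatile keeps the compiler from proving that: every branch on
 * force_check must be kept, which keeps the labels and calls below alive.
 */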
static volatile int force_check;

static void lkdtm_CFI_BACKWARD(void)
{
	/* Use calculated gotos to keep labels addressable. */
	void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };
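	/*
	 * Taking the labels' addresses (and branching to them through the
	 * never-taken force_check branches below) keeps the optimizer from
	 * merging or dropping them, so they remain distinct return-address
	 * targets.
	 */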

	pr_info("Attempting unchecked stack return address redirection ...\n");

	/* Always false */
	if (force_check) {
		/*
		 * Prepare to call with NULLs to avoid parameters being treated as
		 * constants in -O2.
		 */
		set_return_addr_unchecked(NULL, NULL);
		set_return_addr(NULL, NULL);
		if (force_check)
			goto *labels[1];
		if (force_check)
			goto *labels[2];
		if (force_check)
			goto *labels[3];
		if (force_check)
			goto *labels[4];
		return;
	}

	/*
	 * Use fallthrough switch case to keep basic block ordering between
	 * set_return_addr*() and the label after it.
	 */
	switch (force_check) {
	case 0:
		set_return_addr_unchecked(&&normal, &&redirected);
		fallthrough;
	case 1:
normal:
		/* Always true */
		if (!force_check) {
			pr_err("FAIL: stack return address manipulation failed!\n");
			/* If we can't redirect "normally", we can't test mitigations. */
			return;
		}
		break;
	default:
redirected:
		pr_info("ok: redirected stack return address.\n");
		break;
	}

	pr_info("Attempting checked stack return address redirection ...\n");

	switch (force_check) {
	case 0:
		set_return_addr(&&check_normal, &&check_redirected);
		fallthrough;
	case 1:
check_normal:
		/* Always true */
		if (!force_check) {
			pr_info("ok: control flow unchanged.\n");
			return;
		}

check_redirected:
		pr_err("FAIL: stack return address was redirected!\n");
		break;
	}

	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
		return;
	}
	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
		pr_expected_config(CONFIG_SHADOW_CALL_STACK);
		return;
	}
	pr_warn("This is probably expected, since this %s was built *without* %s=y or %s=y\n",
		lkdtm_kernel_info,
		"CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(CFI_FORWARD_PROTO),
	CRASHTYPE(CFI_BACKWARD),
};

struct crashtype_category cfi_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};