1a146fed5SMarco Elver // SPDX-License-Identifier: GPL-2.0
2a146fed5SMarco Elver /*
 * KCSAN test with various race scenarios to test runtime behaviour. Since the
4a146fed5SMarco Elver * interface with which KCSAN's reports are obtained is via the console, this is
 * the output we should verify. Each test case checks the presence (or
 * absence) of generated reports. Relies on the 'console' tracepoint to capture
7a146fed5SMarco Elver * reports as they appear in the kernel log.
8a146fed5SMarco Elver *
9a146fed5SMarco Elver * Makes use of KUnit for test organization, and the Torture framework for test
10a146fed5SMarco Elver * thread control.
11a146fed5SMarco Elver *
12a146fed5SMarco Elver * Copyright (C) 2020, Google LLC.
13a146fed5SMarco Elver * Author: Marco Elver <elver@google.com>
14a146fed5SMarco Elver */
15a146fed5SMarco Elver
16f6a14914SMarco Elver #define pr_fmt(fmt) "kcsan_test: " fmt
17f6a14914SMarco Elver
18a146fed5SMarco Elver #include <kunit/test.h>
198bc32b34SMarco Elver #include <linux/atomic.h>
208bc32b34SMarco Elver #include <linux/bitops.h>
21a146fed5SMarco Elver #include <linux/jiffies.h>
22a146fed5SMarco Elver #include <linux/kcsan-checks.h>
23a146fed5SMarco Elver #include <linux/kernel.h>
248bc32b34SMarco Elver #include <linux/mutex.h>
25a146fed5SMarco Elver #include <linux/sched.h>
26a146fed5SMarco Elver #include <linux/seqlock.h>
27a146fed5SMarco Elver #include <linux/spinlock.h>
28a146fed5SMarco Elver #include <linux/string.h>
29a146fed5SMarco Elver #include <linux/timer.h>
30a146fed5SMarco Elver #include <linux/torture.h>
31a146fed5SMarco Elver #include <linux/tracepoint.h>
32a146fed5SMarco Elver #include <linux/types.h>
33a146fed5SMarco Elver #include <trace/events/printk.h>
34a146fed5SMarco Elver
/* Skip the current KUnit test if @cond does not hold (e.g. missing config). */
#define KCSAN_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)
3980804284SMarco Elver
/*
 * With compiler support for compound (read-write) instrumentation, expect
 * read-write accesses to be reported as such; otherwise fall back to the
 * provided alternative access type @alt.
 */
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif
45a146fed5SMarco Elver
/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/*
 * Report as observed from console; protected by @lock. lines[0] holds the
 * report title, lines[1] and lines[2] the two access descriptions (or a
 * "<none>" placeholder for races of unknown origin).
 */
static struct {
	spinlock_t lock;	/* Serializes updates from the console tracepoint. */
	int nlines;		/* Number of valid entries in @lines. */
	char lines[3][512];	/* Captured report lines of interest. */
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
60a146fed5SMarco Elver
/* Setup test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
	/* Do not report data races of the test harness itself. */
	kcsan_disable_current();

	/*
	 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
	 * least one race is reported.
	 */
	end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

	/* Signal start; release potential initialization of shared data. */
	smp_store_release(&access_kernels[0], func1);
	smp_store_release(&access_kernels[1], func2);
}
77a146fed5SMarco Elver
/*
 * End test checking loop: returns false while the checking loop should keep
 * going (test duration not yet elapsed and @stop not set); otherwise
 * re-enables KCSAN for the current task and returns true.
 */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}
91a146fed5SMarco Elver
/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest. Attached to the 'console' tracepoint; @buf is NOT
 * NUL-terminated, only @len bytes are valid.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KCSAN report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		/* A line describing one of the racing accesses. */
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
136a146fed5SMarco Elver
137a146fed5SMarco Elver /* Check if a report related to the test exists. */
138a146fed5SMarco Elver __no_kcsan
report_available(void)139a146fed5SMarco Elver static bool report_available(void)
140a146fed5SMarco Elver {
141a146fed5SMarco Elver return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
142a146fed5SMarco Elver }
143a146fed5SMarco Elver
/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;    /* Function pointer to expected function of top frame. */
		void *addr;  /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type;    /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
154a146fed5SMarco Elver
/*
 * Check observed report matches information in @r.
 *
 * Builds the expected report title (expect[0]) and the two access-description
 * lines (expect[1], expect[2]) from @r, then substring-matches them against
 * the lines captured by probe_console(). Access lines may match in any order.
 */
__no_kcsan
static bool __report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(*observed.lines) *expect;
	const char *end;
	char *cur;
	int i;

	/* Doubled-checked locking. */
	if (!report_available())
		return false;

	/* expect[] mirrors observed.lines; allocated to avoid large stack use. */
	expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
	if (WARN_ON(!expect))
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match, remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		/* Map the access type bits to the string used in reports. */
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       "assert no accesses" :
					       "assert no writes") :
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       ((ty & KCSAN_ACCESS_COMPOUND) ?
							"read-write" :
							"write") :
					       "read");
		const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
		const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
		const char *const access_type_aux =
				(is_atomic && is_scoped)	? " (marked, reordered)"
				: (is_atomic			? " (marked)"
				   : (is_scoped			? " (reordered)" : ""));

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	kfree(expect);
	return ret;
}
263a146fed5SMarco Elver
2647310bd1fSMarco Elver static __always_inline const struct expect_report *
__report_set_scoped(struct expect_report * r,int accesses)2657310bd1fSMarco Elver __report_set_scoped(struct expect_report *r, int accesses)
2667310bd1fSMarco Elver {
2677310bd1fSMarco Elver BUILD_BUG_ON(accesses > 3);
2687310bd1fSMarco Elver
2697310bd1fSMarco Elver if (accesses & 1)
2707310bd1fSMarco Elver r->access[0].type |= KCSAN_ACCESS_SCOPED;
2717310bd1fSMarco Elver else
2727310bd1fSMarco Elver r->access[0].type &= ~KCSAN_ACCESS_SCOPED;
2737310bd1fSMarco Elver
2747310bd1fSMarco Elver if (accesses & 2)
2757310bd1fSMarco Elver r->access[1].type |= KCSAN_ACCESS_SCOPED;
2767310bd1fSMarco Elver else
2777310bd1fSMarco Elver r->access[1].type &= ~KCSAN_ACCESS_SCOPED;
2787310bd1fSMarco Elver
2797310bd1fSMarco Elver return r;
2807310bd1fSMarco Elver }
2817310bd1fSMarco Elver
2827310bd1fSMarco Elver __no_kcsan
report_matches_any_reordered(struct expect_report * r)2837310bd1fSMarco Elver static bool report_matches_any_reordered(struct expect_report *r)
2847310bd1fSMarco Elver {
2857310bd1fSMarco Elver return __report_matches(__report_set_scoped(r, 0)) ||
2867310bd1fSMarco Elver __report_matches(__report_set_scoped(r, 1)) ||
2877310bd1fSMarco Elver __report_matches(__report_set_scoped(r, 2)) ||
2887310bd1fSMarco Elver __report_matches(__report_set_scoped(r, 3));
2897310bd1fSMarco Elver }
2907310bd1fSMarco Elver
#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
/* Without weak memory modeling, expect exactly the access types given. */
#define report_matches __report_matches
#endif
2977310bd1fSMarco Elver
298a146fed5SMarco Elver /* ===== Test kernels ===== */
299a146fed5SMarco Elver
static long test_sink; /* Scratch target used by sink_value(). */
static long test_var;  /* Primary variable the test kernels race on. */
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
/* Multi-word object for (partial) struct access tests. */
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);
310a146fed5SMarco Elver
/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }
317a146fed5SMarco Elver
3188bc32b34SMarco Elver /*
3198bc32b34SMarco Elver * Generates a delay and some accesses that enter the runtime but do not produce
3208bc32b34SMarco Elver * data races.
3218bc32b34SMarco Elver */
test_delay(int iter)3228bc32b34SMarco Elver static noinline void test_delay(int iter)
3238bc32b34SMarco Elver {
3248bc32b34SMarco Elver while (iter--)
3258bc32b34SMarco Elver sink_value(READ_ONCE(test_sink));
3268bc32b34SMarco Elver }
3278bc32b34SMarco Elver
/* Plain (unmarked) read of test_var. */
static noinline void test_kernel_read(void) { sink_value(test_var); }

/* Plain write; derives the value from test_sink to induce a value change. */
static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

/* Plain write that never changes the stored value. */
static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

/* Marked (atomic) read via READ_ONCE(). */
static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

/* Marked (atomic) write via WRITE_ONCE(). */
static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

/* Atomic read-modify-write of test_var. */
static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

/* Uninstrumented write; KCSAN cannot observe this side of the race. */
__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

/* Write wrapped in data_race(), marking the race as intentional. */
static noinline void test_kernel_data_race(void) { data_race(test_var++); }
360a146fed5SMarco Elver
/* Assert no concurrent writers to test_var. */
static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

/* Assert no concurrent accesses (reads or writes) to test_var. */
static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}

/* Bits toggled by test_kernel_change_bits(). */
#define TEST_CHANGE_BITS 0xff00ff00

/* Toggle TEST_CHANGE_BITS of test_var. */
static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid race of unknown origin for this test, just pretend they
		 * are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

/* Assert exclusivity of exactly the bits that test_kernel_change_bits() toggles. */
static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

/* Assert exclusivity of only the bits that are never toggled. */
static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}
396a146fed5SMarco Elver
/*
 * Scoped assertions do trigger anywhere in scope. However, the report should
 * still only point at the start of the scope.
 */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

/* Scoped assert: no concurrent writers while the scope is active. */
static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

/* Scoped assert: no concurrent accesses while the scope is active. */
static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}
421a146fed5SMarco Elver
test_kernel_rmw_array(void)422a146fed5SMarco Elver static noinline void test_kernel_rmw_array(void)
423a146fed5SMarco Elver {
424a146fed5SMarco Elver int i;
425a146fed5SMarco Elver
426a146fed5SMarco Elver for (i = 0; i < ARRAY_SIZE(test_array); ++i)
427a146fed5SMarco Elver test_array[i]++;
428a146fed5SMarco Elver }
429a146fed5SMarco Elver
/* Check a write covering the whole struct, then induce an actual value change. */
static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

/* Plain write to only part of the struct. */
static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

/* Zero-sized read check of a struct member. */
static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}
447a146fed5SMarco Elver
/* Plain read of the (concurrently updated) jiffies counter. */
static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}

/* Seqlock read side: retry the plain read of test_var until stable. */
static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

/* Seqlock write side: increment test_var under the write seqlock. */
static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}
471a146fed5SMarco Elver
/* Relaxed atomic load of test_var via compiler builtin. */
static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}

/* Plain XOR-toggle of one bit of test_var, inside a nestable-atomic region. */
static noinline void test_kernel_xor_1bit(void)
{
	/* Do not report data races between the read-writes. */
	kcsan_nestable_atomic_begin();
	test_var ^= 0x10000;
	kcsan_nestable_atomic_end();
}
488d8fd74d3SMarco Elver
/*
 * Generate a test kernel that tries to take a "lock" flag (test_struct.val[0])
 * via @acquire; on success, runs a critical section of 100 plain increments of
 * test_var, releases the flag via @release, and then delays. The *_memorder
 * variants below differ only in the memory ordering of acquire/release.
 */
#define TEST_KERNEL_LOCKED(name, acquire, release)		\
	static noinline void test_kernel_##name(void)		\
	{							\
		long *flag = &test_struct.val[0];		\
		long v = 0;					\
		if (!(acquire))					\
			return;					\
		while (v++ < 100) {				\
			test_var++;				\
			barrier();				\
		}						\
		release;					\
		test_delay(10);					\
	}

/* Acquire/release pair with acquire-CAS and store-release. */
TEST_KERNEL_LOCKED(with_memorder,
		   cmpxchg_acquire(flag, 0, 1) == 0,
		   smp_store_release(flag, 0));
/* Relaxed CAS and plain marked store: "wrong" (insufficient) ordering. */
TEST_KERNEL_LOCKED(wrong_memorder,
		   cmpxchg_relaxed(flag, 0, 1) == 0,
		   WRITE_ONCE(*flag, 0));
/* Compiler-builtin atomics with acquire/release ordering. */
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
/* Compiler-builtin atomics with relaxed ("wrong") ordering. */
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));
5168bc32b34SMarco Elver
517a146fed5SMarco Elver /* ===== Test cases ===== */
518a146fed5SMarco Elver
5198bc32b34SMarco Elver /*
5208bc32b34SMarco Elver * Tests that various barriers have the expected effect on internal state. Not
5218bc32b34SMarco Elver * exhaustive on atomic_t operations. Unlike the selftest, also checks for
5228bc32b34SMarco Elver * too-strict barrier instrumentation; these can be tolerated, because it does
5238bc32b34SMarco Elver * not cause false positives, but at least we should be aware of such cases.
5248bc32b34SMarco Elver */
test_barrier_nothreads(struct kunit * test)5258bc32b34SMarco Elver static void test_barrier_nothreads(struct kunit *test)
5268bc32b34SMarco Elver {
5278bc32b34SMarco Elver #ifdef CONFIG_KCSAN_WEAK_MEMORY
5288bc32b34SMarco Elver struct kcsan_scoped_access *reorder_access = ¤t->kcsan_ctx.reorder_access;
5298bc32b34SMarco Elver #else
5308bc32b34SMarco Elver struct kcsan_scoped_access *reorder_access = NULL;
5318bc32b34SMarco Elver #endif
5328bc32b34SMarco Elver arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
5338bc32b34SMarco Elver atomic_t dummy;
5348bc32b34SMarco Elver
5358bc32b34SMarco Elver KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
5368bc32b34SMarco Elver KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));
5378bc32b34SMarco Elver
5388bc32b34SMarco Elver #define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name) \
5398bc32b34SMarco Elver do { \
5408bc32b34SMarco Elver reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \
5418bc32b34SMarco Elver reorder_access->size = sizeof(test_var); \
5428bc32b34SMarco Elver barrier; \
5438bc32b34SMarco Elver KUNIT_EXPECT_EQ_MSG(test, reorder_access->size, \
5448bc32b34SMarco Elver order_before ? 0 : sizeof(test_var), \
5458bc32b34SMarco Elver "improperly instrumented type=(" #access_type "): " name); \
5468bc32b34SMarco Elver } while (0)
5478bc32b34SMarco Elver #define KCSAN_EXPECT_READ_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(0, b, o, #b)
5488bc32b34SMarco Elver #define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
5498bc32b34SMarco Elver #define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
5508bc32b34SMarco Elver
551a70d36e6SMarco Elver /*
552a70d36e6SMarco Elver * Lockdep initialization can strengthen certain locking operations due
553a70d36e6SMarco Elver * to calling into instrumented files; "warm up" our locks.
554a70d36e6SMarco Elver */
555a70d36e6SMarco Elver spin_lock(&test_spinlock);
556a70d36e6SMarco Elver spin_unlock(&test_spinlock);
557a70d36e6SMarco Elver mutex_lock(&test_mutex);
558a70d36e6SMarco Elver mutex_unlock(&test_mutex);
559a70d36e6SMarco Elver
5608bc32b34SMarco Elver /* Force creating a valid entry in reorder_access first. */
5618bc32b34SMarco Elver test_var = 0;
5628bc32b34SMarco Elver while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
5638bc32b34SMarco Elver __kcsan_check_read(&test_var, sizeof(test_var));
5648bc32b34SMarco Elver KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));
5658bc32b34SMarco Elver
5668bc32b34SMarco Elver kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */
5678bc32b34SMarco Elver
5688bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(mb(), true);
5698bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(wmb(), false);
5708bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(rmb(), true);
5718bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
5728bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false);
5738bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true);
5748bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false);
5758bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true);
5768bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true);
5778bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true);
5788bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);
5798bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true);
5808bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false);
5818bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true);
5828bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true);
5838bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true);
5848bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false);
5858bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true);
5868bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
5878bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
5888bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false);
5898bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false);
5908bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false);
5918bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true);
5928bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false);
5938bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
5948bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false);
5958bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true);
5968bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
5978bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true);
5988bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
5998bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true);
6008bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
6018bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true);
6028bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true);
6038bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true);
6048bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true);
6058bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true);
6068bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
6078bc32b34SMarco Elver KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
608a70d36e6SMarco Elver KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
609a70d36e6SMarco Elver KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
610a70d36e6SMarco Elver KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
611a70d36e6SMarco Elver KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);
6128bc32b34SMarco Elver
6138bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
6148bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
6158bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(rmb(), false);
6168bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
6178bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true);
6188bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false);
6198bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true);
6208bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false);
6218bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true);
6228bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true);
6238bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);
6248bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true);
6258bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false);
6268bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true);
6278bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true);
6288bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true);
6298bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false);
6308bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true);
6318bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
6328bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
6338bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false);
6348bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false);
6358bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false);
6368bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true);
6378bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false);
6388bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
6398bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false);
6408bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true);
6418bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
6428bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true);
6438bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
6448bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true);
6458bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
6468bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true);
6478bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true);
6488bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true);
6498bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true);
6508bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true);
6518bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
6528bc32b34SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
653a70d36e6SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
654a70d36e6SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
655a70d36e6SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
656a70d36e6SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);
6578bc32b34SMarco Elver
6588bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(mb(), true);
6598bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(wmb(), true);
6608bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(rmb(), true);
6618bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
6628bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true);
6638bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true);
6648bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true);
6658bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true);
6668bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true);
6678bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true);
6688bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);
6698bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true);
6708bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false);
6718bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true);
6728bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true);
6738bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true);
6748bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false);
6758bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true);
6768bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
6778bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
6788bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false);
6798bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false);
6808bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false);
6818bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true);
6828bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false);
6838bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
6848bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false);
6858bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true);
6868bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
6878bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true);
6888bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
6898bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true);
6908bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
6918bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true);
6928bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true);
6938bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true);
6948bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true);
6958bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true);
6968bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
6978bc32b34SMarco Elver KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
698a70d36e6SMarco Elver KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
699a70d36e6SMarco Elver KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
700a70d36e6SMarco Elver KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
701a70d36e6SMarco Elver KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
7028bc32b34SMarco Elver
703b473a389SMarco Elver #ifdef clear_bit_unlock_is_negative_byte
704b473a389SMarco Elver KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
705b473a389SMarco Elver KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
706b473a389SMarco Elver KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
707b473a389SMarco Elver #endif
7088bc32b34SMarco Elver kcsan_nestable_atomic_end();
7098bc32b34SMarco Elver }
7108bc32b34SMarco Elver
711a146fed5SMarco Elver /* Simple test with normal data race. */
712a146fed5SMarco Elver __no_kcsan
test_basic(struct kunit * test)713a146fed5SMarco Elver static void test_basic(struct kunit *test)
714a146fed5SMarco Elver {
7157310bd1fSMarco Elver struct expect_report expect = {
716a146fed5SMarco Elver .access = {
717a146fed5SMarco Elver { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
718a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
719a146fed5SMarco Elver },
720a146fed5SMarco Elver };
7217310bd1fSMarco Elver struct expect_report never = {
722a146fed5SMarco Elver .access = {
723a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
724a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
725a146fed5SMarco Elver },
726a146fed5SMarco Elver };
727a146fed5SMarco Elver bool match_expect = false;
728a146fed5SMarco Elver bool match_never = false;
729a146fed5SMarco Elver
730a146fed5SMarco Elver begin_test_checks(test_kernel_write, test_kernel_read);
731a146fed5SMarco Elver do {
732a146fed5SMarco Elver match_expect |= report_matches(&expect);
733a146fed5SMarco Elver match_never = report_matches(&never);
734a146fed5SMarco Elver } while (!end_test_checks(match_never));
735a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
736a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
737a146fed5SMarco Elver }
738a146fed5SMarco Elver
739a146fed5SMarco Elver /*
740a146fed5SMarco Elver * Stress KCSAN with lots of concurrent races on different addresses until
741a146fed5SMarco Elver * timeout.
742a146fed5SMarco Elver */
743a146fed5SMarco Elver __no_kcsan
test_concurrent_races(struct kunit * test)744a146fed5SMarco Elver static void test_concurrent_races(struct kunit *test)
745a146fed5SMarco Elver {
7467310bd1fSMarco Elver struct expect_report expect = {
747a146fed5SMarco Elver .access = {
748a146fed5SMarco Elver /* NULL will match any address. */
749a146fed5SMarco Elver { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
750a146fed5SMarco Elver { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
751a146fed5SMarco Elver },
752a146fed5SMarco Elver };
7537310bd1fSMarco Elver struct expect_report never = {
754a146fed5SMarco Elver .access = {
755a146fed5SMarco Elver { test_kernel_rmw_array, NULL, 0, 0 },
756a146fed5SMarco Elver { test_kernel_rmw_array, NULL, 0, 0 },
757a146fed5SMarco Elver },
758a146fed5SMarco Elver };
759a146fed5SMarco Elver bool match_expect = false;
760a146fed5SMarco Elver bool match_never = false;
761a146fed5SMarco Elver
762a146fed5SMarco Elver begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
763a146fed5SMarco Elver do {
764a146fed5SMarco Elver match_expect |= report_matches(&expect);
765a146fed5SMarco Elver match_never |= report_matches(&never);
766a146fed5SMarco Elver } while (!end_test_checks(false));
767a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
768a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
769a146fed5SMarco Elver }
770a146fed5SMarco Elver
/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	/* A read racing with a write that never changes the value. */
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	/* Two value-preserving writes racing with each other. */
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	/*
	 * With VALUE_CHANGE_ONLY, races where the value does not change are
	 * filtered and must not be reported; otherwise a report is expected.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}
799a146fed5SMarco Elver
/*
 * Test that the rules where the KCSAN_REPORT_VALUE_CHANGE_ONLY option should
 * never apply work.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	/* Read racing with a value-preserving write (RCU-exception variant). */
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	/* Two value-preserving writes racing with each other. */
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange_rcu(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	/* Must be reported even with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=y. */
	KUNIT_EXPECT_TRUE(test, match_expect);
}
828a146fed5SMarco Elver
829a146fed5SMarco Elver /* Test that data races of unknown origin are reported. */
830a146fed5SMarco Elver __no_kcsan
test_unknown_origin(struct kunit * test)831a146fed5SMarco Elver static void test_unknown_origin(struct kunit *test)
832a146fed5SMarco Elver {
8337310bd1fSMarco Elver struct expect_report expect = {
834a146fed5SMarco Elver .access = {
835a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
836a146fed5SMarco Elver { NULL },
837a146fed5SMarco Elver },
838a146fed5SMarco Elver };
839a146fed5SMarco Elver bool match_expect = false;
840a146fed5SMarco Elver
841a146fed5SMarco Elver begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
842a146fed5SMarco Elver do {
843a146fed5SMarco Elver match_expect = report_matches(&expect);
844a146fed5SMarco Elver } while (!end_test_checks(match_expect));
845a146fed5SMarco Elver if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
846a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
847a146fed5SMarco Elver else
848a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
849a146fed5SMarco Elver }
850a146fed5SMarco Elver
851a146fed5SMarco Elver /* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
852a146fed5SMarco Elver __no_kcsan
test_write_write_assume_atomic(struct kunit * test)853a146fed5SMarco Elver static void test_write_write_assume_atomic(struct kunit *test)
854a146fed5SMarco Elver {
8557310bd1fSMarco Elver struct expect_report expect = {
856a146fed5SMarco Elver .access = {
857a146fed5SMarco Elver { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
858a146fed5SMarco Elver { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
859a146fed5SMarco Elver },
860a146fed5SMarco Elver };
861a146fed5SMarco Elver bool match_expect = false;
862a146fed5SMarco Elver
863a146fed5SMarco Elver begin_test_checks(test_kernel_write, test_kernel_write);
864a146fed5SMarco Elver do {
865a146fed5SMarco Elver sink_value(READ_ONCE(test_var)); /* induce value-change */
866a146fed5SMarco Elver match_expect = report_matches(&expect);
867a146fed5SMarco Elver } while (!end_test_checks(match_expect));
868a146fed5SMarco Elver if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
869a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
870a146fed5SMarco Elver else
871a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
872a146fed5SMarco Elver }
873a146fed5SMarco Elver
874a146fed5SMarco Elver /*
875a146fed5SMarco Elver * Test that data races with writes larger than word-size are always reported,
876a146fed5SMarco Elver * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
877a146fed5SMarco Elver */
878a146fed5SMarco Elver __no_kcsan
test_write_write_struct(struct kunit * test)879a146fed5SMarco Elver static void test_write_write_struct(struct kunit *test)
880a146fed5SMarco Elver {
8817310bd1fSMarco Elver struct expect_report expect = {
882a146fed5SMarco Elver .access = {
883a146fed5SMarco Elver { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
884a146fed5SMarco Elver { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
885a146fed5SMarco Elver },
886a146fed5SMarco Elver };
887a146fed5SMarco Elver bool match_expect = false;
888a146fed5SMarco Elver
889a146fed5SMarco Elver begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
890a146fed5SMarco Elver do {
891a146fed5SMarco Elver match_expect = report_matches(&expect);
892a146fed5SMarco Elver } while (!end_test_checks(match_expect));
893a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
894a146fed5SMarco Elver }
895a146fed5SMarco Elver
/*
 * Test that data races where only one write is larger than word-size are always
 * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	/* Whole-struct write racing with a write to a single struct member. */
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
917a146fed5SMarco Elver
918a146fed5SMarco Elver /* Test that races with atomic accesses never result in reports. */
919a146fed5SMarco Elver __no_kcsan
test_read_atomic_write_atomic(struct kunit * test)920a146fed5SMarco Elver static void test_read_atomic_write_atomic(struct kunit *test)
921a146fed5SMarco Elver {
922a146fed5SMarco Elver bool match_never = false;
923a146fed5SMarco Elver
924a146fed5SMarco Elver begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
925a146fed5SMarco Elver do {
926a146fed5SMarco Elver match_never = report_available();
927a146fed5SMarco Elver } while (!end_test_checks(match_never));
928a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
929a146fed5SMarco Elver }
930a146fed5SMarco Elver
/* Test that a race with an atomic and plain access result in reports. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	/* Plain read racing with a marked (atomic) write. */
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	/* With IGNORE_ATOMICS, such races are not reported; skip the test. */
	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
951a146fed5SMarco Elver
/* Test that atomic RMWs generate correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	/*
	 * Plain read racing with an atomic read-modify-write; the RMW side must
	 * be reported as a compound atomic write.
	 */
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	/* With IGNORE_ATOMICS, such races are not reported; skip the test. */
	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
973a146fed5SMarco Elver
974a146fed5SMarco Elver /* Zero-sized accesses should never cause data race reports. */
975a146fed5SMarco Elver __no_kcsan
test_zero_size_access(struct kunit * test)976a146fed5SMarco Elver static void test_zero_size_access(struct kunit *test)
977a146fed5SMarco Elver {
9787310bd1fSMarco Elver struct expect_report expect = {
979a146fed5SMarco Elver .access = {
980a146fed5SMarco Elver { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
981a146fed5SMarco Elver { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
982a146fed5SMarco Elver },
983a146fed5SMarco Elver };
9847310bd1fSMarco Elver struct expect_report never = {
985a146fed5SMarco Elver .access = {
986a146fed5SMarco Elver { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
987a146fed5SMarco Elver { test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
988a146fed5SMarco Elver },
989a146fed5SMarco Elver };
990a146fed5SMarco Elver bool match_expect = false;
991a146fed5SMarco Elver bool match_never = false;
992a146fed5SMarco Elver
993a146fed5SMarco Elver begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
994a146fed5SMarco Elver do {
995a146fed5SMarco Elver match_expect |= report_matches(&expect);
996a146fed5SMarco Elver match_never = report_matches(&never);
997a146fed5SMarco Elver } while (!end_test_checks(match_never));
998a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
999a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
1000a146fed5SMarco Elver }
1001a146fed5SMarco Elver
1002a146fed5SMarco Elver /* Test the data_race() macro. */
1003a146fed5SMarco Elver __no_kcsan
test_data_race(struct kunit * test)1004a146fed5SMarco Elver static void test_data_race(struct kunit *test)
1005a146fed5SMarco Elver {
1006a146fed5SMarco Elver bool match_never = false;
1007a146fed5SMarco Elver
1008a146fed5SMarco Elver begin_test_checks(test_kernel_data_race, test_kernel_data_race);
1009a146fed5SMarco Elver do {
1010a146fed5SMarco Elver match_never = report_available();
1011a146fed5SMarco Elver } while (!end_test_checks(match_never));
1012a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
1013a146fed5SMarco Elver }
1014a146fed5SMarco Elver
1015a146fed5SMarco Elver __no_kcsan
test_assert_exclusive_writer(struct kunit * test)1016a146fed5SMarco Elver static void test_assert_exclusive_writer(struct kunit *test)
1017a146fed5SMarco Elver {
10187310bd1fSMarco Elver struct expect_report expect = {
1019a146fed5SMarco Elver .access = {
1020a146fed5SMarco Elver { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
1021a146fed5SMarco Elver { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
1022a146fed5SMarco Elver },
1023a146fed5SMarco Elver };
1024a146fed5SMarco Elver bool match_expect = false;
1025a146fed5SMarco Elver
1026a146fed5SMarco Elver begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
1027a146fed5SMarco Elver do {
1028a146fed5SMarco Elver match_expect = report_matches(&expect);
1029a146fed5SMarco Elver } while (!end_test_checks(match_expect));
1030a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
1031a146fed5SMarco Elver }
1032a146fed5SMarco Elver
__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	/* The exclusive-access assertion must flag even a concurrent read. */
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
1050a146fed5SMarco Elver
__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	/* Exclusive-access assertion conflicting with an exclusive-writer assertion. */
	struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	/* Two exclusive-access assertions conflicting with each other. */
	struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	/* Two exclusive-writer assertions alone must never conflict. */
	struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}
1086a146fed5SMarco Elver
__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	/*
	 * Bits-assertion racing with a write that changes the asserted bits;
	 * the writer is marked atomic unless IGNORE_ATOMICS strips the flag.
	 */
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
1105a146fed5SMarco Elver
1106a146fed5SMarco Elver __no_kcsan
test_assert_exclusive_bits_nochange(struct kunit * test)1107a146fed5SMarco Elver static void test_assert_exclusive_bits_nochange(struct kunit *test)
1108a146fed5SMarco Elver {
1109a146fed5SMarco Elver bool match_never = false;
1110a146fed5SMarco Elver
1111a146fed5SMarco Elver begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
1112a146fed5SMarco Elver do {
1113a146fed5SMarco Elver match_never = report_available();
1114a146fed5SMarco Elver } while (!end_test_checks(match_never));
1115a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
1116a146fed5SMarco Elver }
1117a146fed5SMarco Elver
1118a146fed5SMarco Elver __no_kcsan
test_assert_exclusive_writer_scoped(struct kunit * test)1119a146fed5SMarco Elver static void test_assert_exclusive_writer_scoped(struct kunit *test)
1120a146fed5SMarco Elver {
11217310bd1fSMarco Elver struct expect_report expect_start = {
1122a146fed5SMarco Elver .access = {
1123a146fed5SMarco Elver { test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
1124a146fed5SMarco Elver { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
1125a146fed5SMarco Elver },
1126a146fed5SMarco Elver };
11277310bd1fSMarco Elver struct expect_report expect_inscope = {
1128a146fed5SMarco Elver .access = {
1129a146fed5SMarco Elver { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
1130a146fed5SMarco Elver { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
1131a146fed5SMarco Elver },
1132a146fed5SMarco Elver };
1133a146fed5SMarco Elver bool match_expect_start = false;
11346c65eb75SMarco Elver bool match_expect_inscope = false;
1135a146fed5SMarco Elver
1136a146fed5SMarco Elver begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
1137a146fed5SMarco Elver do {
1138a146fed5SMarco Elver match_expect_start |= report_matches(&expect_start);
11396c65eb75SMarco Elver match_expect_inscope |= report_matches(&expect_inscope);
11406c65eb75SMarco Elver } while (!end_test_checks(match_expect_inscope));
1141a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect_start);
11426c65eb75SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect_inscope);
1143a146fed5SMarco Elver }
1144a146fed5SMarco Elver
1145a146fed5SMarco Elver __no_kcsan
test_assert_exclusive_access_scoped(struct kunit * test)1146a146fed5SMarco Elver static void test_assert_exclusive_access_scoped(struct kunit *test)
1147a146fed5SMarco Elver {
11487310bd1fSMarco Elver struct expect_report expect_start1 = {
1149a146fed5SMarco Elver .access = {
1150a146fed5SMarco Elver { test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
1151a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
1152a146fed5SMarco Elver },
1153a146fed5SMarco Elver };
11547310bd1fSMarco Elver struct expect_report expect_start2 = {
1155a146fed5SMarco Elver .access = { expect_start1.access[0], expect_start1.access[0] },
1156a146fed5SMarco Elver };
11577310bd1fSMarco Elver struct expect_report expect_inscope = {
1158a146fed5SMarco Elver .access = {
1159a146fed5SMarco Elver { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
1160a146fed5SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
1161a146fed5SMarco Elver },
1162a146fed5SMarco Elver };
1163a146fed5SMarco Elver bool match_expect_start = false;
1164a146fed5SMarco Elver bool match_expect_inscope = false;
1165a146fed5SMarco Elver
1166a146fed5SMarco Elver begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
1167a146fed5SMarco Elver end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
1168a146fed5SMarco Elver do {
1169a146fed5SMarco Elver match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
1170a146fed5SMarco Elver match_expect_inscope |= report_matches(&expect_inscope);
11716c65eb75SMarco Elver } while (!end_test_checks(match_expect_inscope));
1172a146fed5SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect_start);
11736c65eb75SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect_inscope);
1174a146fed5SMarco Elver }
1175a146fed5SMarco Elver
/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused about jiffies's declaration on different architectures, i.e. no
 * report is ever expected for concurrent jiffies readers.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
1192a146fed5SMarco Elver
1193a146fed5SMarco Elver /* Test that racing accesses in seqlock critical sections are not reported. */
1194a146fed5SMarco Elver __no_kcsan
test_seqlock_noreport(struct kunit * test)1195a146fed5SMarco Elver static void test_seqlock_noreport(struct kunit *test)
1196a146fed5SMarco Elver {
1197a146fed5SMarco Elver bool match_never = false;
1198a146fed5SMarco Elver
1199a146fed5SMarco Elver begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
1200a146fed5SMarco Elver do {
1201a146fed5SMarco Elver match_never = report_available();
1202a146fed5SMarco Elver } while (!end_test_checks(match_never));
1203a146fed5SMarco Elver KUNIT_EXPECT_FALSE(test, match_never);
1204a146fed5SMarco Elver }
1205a146fed5SMarco Elver
/*
 * Test atomic builtins work and required instrumentation functions exist. We
 * also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		/* Enable KCSAN for the current task around the builtins below. */
		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		/* Exchange returns the previous value (42), leaving 20. */
		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		tmp = 20L;
		/* Successful compare-exchange: 20 -> 30; tmp keeps its expected value. */
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		/* Failed compare-exchange: tmp is updated to the current value (30). */
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		/* Each fetch-op returns the previous value; test_var: 30->31->30->14->1->241->-2. */
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var);

		/* Fence instrumentation must also exist and link. */
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
1258a146fed5SMarco Elver
1259d8fd74d3SMarco Elver __no_kcsan
test_1bit_value_change(struct kunit * test)1260d8fd74d3SMarco Elver static void test_1bit_value_change(struct kunit *test)
1261d8fd74d3SMarco Elver {
12627310bd1fSMarco Elver struct expect_report expect = {
1263d8fd74d3SMarco Elver .access = {
1264d8fd74d3SMarco Elver { test_kernel_read, &test_var, sizeof(test_var), 0 },
1265d8fd74d3SMarco Elver { test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
1266d8fd74d3SMarco Elver },
1267d8fd74d3SMarco Elver };
1268d8fd74d3SMarco Elver bool match = false;
1269d8fd74d3SMarco Elver
1270d8fd74d3SMarco Elver begin_test_checks(test_kernel_read, test_kernel_xor_1bit);
1271d8fd74d3SMarco Elver do {
1272d8fd74d3SMarco Elver match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)
1273d8fd74d3SMarco Elver ? report_available()
1274d8fd74d3SMarco Elver : report_matches(&expect);
1275d8fd74d3SMarco Elver } while (!end_test_checks(match));
1276d8fd74d3SMarco Elver if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
1277d8fd74d3SMarco Elver KUNIT_EXPECT_FALSE(test, match);
1278d8fd74d3SMarco Elver else
1279d8fd74d3SMarco Elver KUNIT_EXPECT_TRUE(test, match);
1280d8fd74d3SMarco Elver }
1281d8fd74d3SMarco Elver
12828bc32b34SMarco Elver __no_kcsan
test_correct_barrier(struct kunit * test)12838bc32b34SMarco Elver static void test_correct_barrier(struct kunit *test)
12848bc32b34SMarco Elver {
12858bc32b34SMarco Elver struct expect_report expect = {
12868bc32b34SMarco Elver .access = {
12878bc32b34SMarco Elver { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
12888bc32b34SMarco Elver { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
12898bc32b34SMarco Elver },
12908bc32b34SMarco Elver };
12918bc32b34SMarco Elver bool match_expect = false;
12928bc32b34SMarco Elver
12938bc32b34SMarco Elver test_struct.val[0] = 0; /* init unlocked */
12948bc32b34SMarco Elver begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder);
12958bc32b34SMarco Elver do {
12968bc32b34SMarco Elver match_expect = report_matches_any_reordered(&expect);
12978bc32b34SMarco Elver } while (!end_test_checks(match_expect));
12988bc32b34SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
12998bc32b34SMarco Elver }
13008bc32b34SMarco Elver
13018bc32b34SMarco Elver __no_kcsan
test_missing_barrier(struct kunit * test)13028bc32b34SMarco Elver static void test_missing_barrier(struct kunit *test)
13038bc32b34SMarco Elver {
13048bc32b34SMarco Elver struct expect_report expect = {
13058bc32b34SMarco Elver .access = {
13068bc32b34SMarco Elver { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
13078bc32b34SMarco Elver { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
13088bc32b34SMarco Elver },
13098bc32b34SMarco Elver };
13108bc32b34SMarco Elver bool match_expect = false;
13118bc32b34SMarco Elver
13128bc32b34SMarco Elver test_struct.val[0] = 0; /* init unlocked */
13138bc32b34SMarco Elver begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder);
13148bc32b34SMarco Elver do {
13158bc32b34SMarco Elver match_expect = report_matches_any_reordered(&expect);
13168bc32b34SMarco Elver } while (!end_test_checks(match_expect));
13178bc32b34SMarco Elver if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
13188bc32b34SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
13198bc32b34SMarco Elver else
13208bc32b34SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
13218bc32b34SMarco Elver }
13228bc32b34SMarco Elver
13238bc32b34SMarco Elver __no_kcsan
test_atomic_builtins_correct_barrier(struct kunit * test)13248bc32b34SMarco Elver static void test_atomic_builtins_correct_barrier(struct kunit *test)
13258bc32b34SMarco Elver {
13268bc32b34SMarco Elver struct expect_report expect = {
13278bc32b34SMarco Elver .access = {
13288bc32b34SMarco Elver { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
13298bc32b34SMarco Elver { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
13308bc32b34SMarco Elver },
13318bc32b34SMarco Elver };
13328bc32b34SMarco Elver bool match_expect = false;
13338bc32b34SMarco Elver
13348bc32b34SMarco Elver test_struct.val[0] = 0; /* init unlocked */
13358bc32b34SMarco Elver begin_test_checks(test_kernel_atomic_builtin_with_memorder,
13368bc32b34SMarco Elver test_kernel_atomic_builtin_with_memorder);
13378bc32b34SMarco Elver do {
13388bc32b34SMarco Elver match_expect = report_matches_any_reordered(&expect);
13398bc32b34SMarco Elver } while (!end_test_checks(match_expect));
13408bc32b34SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
13418bc32b34SMarco Elver }
13428bc32b34SMarco Elver
13438bc32b34SMarco Elver __no_kcsan
test_atomic_builtins_missing_barrier(struct kunit * test)13448bc32b34SMarco Elver static void test_atomic_builtins_missing_barrier(struct kunit *test)
13458bc32b34SMarco Elver {
13468bc32b34SMarco Elver struct expect_report expect = {
13478bc32b34SMarco Elver .access = {
13488bc32b34SMarco Elver { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
13498bc32b34SMarco Elver { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
13508bc32b34SMarco Elver },
13518bc32b34SMarco Elver };
13528bc32b34SMarco Elver bool match_expect = false;
13538bc32b34SMarco Elver
13548bc32b34SMarco Elver test_struct.val[0] = 0; /* init unlocked */
13558bc32b34SMarco Elver begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
13568bc32b34SMarco Elver test_kernel_atomic_builtin_wrong_memorder);
13578bc32b34SMarco Elver do {
13588bc32b34SMarco Elver match_expect = report_matches_any_reordered(&expect);
13598bc32b34SMarco Elver } while (!end_test_checks(match_expect));
13608bc32b34SMarco Elver if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
13618bc32b34SMarco Elver KUNIT_EXPECT_TRUE(test, match_expect);
13628bc32b34SMarco Elver else
13638bc32b34SMarco Elver KUNIT_EXPECT_FALSE(test, match_expect);
13648bc32b34SMarco Elver }
13658bc32b34SMarco Elver
/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{
	/* The thread count is passed through the opaque param pointer as a long. */
	long nthreads = (long)prev;

	if (nthreads < 0 || nthreads >= 32)
		nthreads = 0; /* stop (negative marks the final param, see below) */
	else if (!nthreads)
		nthreads = 2; /* initial value */
	else if (nthreads < 5)
		nthreads++;
	else if (nthreads == 5)
		nthreads = 8;
	else
		nthreads *= 2;

	if (!preempt_model_preemptible() ||
	    !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
		/*
		 * Without any preemption, keep 2 CPUs free for other tasks, one
		 * of which is the main test case function checking for
		 * completion or failure.
		 */
		const long min_unused_cpus = preempt_model_none() ? 2 : 0;
		const long min_required_cpus = 2 + min_unused_cpus;

		if (num_online_cpus() < min_required_cpus) {
			pr_err_once("Too few online CPUs (%u < %ld) for test\n",
				    num_online_cpus(), min_required_cpus);
			nthreads = 0;
		} else if (nthreads >= num_online_cpus() - min_unused_cpus) {
			/* Use negative value to indicate last param. */
			nthreads = -(num_online_cpus() - min_unused_cpus);
			pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
				     -nthreads, num_online_cpus());
		}
	}

	/* test_init() takes abs() of the value, so describe the magnitude only. */
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
	return (void *)nthreads;
}
1413f6a14914SMarco Elver
/* Run a test case once per thread count produced by nthreads_gen_params(). */
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
	/* "nothreads" cases run without access threads (see test_init()). */
	KUNIT_CASE(test_barrier_nothreads),
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	KCSAN_KUNIT_CASE(test_1bit_value_change),
	KCSAN_KUNIT_CASE(test_correct_barrier),
	KCSAN_KUNIT_CASE(test_missing_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier),
	{},
};
1447a146fed5SMarco Elver
1448a146fed5SMarco Elver /* ===== End test cases ===== */
1449a146fed5SMarco Elver
1450a146fed5SMarco Elver /* Concurrent accesses from interrupts. */
1451a146fed5SMarco Elver __no_kcsan
access_thread_timer(struct timer_list * timer)1452a146fed5SMarco Elver static void access_thread_timer(struct timer_list *timer)
1453a146fed5SMarco Elver {
1454a146fed5SMarco Elver static atomic_t cnt = ATOMIC_INIT(0);
1455a146fed5SMarco Elver unsigned int idx;
1456a146fed5SMarco Elver void (*func)(void);
1457a146fed5SMarco Elver
1458a146fed5SMarco Elver idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
1459a146fed5SMarco Elver /* Acquire potential initialization. */
1460a146fed5SMarco Elver func = smp_load_acquire(&access_kernels[idx]);
1461a146fed5SMarco Elver if (func)
1462a146fed5SMarco Elver func();
1463a146fed5SMarco Elver }
1464a146fed5SMarco Elver
1465a146fed5SMarco Elver /* The main loop for each thread. */
1466a146fed5SMarco Elver __no_kcsan
access_thread(void * arg)1467a146fed5SMarco Elver static int access_thread(void *arg)
1468a146fed5SMarco Elver {
1469a146fed5SMarco Elver struct timer_list timer;
1470a146fed5SMarco Elver unsigned int cnt = 0;
1471a146fed5SMarco Elver unsigned int idx;
1472a146fed5SMarco Elver void (*func)(void);
1473a146fed5SMarco Elver
1474a146fed5SMarco Elver timer_setup_on_stack(&timer, access_thread_timer, 0);
1475a146fed5SMarco Elver do {
1476a146fed5SMarco Elver might_sleep();
1477a146fed5SMarco Elver
1478a146fed5SMarco Elver if (!timer_pending(&timer))
1479a146fed5SMarco Elver mod_timer(&timer, jiffies + 1);
1480a146fed5SMarco Elver else {
1481a146fed5SMarco Elver /* Iterate through all kernels. */
1482a146fed5SMarco Elver idx = cnt++ % ARRAY_SIZE(access_kernels);
1483a146fed5SMarco Elver /* Acquire potential initialization. */
1484a146fed5SMarco Elver func = smp_load_acquire(&access_kernels[idx]);
1485a146fed5SMarco Elver if (func)
1486a146fed5SMarco Elver func();
1487a146fed5SMarco Elver }
1488a146fed5SMarco Elver } while (!torture_must_stop());
1489a146fed5SMarco Elver del_timer_sync(&timer);
1490a146fed5SMarco Elver destroy_timer_on_stack(&timer);
1491a146fed5SMarco Elver
1492a146fed5SMarco Elver torture_kthread_stopping("access_thread");
1493a146fed5SMarco Elver return 0;
1494a146fed5SMarco Elver }
1495a146fed5SMarco Elver
/*
 * Per-test-case init: clear the captured console output and, for threaded
 * test cases, start the requested number of torture threads running
 * access_thread().
 */
__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	/* Reset the console-report capture buffer. */
	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Test cases named "*nothreads*" run without access threads. */
	if (strstr(test->name, "nothreads"))
		return 0;

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	/* The previous test case must have cleaned up after itself. */
	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	/* Param came from nthreads_gen_params(); a negative sign only marks the last param. */
	nthreads = abs((long)test->param_value);
	if (WARN_ON(!nthreads))
		goto err;

	/* NULL-terminated array, so test_exit() can find the end. */
	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL, threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	kfree(threads);
	threads = NULL;
	torture_init_end();
	return -EINVAL;
}
1547a146fed5SMarco Elver
1548a146fed5SMarco Elver __no_kcsan
test_exit(struct kunit * test)1549a146fed5SMarco Elver static void test_exit(struct kunit *test)
1550a146fed5SMarco Elver {
1551a146fed5SMarco Elver struct task_struct **stop_thread;
1552a146fed5SMarco Elver int i;
1553a146fed5SMarco Elver
15548bc32b34SMarco Elver if (strstr(test->name, "nothreads"))
15558bc32b34SMarco Elver return;
15568bc32b34SMarco Elver
1557a146fed5SMarco Elver if (torture_cleanup_begin())
1558a146fed5SMarco Elver return;
1559a146fed5SMarco Elver
1560a146fed5SMarco Elver for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
1561a146fed5SMarco Elver WRITE_ONCE(access_kernels[i], NULL);
1562a146fed5SMarco Elver
1563a146fed5SMarco Elver if (threads) {
1564a146fed5SMarco Elver for (stop_thread = threads; *stop_thread; stop_thread++)
1565a146fed5SMarco Elver torture_stop_kthread(reader_thread, *stop_thread);
1566a146fed5SMarco Elver
1567a146fed5SMarco Elver kfree(threads);
1568a146fed5SMarco Elver threads = NULL;
1569a146fed5SMarco Elver }
1570a146fed5SMarco Elver
1571a146fed5SMarco Elver torture_cleanup_end();
1572a146fed5SMarco Elver }
1573a146fed5SMarco Elver
/* Attach probe_console() to the 'console' tracepoint to capture reports. */
__no_kcsan
static void register_tracepoints(void)
{
	register_trace_console(probe_console, NULL);
}
1579a146fed5SMarco Elver
/* Detach probe_console() from the 'console' tracepoint. */
__no_kcsan
static void unregister_tracepoints(void)
{
	unregister_trace_console(probe_console, NULL);
}
1585a146fed5SMarco Elver
/* Suite-wide init: start capturing console output before any test runs. */
static int kcsan_suite_init(struct kunit_suite *suite)
{
	register_tracepoints();
	return 0;
}
1591a146fed5SMarco Elver
/*
 * Suite-wide exit: stop capturing console output and wait for in-flight
 * tracepoint probes to finish before returning.
 */
static void kcsan_suite_exit(struct kunit_suite *suite)
{
	unregister_tracepoints();
	tracepoint_synchronize_unregister();
}
1597a146fed5SMarco Elver
/* KUnit suite definition; per-case setup/teardown manages the access threads. */
static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kcsan_suite_init,
	.suite_exit = kcsan_suite_exit,
};

kunit_test_suites(&kcsan_test_suite);
1608a146fed5SMarco Elver
1609a146fed5SMarco Elver MODULE_LICENSE("GPL v2");
1610a146fed5SMarco Elver MODULE_AUTHOR("Marco Elver <elver@google.com>");
1611