// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since the
 * interface with which KCSAN's reports are obtained is via the console, this is
 * the output we should verify. Each test case checks the presence (or
 * absence) of generated reports. Relies on the 'console' tracepoint to capture
 * reports as they appear in the kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */

#define pr_fmt(fmt) "kcsan_test: " fmt

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>

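/*
 * Expected access type for read-modify-write accesses: if the compiler
 * supports compound instrumentation, such accesses are expected to be reported
 * as compound read-writes; otherwise fall back to the given alternative type.
 */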
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif

/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[3][512];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Setup test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
	kcsan_disable_current();

	/*
	 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
	 * least one race is reported.
	 */
	end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

	/* Signal start; release potential initialization of shared data. */
	smp_store_release(&access_kernels[0], func1);
	smp_store_release(&access_kernels[1], func2);
}

/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}

/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KCSAN report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
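
/*
 * For illustration only (addresses, task and CPU numbers vary), the three
 * lines of interest captured above typically look roughly like:
 *
 *   BUG: KCSAN: data-race in test_kernel_read / test_kernel_write
 *   write to 0xffffffffabcd1234 of 8 bytes by task 123 on cpu 1:
 *   read to 0xffffffffabcd1234 of 8 bytes by task 124 on cpu 0:
 */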

/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;    /* Function pointer to expected function of top frame. */
		void *addr;  /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type;    /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};

/* Check observed report matches information in @r. */
__no_kcsan
static bool report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;
	int i;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexicographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match, remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				((ty & KCSAN_ACCESS_WRITE) ?
					"assert no accesses" :
					"assert no writes") :
				((ty & KCSAN_ACCESS_WRITE) ?
					((ty & KCSAN_ACCESS_COMPOUND) ?
						"read-write" :
						"write") :
					"read");
		const char *const access_type_aux =
			(ty & KCSAN_ACCESS_ATOMIC) ?
				" (marked)" :
				((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);

/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }

static noinline void test_kernel_read(void) { sink_value(test_var); }

static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

static noinline void test_kernel_data_race(void) { data_race(test_var++); }

static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}

#define TEST_CHANGE_BITS 0xff00ff00

static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid a race of unknown origin for this test; just pretend
		 * the accesses are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}

/* To check that scoped assertions do trigger anywhere in scope. */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_rmw_array(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(test_array); ++i)
		test_array[i]++;
}

static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}

static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}

static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}

static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}

/* ===== Test cases ===== */

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	static const struct expect_report never = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write, test_kernel_read);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect);
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			/* NULL will match any address. */
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
		},
	};
	static const struct expect_report never = {
		.access = {
			{ test_kernel_rmw_array, NULL, 0, 0 },
			{ test_kernel_rmw_array, NULL, 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
	do {
		match_expect |= report_matches(&expect);
		match_never |= report_matches(&never);
	} while (!end_test_checks(false));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that the rules exempting certain functions from the
 * KCSAN_REPORT_VALUE_CHANGE_ONLY filter work: such races must always be
 * reported.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ NULL },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write, test_kernel_write);
	do {
		sink_value(READ_ONCE(test_var)); /* induce value-change */
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races where only one write is larger than word-size are
 * always reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that a race between an atomic and a plain access results in a report. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that atomic RMWs generate a correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report never = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_data_race, test_kernel_data_race);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	const struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	const struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
	const struct expect_report expect_start = {
		.access = {
			{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report expect_anywhere = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect_start = false;
	bool match_expect_anywhere = false;

	begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
	do {
		match_expect_start |= report_matches(&expect_start);
		match_expect_anywhere |= report_matches(&expect_anywhere);
	} while (!end_test_checks(match_expect_start && match_expect_anywhere));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
}

__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{
	const struct expect_report expect_start1 = {
		.access = {
			{ test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	const struct expect_report expect_start2 = {
		.access = { expect_start1.access[0], expect_start1.access[0] },
	};
	const struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
	end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
	do {
		match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_start && match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_TRUE(test, match_expect_inscope);
}

/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused about jiffies's declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Test that atomic builtins work and that the required instrumentation
 * functions exist. We also test that KCSAN understands they're atomic by
 * racing with them via test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		tmp = 20L;
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var);

		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32
 * (i.e. 2, 3, 4, 5, 8, 16, 32).
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{
	long nthreads = (long)prev;

	if (nthreads < 0 || nthreads >= 32)
		nthreads = 0; /* stop */
	else if (!nthreads)
		nthreads = 2; /* initial value */
	else if (nthreads < 5)
		nthreads++;
	else if (nthreads == 5)
		nthreads = 8;
	else
		nthreads *= 2;

	if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
		/*
		 * Without any preemption, keep 2 CPUs free for other tasks, one
		 * of which is the main test case function checking for
		 * completion or failure.
		 */
		const long min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0;
		const long min_required_cpus = 2 + min_unused_cpus;

		if (num_online_cpus() < min_required_cpus) {
			pr_err_once("Too few online CPUs (%u < %ld) for test\n",
				    num_online_cpus(), min_required_cpus);
			nthreads = 0;
		} else if (nthreads >= num_online_cpus() - min_unused_cpus) {
			/* Use negative value to indicate last param. */
			nthreads = -(num_online_cpus() - min_unused_cpus);
			pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
				     -nthreads, num_online_cpus());
		}
	}

	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
	return (void *)nthreads;
}

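/*
 * All test cases below are parameterized: each one is run once per thread
 * count produced by nthreads_gen_params() above.
 */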
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	{},
};

/* ===== End test cases ===== */

/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{
	static atomic_t cnt = ATOMIC_INIT(0);
	unsigned int idx;
	void (*func)(void);

	idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
	/* Acquire potential initialization. */
	func = smp_load_acquire(&access_kernels[idx]);
	if (func)
		func();
}

/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{
	struct timer_list timer;
	unsigned int cnt = 0;
	unsigned int idx;
	void (*func)(void);

	timer_setup_on_stack(&timer, access_thread_timer, 0);
	do {
		might_sleep();

		if (!timer_pending(&timer))
			mod_timer(&timer, jiffies + 1);
		else {
			/* Iterate through all kernels. */
			idx = cnt++ % ARRAY_SIZE(access_kernels);
			/* Acquire potential initialization. */
			func = smp_load_acquire(&access_kernels[idx]);
			if (func)
				func();
		}
	} while (!torture_must_stop());
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	torture_kthread_stopping("access_thread");
	return 0;
}

__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	nthreads = abs((long)test->param_value);
	if (WARN_ON(!nthreads))
		goto err;

	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL, threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	kfree(threads);
	threads = NULL;
	torture_init_end();
	return -EINVAL;
}

__no_kcsan
static void test_exit(struct kunit *test)
{
	struct task_struct **stop_thread;
	int i;

	if (torture_cleanup_begin())
		return;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
		WRITE_ONCE(access_kernels[i], NULL);

	if (threads) {
		for (stop_thread = threads; *stop_thread; stop_thread++)
			torture_stop_kthread(access_thread, *stop_thread);

		kfree(threads);
		threads = NULL;
	}

	torture_cleanup_end();
}

static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kcsan_test_suites[] = { &kcsan_test_suite, NULL };

__no_kcsan
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

__no_kcsan
static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do tracepoint setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kcsan_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kcsan_test_suites);
}

static void kcsan_test_exit(void)
{
	__kunit_test_suites_exit(kcsan_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall(kcsan_test_init);
module_exit(kcsan_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");