Merge tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull KCSAN updates from Paul McKenney:
 "This contains initialization fixups, testing improvements, addition of
  instruction pointer to data-race reports, and scoped data-race checks"

* tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  kcsan: selftest: Cleanup and add missing __init
  kcsan: Move ctx to start of argument list
  kcsan: Support reporting scoped read-write access type
  kcsan: Start stack trace with explicit location if provided
  kcsan: Save instruction pointer for scoped accesses
  kcsan: Add ability to pass instruction pointer of access to reporting
  kcsan: test: Fix flaky test case
  kcsan: test: Use kunit_skip() to skip tests
  kcsan: test: Defer kcsan_test_init() after kunit initialization
torvalds committed Nov 11, 2021
2 parents 5593a73 + ac20e39 commit ca2ef2d
Showing 6 changed files with 186 additions and 111 deletions.
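The common thread in this series is passing the instruction pointer (ip) of each access down into the reporting code. Throughout the diffs below, that ip is obtained with the kernel's _RET_IP_ macro, which is defined as (unsigned long)__builtin_return_address(0), i.e. the address the current function will return to. A minimal userspace sketch of the same mechanism (record_access() and the printed format are illustrative, not KCSAN code):

#include <stdio.h>

/* Same definition the kernel uses for _RET_IP_. */
#define _RET_IP_ ((unsigned long)__builtin_return_address(0))

/* noinline so a real call frame (and return address) exists. */
static __attribute__((noinline)) void record_access(void)
{
	/* _RET_IP_ here is the call site in the caller; this is what the
	 * KCSAN hooks in the diff below capture for every checked access. */
	printf("access checked from ip=0x%lx\n", _RET_IP_);
}

int main(void)
{
	record_access();	/* the recorded ip points at this call site */
	return 0;
}

Because each __tsan_*() hook is called directly from the instrumented code, its _RET_IP_ identifies the access itself rather than any KCSAN-internal frame.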
3 changes: 3 additions & 0 deletions include/linux/kcsan-checks.h
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
 /* Scoped access information. */
 struct kcsan_scoped_access {
 	struct list_head list;
+	/* Access information. */
 	const volatile void *ptr;
 	size_t size;
 	int type;
+	/* Location where scoped access was set up. */
+	unsigned long ip;
 };
 /*
  * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
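For context, a hedged sketch of how this struct is used; the function and variable names here are hypothetical, while the API and the behavior of the new ip field follow the kernel/kcsan/core.c changes below (kcsan_begin_scoped_access() stores _RET_IP_ into sa->ip):

#include <linux/kcsan-checks.h>

/* Hypothetical caller of the scoped-access API. */
static void monitor_flag(const volatile int *flag)
{
	struct kcsan_scoped_access sa;

	/* core.c records sa.ip = _RET_IP_ in here, i.e. this call site, so a
	 * later race report can name where the scope was established. */
	kcsan_begin_scoped_access(flag, sizeof(*flag),
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);

	/*
	 * While the scope is active, every instrumented access by this task
	 * re-checks *flag via kcsan_check_scoped_accesses().
	 */

	kcsan_end_scoped_access(&sa);	/* final re-check, reported against sa.ip */
}

This is roughly the pattern the ASSERT_EXCLUSIVE_*_SCOPED() macros expand to.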
75 changes: 43 additions & 32 deletions kernel/kcsan/core.c
@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
 /* Check scoped accesses; never inline because this is a slow-path! */
 static noinline void kcsan_check_scoped_accesses(void)
 {
@@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void)
 	struct kcsan_scoped_access *scoped_access;
 
 	ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
-	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
-		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+		check_access(scoped_access->ptr, scoped_access->size,
+			     scoped_access->type, scoped_access->ip);
+	}
 	ctx->scoped_accesses.prev = prev_save;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
 static __always_inline bool
-is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
 	if (type & KCSAN_ACCESS_ATOMIC)
 		return true;
@@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
 }
 
 static __always_inline bool
-should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
 	 * should not count towards skipped instructions, and (2) to actually
 	 * decrement kcsan_atomic_next for consecutive instruction stream.
 	 */
-	if (is_atomic(ptr, size, type, ctx))
+	if (is_atomic(ctx, ptr, size, type))
 		return false;
 
 	if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task)
 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 					    size_t size,
 					    int type,
+					    unsigned long ip,
 					    atomic_long_t *watchpoint,
 					    long encoded_watchpoint)
 {
@@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
 	if (consumed) {
 		kcsan_save_irqtrace(current);
-		kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
+		kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
 		kcsan_restore_irqtrace(current);
 	} else {
 		/*
@@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 }
 
 static noinline void
-kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
@@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-		kcsan_report_known_origin(ptr, size, type, value_change,
-					  watchpoint - watchpoints,
+		kcsan_report_known_origin(ptr, size, type, ip,
+					  value_change, watchpoint - watchpoints,
 					  old, new, access_mask);
 	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
@@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-			kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
+		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
+			kcsan_report_unknown_origin(ptr, size, type, ip,
+						    old, new, access_mask);
+		}
 	}
 
 	/*
@@ -596,8 +604,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	user_access_restore(ua_flags);
 }
 
-static __always_inline void check_access(const volatile void *ptr, size_t size,
-					 int type)
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	atomic_long_t *watchpoint;
@@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 	 */
 
 	if (unlikely(watchpoint != NULL))
-		kcsan_found_watchpoint(ptr, size, type, watchpoint,
-				       encoded_watchpoint);
+		kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
 	else {
 		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
 
-		if (unlikely(should_watch(ptr, size, type, ctx)))
-			kcsan_setup_watchpoint(ptr, size, type);
+		if (unlikely(should_watch(ctx, ptr, size, type)))
+			kcsan_setup_watchpoint(ptr, size, type, ip);
 		else if (unlikely(ctx->scoped_accesses.prev))
 			kcsan_check_scoped_accesses();
 	}
@@ -757,14 +764,15 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
 {
 	struct kcsan_ctx *ctx = get_ctx();
 
-	__kcsan_check_access(ptr, size, type);
+	check_access(ptr, size, type, _RET_IP_);
 
 	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
 
 	INIT_LIST_HEAD(&sa->list);
 	sa->ptr = ptr;
 	sa->size = size;
 	sa->type = type;
+	sa->ip = _RET_IP_;
 
 	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
 		INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
 
 	ctx->disable_count--;
 
-	__kcsan_check_access(sa->ptr, sa->size, sa->type);
+	check_access(sa->ptr, sa->size, sa->type, sa->ip);
 }
 EXPORT_SYMBOL(kcsan_end_scoped_access);
 
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
-	check_access(ptr, size, type);
+	check_access(ptr, size, type, _RET_IP_);
 }
 EXPORT_SYMBOL(__kcsan_check_access);
 
@@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_read##size(void *ptr); \
 	void __tsan_read##size(void *ptr) \
 	{ \
-		check_access(ptr, size, 0); \
+		check_access(ptr, size, 0, _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_read##size); \
 	void __tsan_unaligned_read##size(void *ptr) \
@@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_write##size(void *ptr); \
 	void __tsan_write##size(void *ptr) \
 	{ \
-		check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+		check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_write##size); \
 	void __tsan_unaligned_write##size(void *ptr) \
@@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_read_write##size(void *ptr) \
 	{ \
 		check_access(ptr, size, \
-			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_read_write##size); \
 	void __tsan_unaligned_read_write##size(void *ptr) \
@@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16);
 void __tsan_read_range(void *ptr, size_t size);
 void __tsan_read_range(void *ptr, size_t size)
 {
-	check_access(ptr, size, 0);
+	check_access(ptr, size, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_read_range);
 
 void __tsan_write_range(void *ptr, size_t size);
 void __tsan_write_range(void *ptr, size_t size)
 {
-	check_access(ptr, size, KCSAN_ACCESS_WRITE);
+	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_write_range);
 
@@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range);
 		IS_ALIGNED((unsigned long)ptr, size); \
 		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
 			return; \
-		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_volatile_read##size); \
 	void __tsan_unaligned_volatile_read##size(void *ptr) \
@@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range);
 			return; \
 		check_access(ptr, size, \
 			     KCSAN_ACCESS_WRITE | \
-			     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+			     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_volatile_write##size); \
 	void __tsan_unaligned_volatile_write##size(void *ptr) \
@@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init);
 	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
 	{ \
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
-			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_load_n(ptr, memorder); \
 	} \
@@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init);
 	{ \
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
-				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		__atomic_store_n(ptr, v, memorder); \
 	} \
@@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-				     KCSAN_ACCESS_ATOMIC); \
+				     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_##op##suffix(ptr, v, memorder); \
 	} \
@@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-				     KCSAN_ACCESS_ATOMIC); \
+				     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
 	} \
@@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-				     KCSAN_ACCESS_ATOMIC); \
+				     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
 		return exp; \
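To make the macro change concrete, this is roughly what DEFINE_TSAN_READ_WRITE(4) now generates for the plain-read hook (expansion written out by hand from the macro above; only the _RET_IP_ argument is new in this series):

void __tsan_read4(void *ptr);
void __tsan_read4(void *ptr)
{
	/* The compiler emits a call to this hook before every instrumented
	 * 4-byte plain read; _RET_IP_ is the address of the read itself,
	 * which now flows through check_access() into the report. */
	check_access(ptr, 4, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read4);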
8 changes: 4 additions & 4 deletions kernel/kcsan/kcsan.h
@@ -121,22 +121,22 @@ enum kcsan_value_change {
  * to be consumed by the reporting thread. No report is printed yet.
  */
 void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
-			   int watchpoint_idx);
+			   unsigned long ip, int watchpoint_idx);
 
 /*
  * The calling thread observed that the watchpoint it set up was hit and
  * consumed: print the full report based on information set by the racing
  * thread.
  */
 void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
-			       enum kcsan_value_change value_change, int watchpoint_idx,
-			       u64 old, u64 new, u64 mask);
+			       unsigned long ip, enum kcsan_value_change value_change,
+			       int watchpoint_idx, u64 old, u64 new, u64 mask);
 
 /*
  * No other thread was observed to race with the access, but the data value
  * before and after the stall differs. Reports a race of "unknown origin".
  */
 void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
-				 u64 old, u64 new, u64 mask);
+				 unsigned long ip, u64 old, u64 new, u64 mask);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
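The reporting side that consumes these new ip arguments lives in kernel/kcsan/report.c, which is not expanded in this view (see "kcsan: Start stack trace with explicit location if provided" in the tag above). As a purely hypothetical illustration of the idea, not the actual report.c code: a reporter could trim the collected stack trace so printing starts at the saved ip rather than at KCSAN-internal frames:

/* Hypothetical sketch only; the real logic is in kernel/kcsan/report.c. */
static int first_printed_frame(const unsigned long *entries, int num_entries,
			       unsigned long ip)
{
	int i;

	if (!ip)
		return 0;		/* no explicit location: keep the full trace */

	for (i = 0; i < num_entries; i++) {
		if (entries[i] == ip)	/* start the report at the access itself */
			return i;
	}
	return 0;			/* ip not found: fall back to the full trace */
}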