Skip to content

Commit

Permalink
Merge branch 'address-masking'
Browse files Browse the repository at this point in the history
Merge user access fast validation using address masking.

This allows architectures to optionally use a data dependent address
masking model instead of a conditional branch for validating user
accesses.  That avoids the Spectre-v1 speculation barriers.

Right now only x86-64 takes advantage of this, and not all architectures
will be able to do it.  It requires a guard region between the user and
kernel address spaces (so that you can't overflow from one to the
other), and an easy way to generate a guaranteed-to-fault address for
invalid user pointers.

Also note that this currently assumes that there is no difference
between user read and write accesses.  If extended to architectures like
powerpc, we'll also need to separate out the user read-vs-write cases.

* address-masking:
  x86: make the masked_user_access_begin() macro use its argument only once
  x86: do the user address masking outside the user access area
  x86: support user address masking instead of non-speculative conditional
  • Loading branch information
torvalds committed Sep 22, 2024
2 parents af9c191 + 533ab22 commit de5cb0d
Show file tree
Hide file tree
Showing 5 changed files with 39 additions and 1 deletion.
11 changes: 11 additions & 0 deletions arch/x86/include/asm/uaccess_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
*/
#define valid_user_address(x) ((__force long)(x) >= 0)

/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 *
 * How the mask works: for a valid user pointer the sign bit (bit 63)
 * is clear, so the arithmetic right shift by 63 yields 0 and the OR
 * leaves the pointer unchanged.  For a kernel pointer the sign bit is
 * set, the shift yields all-ones, and the OR turns the pointer into
 * an all-ones address that is guaranteed to fault — so a bad pointer
 * faults instead of needing a conditional branch (and the Spectre-v1
 * speculation barrier that would come with it).
 *
 * NOTE(review): relies on arithmetic (sign-extending) right shift of
 * a negative long, which the kernel assumes of its compilers.
 */
#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
/*
 * Mask the pointer, open a user access section, and hand the masked
 * pointer back to the caller.  The __auto_type temporary ensures the
 * argument expression is evaluated exactly once.
 */
#define masked_user_access_begin(x) ({ \
	__auto_type __masked_ptr = (x); \
	__masked_ptr = mask_user_address(__masked_ptr); \
	__uaccess_begin(); __masked_ptr; })

/*
* User pointers can have tag bits on x86-64. This scheme tolerates
arbitrary values in those bits rather than masking them off.
Expand Down
4 changes: 3 additions & 1 deletion fs/select.c
Original file line number Diff line number Diff line change
Expand Up @@ -777,7 +777,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
if (!user_read_access_begin(from, sizeof(*from)))
if (can_do_masked_user_access())
from = masked_user_access_begin(from);
else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(to->p, &from->p, Efault);
unsafe_get_user(to->size, &from->size, Efault);
Expand Down
7 changes: 7 additions & 0 deletions include/linux/uaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,13 @@
})
#endif

/*
 * Architectures that can validate user pointers by address masking
 * (instead of a conditional branch plus speculation barrier) define
 * masked_user_access_begin() in their asm/uaccess.h.  Everyone else
 * gets a compile-time-constant 0 here, so the masked code paths are
 * written as "if (can_do_masked_user_access())" and dead-code
 * eliminated; the NULL fallback definition only exists to keep such
 * never-executed call sites compiling.
 */
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
#endif

/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
Expand Down
9 changes: 9 additions & 0 deletions lib/strncpy_from_user.c
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;

if (can_do_masked_user_access()) {
long retval;

src = masked_user_access_begin(src);
retval = do_strncpy_from_user(dst, src, count, count);
user_read_access_end();
return retval;
}

max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
Expand Down
9 changes: 9 additions & 0 deletions lib/strnlen_user.c
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;

if (can_do_masked_user_access()) {
long retval;

str = masked_user_access_begin(str);
retval = do_strnlen_user(str, count, count);
user_read_access_end();
return retval;
}

max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
Expand Down

0 comments on commit de5cb0d

Please sign in to comment.