Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull random updates from Ted Ts'o:
 "Add wait_for_random_bytes() and get_random_*_wait() functions so that
  callers can more safely get random bytes if they can block until the
  CRNG is initialized.

  Also print a warning if get_random_*() is called before the CRNG is
  initialized. By default, only one single-line warning will be printed
  per boot. If CONFIG_WARN_ALL_UNSEEDED_RANDOM is defined, then a
  warning will be printed for each function which tries to get random
  bytes before the CRNG is initialized. This can get spammy for certain
  architecture types, so it is not enabled by default"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: reorder READ_ONCE() in get_random_uXX
  random: suppress spammy warnings about unseeded randomness
  random: warn when kernel uses unseeded randomness
  net/route: use get_random_int for random counter
  net/neighbor: use get_random_u32 for 32-bit hash random
  rhashtable: use get_random_u32 for hash_rnd
  ceph: ensure RNG is seeded before using
  iscsi: ensure RNG is seeded before use
  cifs: use get_random_u32 for 32-bit lock random
  random: add get_random_{bytes,u32,u64,int,long,once}_wait family
  random: add wait_for_random_bytes() API
torvalds committed Jul 15, 2017
2 parents 78dcf73 + 72e5c74 commit 52f6c58
Showing 12 changed files with 168 additions and 38 deletions.
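
As a rough illustration of the interfaces described in the pull message above, a caller that is allowed to sleep could use them as follows (a sketch only: the init function and variable names are hypothetical, while wait_for_random_bytes() and get_random_bytes_wait() are the helpers added by this merge):

#include <linux/init.h>
#include <linux/random.h>

static u32 example_secret;	/* hypothetical per-module secret */

static int __init example_init(void)
{
	int ret;

	/* Block (interruptibly) until the CRNG has been initialized. */
	ret = wait_for_random_bytes();
	if (unlikely(ret))
		return ret;	/* -ERESTARTSYS if a signal arrived first */

	/* Convenience wrapper: waits as above, then fills the buffer. */
	return get_random_bytes_wait(&example_secret, sizeof(example_secret));
}
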
96 changes: 76 additions & 20 deletions drivers/char/random.c
@@ -288,7 +288,6 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10

#define DEBUG_RANDOM_BOOT 0

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

/**********************************************************************
*
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
_extract_entropy(&input_pool, &crng->state[4],
sizeof(__u32) * 12, 0);
else
get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
}

static inline void crng_wait_ready(void)
{
wait_event_interruptible(crng_init_wait, crng_ready());
}

static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE])
{
@@ -1477,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
return ret;
}

#define warn_unseeded_randomness(previous) \
_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
const bool print_once = false;
#else
static bool print_once __read_mostly;
#endif

if (print_once ||
crng_ready() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
pr_notice("random: %s called from %pF with crng_init=%d\n",
func_name, caller, crng_init);
}

/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
* TCP sequence numbers, etc. It does not rely on the hardware random
* number generator. For random bytes direct from the hardware RNG
* (when available), use get_random_bytes_arch().
* (when available), use get_random_bytes_arch(). In order to ensure
* that the randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
*/
void get_random_bytes(void *buf, int nbytes)
static void _get_random_bytes(void *buf, int nbytes)
{
__u8 tmp[CHACHA20_BLOCK_SIZE];

#if DEBUG_RANDOM_BOOT > 0
if (!crng_ready())
printk(KERN_NOTICE "random: %pF get_random_bytes called "
"with crng_init = %d\n", (void *) _RET_IP_, crng_init);
#endif
trace_get_random_bytes(nbytes, _RET_IP_);

while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1509,8 +1526,34 @@ void get_random_bytes(void *buf, int nbytes)
crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
static void *previous;

warn_unseeded_randomness(&previous);
_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

/*
* Wait for the urandom pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
* device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
* family of functions. Using any of these functions without first calling
* this function forfeits the guarantee of security.
*
* Returns: 0 if the urandom pool has been seeded.
* -ERESTARTSYS if the function was interrupted by a signal.
*/
int wait_for_random_bytes(void)
{
if (likely(crng_ready()))
return 0;
return wait_event_interruptible(crng_init_wait, crng_ready());
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
@@ -1865,6 +1908,8 @@ const struct file_operations urandom_fops = {
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
unsigned int, flags)
{
int ret;

if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
return -EINVAL;

@@ -1877,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
if (!crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
crng_wait_ready();
if (signal_pending(current))
return -ERESTARTSYS;
ret = wait_for_random_bytes();
if (unlikely(ret))
return ret;
}
return urandom_read(NULL, buf, count, NULL);
}
@@ -2040,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
/*
* Get a random word for internal kernel use only. The quality of the random
* number is either as good as RDRAND or as good as /dev/urandom, with the
* goal of being quite fast and not depleting entropy.
* goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
bool use_lock = READ_ONCE(crng_init) < 2;
bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
static void *previous;

#if BITS_PER_LONG == 64
if (arch_get_random_long((unsigned long *)&ret))
@@ -2059,6 +2108,9 @@ u64 get_random_u64(void)
return ret;
#endif

warn_unseeded_randomness(&previous);

use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u64);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2078,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
u32 ret;
bool use_lock = READ_ONCE(crng_init) < 2;
bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
static void *previous;

if (arch_get_random_int(&ret))
return ret;

warn_unseeded_randomness(&previous);

use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u32);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
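
The comments in the hunks above ask that wait_for_random_bytes() return 0 at least once at some point before output from get_random_bytes()/get_random_u32()/get_random_u64() is relied on. A minimal sketch of that calling convention (hypothetical subsystem, names invented; not part of the diff):

#include <linux/random.h>

/* Done once from a setup path that is allowed to sleep. */
static int example_setup(void)
{
	return wait_for_random_bytes();	/* 0, or -ERESTARTSYS on a signal */
}

/* Safe only because example_setup() returned 0 earlier; get_random_u64()
 * itself stays fast (per-CPU batched) and never sleeps. */
static u64 example_fresh_cookie(void)
{
	return get_random_u64();
}
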
14 changes: 11 additions & 3 deletions drivers/target/iscsi/iscsi_target_auth.c
@@ -47,18 +47,21 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
}
}

static void chap_gen_challenge(
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
int ret;
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
struct iscsi_chap *chap = conn->auth_protocol;

memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);

get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
if (unlikely(ret))
return ret;
chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
Expand All @@ -69,6 +72,7 @@ static void chap_gen_challenge(

pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
return 0;
}

static int chap_check_algorithm(const char *a_str)
@@ -143,6 +147,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
kfree(conn->auth_protocol);
return NULL;
}

@@ -156,7 +161,10 @@
/*
* Generate Challenge.
*/
chap_gen_challenge(conn, 1, aic_str, aic_len);
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
kfree(conn->auth_protocol);
return NULL;
}

return chap;
}
22 changes: 14 additions & 8 deletions drivers/target/iscsi/iscsi_target_login.c
@@ -245,22 +245,26 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
return 0;
}

static void iscsi_login_set_conn_values(
static int iscsi_login_set_conn_values(
struct iscsi_session *sess,
struct iscsi_conn *conn,
__be16 cid)
{
int ret;
conn->sess = sess;
conn->cid = be16_to_cpu(cid);
/*
* Generate a random Status sequence number (statsn) for the new
* iSCSI connection.
*/
get_random_bytes(&conn->stat_sn, sizeof(u32));
ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32));
if (unlikely(ret))
return ret;

mutex_lock(&auth_id_lock);
conn->auth_id = iscsit_global->auth_id++;
mutex_unlock(&auth_id_lock);
return 0;
}

__printf(2, 3) int iscsi_change_param_sprintf(
Expand Down Expand Up @@ -306,7 +310,11 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}

iscsi_login_set_conn_values(sess, conn, pdu->cid);
ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
if (unlikely(ret)) {
kfree(sess);
return ret;
}
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -497,8 +505,7 @@ static int iscsi_login_non_zero_tsih_s1(
{
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;

iscsi_login_set_conn_values(NULL, conn, pdu->cid);
return 0;
return iscsi_login_set_conn_values(NULL, conn, pdu->cid);
}

/*
@@ -554,9 +561,8 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_set(&sess->session_continuation, 1);
spin_unlock_bh(&sess->conn_lock);

iscsi_login_set_conn_values(sess, conn, pdu->cid);

if (iscsi_copy_param_list(&conn->param_list,
if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
iscsi_copy_param_list(&conn->param_list,
conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
2 changes: 1 addition & 1 deletion fs/cifs/cifsfs.c
@@ -1354,7 +1354,7 @@ init_cifs(void)
spin_lock_init(&cifs_tcp_ses_lock);
spin_lock_init(&GlobalMid_Lock);

get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
cifs_lock_secret = get_random_u32();

if (cifs_max_pending < 2) {
cifs_max_pending = 2;
2 changes: 2 additions & 0 deletions include/linux/net.h
@@ -274,6 +274,8 @@ do { \

#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
#define net_get_random_once_wait(buf, nbytes) \
get_random_once_wait((buf), (nbytes))

int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
2 changes: 2 additions & 0 deletions include/linux/once.h
@@ -53,5 +53,7 @@ void __do_once_done(bool *done, struct static_key *once_key,

#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
#define get_random_once_wait(buf, nbytes) \
DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \

#endif /* _LINUX_ONCE_H */
26 changes: 26 additions & 0 deletions include/linux/random.h
@@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;

extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -78,6 +79,31 @@ static inline unsigned long get_random_canary(void)
return val & CANARY_MASK;
}

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
* Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, int nbytes)
{
int ret = wait_for_random_bytes();
if (unlikely(ret))
return ret;
get_random_bytes(buf, nbytes);
return 0;
}

#define declare_get_random_var_wait(var) \
static inline int get_random_ ## var ## _wait(var *out) { \
int ret = wait_for_random_bytes(); \
if (unlikely(ret)) \
return ret; \
*out = get_random_ ## var(); \
return 0; \
}
declare_get_random_var_wait(u32)
declare_get_random_var_wait(u64)
declare_get_random_var_wait(int)
declare_get_random_var_wait(long)
#undef declare_get_random_var

unsigned long randomize_page(unsigned long start, unsigned long range);

u32 prandom_u32(void);
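
For reference, declare_get_random_var_wait() above generates one small inline helper per type; expanded by hand (not part of the diff), the u32 case is roughly:

static inline int get_random_u32_wait(u32 *out)
{
	int ret = wait_for_random_bytes();

	if (unlikely(ret))
		return ret;
	*out = get_random_u32();
	return 0;
}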
