Merge tag '20201024-v4-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/prandom

Pull random32 updates from Willy Tarreau:
 "Make prandom_u32() less predictable.

  This is the cleanup of the latest series of prandom_u32 experiments,
  which consisted of using SipHash instead of Tausworthe to produce the
  random numbers used by the network stack.

  The changes to the files were kept minimal, and the controversial
  commit that used to take noise from the fast_pool (f227e3e) was
  reverted. Instead, a dedicated "net_rand_noise" per_cpu variable is
  fed from various sources of activity (networking, scheduling) to
  perturb the SipHash state using fast, non-trivially predictable data,
  rather than keeping it fully deterministic. The goal is essentially to
  make any occasional memory leakage or brute-force attempt useless.

  The resulting code was verified to be very slightly faster on x86_64
  than it was with the controversial commit above, though this remains
  barely above measurement noise. It was also tested on i386 and arm,
  and build-tested only on arm64"

Link: https://lore.kernel.org/netdev/[email protected]/
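The mechanism described above is compact enough to model outside the kernel. The following is a minimal, single-threaded userspace sketch of the idea only — the names (noise, sipround, add_noise) and the main() driver are invented for illustration, not taken from this series: a rolling noise word is xored into one input of a SipHash round, and one output word of the round becomes the next noise value.

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rol64(uint64_t w, unsigned int s)
{
	return (w << s) | (w >> (64 - s));
}

/* Userspace stand-in for the per-CPU net_rand_noise variable. */
static uint64_t noise;

/* One SipHash round over four 64-bit words (same schedule as the patch). */
static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2, uint64_t *v3)
{
	*v0 += *v1; *v1 = rol64(*v1, 13); *v2 += *v3; *v3 = rol64(*v3, 16);
	*v1 ^= *v0; *v0 = rol64(*v0, 32); *v3 ^= *v2;
	*v0 += *v3; *v3 = rol64(*v3, 21); *v2 += *v1; *v1 = rol64(*v1, 17);
	*v3 ^= *v0; *v1 ^= *v2; *v2 = rol64(*v2, 32);
}

/* Fold four fast-changing values into the rolling noise word. */
static void add_noise(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
	a ^= noise;
	sipround(&a, &b, &c, &d);
	noise = d;
}

int main(void)
{
	/* Pretend these are a pointer, a tick counter, a queue index... */
	add_noise(0xdeadbeefULL, 42, 4096, 0);
	add_noise(0xfeedfaceULL, 43, 4097, 0);
	printf("noise is now %#llx\n", (unsigned long long)noise);
	return 0;
}

The point is not hash quality per se but that every call stirs hard-to-guess inputs into state that later perturbs the prandom_u32() output.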

* tag '20201024-v4-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/prandom:
  random32: add a selftest for the prandom32 code
  random32: add noise from network and scheduling activity
  random32: make prandom_u32() output unpredictable
torvalds committed Oct 25, 2020
2 parents d769139 + c6e169b commit 91f28da
Showing 5 changed files with 404 additions and 190 deletions.
1 change: 0 additions & 1 deletion drivers/char/random.c
@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
-	this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
 
 	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&
55 changes: 53 additions & 2 deletions include/linux/prandom.h
@@ -16,12 +16,62 @@ void prandom_bytes(void *buf, size_t nbytes);
 void prandom_seed(u32 seed);
 void prandom_reseed_late(void);
 
+DECLARE_PER_CPU(unsigned long, net_rand_noise);
+
+#define PRANDOM_ADD_NOISE(a, b, c, d) \
+	prandom_u32_add_noise((unsigned long)(a), (unsigned long)(b), \
+			      (unsigned long)(c), (unsigned long)(d))
+
+#if BITS_PER_LONG == 64
+/*
+ * The core SipHash round function. Each line can be executed in
+ * parallel given enough CPU resources.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+	v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
+	v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
+	v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
+	v3 ^= v0, v1 ^= v2,            v2 = rol64(v2, 32)            \
+)
+
+#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
+
+#elif BITS_PER_LONG == 32
+/*
+ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
+ * This is weaker, but 32-bit machines are not used for high-traffic
+ * applications, so there is less output for an attacker to analyze.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+	v0 += v1, v1 = rol32(v1,  5),  v2 += v3, v3 = rol32(v3,  8), \
+	v1 ^= v0, v0 = rol32(v0, 16),  v3 ^= v2,                     \
+	v0 += v3, v3 = rol32(v3,  7),  v2 += v1, v1 = rol32(v1, 13), \
+	v3 ^= v0, v1 ^= v2,            v2 = rol32(v2, 16)            \
+)
+#define PRND_K0 0x6c796765
+#define PRND_K1 0x74656462
+
+#else
+#error Unsupported BITS_PER_LONG
+#endif
+
+static inline void prandom_u32_add_noise(unsigned long a, unsigned long b,
+					 unsigned long c, unsigned long d)
+{
+	/*
+	 * This is not used cryptographically; it's just
+	 * a convenient 4-word hash function. (3 xor, 2 add, 2 rol)
+	 */
+	a ^= raw_cpu_read(net_rand_noise);
+	PRND_SIPROUND(a, b, c, d);
+	raw_cpu_write(net_rand_noise, d);
+}
+
 struct rnd_state {
 	__u32 s1, s2, s3, s4;
 };
 
 DECLARE_PER_CPU(struct rnd_state, net_rand_state);
 
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
@@ -67,6 +117,7 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 	state->s2 = __seed(i,   8U);
 	state->s3 = __seed(i,  16U);
 	state->s4 = __seed(i, 128U);
+	PRANDOM_ADD_NOISE(state, i, 0, 0);
 }
 
 /* Pseudo random number generator from numerical recipes. */
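With the helper above, a call site just hands PRANDOM_ADD_NOISE() whatever cheap, fast-changing values it has on hand; the macro casts each argument to unsigned long, so pointers and integers mix freely. A hypothetical call site could look like this (illustrative only — this function is not part of the diff; the series adds its real calls in the networking and scheduling paths, per the commit message):

	/* Hypothetical example: stir packet-path state into the noise. */
	static void example_note_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
	{
		PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
	}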
9 changes: 2 additions & 7 deletions kernel/time/timer.c
@@ -1706,6 +1706,8 @@ void update_process_times(int user_tick)
 {
 	struct task_struct *p = current;
 
+	PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
+
 	/* Note: this timer irq context must be accounted for as well. */
 	account_process_tick(p, user_tick);
 	run_local_timers();
@@ -1717,13 +1719,6 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
 		run_posix_cpu_timers();
-
-	/* The current CPU might make use of net randoms without receiving IRQs
-	 * to renew them often enough. Let's update the net_rand_state from a
-	 * non-constant value that's not affine to the number of calls to make
-	 * sure it's updated when there's some activity (we don't care in idle).
-	 */
-	this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**
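The line added at the top of update_process_times() is deliberately cheap: by the macro definition in include/linux/prandom.h above, it expands to roughly the following, i.e. one inlined SipHash round per timer tick:

	/* Approximate expansion of PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0): */
	prandom_u32_add_noise((unsigned long)(jiffies), (unsigned long)(user_tick),
			      (unsigned long)(p), (unsigned long)(0));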
