Skip to content

Commit

Permalink
kern: random: collect ~16x less from fast-entropy sources
Browse files Browse the repository at this point in the history
Previously, we were collecting at a base rate of:

64 bits x 32 pools x 10 Hz = 2.5 kB/s

This change drops it to closer to 64-ish bits per pool per second, to
work a little better with entropy providers in virtualized environments
without compromising the security goals of Fortuna.

(cherry picked from commit 5e79bba)
  • Loading branch information
kevans91 committed Oct 6, 2021
1 parent 40f9f22 commit 06248c8
Showing 1 changed file with 23 additions and 3 deletions.
26 changes: 23 additions & 3 deletions sys/dev/random/random_harvestq.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,13 @@ __FBSDID("$FreeBSD$");
#define _RANDOM_HARVEST_UMA_OFF (1u << RANDOM_UMA)
#endif

/*
* Note that random_sources_feed() will also use this to try to split up
* entropy into a subset of pools per iteration, with the goal of feeding
* HARVESTSIZE into every pool at least once per second.
*/
#define RANDOM_KTHREAD_HZ 10

static void random_kthread(void);
static void random_sources_feed(void);

Expand Down Expand Up @@ -199,7 +206,8 @@ random_kthread(void)
}
}
/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-", SBT_1S/10, 0, C_PREL(1));
tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-",
SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1));
}
random_kthread_control = -1;
wakeup(&harvest_context.hc_kthread_proc);
Expand Down Expand Up @@ -229,19 +237,31 @@ random_sources_feed(void)
uint32_t entropy[HARVESTSIZE];
struct epoch_tracker et;
struct random_sources *rrs;
u_int i, n;
u_int i, n, npools;
bool rse_warm;

rse_warm = epoch_inited;

/*
* Evenly-ish distribute pool population across the second based on how
* frequently random_kthread iterates.
*
* For Fortuna, the math currently works out as such:
*
* 64 bits * 4 pools = 256 bits per iteration
* 256 bits * 10 Hz = 2560 bits per second, 320 B/s
*
*/
npools = howmany(p_random_alg_context->ra_poolcount, RANDOM_KTHREAD_HZ);

/*
* Step over all of live entropy sources, and feed their output
* to the system-wide RNG.
*/
if (rse_warm)
epoch_enter_preempt(rs_epoch, &et);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
for (i = 0; i < p_random_alg_context->ra_poolcount; i++) {
for (i = 0; i < npools; i++) {
n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
/*
Expand Down

0 comments on commit 06248c8

Please sign in to comment.