Module Name:	src
Committed By:	martin
Date:		Fri Aug 11 14:35:26 UTC 2023
Modified Files:
	src/etc [netbsd-10]: security
	src/etc/defaults [netbsd-10]: rc.conf
	src/lib/libc/gen [netbsd-10]: getentropy.3
	src/lib/libc/sys [netbsd-10]: getrandom.2
	src/share/man/man4 [netbsd-10]: rnd.4
	src/share/man/man5 [netbsd-10]: rc.conf.5
	src/share/man/man7 [netbsd-10]: entropy.7
	src/share/man/man9 [netbsd-10]: rnd.9
	src/sys/crypto/cprng_fast [netbsd-10]: cprng_fast.c
	src/sys/dev/pci [netbsd-10]: hifn7751.c ubsec.c viornd.c
	src/sys/kern [netbsd-10]: kern_clock.c kern_entropy.c subr_cprng.c
	    subr_prf.c
	src/sys/sys [netbsd-10]: rndio.h rndsource.h
	src/tests/lib/libc/sys [netbsd-10]: t_getrandom.c

Log Message:
Pull up following revision(s) (requested by riastradh in ticket #319):

	sys/dev/pci/ubsec.c: revision 1.64
	sys/dev/pci/hifn7751.c: revision 1.82
	lib/libc/gen/getentropy.3: revision 1.5
	lib/libc/gen/getentropy.3: revision 1.6
	share/man/man4/rnd.4: revision 1.41
	lib/libc/sys/getrandom.2: revision 1.2
	lib/libc/sys/getrandom.2: revision 1.3
	share/man/man5/rc.conf.5: revision 1.193
	share/man/man7/entropy.7: revision 1.5
	share/man/man7/entropy.7: revision 1.6
	share/man/man7/entropy.7: revision 1.7
	share/man/man7/entropy.7: revision 1.8
	etc/security: revision 1.130
	share/man/man7/entropy.7: revision 1.9
	etc/security: revision 1.131
	sys/crypto/cprng_fast/cprng_fast.c: revision 1.19
	sys/sys/rndio.h: revision 1.3
	tests/lib/libc/sys/t_getrandom.c: revision 1.5
	etc/defaults/rc.conf: revision 1.164
	etc/defaults/rc.conf: revision 1.165
	sys/sys/rndsource.h: revision 1.10
	sys/kern/kern_entropy.c: revision 1.62
	sys/kern/kern_entropy.c: revision 1.63
	sys/kern/kern_entropy.c: revision 1.64
	sys/kern/subr_cprng.c: revision 1.44
	sys/kern/kern_entropy.c: revision 1.65
	sys/kern/kern_clock.c: revision 1.149
	sys/dev/pci/viornd.c: revision 1.22
	share/man/man9/rnd.9: revision 1.32
	sys/kern/subr_prf.c: revision 1.202
	sys/sys/rndsource.h: revision 1.8
	sys/sys/rndsource.h: revision 1.9
	share/man/man7/entropy.7: revision 1.10

1. Reinstate netbsd<=9 entropy estimator to unblock /dev/random, in
   parallel with assessment of only confident entropy sources (seed,
   HWRNG) for security warnings like sshd keys in motd and daily
   insecurity report.

2. Make multiuser boot wait for first /dev/random output soon after
   loading a seed and configuring rndctl, so that getentropy(3) meets
   its contract starting early at boot without introducing blocking
   paths that could cause hangs in init(8) or single-user mode.
   Operators can choose to disable this wait in rc.conf.

3. Fix some bugs left over from reducing the global entropy lock from
   a spin lock at IPL_VM to an adaptive lock at IPL_SOFTSERIAL.

4. Update man pages.
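[Editorial illustration -- not part of the pulled-up change.  The knobs
named here are exactly the ones touched by items 1 and 2, as shown in
the etc/security and etc/defaults/rc.conf diffs below.]

Operators can inspect the deficit that drives the new /etc/security
check, and choose the boot-time behaviour, roughly like so:

	# Remaining entropy deficit in bits; 0 means the daily
	# insecurity report stays quiet.
	$ sysctl -n kern.entropy.needed

	# In /etc/rc.conf: the new default waits at boot for the
	# first /dev/random output...
	entropy="wait"
	# ...or abort the multi-user boot if there is not enough
	# entropy...
	entropy="check"
	# ...or boot without waiting or checking, as before.
	entropy=""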
To generate a diff of this commit:
cvs rdiff -u -r1.129 -r1.129.2.1 src/etc/security
cvs rdiff -u -r1.162 -r1.162.2.1 src/etc/defaults/rc.conf
cvs rdiff -u -r1.4 -r1.4.2.1 src/lib/libc/gen/getentropy.3
cvs rdiff -u -r1.1 -r1.1.6.1 src/lib/libc/sys/getrandom.2
cvs rdiff -u -r1.40 -r1.40.2.1 src/share/man/man4/rnd.4
cvs rdiff -u -r1.192 -r1.192.2.1 src/share/man/man5/rc.conf.5
cvs rdiff -u -r1.4 -r1.4.2.1 src/share/man/man7/entropy.7
cvs rdiff -u -r1.31 -r1.31.2.1 src/share/man/man9/rnd.9
cvs rdiff -u -r1.18 -r1.18.4.1 src/sys/crypto/cprng_fast/cprng_fast.c
cvs rdiff -u -r1.80 -r1.80.4.1 src/sys/dev/pci/hifn7751.c
cvs rdiff -u -r1.62 -r1.62.4.1 src/sys/dev/pci/ubsec.c
cvs rdiff -u -r1.18.4.1 -r1.18.4.2 src/sys/dev/pci/viornd.c
cvs rdiff -u -r1.148 -r1.148.4.1 src/sys/kern/kern_clock.c
cvs rdiff -u -r1.57.4.3 -r1.57.4.4 src/sys/kern/kern_entropy.c
cvs rdiff -u -r1.43 -r1.43.4.1 src/sys/kern/subr_cprng.c
cvs rdiff -u -r1.196.2.1 -r1.196.2.2 src/sys/kern/subr_prf.c
cvs rdiff -u -r1.2 -r1.2.50.1 src/sys/sys/rndio.h
cvs rdiff -u -r1.7 -r1.7.20.1 src/sys/sys/rndsource.h
cvs rdiff -u -r1.4 -r1.4.2.1 src/tests/lib/libc/sys/t_getrandom.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
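[Editorial illustration -- assuming CVSROOT points at the NetBSD
repository, any one of the commands above can be turned into an
applicable patch; kern_entropy.c below is just one example.]

	$ cvs rdiff -u -r1.57.4.3 -r1.57.4.4 \
	    src/sys/kern/kern_entropy.c > entropy.patch
	$ patch -p0 < entropy.patch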
Modified files:

Index: src/etc/security
diff -u src/etc/security:1.129 src/etc/security:1.129.2.1
--- src/etc/security:1.129	Thu Nov  4 12:40:00 2021
+++ src/etc/security	Fri Aug 11 14:35:25 2023
@@ -1,6 +1,6 @@
 #!/bin/sh -
 #
-#	$NetBSD: security,v 1.129 2021/11/04 12:40:00 nia Exp $
+#	$NetBSD: security,v 1.129.2.1 2023/08/11 14:35:25 martin Exp $
 #	from: @(#)security	8.1 (Berkeley) 6/9/93
 #
@@ -195,8 +195,7 @@ done | mtree -CM -k all > $SPECIALSPEC |
 	# Check for enough entropy.
 	#
 	if checkyesno check_entropy; then
-		if ! dd if=/dev/random iflag=nonblock of=/dev/null bs=1 count=1 \
-		    msgfmt=quiet 2>/dev/null; then
+		if [ "$(sysctl -n kern.entropy.needed)" != 0 ]; then
 			printf '\n'
 			printf 'Entropy:\n'
 			printf 'System may need more entropy for cryptography.\n'

Index: src/etc/defaults/rc.conf
diff -u src/etc/defaults/rc.conf:1.162 src/etc/defaults/rc.conf:1.162.2.1
--- src/etc/defaults/rc.conf:1.162	Sun Feb 20 14:42:07 2022
+++ src/etc/defaults/rc.conf	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-# $NetBSD: rc.conf,v 1.162 2022/02/20 14:42:07 alnsn Exp $
+# $NetBSD: rc.conf,v 1.162.2.1 2023/08/11 14:35:25 martin Exp $
 #
 # /etc/defaults/rc.conf --
 #	default configuration of /etc/rc.conf
@@ -386,9 +386,10 @@ veriexec_flags="-k"
 random_seed=YES

 # Set to `check' to abort multi-user boot if not enough entropy, or
-# `wait' to wait until enough entropy.
+# `wait' to wait until enough entropy, or `' (empty) to boot without
+# waiting or checking.
 #
-entropy=""
+entropy="wait"

 # Creating / updating of man page index on boot
 makemandb=YES

Index: src/lib/libc/gen/getentropy.3
diff -u src/lib/libc/gen/getentropy.3:1.4 src/lib/libc/gen/getentropy.3:1.4.2.1
--- src/lib/libc/gen/getentropy.3:1.4	Tue May 31 13:42:59 2022
+++ src/lib/libc/gen/getentropy.3	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: getentropy.3,v 1.4 2022/05/31 13:42:59 riastradh Exp $ $
+.\"	$NetBSD: getentropy.3,v 1.4.2.1 2023/08/11 14:35:24 martin Exp $ $
 .\"
 .\" Copyright (c) 2020 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,12 +27,12 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd May 1, 2020
+.Dd February 28, 2023
 .Dt GETENTROPY 3
 .Os
 .Sh NAME
 .Nm getentropy
-.Nd fill a buffer with high quality random data
+.Nd generate uniform random seeds from system entropy for cryptography
 .Sh LIBRARY
 .Lb libc
 .Sh SYNOPSIS
@@ -40,29 +40,34 @@
 .Ft int
 .Fn getentropy "void *buf" "size_t buflen"
 .In limits.h
+.Pp
 .Li #define	GETENTROPY_MAX	256
 .Sh DESCRIPTION
 .Pp
 The
-.Fn getentropy
-function fills a buffer with high quality random data, suitable for seeding
-cryptographically secure psuedorandom number generators.
+.Nm
+function fills
+.Fa buf
+with exactly
+.Fa buflen
+independent uniform random bytes derived from the system's entropy
+pool.
 .Pp
-.Fn getentropy
-is only intended for seeding random number generators and is not intended
-for use by regular code which simply needs secure random data.
-For this purpose, please use
+The output of
+.Nm
+is meant to be unpredictable to an adversary and fit for use in
+cryptography.
+See
+.Sx CAVEATS
+below.
+.Pp
+.Nm
+is meant for seeding random number generators, not for direct use by
+applications; most applications should use
 .Xr arc4random 3 .
 .Pp
-The maximum value for
-.Li buflen
-is 256 bytes.
-.Sh IMPLEMENTATION NOTES
-.Fn getentropy
-reads from the
-.Xr sysctl 7
-variable
-.Li kern.arandom .
+.Fa buflen
+must be at most 256.
 .Sh RETURN VALUES
 .Rv -std getentropy
 .Sh ERRORS
@@ -75,13 +80,33 @@ The
 argument points to an invalid memory address.
 .It Bq Er EINVAL
 More than 256 bytes were requested.
+.El
+.Sh CAVEATS
+Security can only be guaranteed relative to whatever unpredictable
+physical processes or secret seed material are available to the system;
+see
+.Xr entropy 7 .
+.Pp
+On systems which have no hardware random number generator and which
+have not had secret seed material loaded,
+.Nx
+makes a reasonable effort to incorporate samples from various physical
+processes available to it that might be unpredictable from random
+jitter in timing.
+.Pp
+However, the
+.Nm
+interface alone can make no security guarantees without a physical
+system configuration that includes random number generation hardware or
+secret seed material from such hardware on another machine.
 .Sh SEE ALSO
 .Xr arc4random 3 ,
-.Xr rnd 4
+.Xr rnd 4 ,
+.Xr entropy 7
 .Sh STANDARDS
 The
 .Fn getentropy
-function is non-standard.
+function is nonstandard.
 However, it is likely to be included in the next revision of POSIX.
 .Sh HISTORY
 The
@@ -90,5 +115,5 @@ function first appeared in
 .Ox 5.6 ,
 then in
 .Fx 12.0 ,
-and
-.Nx 10 .
+and in
+.Nx 10.0 .

Index: src/lib/libc/sys/getrandom.2
diff -u src/lib/libc/sys/getrandom.2:1.1 src/lib/libc/sys/getrandom.2:1.1.6.1
--- src/lib/libc/sys/getrandom.2:1.1	Fri Aug 14 00:53:16 2020
+++ src/lib/libc/sys/getrandom.2	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: getrandom.2,v 1.1 2020/08/14 00:53:16 riastradh Exp $
+.\"	$NetBSD: getrandom.2,v 1.1.6.1 2023/08/11 14:35:24 martin Exp $
 .\"
 .\" Copyright (c) 2020 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,12 +27,12 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd January 13, 2020
+.Dd March 17, 2022
 .Dt GETRANDOM 2
 .Os
 .Sh NAME
 .Nm getrandom
-.Nd random number generation from system entropy
+.Nd generate uniform random seeds from system entropy for cryptography
 .Sh LIBRARY
 .Lb libc
 .Sh SYNOPSIS
@@ -49,90 +49,56 @@ with up to
 independent uniform random bytes derived from the system's entropy
 pool.
 .Pp
-The function may block until the system has full entropy, meaning that
-the system has observed enough noise from physical processes that an
-adversary cannot predict what state it is in:
-.Bl -bullet -compact
-.It
-When the system has only partial entropy, the output of
-.Fn getrandom
-may be predictable.
-.It
-When the system has full entropy, the output is fit for use as
-cryptographic key material.
-.El
+The output of
+.Nm
+is meant to be unpredictable to an adversary and fit for use in
+cryptography.
+See CAVEATS below.
+.Pp
+.Nm
+is meant for seeding random number generators, not for direct use by
+applications; most applications should use
+.Xr arc4random 3 .
+.Pp
+.Nm
+is a nonstandard extension that was added before POSIX began to
+converge on
+.Xr getentropy 2 .
+Applications should avoid
+.Nm
+and use
+.Xr getentropy 2
+instead;
+.Nm
+may be removed from a later release.
+.Pp
+.Nm
+may block indefinitely unless the
+.Dv GRND_INSECURE
+or
+.Dv GRND_NONBLOCK
+flags are specified.
 .Pp
 The
 .Fa flags
 argument may be:
-.Bl -tag -offset abcd -width GRND_INSECURE
+.Bl -tag -offset indent -width GRND_INSECURE
 .It Li 0
-Block until the system entropy pool has full entropy; then generate
-arbitrarily much data.
-.Em Recommended .
-.Pp
-If interrupted by a signal, may fail with
-.Er EINTR
-or return a short read.
-If successful, guaranteed to return at least 256 bytes even if
-interrupted.
+May block.
+On success, guaranteed to generate the smaller of
+.Fa buflen
+or 256 bytes.
 .It Dv GRND_INSECURE
-Do not block; instead fill
-.Fa buf
-with output derived from whatever is in the system entropy pool so
-far.
-Equivalent to reading from
-.Pa /dev/urandom ;
-see
-.Xr rnd 4 .
-.Pp
-If interrupted by a signal, may fail with
-.Er EINTR
-or return a short read.
-If successful, guaranteed to return at least 256 bytes even if
-interrupted.
-.Pp
-Despite the name, this is secure as long as you only do it
-.Em after
-at least one successful call without
-.Dv GRND_INSECURE ,
-such as
-.Li "getrandom(..., 0)"
-or
-.Li "getrandom(..., GRND_RANDOM)" ,
-or after reading at least one byte from
-.Pa /dev/random .
-.Pp
-.Sy WARNING :
-If you use
-.Dv GRND_INSECURE
-.Em before
-the system has full entropy. the output may enable an adversary to
-search the possible states of the entropy pool by brute force, and
-thereby reduce its entropy to zero.
-Thus, incautious use of
-.Dv GRND_INSECURE
-can ruin the security of the whole system.
-.Pp
-.Nx
-attempts to defend against this threat model by resetting the system's
-entropy estimate to zero in this event, requiring gathering full
-entropy again before
-.Pa /dev/random
-or
-.Fn getrandom
-without
-.Dv GRND_INSECURE
-will unblock, but other operating systems may not.
+Never blocks.
+On success, guaranteed to generate the smaller of
+.Fa buflen
+or 256 bytes.
 .It Dv GRND_RANDOM
-Block until the system entropy pool has full entropy; then generate a
-small amount of data.
-Equivalent to reading from
-.Pa /dev/random ;
-see
-.Xr rnd 4 .
-This is provided mainly for source compatibility with Linux; there is
-essentially no reason to ever use it.
+Will probably block.
+On success, may generate as little as a single byte of data.
+.Pp
+This is provided for source compatibility with Linux; there is no
+reason to ever use it.
 .El
 .Pp
 The flag
@@ -158,7 +124,7 @@ since
 never blocks.
 The combination
 .Dv GRND_INSECURE Ns Li "|" Ns Li GRND_RANDOM
-is nonsensical and fails with
+always fails with
 .Er EINVAL .
 .Sh RETURN VALUES
 If successful,
@@ -174,7 +140,9 @@ Since
 .Li "getrandom(..., 0)"
 and
 .Li "getrandom(..., GRND_INSECURE)"
-are guaranteed to return at least 256 bytes if successful, it
+are guaranteed to generate
+.Fa buflen
+or 256 bytes, whichever is smaller, if successful, it
 is sufficient to use, e.g.,
 .Bd -literal -compact
	getrandom(buf, 32, 0) == -1
@@ -187,9 +155,8 @@ to detect failure.
 However, with
 .Dv GRND_RANDOM ,
 .Fn getrandom
-may return as little as a single byte if successful.
+may generate as little as a single byte if successful.
 .Sh EXAMPLES
-.Sy Recommended usage .
 Generate a key for cryptography:
 .Bd -literal
	uint8_t secretkey[32];
@@ -198,53 +165,23 @@ Generate a key for cryptography:
		err(EXIT_FAILURE, "getrandom");
	crypto_secretbox_xsalsa20poly1305(..., secretkey);
 .Ed
-.Pp
-Other idioms for illustration:
-.Bl -bullet
-.It
-Wait for entropy once, and then generate many keys without waiting:
-.Bd -literal
-	struct { uint8_t key[32]; } user[100];
-
-	if (getrandom(NULL, 0, 0) == -1)
-		err(EXIT_FAILURE, "getrandom");
-	for (i = 0; i < 100; i++)
-		if (getrandom(user[i].key, sizeof user[i].key,
-		    GRND_INSECURE) == -1)
-			err(EXIT_FAILURE, "getrandom");
-.Ed
-.It
-Twiddle thumbs while waiting for entropy:
-.Bd -literal
-	uint8_t secretkey[32];
-
-	while (getrandom(secretkey, sizeof secretkey, GRND_NONBLOCK)
-	    == -1) {
-		if (errno != EAGAIN)
-			err(EXIT_FAILURE, "getrandom");
-		twiddle_thumbs();
-	}
-	crypto_secretbox_xsalsa20poly1305(..., secretkey);
-.Ed
-.El
-.Pp
-(No examples of
-.Dv GRND_RANDOM
-because it is not useful.)
 .Sh ERRORS
 .Bl -tag -width Er
 .It Bq Er EAGAIN
 The
 .Dv GRND_NONBLOCK
-flag was specified, and the system entropy pool does not have full
-entropy.
+flag was specified, and
+.Nm
+would have blocked waiting for entropy.
 .It Bq Er EINTR
 The
 .Dv GRND_NONBLOCK
 flag was
 .Em not
-specified, the system entropy pool does not have full entropy, and the
-process was interrupted by a signal while waiting.
+specified,
+.Nm
+blocked waiting for entropy, and the process was interrupted by a
+signal.
 .It Bq Er EINVAL
 .Fa flags
 contains an unrecognized flag or a nonsensical combination of flags.
@@ -252,20 +189,39 @@ contains an unrecognized flag or a nonse
 .Fa buf
 points outside the allocated address space.
 .El
+.Sh CAVEATS
+Security can only be guaranteed relative to whatever unpredictable
+physical processes or secret seed material are available to the system;
+see
+.Xr entropy 7 .
+.Pp
+On systems which have no hardware random number generator and which
+have not had secret seed material loaded,
+.Nx
+makes a reasonable effort to incorporate samples from various physical
+processes available to it that might be unpredictable from random
+jitter in timing.
+.Pp
+However, the
+.Nm
+interface alone can make no security guarantees without a physical
+system configuration that includes random number generation hardware or
+secret seed material from such hardware on another machine.
 .Sh SEE ALSO
-.Xr rnd 4
+.Xr arc4random 3 ,
+.Xr getentropy 3 ,
+.Xr rnd 4 ,
+.Xr entropy 7
+.Sh STANDARDS
+The
+.Nm
+function is a nonstandard Linux extension and will probably never be
+standardized.
 .Sh HISTORY
 The
 .Nm
 system call first appeared in Linux 3.17, and was added to
 .Nx 10.0 .
-.Sh AUTHORS
-The
-.Nx
-implementation of
-.Nm
-and this man page were written by
-.An Taylor R Campbell Aq Mt riastr...@netbsd.org .
 .Sh BUGS
 There is no way to multiplex waiting for
 .Fn getrandom
@@ -273,11 +229,16 @@ with other I/O in
 .Xr select 2 ,
 .Xr poll 2 ,
 or
-.Xr kqueue 2 .
+.Xr kqueue 2 ,
+or to atomically unmask a set of signals while
+.Nm
+blocks.
 Instead, you can wait for a read from
 .Pa /dev/random ;
 see
 .Xr rnd 4 .
 .Pp
-.Dv GRND_RANDOM
-is a little silly.
+The
+.Nm
+interface has more options than real-world applications need, with
+confusing and unclear semantics inherited from Linux.
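[Editorial illustration -- not part of the diff.  The two man pages
above recommend getentropy(3) over getrandom(2) for seeding, and
arc4random(3) for everything else.  A minimal sketch of the seeding
idiom, assuming the usual <unistd.h> declaration of getentropy and
NetBSD's explicit_memset(3):]

	#include <err.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		unsigned char seed[32];	/* buflen must be at most 256 */

		/* Fails with EINVAL if more than 256 bytes requested. */
		if (getentropy(seed, sizeof seed) == -1)
			err(EXIT_FAILURE, "getentropy");
		/* ...key a cryptographic PRNG with seed here... */
		explicit_memset(seed, 0, sizeof seed);	/* then wipe it */
		return 0;
	}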
Index: src/share/man/man4/rnd.4
diff -u src/share/man/man4/rnd.4:1.40 src/share/man/man4/rnd.4:1.40.2.1
--- src/share/man/man4/rnd.4:1.40	Sun Mar 20 18:19:57 2022
+++ src/share/man/man4/rnd.4	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: rnd.4,v 1.40 2022/03/20 18:19:57 riastradh Exp $
+.\"	$NetBSD: rnd.4,v 1.40.2.1 2023/08/11 14:35:24 martin Exp $
 .\"
 .\" Copyright (c) 2014-2020 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,7 +27,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd May 1, 2020
+.Dd August 7, 2023
 .Dt RND 4
 .Os
 .\"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
@@ -613,6 +613,12 @@ A buggy bootloader tried to provide an e
 the kernel.
 Subsequent seeds will be entered into the entropy pool, but they will
 be considered to contribute no entropy.
+.It entropy: best effort
+The system has gathered enough samples from interrupt timings and other
+non-confident sources of entropy for the first time to unblock
+.Pa /dev/random ,
+but it may not have full entropy from a seed or hardware random number
+generator.
 .It entropy: ready
 The system has full entropy for the first time.
 .El

Index: src/share/man/man5/rc.conf.5
diff -u src/share/man/man5/rc.conf.5:1.192 src/share/man/man5/rc.conf.5:1.192.2.1
--- src/share/man/man5/rc.conf.5:1.192	Sun Feb 20 14:43:39 2022
+++ src/share/man/man5/rc.conf.5	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: rc.conf.5,v 1.192 2022/02/20 14:43:39 alnsn Exp $
+.\"	$NetBSD: rc.conf.5,v 1.192.2.1 2023/08/11 14:35:25 martin Exp $
 .\"
 .\" Copyright (c) 1996 Matthew R. Green
 .\" All rights reserved.
@@ -454,11 +454,13 @@ from the output of
 Passes
 .Sy dmesg_flags .
 .It Sy entropy
-A string,
-.Sq Li check
+A string, either
+.Sq Li check ,
+.Sq Li wait ,
 or
-.Sq Li wait .
-If set, then during boot-up, after
+.Sq Li ""
+(empty).
+If set and nonempty, then during boot-up, after
 .Sy random_seed
 and
 .Sy rndctl ,

Index: src/share/man/man7/entropy.7
diff -u src/share/man/man7/entropy.7:1.4 src/share/man/man7/entropy.7:1.4.2.1
--- src/share/man/man7/entropy.7:1.4	Sun Mar 20 18:19:58 2022
+++ src/share/man/man7/entropy.7	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: entropy.7,v 1.4 2022/03/20 18:19:58 riastradh Exp $
+.\"	$NetBSD: entropy.7,v 1.4.2.1 2023/08/11 14:35:25 martin Exp $
 .\"
 .\" Copyright (c) 2021 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -24,7 +24,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd January 4, 2021
+.Dd June 30, 2023
 .Dt ENTROPY 7
 .Os
 .\"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
@@ -62,11 +62,17 @@ then they could impersonate you.
 .Nx
 relies on unpredictable secrets to make sure that private user data
 stored on nonvolatile media when memory is scarce
-.Pq Xr swapctl 8 , using So Li vm.swap_encrypt=1 Sc ; see Xr sysctl 7
+.Po
+.Xr swapctl 8 ,
+using
+.Ql vm.swap_encrypt=1 ;
+see
+.Xr sysctl 7
+.Pc
 cannot be recovered by forensic tools after shutdown.
 .El
 .\""""""""""""""""""""""""""""""""""""""
-.Ss Entropy in Nx
+.Ss Entropy in NetBSD
 .Nx
 gathers samples from various kinds of entropy sources, including:
 .Bl -bullet -compact
@@ -105,13 +111,8 @@ for security, thanks to modern cryptogra
 .Pp
 To detect potentially insecure systems,
 .Nx
-records how many bits it needs to achieve the full 256 bits, exposed
-via the
-.Xr sysctl 7
-variable
-.Li kern.entropy.needed ,
-and takes measures to alert the operator if there isn't definitely
-enough for security:
+takes measures to alert the operator if there isn't definitely enough
+for security:
 .Bl -bullet
 .It
 .Nx
@@ -119,29 +120,41 @@ issues warnings on the console if there'
 programs need it; see
 .Xr rnd 4 .
 .It
-The daily security report includes an alert if there's not enough
+The
+.Xr motd 5
+has a warning if there was not enough entropy when network daemons such as
+.Xr sshd 8
+first generated keys.
+.It
+The daily security report includes an alert if there's still not enough
 entropy; see
 .Xr security.conf 5 .
-.It
-The operator can set
-.Sq Li entropy=check
-in
-.Xr rc.conf 5
-so that
-.Nx
-will refuse to boot to multiuser unless there is enough entropy, or set
-.Sq Li entropy=wait
-so that
-.Nx
-will wait for entropy before booting to multiuser (with the caveat that
-it may cause boot to hang forever).
 .El
 .Pp
-Since it is difficult to confidently model the unpredictability of most
-physical systems, only devices specifically designed to be hardware
-random number generators count toward
-.Nx Ns 's
-estimate of the entropy.
+Since it is hard to know how unpredictable most physical systems are,
+only devices specifically designed to be hardware random number
+generators, or a seed file stored on disk, count toward these alerts.
+.Pp
+At boot,
+.Nx
+will wait, when
+.Ql entropy=wait
+is set in
+.Xr rc.conf 5 ,
+or fail to single-user mode, when
+.Ql entropy=check
+is set, if there is not enough entropy from
+.Em any
+sources, including devices not designed to be unpredictable, such as
+the CPU cycle counter sampled by a periodic timer, provided the samples
+pass a simple filter called the
+.Sq entropy estimator ,
+like other operating systems.
+Sources known to be predictable, which could give a false sense of
+security, can be disabled from unblocking boot by setting
+.Li rndctl_flags
+in
+.Xr rc.conf 5 .
 .Pp
 Many new computers have hardware random number generators, such as
 RDRAND/RDSEED in Intel/AMD CPUs, or ARMv8.5-RNDRRS;
@@ -206,7 +219,7 @@ After adding entropy,
 that might be predictable because they were previously generated with
 too little entropy.
 For example, if
-.Sq Li sshd=YES
+.Ql sshd=YES
 is enabled in
 .Pa /etc/rc.conf ,
 then
@@ -219,7 +232,7 @@ create new ones before allowing anyone t
 .Sh DIAGNOSTICS
 .Nx
 may print the following warnings to the console:
-.Bl -diag -offset indent
+.Bl -diag
 .It WARNING: system needs entropy for security; see entropy(7)
 Some process tried to draw use entropy from
 .Nx ,

Index: src/share/man/man9/rnd.9
diff -u src/share/man/man9/rnd.9:1.31 src/share/man/man9/rnd.9:1.31.2.1
--- src/share/man/man9/rnd.9:1.31	Tue May 17 01:39:57 2022
+++ src/share/man/man9/rnd.9	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-.\"	$NetBSD: rnd.9,v 1.31 2022/05/17 01:39:57 riastradh Exp $
+.\"	$NetBSD: rnd.9,v 1.31.2.1 2023/08/11 14:35:25 martin Exp $
 .\"
 .\" Copyright (c) 1997 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -35,6 +35,7 @@
 .Nm rnd_attach_source ,
 .Nm rnd_detach_source ,
 .Nm rnd_add_data ,
+.Nm rnd_add_data_intr ,
 .Nm rnd_add_data_sync ,
 .Nm rnd_add_uint32
 .Nd functions to make a device available for entropy collection
@@ -50,6 +51,8 @@
 .Ft void
 .Fn rnd_add_data "krndsource_t *rnd_source" "void *data" "uint32_t len" "uint32_t entropy"
 .Ft void
+.Fn rnd_add_data_intr "krndsource_t *rnd_source" "void *data" "uint32_t len" "uint32_t entropy"
+.Ft void
 .Fn rnd_add_data_sync "krndsource_t *rnd_source" "void *data" "uint32_t len" "uint32_t entropy"
 .Ft void
 .Fn rnd_add_uint32 "krndsource_t *rnd_source" "uint32_t datum"
@@ -82,7 +85,8 @@ Attach the random source with
 .Fn rnd_attach_source .
 .It
 Enter data with
-.Fn rnd_add_data
+.Fn rnd_add_data ,
+.Fn rnd_add_data_intr ,
 or
 .Fn rnd_add_uint32 ,
 or, if in the callback,
@@ -147,7 +151,8 @@ The callback normally does one of two th
 Sends a request to a hardware device for entropy and returns.
 The hardware will later return data asynchronously by an interrupt,
 and the callback will use
-.Fn rnd_add_data
+.Fn rnd_add_data ,
+.Fn rnd_add_data_intr ,
 or
 .Fn rnd_add_uint32
 to add the data to the pool.
@@ -161,9 +166,10 @@ returning, the callback use
 .Fn rnd_add_data_sync ,
 not
-.Fn rnd_add_data
+.Fn rnd_add_data ,
+.Fn rnd_add_data_intr \" this works for now but no promises
 or
-.Fn rnd_add_uint32 .
+.Fn rnd_add_uint32 . \" this also works for now but no promises
 .El
 .Pp
 .Nm
@@ -285,22 +291,65 @@ be used during a callback as set with
 use
 .Fn rnd_add_data_sync
 instead.
+.Pp
+.Fn rnd_add_data
+.Em must not
+be called from thread context with spin locks held.
+.Pp
+For compatibility,
+.Fn rnd_add_data
+currently
+.Em may
+but
+.Em should not
+be called from interrupt context, possibly with spin locks held.
+However, this may be forbidden in the future; use
+.Fn rnd_add_data_intr
+from interrupt context instead, if the work can't be usefully deferred
+to softint or thread.
+.It Fn rnd_add_data_intr "rnd_source" "data" "len" "entropy"
+Tries to enter
+.Fa len
+bytes at
+.Fa data
+into the entropy pool like
+.Fn rnd_add_data ,
+but if this fills or would overflow a sample buffer, schedules a
+softint to process it and discards an unspecified subset of the data
+while counting zero entropy for the sample.
+.Pp
+.Fn rnd_add_data_intr
+may be called from any context, including hard interrupt context,
+including contexts where spin locks are held, except that it
+.Em must not
+be used during a callback as set with
+.Fn rndsource_setcb ;
+use
+.Fn rnd_add_data_sync
+in that context instead.
 .It Fn rnd_add_data_sync "rnd_source" "data" "len" "entropy"
 Like
 .Fn rnd_add_data ,
 but may be used in a callback as set with
 .Fn rndsource_setcb .
+Must always be called in thread context.
 .It Fn rnd_add_uint32 "rnd_source" "datum"
 Equivalent to
-.Li rnd_add_data Ns ( Ns Fa rnd_source , Li & Ns Fa datum , Li 4 , 0 ) .
+.Li rnd_add_data_intr Ns ( Ns Fa rnd_source , Li & Ns Fa datum , Li 4 , 0 ) .
 .Pp
 .Fn rnd_add_uint32
+may be called from any context, including hard interrupt context,
+including contexts where spin locks are held, except that it
 .Em must not
 be used during a callback as set with
 .Fn rndsource_setcb ;
 use
 .Fn rnd_add_data_sync
-instead.
+in that context instead.
+.Pp
+.Fn rnd_add_uint32
+is meant for cheaply taking samples from devices that aren't designed
+to be hardware random number generators.
 .El
 .Sh FILES
 These functions are declared in src/sys/sys/rndsource.h and defined in

Index: src/sys/crypto/cprng_fast/cprng_fast.c
diff -u src/sys/crypto/cprng_fast/cprng_fast.c:1.18 src/sys/crypto/cprng_fast/cprng_fast.c:1.18.4.1
--- src/sys/crypto/cprng_fast/cprng_fast.c:1.18	Thu Sep  1 18:32:25 2022
+++ src/sys/crypto/cprng_fast/cprng_fast.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: cprng_fast.c,v 1.18 2022/09/01 18:32:25 riastradh Exp $	*/
+/*	$NetBSD: cprng_fast.c,v 1.18.4.1 2023/08/11 14:35:25 martin Exp $	*/

 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cprng_fast.c,v 1.18 2022/09/01 18:32:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cprng_fast.c,v 1.18.4.1 2023/08/11 14:35:25 martin Exp $");

 #include <sys/types.h>
 #include <sys/param.h>
@@ -58,7 +58,7 @@ struct cprng_fast {
 };

 static void	cprng_fast_init_cpu(void *, void *, struct cpu_info *);
-static void	cprng_fast_reseed(struct cprng_fast *);
+static void	cprng_fast_reseed(struct cprng_fast **, unsigned);
 static void	cprng_fast_seed(struct cprng_fast *, const void *);
 static void	cprng_fast_buf(struct cprng_fast *, void *, unsigned);

@@ -93,6 +93,7 @@ static int
 cprng_fast_get(struct cprng_fast **cprngp)
 {
	struct cprng_fast *cprng;
+	unsigned epoch;
	int s;

	KASSERT(!cpu_intr_p());
@@ -101,9 +102,10 @@ cprng_fast_get(struct cprng_fast **cprng
	*cprngp = cprng = percpu_getref(cprng_fast_percpu);
	s = splsoftserial();

-	if (__predict_false(cprng->epoch != entropy_epoch())) {
+	epoch = entropy_epoch();
+	if (__predict_false(cprng->epoch != epoch)) {
		splx(s);
-		cprng_fast_reseed(cprng);
+		cprng_fast_reseed(cprngp, epoch);
		s = splsoftserial();
	}

@@ -121,13 +123,25 @@ cprng_fast_put(struct cprng_fast *cprng,
 }

 static void
-cprng_fast_reseed(struct cprng_fast *cprng)
+cprng_fast_reseed(struct cprng_fast **cprngp, unsigned epoch)
 {
-	unsigned epoch = entropy_epoch();
+	struct cprng_fast *cprng;
	uint8_t seed[CPRNG_FAST_SEED_BYTES];
	int s;

+	/*
+	 * Drop the percpu(9) reference to extract a fresh seed from
+	 * the entropy pool.  cprng_strong may sleep on an adaptive
+	 * lock, which invalidates our percpu(9) reference.
+	 *
+	 * This may race with reseeding in another thread, which is no
+	 * big deal -- worst case, we rewind the entropy epoch here and
+	 * cause the next caller to reseed again, and in the end we
+	 * just reseed a couple more times than necessary.
+	 */
+	percpu_putref(cprng_fast_percpu);
	cprng_strong(kern_cprng, seed, sizeof(seed), 0);
+	*cprngp = cprng = percpu_getref(cprng_fast_percpu);

	s = splsoftserial();
	cprng_fast_seed(cprng, seed);

Index: src/sys/dev/pci/hifn7751.c
diff -u src/sys/dev/pci/hifn7751.c:1.80 src/sys/dev/pci/hifn7751.c:1.80.4.1
--- src/sys/dev/pci/hifn7751.c:1.80	Sun May 22 11:39:27 2022
+++ src/sys/dev/pci/hifn7751.c	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: hifn7751.c,v 1.80 2022/05/22 11:39:27 riastradh Exp $	*/
+/*	$NetBSD: hifn7751.c,v 1.80.4.1 2023/08/11 14:35:24 martin Exp $	*/
 /*	$OpenBSD: hifn7751.c,v 1.179 2020/01/11 21:34:03 cheloha Exp $	*/

 /*
@@ -47,7 +47,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.80 2022/05/22 11:39:27 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.80.4.1 2023/08/11 14:35:24 martin Exp $");

 #include <sys/param.h>
 #include <sys/cprng.h>
@@ -655,7 +655,7 @@ hifn_rng(struct hifn_softc *sc)
		hexdump(printf, "hifn", num, sizeof num);
 #endif
		entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
-		rnd_add_data(&sc->sc_rnd_source, num, sizeof(num),
+		rnd_add_data_intr(&sc->sc_rnd_source, num, sizeof(num),
		    entropybits);
		entropybits = MAX(entropybits, 1);
		entropybits = MIN(entropybits, sc->sc_rng_needbits);
@@ -693,7 +693,7 @@ hifn_rng(struct hifn_softc *sc)
		hexdump(printf, "hifn", num, sizeof num);
 #endif
		entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
-		rnd_add_data(&sc->sc_rnd_source, num, sizeof num,
+		rnd_add_data_intr(&sc->sc_rnd_source, num, sizeof num,
		    entropybits);
		entropybits = MAX(entropybits, 1);
		entropybits = MIN(entropybits, sc->sc_rng_needbits);

Index: src/sys/dev/pci/ubsec.c
diff -u src/sys/dev/pci/ubsec.c:1.62 src/sys/dev/pci/ubsec.c:1.62.4.1
--- src/sys/dev/pci/ubsec.c:1.62	Sat Aug 27 05:35:17 2022
+++ src/sys/dev/pci/ubsec.c	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: ubsec.c,v 1.62 2022/08/27 05:35:17 skrll Exp $	*/
+/*	$NetBSD: ubsec.c,v 1.62.4.1 2023/08/11 14:35:24 martin Exp $	*/
 /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */
 /*	$OpenBSD: ubsec.c,v 1.143 2009/03/27 13:31:30 reyk Exp$	*/

 /*
@@ -35,7 +35,7 @@
 */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.62 2022/08/27 05:35:17 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.62.4.1 2023/08/11 14:35:24 martin Exp $");

 #undef UBSEC_DEBUG
@@ -1939,7 +1939,7 @@ ubsec_callback2(struct ubsec_softc *sc,
	    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	p = (u_int32_t *)rng->rng_buf.dma_vaddr;
	i = UBSEC_RNG_BUFSIZ * sizeof(u_int32_t);
-	rnd_add_data(&sc->sc_rnd_source, (char *)p, i, i * NBBY);
+	rnd_add_data_intr(&sc->sc_rnd_source, (char *)p, i, i * NBBY);
	sc->sc_rng_need -= i;
	rng->rng_used = 0;
	if (sc->sc_rng_need > 0) {

Index: src/sys/dev/pci/viornd.c
diff -u src/sys/dev/pci/viornd.c:1.18.4.1 src/sys/dev/pci/viornd.c:1.18.4.2
--- src/sys/dev/pci/viornd.c:1.18.4.1	Sat May 13 10:56:10 2023
+++ src/sys/dev/pci/viornd.c	Fri Aug 11 14:35:24 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: viornd.c,v 1.18.4.1 2023/05/13 10:56:10 martin Exp $	*/
+/*	$NetBSD: viornd.c,v 1.18.4.2 2023/08/11 14:35:24 martin Exp $	*/
 /*	$OpenBSD: viornd.c,v 1.1 2014/01/21 21:14:58 sf Exp $	*/

 /*
@@ -245,8 +245,9 @@ viornd_vq_done(struct virtqueue *vq)
 #if VIORND_DEBUG
	aprint_normal("%s: got %d bytes of entropy\n", __func__, len);
 #endif
-	rnd_add_data(&sc->sc_rndsource, sc->sc_buf, VIORND_BUFSIZE,
-	    VIORND_BUFSIZE * NBBY);
+	/* XXX Shouldn't this be len instead of VIORND_BUFSIZE? */
+	rnd_add_data_intr(&sc->sc_rndsource, sc->sc_buf, VIORND_BUFSIZE,
+	    VIORND_BUFSIZE * NBBY);
 out:	virtio_dequeue_commit(vsc, vq, slot);
	mutex_exit(&sc->sc_mutex);

Index: src/sys/kern/kern_clock.c
diff -u src/sys/kern/kern_clock.c:1.148 src/sys/kern/kern_clock.c:1.148.4.1
--- src/sys/kern/kern_clock.c:1.148	Sat Mar 19 14:34:47 2022
+++ src/sys/kern/kern_clock.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_clock.c,v 1.148 2022/03/19 14:34:47 riastradh Exp $	*/

+/*	$NetBSD: kern_clock.c,v 1.148.4.1 2023/08/11 14:35:25 martin Exp $	*/

 /*-
  * Copyright (c) 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -69,7 +69,7 @@
 */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.148 2022/03/19 14:34:47 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.148.4.1 2023/08/11 14:35:25 martin Exp $");

 #ifdef _KERNEL_OPT
 #include "opt_dtrace.h"
@@ -284,12 +284,14 @@ initclocks(void)
	rndsource_setcb(&hardclockrnd.source, clockrnd_get, &hardclockrnd);
	rnd_attach_source(&hardclockrnd.source, "hardclock", RND_TYPE_SKEW,
-	    RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+	    RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME|RND_FLAG_HASCB);
	if (stathz) {
		rndsource_setcb(&statclockrnd.source, clockrnd_get,
		    &statclockrnd);
		rnd_attach_source(&statclockrnd.source, "statclock",
-		    RND_TYPE_SKEW, RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+		    RND_TYPE_SKEW,
+		    (RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME|
+			RND_FLAG_HASCB));
	}
 }

Index: src/sys/kern/kern_entropy.c
diff -u src/sys/kern/kern_entropy.c:1.57.4.3 src/sys/kern/kern_entropy.c:1.57.4.4
--- src/sys/kern/kern_entropy.c:1.57.4.3	Tue Aug  1 16:10:59 2023
+++ src/sys/kern/kern_entropy.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_entropy.c,v 1.57.4.3 2023/08/01 16:10:59 martin Exp $	*/
+/*	$NetBSD: kern_entropy.c,v 1.57.4.4 2023/08/11 14:35:25 martin Exp $	*/

 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -62,20 +62,22 @@
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
- *	* No entropy estimation based on the sample values, which is a
- *	  contradiction in terms and a potential source of side
- *	  channels.  It is the responsibility of the driver author to
- *	  study how predictable the physical source of input can ever
- *	  be, and to furnish a lower bound on the amount of entropy it
- *	  has.
- *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
+ *
+ *	* While cold, a single global entropy pool is available for
+ *	  entering and extracting, serialized through splhigh/splx.
+ *	  The per-CPU entropy pool data structures are initialized in
+ *	  entropy_init and entropy_init_late (separated mainly for
+ *	  hysterical raisins at this point), but are not used until the
+ *	  system is warm, at which point access to the global entropy
+ *	  pool is limited to thread and softint context and serialized
+ *	  by E->lock.
 */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.57.4.3 2023/08/01 16:10:59 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.57.4.4 2023/08/11 14:35:25 martin Exp $");

 #include <sys/param.h>
 #include <sys/types.h>
@@ -122,6 +124,10 @@ __KERNEL_RCSID(0, "$NetBSD: kern_entropy
 #include <machine/cpu_counter.h>
 #endif

+#define	MINENTROPYBYTES	ENTROPY_CAPACITY
+#define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
+#define	MINSAMPLES	(2*MINENTROPYBITS)
+
 /*
 * struct entropy_cpu
 *
@@ -138,7 +144,8 @@ struct entropy_cpu {
		struct evcnt	intrtrunc;
	} *ec_evcnt;
	struct entpool	*ec_pool;
-	unsigned	ec_pending;
+	unsigned	ec_bitspending;
+	unsigned	ec_samplespending;
	bool		ec_locked;
 };

@@ -161,6 +168,7 @@ struct rndsource_cpu {
	unsigned	rc_entropybits;
	unsigned	rc_timesamples;
	unsigned	rc_datasamples;
+	rnd_delta_t	rc_timedelta;
 };

 /*
@@ -173,8 +181,10 @@ struct rndsource_cpu {
 struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
-	unsigned	needed;		/* (A) needed globally */
-	unsigned	pending;	/* (A) pending in per-CPU pools */
+	unsigned	bitsneeded;	/* (A) needed globally */
+	unsigned	bitspending;	/* pending in per-CPU pools */
+	unsigned	samplesneeded;	/* (A) needed globally */
+	unsigned	samplespending;	/* pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
-	enum entropy_stage {
-		ENTROPY_COLD = 0, /* single-threaded */
-		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
-		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
-	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
 } entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.
	 */
-	.needed = ENTROPY_CAPACITY*NBBY,
+	.bitsneeded = MINENTROPYBITS,
+	.samplesneeded = MINSAMPLES,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
-	.stage = ENTROPY_COLD,
 };

 #define	E	(&entropy_global) /* declutter */
@@ -249,11 +254,11 @@ static struct sysctllog *entropy_sysctl
 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
 static void	entropy_account_cpu(struct entropy_cpu *);
-static void	entropy_enter(const void *, size_t, unsigned);
-static bool	entropy_enter_intr(const void *, size_t, unsigned);
+static void	entropy_enter(const void *, size_t, unsigned, bool);
+static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
 static void	entropy_softintr(void *);
 static void	entropy_thread(void *);
-static uint32_t	entropy_pending(void);
+static bool	entropy_pending(void);
 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
 static void	entropy_do_consolidate(void);
 static void	entropy_consolidate_xc(void *, void *);
@@ -263,8 +268,10 @@ static int	sysctl_entropy_gather(SYSCTLF
 static void	filt_entropy_read_detach(struct knote *);
 static int	filt_entropy_read_event(struct knote *, long);
 static int	entropy_request(size_t, int);
+static void	rnd_add_data_internal(struct krndsource *, const void *,
+		    uint32_t, uint32_t, bool);
 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
-		    uint32_t, uint32_t);
+		    uint32_t, bool, uint32_t, bool);
 static unsigned	rndsource_entropybits(struct krndsource *);
 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
@@ -306,12 +313,17 @@ static void
 attach_seed_rndsource(void)
 {

+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	KASSERT(cold);
+
	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
-	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
+	if (E->seed_rndsource)
		return;
+
	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
@@ -322,7 +334,8 @@ attach_seed_rndsource(void)
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
- *	Requires percpu(9) and sysctl(9) to be initialized.
+ *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
+ *	while cold.
 */
 static void
 entropy_init(void)
@@ -331,7 +344,7 @@ entropy_init(void)
	struct krndsource *rs;
	unsigned i = 0;

-	KASSERT(E->stage == ENTROPY_COLD);
+	KASSERT(cold);

	/* Grab some cycle counts early at boot. */
	extra[i++] = entropy_timer();
@@ -368,12 +381,24 @@ entropy_init(void)
	/* XXX These should maybe not be readable at securelevel>0.
	 */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
-	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
-	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
+	    "needed",
+	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
+	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
-	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
-	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
+	    "pending",
+	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
+	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
+	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
+	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
+	    "samplesneeded",
+	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
+	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
+	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
+	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
+	    "samplespending",
+	    SYSCTL_DESCR("Number of samples pending on CPUs"),
+	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
@@ -403,30 +428,8 @@ entropy_init(void)
	/* Enter the boot cycle count to get started. */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
-	entropy_enter(extra, sizeof extra, 0);
+	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
	explicit_memset(extra, 0, sizeof extra);
-
-	/* We are now ready for multi-threaded operation. */
-	E->stage = ENTROPY_WARM;
-}
-
-static void
-entropy_init_late_cpu(void *a, void *b)
-{
-	int bound;
-
-	/*
-	 * We're not necessarily in a softint lwp here (xc_broadcast
-	 * triggers softint on other CPUs, but calls directly on this
-	 * CPU), so explicitly bind to the current CPU to invoke the
-	 * softintr -- this lets us have a simpler assertion in
-	 * entropy_account_cpu.  Not necessary to avoid migration
-	 * because xc_broadcast disables kpreemption anyway, but it
-	 * doesn't hurt.
-	 */
-	bound = curlwp_bind();
-	entropy_softintr(NULL);
-	curlwp_bindx(bound);
 }

 /*
@@ -435,22 +438,22 @@
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
+ *	Must run while cold.
 */
 static void
 entropy_init_late(void)
 {
-	void *sih;
	int error;

-	KASSERT(E->stage == ENTROPY_WARM);
+	KASSERT(cold);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
-	sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
+	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
-	if (sih == NULL)
+	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
@@ -462,25 +465,6 @@ entropy_init_late(void)
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);
-
-	/*
-	 * Wait until the per-CPU initialization has hit all CPUs
-	 * before proceeding to mark the entropy system hot and
-	 * enabling use of the softint.
-	 */
-	xc_barrier(XC_HIGHPRI);
-	E->stage = ENTROPY_HOT;
-	atomic_store_relaxed(&entropy_sih, sih);
-
-	/*
-	 * At this point, entering new samples from interrupt handlers
-	 * will trigger the softint to process them.  But there may be
-	 * some samples that were entered from interrupt handlers
-	 * before the softint was available.  Make sure we process
-	 * those samples on all CPUs by running the softint logic on
-	 * all CPUs.
-	 */
-	xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
 }

 /*
@@ -496,7 +480,8 @@ entropy_init_cpu(void *ptr, void *cookie
	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
-	ec->ec_pending = 0;
+	ec->ec_bitspending = 0;
+	ec->ec_samplespending = 0;
	ec->ec_locked = false;

	/* XXX ci_cpuname may not be initialized early enough. */
@@ -594,6 +579,10 @@ entropy_seed(rndsave_t *seed)
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	KASSERT(cold);
+
	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
@@ -627,12 +616,8 @@ entropy_seed(rndsave_t *seed)
	attach_seed_rndsource();

	/* Test and set E->seeded. */
-	if (E->stage >= ENTROPY_WARM)
-		mutex_enter(&E->lock);
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);
-	if (E->stage >= ENTROPY_WARM)
-		mutex_exit(&E->lock);

	/*
	 * If we've been seeded, may be re-entering the same seed
@@ -657,23 +642,23 @@ entropy_seed(rndsave_t *seed)
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
- *	complete and interrupts are running.
+ *	complete and interrupts are running but we are still cold.
 */
 void
 entropy_bootrequest(void)
 {
	int error;

-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	KASSERT(cold);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
-	mutex_enter(&E->lock);
-	error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT);
-	KASSERT(error == 0);
-	mutex_exit(&E->lock);
+	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
+	KASSERTMSG(error == 0, "error=%d", error);
 }

 /*
@@ -721,7 +706,7 @@ bool
 entropy_ready(void)
 {

-	return atomic_load_relaxed(&E->needed) == 0;
+	return atomic_load_relaxed(&E->bitsneeded) == 0;
 }

 /*
@@ -746,16 +731,17 @@ entropy_account_cpu(struct entropy_cpu *
 {
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec0;
-	unsigned diff;
+	unsigned bitsdiff, samplesdiff;

-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cpu_intr_p());
+	KASSERT(!cold);
	KASSERT(curlwp->l_pflag & LP_BOUND);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
	 */
-	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
+	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
	    __predict_true((time_uptime - E->timestamp) <= 60))
		return;
@@ -767,9 +753,10 @@ entropy_account_cpu(struct entropy_cpu *
	mutex_enter(&E->lock);
	ec0 = entropy_cpu_get(&lock);
	KASSERT(ec0 == ec);
-	if (ec->ec_pending == 0) {
+
+	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
		/* Raced with consolidation xcall.  Nothing to do. */
-	} else if (E->needed != 0 && E->needed <= ec->ec_pending) {
+	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
		/*
		 * If we have not yet attained full entropy but we can
		 * now, do so.  This way we disseminate entropy
@@ -783,38 +770,64 @@ entropy_account_cpu(struct entropy_cpu *
		/* Transfer from the local pool to the global pool.
		 */
		entpool_extract(ec->ec_pool, buf, sizeof buf);
		entpool_enter(&E->pool, buf, sizeof buf);
-		atomic_store_relaxed(&ec->ec_pending, 0);
-		atomic_store_relaxed(&E->needed, 0);
+		atomic_store_relaxed(&ec->ec_bitspending, 0);
+		atomic_store_relaxed(&ec->ec_samplespending, 0);
+		atomic_store_relaxed(&E->bitsneeded, 0);
+		atomic_store_relaxed(&E->samplesneeded, 0);

		/* Notify waiters that we now have full entropy. */
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	} else {
		/* Determine how much we can add to the global pool. */
-		KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
-		    "E->pending=%u", E->pending);
-		diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
+		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
+		    "E->bitspending=%u", E->bitspending);
+		bitsdiff = MIN(ec->ec_bitspending,
+		    MINENTROPYBITS - E->bitspending);
+		KASSERTMSG(E->samplespending <= MINSAMPLES,
+		    "E->samplespending=%u", E->samplespending);
+		samplesdiff = MIN(ec->ec_samplespending,
+		    MINSAMPLES - E->samplespending);

		/*
		 * This should make a difference unless we are already
		 * saturated.
		 */
-		KASSERTMSG(diff || E->pending == ENTROPY_CAPACITY*NBBY,
-		    "diff=%u E->pending=%u ec->ec_pending=%u cap=%u",
-		    diff, E->pending, ec->ec_pending,
-		    (unsigned)ENTROPY_CAPACITY*NBBY);
+		KASSERTMSG((bitsdiff || samplesdiff ||
+			E->bitspending == MINENTROPYBITS ||
+			E->samplespending == MINSAMPLES),
+		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
+		    "samplesdiff=%u E->samplespending=%u"
+		    " ec->ec_samplespending=%u"
+		    " minentropybits=%u minsamples=%u",
+		    bitsdiff, E->bitspending, ec->ec_bitspending,
+		    samplesdiff, E->samplespending, ec->ec_samplespending,
+		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);

		/* Add to the global, subtract from the local. */
-		E->pending += diff;
-		KASSERT(E->pending);
-		KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
-		    "E->pending=%u", E->pending);
-		atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
+		E->bitspending += bitsdiff;
+		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
+		    "E->bitspending=%u", E->bitspending);
+		atomic_store_relaxed(&ec->ec_bitspending,
+		    ec->ec_bitspending - bitsdiff);
+
+		E->samplespending += samplesdiff;
+		KASSERTMSG(E->samplespending <= MINSAMPLES,
+		    "E->samplespending=%u", E->samplespending);
+		atomic_store_relaxed(&ec->ec_samplespending,
+		    ec->ec_samplespending - samplesdiff);
+
+		/* One or the other must have gone up from zero. */
+		KASSERT(E->bitspending || E->samplespending);

-		if (E->needed <= E->pending) {
+		if (E->bitsneeded <= E->bitspending ||
+		    E->samplesneeded <= E->samplespending) {
			/*
-			 * Enough entropy between all the per-CPU
-			 * pools.  Wake up the housekeeping thread.
+			 * Enough bits or at least samples between all
+			 * the per-CPU pools.  Leave a note for the
+			 * housekeeping thread to consolidate entropy
+			 * next time it wakes up -- and wake it up if
+			 * this is the first time, to speed things up.
			 *
			 * If we don't need any entropy, this doesn't
			 * mean much, but it is the only time we ever
@@ -824,14 +837,16 @@ entropy_account_cpu(struct entropy_cpu *
			 * negligible performance cost.
			 */
			E->consolidate = true;
-			cv_broadcast(&E->cv);
-			if (E->needed == 0)
+			if (E->epoch == (unsigned)-1)
+				cv_broadcast(&E->cv);
+			if (E->bitsneeded == 0)
				entropy_discretionary_evcnt.ev_count++;
		} else {
			/* Can't get full entropy.  Keep gathering.
			 */
			entropy_partial_evcnt.ev_count++;
		}
	}
+
	entropy_cpu_put(&lock, ec);
	mutex_exit(&E->lock);
 }

 /*
@@ -848,8 +863,19 @@ static void
 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
 {
	bool notify = false;
+	int s;
+
+	KASSERT(cold);

-	KASSERT(E->stage == ENTROPY_COLD);
+	/*
+	 * We're early at boot before multithreading and multi-CPU
+	 * operation, and we don't have softints yet to defer
+	 * processing from interrupt context, so we have to enter the
+	 * samples directly into the global pool.  But interrupts may
+	 * be enabled, and we enter this path from interrupt context,
+	 * so block interrupts until we're done.
+	 */
+	s = splhigh();

	/* Enter it into the pool. */
	entpool_enter(&E->pool, buf, len);
@@ -858,20 +884,29 @@ entropy_enter_early(const void *buf, siz
	 * Decide whether to notify reseed -- we will do so if either:
	 * (a) we transition from partial entropy to full entropy, or
	 * (b) we get a batch of full entropy all at once.
+	 * We don't count timing samples because we assume, while cold,
+	 * there's not likely to be much jitter yet.
	 */
-	notify |= (E->needed && E->needed <= nbits);
-	notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
+	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
+	notify |= (nbits >= MINENTROPYBITS);

-	/* Subtract from the needed count and notify if appropriate. */
-	E->needed -= MIN(E->needed, nbits);
+	/*
+	 * Subtract from the needed count and notify if appropriate.
+	 * We don't count samples here because entropy_timer might
+	 * still be returning zero at this point if there's no CPU
+	 * cycle counter.
+	 */
+	E->bitsneeded -= MIN(E->bitsneeded, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}
+
+	splx(s);
 }

 /*
- * entropy_enter(buf, len, nbits)
+ * entropy_enter(buf, len, nbits, count)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
@@ -879,11 +914,11 @@ entropy_enter_early(const void *buf, siz
 *	process that led to this sample.
 */
 static void
-entropy_enter(const void *buf, size_t len, unsigned nbits)
+entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
 {
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
-	unsigned pending;
+	unsigned bitspending, samplespending;
	int bound;

	KASSERTMSG(!cpu_intr_p(),
@@ -891,8 +926,11 @@ entropy_enter(const void *buf, size_t le
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

-	/* If it's too early after boot, just use entropy_enter_early. */
-	if (__predict_false(E->stage == ENTROPY_COLD)) {
+	/*
+	 * If we're still cold, just use entropy_enter_early to put
+	 * samples directly into the global pool.
+	 */
+	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}
@@ -908,23 +946,32 @@ entropy_enter(const void *buf, size_t le
	/*
	 * With the per-CPU state locked, enter into the per-CPU pool
	 * and count up what we can add.
+	 *
+	 * We don't count samples while cold because entropy_timer
+	 * might still be returning zero if there's no CPU cycle
+	 * counter.
	 */
	ec = entropy_cpu_get(&lock);
	entpool_enter(ec->ec_pool, buf, len);
-	pending = ec->ec_pending;
-	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
-	atomic_store_relaxed(&ec->ec_pending, pending);
+	bitspending = ec->ec_bitspending;
+	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
+	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
+	samplespending = ec->ec_samplespending;
+	if (__predict_true(count)) {
+		samplespending += MIN(MINSAMPLES - samplespending, 1);
+		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
+	}
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added. */
-	if (pending)
+	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);

	curlwp_bindx(bound);
 }

 /*
- * entropy_enter_intr(buf, len, nbits)
+ * entropy_enter_intr(buf, len, nbits, count)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
@@ -935,28 +982,40 @@ entropy_enter(const void *buf, size_t le
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
- *	Using this in thread context will work, but you might as well
- *	use entropy_enter in that case.
+ *	Using this in thread or softint context with no spin locks held
+ *	will work, but you might as well use entropy_enter in that
+ *	case.
 */
 static bool
-entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
+entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
 {
	struct entropy_cpu *ec;
	bool fullyused = false;
-	uint32_t pending;
-	void *sih;
+	uint32_t bitspending, samplespending;
+	int s;

-	KASSERT(cpu_intr_p());
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

-	/* If it's too early after boot, just use entropy_enter_early. */
-	if (__predict_false(E->stage == ENTROPY_COLD)) {
+	/*
+	 * If we're still cold, just use entropy_enter_early to put
+	 * samples directly into the global pool.
+	 */
+	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
+	 * In case we were called in thread or interrupt context with
+	 * interrupts unblocked, block soft interrupts up to
+	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
+	 * context or under a spin lock is also safe in less
+	 * restrictive contexts.
+	 */
+	s = splsoftserial();
+
+	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
@@ -974,33 +1033,42 @@ entropy_enter_intr(const void *buf, size
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
-		sih = atomic_load_relaxed(&entropy_sih);
-		if (__predict_true(sih != NULL))
-			softint_schedule(sih);
+		if (__predict_true(!cold))
+			softint_schedule(entropy_sih);
		ec->ec_evcnt->intrtrunc.ev_count++;
		goto out1;
	}
	fullyused = true;

-	/* Count up what we can contribute. */
-	pending = ec->ec_pending;
-	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
-	atomic_store_relaxed(&ec->ec_pending, pending);
+	/*
+	 * Count up what we can contribute.
+	 *
+	 * We don't count samples while cold because entropy_timer
+	 * might still be returning zero if there's no CPU cycle
+	 * counter.
@@ -1018,7 +1086,7 @@
 entropy_softintr(void *cookie)
 {
 	struct entropy_cpu_lock lock;
 	struct entropy_cpu *ec;
-	unsigned pending;
+	unsigned bitspending, samplespending;
 
 	/*
 	 * With the per-CPU state locked, stir the pool if necessary
@@ -1028,11 +1096,12 @@ entropy_softintr(void *cookie)
 	ec = entropy_cpu_get(&lock);
 	ec->ec_evcnt->softint.ev_count++;
 	entpool_stir(ec->ec_pool);
-	pending = ec->ec_pending;
+	bitspending = ec->ec_bitspending;
+	samplespending = ec->ec_samplespending;
 	entropy_cpu_put(&lock, ec);
 
 	/* Consolidate globally if appropriate based on what we added. */
-	if (pending)
+	if (bitspending > 0 || samplespending >= MINSAMPLES)
 		entropy_account_cpu(ec);
 }
 
@@ -1046,13 +1115,17 @@
 entropy_thread(void *cookie)
 {
 	bool consolidate;
 
+#ifndef _RUMPKERNEL /* XXX rump starts threads before cold */
+	KASSERT(!cold);
+#endif
+
 	for (;;) {
 		/*
 		 * Wait until there's full entropy somewhere among the
 		 * CPUs, as confirmed at most once per minute, or
 		 * someone wants to consolidate.
 		 */
-		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
+		if (entropy_pending()) {
 			consolidate = true;
 		} else {
 			mutex_enter(&E->lock);
@@ -1073,29 +1146,41 @@ entropy_thread(void *cookie)
 	}
 }
 
+struct entropy_pending_count {
+	uint32_t bitspending;
+	uint32_t samplespending;
+};
+
 /*
  * entropy_pending()
  *
- *	Count up the amount of entropy pending on other CPUs.
+ *	True if enough bits or samples are pending on other CPUs to
+ *	warrant consolidation.
 */
-static uint32_t
+static bool
 entropy_pending(void)
 {
-	uint32_t pending = 0;
+	struct entropy_pending_count count = { 0, 0 }, *C = &count;
 
-	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
-	return pending;
+	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
+	return C->bitspending >= MINENTROPYBITS ||
+	    C->samplespending >= MINSAMPLES;
 }
 
 static void
 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 {
 	struct entropy_cpu *ec = ptr;
-	uint32_t *pendingp = cookie;
-	uint32_t cpu_pending;
-
-	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
-	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
+	struct entropy_pending_count *C = cookie;
+	uint32_t cpu_bitspending;
+	uint32_t cpu_samplespending;
+
+	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
+	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
+	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
+	    cpu_bitspending);
+	C->samplespending += MIN(MINSAMPLES - C->samplespending,
+	    cpu_samplespending);
 }
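[entropy_pending() above folds every CPU's counters into a single cookie struct via percpu_foreach(9), saturating at each step so totals stay bounded. The fold itself can be modelled in plain userland C; MINENTROPYBITS and MINSAMPLES are assumed illustration values, not necessarily the kernel's:

#include <stdint.h>
#include <stdio.h>

#define MINENTROPYBITS	256	/* assumed for illustration */
#define MINSAMPLES	160	/* assumed for illustration */
#define MIN(a,b)	((a) < (b) ? (a) : (b))

struct count {
	uint32_t bitspending;
	uint32_t samplespending;
};

/* Analogue of entropy_pending_cpu: fold one CPU's counters in. */
static void
fold_cpu(struct count *C, uint32_t bits, uint32_t samples)
{

	C->bitspending += MIN(MINENTROPYBITS - C->bitspending, bits);
	C->samplespending += MIN(MINSAMPLES - C->samplespending, samples);
}

int
main(void)
{
	struct count c = { 0, 0 };
	uint32_t bits[2] = { 100, 400 }, samples[2] = { 50, 200 };

	for (unsigned i = 0; i < 2; i++)
		fold_cpu(&c, bits[i], samples[i]);
	printf("%u %u\n", c.bitspending, c.samplespending);	/* 256 160 */
	return 0;
}

In the kernel the per-CPU loads use atomic_load_relaxed because the owning CPU may be updating its counters concurrently; the fold only needs a conservative snapshot.]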
 
 /*
@@ -1111,9 +1196,12 @@ entropy_do_consolidate(void)
 	static struct timeval lasttime;	/* serialized by E->lock */
 	struct entpool pool;
 	uint8_t buf[ENTPOOL_CAPACITY];
-	unsigned diff;
+	unsigned bitsdiff, samplesdiff;
 	uint64_t ticket;
 
+	KASSERT(!cold);
+	ASSERT_SLEEPABLE();
+
 	/* Gather entropy on all CPUs into a temporary pool. */
 	memset(&pool, 0, sizeof pool);
 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
@@ -1134,10 +1222,10 @@ entropy_do_consolidate(void)
 	explicit_memset(&pool, 0, sizeof pool);
 
 	/* Count the entropy that was gathered. */
-	diff = MIN(E->needed, E->pending);
-	atomic_store_relaxed(&E->needed, E->needed - diff);
-	E->pending -= diff;
-	if (__predict_false(E->needed > 0)) {
+	bitsdiff = MIN(E->bitsneeded, E->bitspending);
+	atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff);
+	E->bitspending -= bitsdiff;
+	if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) {
 		if ((boothowto & AB_DEBUG) != 0 &&
 		    ratecheck(&lasttime, &interval)) {
 			printf("WARNING:"
@@ -1145,6 +1233,11 @@ entropy_do_consolidate(void)
 		}
 	}
 
+	samplesdiff = MIN(E->samplesneeded, E->samplespending);
+	atomic_store_relaxed(&E->samplesneeded,
+	    E->samplesneeded - samplesdiff);
+	E->samplespending -= samplesdiff;
+
 	/* Advance the epoch and notify waiters. */
 	entropy_notify();
 
@@ -1179,7 +1272,8 @@ entropy_consolidate_xc(void *vpool, void
 	ec = entropy_cpu_get(&lock);
 	extra[i++] = entropy_timer();
 	entpool_extract(ec->ec_pool, buf, sizeof buf);
-	atomic_store_relaxed(&ec->ec_pending, 0);
+	atomic_store_relaxed(&ec->ec_bitspending, 0);
+	atomic_store_relaxed(&ec->ec_samplespending, 0);
 	extra[i++] = entropy_timer();
 	entropy_cpu_put(&lock, ec);
 	extra[i++] = entropy_timer();
@@ -1205,27 +1299,37 @@ entropy_consolidate_xc(void *vpool, void
  *	Caller just contributed entropy to the global pool.  Advance
  *	the entropy epoch and notify waiters.
  *
- *	Caller must hold the global entropy lock.  Except for the
- *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
- *	have just have transitioned from partial entropy to full
- *	entropy -- E->needed should be zero now.
+ *	Caller must hold the global entropy lock.
 */
 static void
 entropy_notify(void)
 {
 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 	static struct timeval lasttime;	/* serialized by E->lock */
+	static bool ready = false, besteffort = false;
 	unsigned epoch;
 
-	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
+	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
 
 	/*
 	 * If this is the first time, print a message to the console
 	 * that we're ready so operators can compare it to the timing
 	 * of other events.
-	 */
-	if (__predict_false(E->epoch == (unsigned)-1) && E->needed == 0)
-		printf("entropy: ready\n");
+	 *
+	 * If we didn't get full entropy from reliable sources, report
+	 * instead that we are running on fumes with best effort.  (If
+	 * we ever do get full entropy after that, print the ready
+	 * message once.)
+	 */
+	if (__predict_false(!ready)) {
+		if (E->bitsneeded == 0) {
+			printf("entropy: ready\n");
+			ready = true;
+		} else if (E->samplesneeded == 0 && !besteffort) {
+			printf("entropy: best effort\n");
+			besteffort = true;
+		}
+	}
 
 	/* Set the epoch; roll over from UINTMAX-1 to 1. */
 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
@@ -1238,7 +1342,7 @@ entropy_notify(void)
 	KASSERT(E->epoch != (unsigned)-1);
 
 	/* Notify waiters. */
-	if (E->stage >= ENTROPY_WARM) {
+	if (__predict_true(!cold)) {
 		cv_broadcast(&E->cv);
 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
 	}
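[The epoch bumped here is what cprng(9) consumers compare against to decide when to reseed; per the "roll over from UINTMAX-1 to 1" comment, the values 0 and (unsigned)-1 are reserved sentinels (never-yet-ready states), so the increment skips them. A small model of just the rollover rule -- the kernel's actual update also rate-limits when kern.entropy.depletion is enabled:

#include <stdio.h>

/* Increment the epoch, skipping the reserved sentinels 0 and -1. */
static unsigned
epoch_next(unsigned epoch)
{

	epoch++;
	if (epoch == 0 || epoch == (unsigned)-1)
		epoch = 1;
	return epoch;
}

int
main(void)
{

	printf("%u\n", epoch_next(5));			/* 6 */
	printf("%u\n", epoch_next((unsigned)-2));	/* wraps to 1 */
	printf("%u\n", epoch_next((unsigned)-1));	/* wraps to 1 */
	return 0;
}

A reader of entropy_epoch() can therefore treat any change of value, including wraparound, as "reseed now", and treat (unsigned)-1 as "full entropy has never been reached".]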
@@ -1264,7 +1368,8 @@ entropy_consolidate(void)
 	uint64_t ticket;
 	int error;
 
-	KASSERT(E->stage == ENTROPY_HOT);
+	KASSERT(!cold);
+	ASSERT_SLEEPABLE();
 
 	mutex_enter(&E->lock);
 	ticket = entropy_consolidate_evcnt.ev_count;
@@ -1294,8 +1399,6 @@ sysctl_entropy_consolidate(SYSCTLFN_ARGS
 	int arg = 0;
 	int error;
 
-	KASSERT(E->stage == ENTROPY_HOT);
-
 	node.sysctl_data = &arg;
 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
 	if (error || newp == NULL)
@@ -1320,8 +1423,6 @@ sysctl_entropy_gather(SYSCTLFN_ARGS)
 	int arg = 0;
 	int error;
 
-	KASSERT(E->stage == ENTROPY_HOT);
-
 	node.sysctl_data = &arg;
 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
 	if (error || newp == NULL)
@@ -1348,6 +1449,9 @@ sysctl_entropy_gather(SYSCTLFN_ARGS)
  *	provide backtracking resistance -- it must be combined with a
  *	PRNG/DRBG that does.
  *
+ *	This may be used very early at boot, before even entropy_init
+ *	has been called.
+ *
  *	You generally shouldn't use this directly -- use cprng(9)
  *	instead.
  *
@@ -1364,39 +1468,56 @@ sysctl_entropy_gather(SYSCTLFN_ARGS)
  *	EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
  *
  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
- *	ENTROPY_WAIT is not set, allowed also in softint context.
- *	Forbidden in hard interrupt context.
+ *	ENTROPY_WAIT is not set, allowed also in softint context -- may
+ *	sleep on an adaptive lock up to IPL_SOFTSERIAL.  Forbidden in
+ *	hard interrupt context.
 */
 int
 entropy_extract(void *buf, size_t len, int flags)
 {
 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 	static struct timeval lasttime;	/* serialized by E->lock */
-	int error;
+	bool printed = false;
+	int s = -1/*XXXGCC*/, error;
 
 	if (ISSET(flags, ENTROPY_WAIT)) {
 		ASSERT_SLEEPABLE();
-		KASSERTMSG(E->stage >= ENTROPY_WARM,
-		    "can't wait for entropy until warm");
+		KASSERT(!cold);
 	}
 
 	/* Refuse to operate in interrupt context. */
 	KASSERT(!cpu_intr_p());
 
-	/* Acquire the global lock to get at the global pool. */
-	if (E->stage >= ENTROPY_WARM)
+	/*
+	 * If we're cold, we are only contending with interrupts on the
+	 * current CPU, so block them.  Otherwise, we are _not_
+	 * contending with interrupts on the current CPU, but we are
+	 * contending with other threads, to exclude them with a mutex.
+	 */
+	if (__predict_false(cold))
+		s = splhigh();
+	else
 		mutex_enter(&E->lock);
 
 	/* Wait until there is enough entropy in the system. */
 	error = 0;
-	while (E->needed) {
+	if (E->bitsneeded > 0 && E->samplesneeded == 0) {
+		/*
+		 * We don't have full entropy from reliable sources,
+		 * but we gathered a plausible number of samples from
+		 * other sources such as timers.  Try asking for more
+		 * from any sources we can, but don't worry if it
+		 * fails -- best effort.
+		 */
+		(void)entropy_request(ENTROPY_CAPACITY, flags);
+	} else while (E->bitsneeded > 0 && E->samplesneeded > 0) {
 		/* Ask for more, synchronously if possible. */
 		error = entropy_request(len, flags);
 		if (error)
 			break;
 
 		/* If we got enough, we're done. */
-		if (E->needed == 0) {
+		if (E->bitsneeded == 0 || E->samplesneeded == 0) {
 			KASSERT(error == 0);
 			break;
 		}
@@ -1408,16 +1529,19 @@ entropy_extract(void *buf, size_t len, i
 		}
 
 		/* Wait for some entropy to come in and try again. */
-		KASSERT(E->stage >= ENTROPY_WARM);
-		printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
-		       curproc->p_pid, curproc->p_comm);
+		KASSERT(!cold);
+		if (!printed) {
+			printf("entropy: pid %d (%s) waiting for entropy(7)\n",
+			    curproc->p_pid, curproc->p_comm);
+			printed = true;
+		}
 
 		if (ISSET(flags, ENTROPY_SIG)) {
-			error = cv_wait_sig(&E->cv, &E->lock);
-			if (error)
+			error = cv_timedwait_sig(&E->cv, &E->lock, hz);
+			if (error && error != EWOULDBLOCK)
				break;
 		} else {
-			cv_wait(&E->cv, &E->lock);
+			cv_timedwait(&E->cv, &E->lock, hz);
 		}
 	}
 
@@ -1432,7 +1556,7 @@ entropy_extract(void *buf, size_t len, i
 	}
 
 	/*
-	 * Report a warning if we have never yet reached full entropy.
+	 * Report a warning if we haven't yet reached full entropy.
 	 * This is the only case where we consider entropy to be
 	 * `depleted' without kern.entropy.depletion enabled -- when we
 	 * only have partial entropy, an adversary may be able to
@@ -1440,13 +1564,22 @@ entropy_extract(void *buf, size_t len, i
 	 * possibilities; the output then enables them to confirm a
 	 * guess, reducing its entropy from the adversary's perspective
 	 * to zero.
+	 *
+	 * This should only happen if the operator has chosen to
+	 * consolidate, either through sysctl kern.entropy.consolidate
+	 * or by writing less than full entropy to /dev/random as root
+	 * (which /dev/random promises will immediately affect
+	 * subsequent output, for better or worse).
	 */
-	if (__predict_false(E->epoch == (unsigned)-1)) {
-		if (ratecheck(&lasttime, &interval))
+	if (E->bitsneeded > 0 && E->samplesneeded > 0) {
+		if (__predict_false(E->epoch == (unsigned)-1) &&
+		    ratecheck(&lasttime, &interval)) {
 			printf("WARNING:"
 			    " system needs entropy for security;"
 			    " see entropy(7)\n");
-		atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
+		}
+		atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
+		atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
 	}
 
 	/* Extract data from the pool, and `deplete' if we're doing that. */
@@ -1454,14 +1587,21 @@ entropy_extract(void *buf, size_t len, i
 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
 	    error == 0) {
 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
+		unsigned bitsneeded = E->bitsneeded;
+		unsigned samplesneeded = E->samplesneeded;
+
+		bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost);
+		samplesneeded += MIN(MINSAMPLES - samplesneeded, cost);
 
-		atomic_store_relaxed(&E->needed,
-		    E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
+		atomic_store_relaxed(&E->bitsneeded, bitsneeded);
+		atomic_store_relaxed(&E->samplesneeded, samplesneeded);
 		entropy_deplete_evcnt.ev_count++;
 	}
 
 out:	/* Release the global lock and return the error. */
-	if (E->stage >= ENTROPY_WARM)
+	if (__predict_false(cold))
+		splx(s);
+	else
 		mutex_exit(&E->lock);
 	return error;
 }
@@ -1477,7 +1617,7 @@ entropy_poll(int events)
 {
 	int revents = 0;
 
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 
 	/* Always ready for writing. */
 	revents |= events & (POLLOUT|POLLWRNORM);
@@ -1491,7 +1631,8 @@ entropy_poll(int events)
 	 * If we have reached full entropy and we're not depleting
 	 * entropy, we are forever ready.
 	 */
-	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
+	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 ||
+		atomic_load_relaxed(&E->samplesneeded) == 0) &&
 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
 		return revents | events;
 
@@ -1500,7 +1641,7 @@ entropy_poll(int events)
 	 * we don't, we're ready; if we do, add ourselves to the queue.
 	 */
 	mutex_enter(&E->lock);
-	if (E->needed == 0)
+	if (E->bitsneeded == 0 || E->samplesneeded == 0)
 		revents |= events;
 	else
 		selrecord(curlwp, &E->selq);
@@ -1519,7 +1660,7 @@ static void
 filt_entropy_read_detach(struct knote *kn)
 {
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 
 	mutex_enter(&E->lock);
 	selremove_knote(&E->selq, kn);
@@ -1538,7 +1679,7 @@ filt_entropy_read_event(struct knote *kn
 {
 	int ret;
 
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 
 	/* Acquire the lock, if caller is outside entropy subsystem. */
 	if (hint == NOTE_SUBMIT)
@@ -1550,7 +1691,7 @@ filt_entropy_read_event(struct knote *kn
 	 * If we still need entropy, can't read anything; if not, can
 	 * read arbitrarily much.
 	 */
-	if (E->needed != 0) {
+	if (E->bitsneeded != 0 && E->samplesneeded != 0) {
 		ret = 0;
 	} else {
 		if (atomic_load_relaxed(&entropy_depletion))
@@ -1587,7 +1728,7 @@ int
 entropy_kqfilter(struct knote *kn)
 {
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 
 	switch (kn->kn_filter) {
 	case EVFILT_READ:
@@ -1663,15 +1804,15 @@ rnd_attach_source(struct krndsource *rs,
 	rs->total = 0;
 	rs->type = type;
 	rs->flags = flags;
-	if (E->stage >= ENTROPY_WARM)
+	if (entropy_percpu != NULL)
 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
 	extra[i++] = entropy_timer();
 
 	/* Wire it into the global list of random sources. */
-	if (E->stage >= ENTROPY_WARM)
+	if (__predict_true(!cold))
 		mutex_enter(&E->lock);
 	LIST_INSERT_HEAD(&E->sources, rs, list);
-	if (E->stage >= ENTROPY_WARM)
+	if (__predict_true(!cold))
 		mutex_exit(&E->lock);
 	extra[i++] = entropy_timer();
 
@@ -1682,7 +1823,7 @@ rnd_attach_source(struct krndsource *rs,
 	/* Mix the extra into the pool. */
 	KASSERT(i == __arraycount(extra));
-	entropy_enter(extra, sizeof extra, 0);
+	entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold));
 	explicit_memset(extra, 0, sizeof extra);
 }
 
@@ -1700,7 +1841,7 @@ rnd_detach_source(struct krndsource *rs)
 	/*
 	 * If we're cold (shouldn't happen, but hey), just remove it
 	 * from the list -- there's nothing allocated.
 	 */
-	if (E->stage == ENTROPY_COLD) {
+	if (__predict_false(cold) && entropy_percpu == NULL) {
 		LIST_REMOVE(rs, list);
 		return;
 	}
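[For context on the rnd_attach_source/rnd_detach_source changes above: a device driver registers a random source once at attach time and unregisters at detach. A hypothetical driver sketch -- example_softc and the driver names are invented for illustration; the rnd(9) calls themselves are the real API:

#include <sys/rndsource.h>

struct example_softc {
	struct krndsource sc_rndsource;
};

static void
example_attach(struct example_softc *sc, const char *name)
{

	/* Register with rnd(4) so rndctl(8) can list and tune it. */
	rnd_attach_source(&sc->sc_rndsource, name, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);
}

static int
example_detach(struct example_softc *sc)
{

	rnd_detach_source(&sc->sc_rndsource);
	return 0;
}

With this change, attaching very early at boot is tolerated: the percpu state is allocated only once entropy_percpu exists, and the source list is manipulated without the mutex while still cold.]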
@@ -1727,6 +1868,8 @@ rnd_detach_source(struct krndsource *rs)
  *	rnd_unlock_sources even while the caller releases the global
  *	entropy lock.
  *
+ *	May be called very early at boot, before entropy_init.
+ *
  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
  *	If flags & ENTROPY_SIG, allow interruption by signal.
 */
@@ -1735,10 +1878,11 @@
 rnd_lock_sources(int flags)
 {
 	int error;
 
-	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
+	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
+	KASSERT(!cpu_intr_p());
 
 	while (E->sourcelock) {
-		KASSERT(E->stage >= ENTROPY_WARM);
+		KASSERT(!cold);
 		if (!ISSET(flags, ENTROPY_WAIT))
 			return EWOULDBLOCK;
 		if (ISSET(flags, ENTROPY_SIG)) {
@@ -1759,17 +1903,20 @@ rnd_lock_sources(int flags)
  *
  *	Unlock the list of sources after rnd_lock_sources.  Caller must
  *	hold the global entropy lock.
+ *
+ *	May be called very early at boot, before entropy_init.
 */
 static void
 rnd_unlock_sources(void)
 {
-	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
+	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
+	KASSERT(!cpu_intr_p());
 
 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
 	    curlwp, E->sourcelock);
 	E->sourcelock = NULL;
-	if (E->stage >= ENTROPY_WARM)
+	if (__predict_true(!cold))
 		cv_signal(&E->sourcelock_cv);
 }
 
@@ -1778,6 +1925,8 @@ rnd_unlock_sources(void)
  *
  *	True if we hold the list of rndsources locked, for diagnostic
  *	assertions.
+ *
+ *	May be called very early at boot, before entropy_init.
 */
 static bool __diagused
 rnd_sources_locked(void)
@@ -1793,6 +1942,8 @@ rnd_sources_locked(void)
  *	OK if we overdo it.  Caller must hold the global entropy lock;
  *	will release and re-acquire it.
  *
+ *	May be called very early at boot, before entropy_init.
+ *
  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
  *	If flags & ENTROPY_SIG, allow interruption by signal.
 */
@@ -1802,8 +1953,9 @@ entropy_request(size_t nbytes, int flags
 	struct krndsource *rs;
 	int error;
 
-	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
-	if (flags & ENTROPY_WAIT)
+	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
+	KASSERT(!cpu_intr_p());
+	if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold))
		ASSERT_SLEEPABLE();
 
 	/*
@@ -1833,10 +1985,10 @@ entropy_request(size_t nbytes, int flags
 			continue;
 
 		/* Drop the lock while we call the callback. */
-		if (E->stage >= ENTROPY_WARM)
+		if (__predict_true(!cold))
 			mutex_exit(&E->lock);
 		(*rs->get)(nbytes, rs->getarg);
-		if (E->stage >= ENTROPY_WARM)
+		if (__predict_true(!cold))
 			mutex_enter(&E->lock);
 	}
 
@@ -1845,35 +1997,98 @@ entropy_request(size_t nbytes, int flags
 	return 0;
 }
 
+static inline uint32_t
+rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
+{
+	int32_t delta2, delta3;
+
+	/*
+	 * Calculate the second and third order differentials
+	 */
+	delta2 = d->dx - delta;
+	if (delta2 < 0)
+		delta2 = -delta2; /* XXX arithmetic overflow */
+
+	delta3 = d->d2x - delta2;
+	if (delta3 < 0)
+		delta3 = -delta3; /* XXX arithmetic overflow */
+
+	d->x = v;
+	d->dx = delta;
+	d->d2x = delta2;
+
+	/*
+	 * If any delta is 0, we got no entropy.  If all are non-zero, we
+	 * might have something.
+	 */
+	if (delta == 0 || delta2 == 0 || delta3 == 0)
+		return 0;
+
+	return 1;
+}
+
+static inline uint32_t
+rnd_dt_estimate(struct krndsource *rs, uint32_t t)
+{
+	int32_t delta;
+	uint32_t ret;
+	rnd_delta_t *d;
+	struct rndsource_cpu *rc;
+
+	rc = percpu_getref(rs->state);
+	d = &rc->rc_timedelta;
+
+	if (t < d->x) {
+		delta = UINT32_MAX - d->x + t;
+	} else {
+		delta = d->x - t;
+	}
+
+	if (delta < 0) {
+		delta = -delta; /* XXX arithmetic overflow */
+	}
+
+	ret = rnd_delta_estimate(d, t, delta);
+
+	KASSERT(d->x == t);
+	KASSERT(d->dx == delta);
+	percpu_putref(rs->state);
+	return ret;
+}
+
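[The reinstated estimator above credits a timing sample only when the first, second, and third order deltas are all nonzero, so a perfectly regular clock quickly stops earning credit. A simplified userland model of the same decision logic (types narrowed from rnd_delta_t for brevity; the wraparound handling in rnd_dt_estimate is omitted):

#include <stdint.h>
#include <stdio.h>

struct delta {
	int32_t x, dx, d2x;
};

static uint32_t
delta_estimate(struct delta *d, int32_t v, int32_t delta)
{
	int32_t delta2, delta3;

	delta2 = d->dx - delta;
	if (delta2 < 0)
		delta2 = -delta2;
	delta3 = d->d2x - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	d->x = v;
	d->dx = delta;
	d->d2x = delta2;

	/* Credit one sample only if all three deltas are nonzero. */
	return (delta && delta2 && delta3) ? 1 : 0;
}

int
main(void)
{
	struct delta d = { 0, 0, 0 };
	const int32_t periodic[] = { 100, 200, 300, 400 };
	const int32_t jittery[] = { 100, 217, 305, 598 };

	for (int i = 1; i < 4; i++)	/* prints 1, then 0, 0 */
		printf("periodic: %u\n", delta_estimate(&d,
		    periodic[i], periodic[i] - periodic[i - 1]));
	d = (struct delta){ 0, 0, 0 };
	for (int i = 1; i < 4; i++)	/* prints 1, 1, 1 */
		printf("jittery: %u\n", delta_estimate(&d,
		    jittery[i], jittery[i] - jittery[i - 1]));
	return 0;
}

The periodic sequence has constant first deltas, so the second-order delta collapses to zero and the samples stop counting toward MINSAMPLES; irregular timings keep all three orders nonzero and keep counting.]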
 
 /*
  * rnd_add_uint32(rs, value)
  *
  *	Enter 32 bits of data from an entropy source into the pool.
  *
- *	If rs is NULL, may not be called from interrupt context.
+ *	May be called from any context or with spin locks held, but may
+ *	drop data.
  *
- *	If rs is non-NULL, may be called from any context.  May drop
- *	data if called from interrupt context.
+ *	This is meant for cheaply taking samples from devices that
+ *	aren't designed to be hardware random number generators.
 */
 void
 rnd_add_uint32(struct krndsource *rs, uint32_t value)
 {
+	bool intr_p = true;
 
-	rnd_add_data(rs, &value, sizeof value, 0);
+	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
 }
 
 void
 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
 {
+	bool intr_p = true;
 
-	rnd_add_data(rs, &value, sizeof value, 0);
+	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
 }
 
 void
 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
 {
+	bool intr_p = true;
 
-	rnd_add_data(rs, &value, sizeof value, 0);
+	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
 }
 
 /*
@@ -1884,32 +2099,87 @@ _rnd_add_uint64(struct krndsource *rs, u
  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
  *	estimate and treat it as zero.
  *
- *	If rs is NULL, may not be called from interrupt context.
+ *	rs MAY but SHOULD NOT be NULL.  If rs is NULL, MUST NOT be
+ *	called from interrupt context or with spin locks held.
  *
- *	If rs is non-NULL, may be called from any context.  May drop
- *	data if called from interrupt context.
+ *	If rs is non-NULL, MAY but SHOULD NOT be called from interrupt
+ *	context, in which case act like rnd_add_data_intr -- if the
+ *	sample buffer is full, schedule a softint and drop any
+ *	additional data on the floor.  (This may change later once we
+ *	fix drivers that still call this from interrupt context to use
+ *	rnd_add_data_intr instead.)  MUST NOT be called with spin locks
+ *	held if not in hard interrupt context -- i.e., MUST NOT be
+ *	called in thread context or softint context with spin locks
+ *	held.
 */
 void
 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
     uint32_t entropybits)
 {
-	uint32_t extra;
-	uint32_t flags;
+	bool intr_p = cpu_intr_p();	/* XXX make this unconditionally false */
 
-	KASSERTMSG(howmany(entropybits, NBBY) <= len,
-	    "%s: impossible entropy rate:"
-	    " %"PRIu32" bits in %"PRIu32"-byte string",
-	    rs ? rs->name : "(anonymous)", entropybits, len);
-
-	/* If there's no rndsource, just enter the data and time now. */
+	/*
+	 * Weird legacy exception that we should rip out and replace by
+	 * creating new rndsources to attribute entropy to the callers:
+	 * If there's no rndsource, just enter the data and time now.
+	 */
 	if (rs == NULL) {
-		entropy_enter(buf, len, entropybits);
+		uint32_t extra;
+
+		KASSERT(!intr_p);
+		KASSERTMSG(howmany(entropybits, NBBY) <= len,
+		    "%s: impossible entropy rate:"
+		    " %"PRIu32" bits in %"PRIu32"-byte string",
+		    rs ? rs->name : "(anonymous)", entropybits, len);
+		entropy_enter(buf, len, entropybits, /*count*/false);
 		extra = entropy_timer();
-		entropy_enter(&extra, sizeof extra, 0);
+		entropy_enter(&extra, sizeof extra, 0, /*count*/false);
 		explicit_memset(&extra, 0, sizeof extra);
 		return;
 	}
+
+	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
+}
+
+/*
+ * rnd_add_data_intr(rs, buf, len, entropybits)
+ *
+ *	Try to enter data from an entropy source into the pool, with a
+ *	driver's estimate of how much entropy the physical source of
+ *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
+ *	estimate and treat it as zero.  If the sample buffer is full,
+ *	schedule a softint and drop any additional data on the floor.
+ */
+void
+rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len,
+    uint32_t entropybits)
+{
+	bool intr_p = true;
+
+	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
+}
+
+/*
+ * rnd_add_data_internal(rs, buf, len, entropybits, intr_p)
+ *
+ *	Internal subroutine to decide whether or not to enter data or
+ *	timing for a particular rndsource, and if so, to enter it.
+ *
+ *	intr_p is true for callers from interrupt context or spin locks
+ *	held, and false for callers from thread or soft interrupt
+ *	context and no spin locks held.
+ */
+static void
+rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len,
+    uint32_t entropybits, bool intr_p)
+{
+	uint32_t flags;
+
+	KASSERTMSG(howmany(entropybits, NBBY) <= len,
+	    "%s: impossible entropy rate:"
+	    " %"PRIu32" bits in %"PRIu32"-byte string",
+	    rs ? rs->name : "(anonymous)", entropybits, len);
+
 	/*
 	 * Hold up the reset xcall before it zeroes the entropy counts
 	 * on this CPU or globally.  Otherwise, we might leave some
@@ -1937,15 +2207,28 @@ rnd_add_data(struct krndsource *rs, cons
 		entropybits = 0;
 
 	/* If we are collecting data, enter them. */
-	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
-		rnd_add_data_1(rs, buf, len, entropybits,
-		    RND_FLAG_COLLECT_VALUE);
+	if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) {
+		rnd_add_data_1(rs, buf, len, entropybits, /*count*/false,
+		    RND_FLAG_COLLECT_VALUE, intr_p);
+	}
 
 	/* If we are collecting timings, enter one. */
 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
+		uint32_t extra;
+		bool count;
+
+		/* Sample a timer. */
 		extra = entropy_timer();
-		rnd_add_data_1(rs, &extra, sizeof extra, 0,
-		    RND_FLAG_COLLECT_TIME);
+
+		/* If asked, do entropy estimation on the time. */
+		if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) ==
+		    RND_FLAG_ESTIMATE_TIME && __predict_true(!cold))
+			count = rnd_dt_estimate(rs, extra);
+		else
+			count = false;
+
+		rnd_add_data_1(rs, &extra, sizeof extra, 0, count,
+		    RND_FLAG_COLLECT_TIME, intr_p);
 	}
 
 out:	/* Allow concurrent changes to flags to finish. */
@@ -1961,7 +2244,7 @@ add_sat(unsigned a, unsigned b)
 }
 
 /*
- * rnd_add_data_1(rs, buf, len, entropybits, flag)
+ * rnd_add_data_1(rs, buf, len, entropybits, count, flag)
  *
  *	Internal subroutine to call either entropy_enter_intr, if we're
  *	in interrupt context, or entropy_enter if not, and to count the
@@ -1969,19 +2252,19 @@ add_sat(unsigned a, unsigned b)
 */
 static void
 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
-    uint32_t entropybits, uint32_t flag)
+    uint32_t entropybits, bool count, uint32_t flag, bool intr_p)
 {
 	bool fullyused;
 
 	/*
-	 * If we're in interrupt context, use entropy_enter_intr and
-	 * take note of whether it consumed the full sample; if not,
-	 * use entropy_enter, which always consumes the full sample.
+	 * For the interrupt-like path, use entropy_enter_intr and take
+	 * note of whether it consumed the full sample; otherwise, use
+	 * entropy_enter, which always consumes the full sample.
	 */
-	if (curlwp && cpu_intr_p()) {
-		fullyused = entropy_enter_intr(buf, len, entropybits);
+	if (intr_p) {
+		fullyused = entropy_enter_intr(buf, len, entropybits, count);
 	} else {
-		entropy_enter(buf, len, entropybits);
+		entropy_enter(buf, len, entropybits, count);
 		fullyused = true;
 	}
 
@@ -1990,7 +2273,8 @@ rnd_add_data_1(struct krndsource *rs, co
 	 *	contributed from this source.
	 */
 	if (fullyused) {
-		if (__predict_false(E->stage == ENTROPY_COLD)) {
+		if (__predict_false(cold)) {
+			const int s = splhigh();
 			rs->total = add_sat(rs->total, entropybits);
 			switch (flag) {
 			case RND_FLAG_COLLECT_TIME:
@@ -2002,6 +2286,7 @@ rnd_add_data_1(struct krndsource *rs, co
 				    add_sat(rs->value_delta.insamples, 1);
 				break;
 			}
+			splx(s);
 		} else {
 			struct rndsource_cpu *rc = percpu_getref(rs->state);
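[Usage note for driver authors, continuing the hypothetical example_softc sketch from earlier: code that runs in hard interrupt context, or with a spin lock held, should now call rnd_add_data_intr rather than rnd_add_data. The device timestamp read here is invented for illustration; the rnd(9) call is the real interface:

static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;
	uint32_t stamp;

	stamp = example_read_timestamp(sc);	/* hypothetical register read */

	/*
	 * Claim zero bits of entropy for the value itself; if the
	 * source was attached with time-delta estimation enabled, the
	 * subsystem decides whether the timing sample counts.
	 */
	rnd_add_data_intr(&sc->sc_rndsource, &stamp, sizeof stamp, 0);
	return 1;	/* interrupt handled */
}

If the per-CPU sample buffer is full, the call schedules a softint and drops the extra data on the floor, which is the intended trade-off for never blocking in interrupt context.]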
@@ -2048,7 +2333,7 @@ rndsource_entropybits(struct krndsource
 {
 	unsigned nbits = rs->total;
 
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 	KASSERT(rnd_sources_locked());
 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
 	return nbits;
@@ -2074,7 +2359,7 @@ static void
 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
 {
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 	KASSERT(rnd_sources_locked());
 
 	/* Avoid kernel memory disclosure. */
@@ -2097,7 +2382,7 @@ static void
 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
 {
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 	KASSERT(rnd_sources_locked());
 
 	/* Avoid kernel memory disclosure. */
@@ -2143,7 +2428,8 @@ entropy_reset_xc(void *arg1 __unused, vo
 	 * enter a cycle count for fun.
	 */
 	ec = entropy_cpu_get(&lock);
-	ec->ec_pending = 0;
+	ec->ec_bitspending = 0;
+	ec->ec_samplespending = 0;
 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
 	entropy_cpu_put(&lock, ec);
 }
@@ -2160,7 +2446,7 @@ entropy_ioctl(unsigned long cmd, void *d
 	bool privileged;
 	int error;
 
-	KASSERT(E->stage >= ENTROPY_WARM);
+	KASSERT(!cold);
 
 	/* Verify user's authorization to perform the ioctl. */
 	switch (cmd) {
@@ -2220,7 +2506,7 @@ entropy_ioctl(unsigned long cmd, void *d
 		uint32_t *countp = data;
 
 		mutex_enter(&E->lock);
-		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
+		*countp = MINENTROPYBITS - E->bitsneeded;
 		mutex_exit(&E->lock);
 
 		break;
@@ -2232,15 +2518,22 @@ entropy_ioctl(unsigned long cmd, void *d
 
 		/* parameters */
 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
-		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
+		pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */
 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
 
 		/* state */
 		pstat->added = 0; /* XXX total entropy_enter count */
-		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
+		pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */
 		pstat->removed = 0; /* XXX total entropy_extract count */
 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
-		pstat->generated = 0; /* XXX bits of data...fabricated? */
+
+		/*
+		 * This used to be bits of data fabricated in some
+		 * sense; we'll take it to mean number of samples,
+		 * excluding the bits of entropy from HWRNG or seed.
+		 */
+		pstat->generated = MINSAMPLES - E->samplesneeded;
+		pstat->generated -= MIN(pstat->generated, pstat->curentropy);
 
 		mutex_exit(&E->lock);
 		break;
@@ -2422,9 +2715,10 @@ entropy_ioctl(unsigned long cmd, void *d
 		if (reset) {
 			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
 			mutex_enter(&E->lock);
-			E->pending = 0;
-			atomic_store_relaxed(&E->needed,
-			    ENTROPY_CAPACITY*NBBY);
+			E->bitspending = 0;
+			E->samplespending = 0;
+			atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
+			atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
 			E->consolidate = false;
 			mutex_exit(&E->lock);
 		}
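[The RNDGETPOOLSTAT numbers changed above are visible from userland through the rnd(4) ioctl interface. A sketch of reading them -- this assumes a NetBSD system and that /dev/urandom accepts the rnd(4) ioctls, as rndctl(8)-style tools do; check rnd(4) on your release before relying on it:

#include <sys/ioctl.h>
#include <sys/rndio.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	rndpoolstat_t rp;
	int fd;

	if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
		err(1, "open");
	if (ioctl(fd, RNDGETPOOLSTAT, &rp) == -1)
		err(1, "RNDGETPOOLSTAT");

	/* curentropy counts confident bits; generated counts samples. */
	printf("curentropy=%u/%u threshold=%u generated=%u\n",
	    rp.curentropy, rp.maxentropy, rp.threshold, rp.generated);
	close(fd);
	return 0;
}

With this commit, curentropy reflects only confident sources (seed, HWRNG), while generated approximates the count of unconfident-but-plausible timing samples, matching the split between bitsneeded and samplesneeded in the kernel.]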
Index: src/sys/kern/subr_cprng.c
diff -u src/sys/kern/subr_cprng.c:1.43 src/sys/kern/subr_cprng.c:1.43.4.1
--- src/sys/kern/subr_cprng.c:1.43	Fri May 13 09:40:25 2022
+++ src/sys/kern/subr_cprng.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_cprng.c,v 1.43 2022/05/13 09:40:25 riastradh Exp $	*/
+/*	$NetBSD: subr_cprng.c,v 1.43.4.1 2023/08/11 14:35:25 martin Exp $	*/
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -52,7 +52,7 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.43 2022/05/13 09:40:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.43.4.1 2023/08/11 14:35:25 martin Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -265,10 +265,39 @@ cprng_fini_cpu(void *ptr, void *cookie,
 	kmem_free(cc->cc_drbg, sizeof(*cc->cc_drbg));
 }
 
+static void
+cprng_strong_reseed(struct cprng_strong *cprng, unsigned epoch,
+    struct cprng_cpu **ccp, int *sp)
+{
+	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES];
+
+	/*
+	 * Drop everything to extract a fresh seed from the entropy
+	 * pool.  entropy_extract may sleep on an adaptive lock, which
+	 * invalidates our percpu(9) reference.
+	 *
+	 * This may race with reseeding in another thread, which is no
+	 * big deal -- worst case, we rewind the entropy epoch here and
+	 * cause the next caller to reseed again, and in the end we
+	 * just reseed a couple more times than necessary.
+	 */
+	splx(*sp);
+	percpu_putref(cprng->cs_percpu);
+	entropy_extract(seed, sizeof seed, 0);
+	*ccp = percpu_getref(cprng->cs_percpu);
+	*sp = splraiseipl(cprng->cs_iplcookie);
+
+	(*ccp)->cc_evcnt->reseed.ev_count++;
+	if (__predict_false(nist_hash_drbg_reseed((*ccp)->cc_drbg,
+		    seed, sizeof seed, NULL, 0)))
+		panic("nist_hash_drbg_reseed");
+	explicit_memset(seed, 0, sizeof seed);
+	(*ccp)->cc_epoch = epoch;
+}
+
 size_t
 cprng_strong(struct cprng_strong *cprng, void *buf, size_t len, int flags)
 {
-	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES];
 	struct cprng_cpu *cc;
 	unsigned epoch;
 	int s;
@@ -293,25 +322,13 @@ cprng_strong(struct cprng_strong *cprng,
 
 	/* If the entropy epoch has changed, (re)seed. */
 	epoch = entropy_epoch();
-	if (__predict_false(epoch != cc->cc_epoch)) {
-		entropy_extract(seed, sizeof seed, 0);
-		cc->cc_evcnt->reseed.ev_count++;
-		if (__predict_false(nist_hash_drbg_reseed(cc->cc_drbg,
-			    seed, sizeof seed, NULL, 0)))
-			panic("nist_hash_drbg_reseed");
-		explicit_memset(seed, 0, sizeof seed);
-		cc->cc_epoch = epoch;
-	}
+	if (__predict_false(epoch != cc->cc_epoch))
+		cprng_strong_reseed(cprng, epoch, &cc, &s);
 
 	/* Generate data.  Failure here means it's time to reseed. */
 	if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg, buf, len,
 		    NULL, 0))) {
-		entropy_extract(seed, sizeof seed, 0);
-		cc->cc_evcnt->reseed.ev_count++;
-		if (__predict_false(nist_hash_drbg_reseed(cc->cc_drbg,
-			    seed, sizeof seed, NULL, 0)))
-			panic("nist_hash_drbg_reseed");
-		explicit_memset(seed, 0, sizeof seed);
+		cprng_strong_reseed(cprng, epoch, &cc, &s);
 		if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg,
 			    buf, len, NULL, 0)))
 			panic("nist_hash_drbg_generate");
Index: src/sys/kern/subr_prf.c
diff -u src/sys/kern/subr_prf.c:1.196.2.1 src/sys/kern/subr_prf.c:1.196.2.2
--- src/sys/kern/subr_prf.c:1.196.2.1	Wed Feb  8 18:24:52 2023
+++ src/sys/kern/subr_prf.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_prf.c,v 1.196.2.1 2023/02/08 18:24:52 martin Exp $	*/
+/*	$NetBSD: subr_prf.c,v 1.196.2.2 2023/08/11 14:35:25 martin Exp $	*/
 
 /*-
  * Copyright (c) 1986, 1988, 1991, 1993
@@ -37,7 +37,7 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_prf.c,v 1.196.2.1 2023/02/08 18:24:52 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_prf.c,v 1.196.2.2 2023/08/11 14:35:25 martin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -520,7 +520,7 @@ putchar(int c, int flags, struct tty *tp
 #ifdef RND_PRINTF
 	if (__predict_true(kprintf_inited)) {
 		unsigned char ch = c;
-		rnd_add_data(&rnd_printf_source, &ch, 1, 0);
+		rnd_add_data_intr(&rnd_printf_source, &ch, 1, 0);
 	}
 #endif
 }
@@ -1622,7 +1622,7 @@ done:
 
 #ifdef RND_PRINTF
 	if (__predict_true(kprintf_inited))
-		rnd_add_data(&rnd_printf_source, NULL, 0, 0);
+		rnd_add_data_intr(&rnd_printf_source, NULL, 0, 0);
 #endif
 	return ret;
 }
Index: src/sys/sys/rndio.h
diff -u src/sys/sys/rndio.h:1.2 src/sys/sys/rndio.h:1.2.50.1
--- src/sys/sys/rndio.h:1.2	Sun Sep  6 06:01:02 2015
+++ src/sys/sys/rndio.h	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: rndio.h,v 1.2 2015/09/06 06:01:02 dholland Exp $	*/
+/*	$NetBSD: rndio.h,v 1.2.50.1 2023/08/11 14:35:25 martin Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -50,8 +50,7 @@ typedef struct {
 } rndsave_t;
 
 /* Statistics exposed by RNDGETPOOLSTAT */
-typedef struct
-{
+typedef struct {
 	uint32_t poolsize;
 	uint32_t threshold;
 	uint32_t maxentropy;
Index: src/sys/sys/rndsource.h
diff -u src/sys/sys/rndsource.h:1.7 src/sys/sys/rndsource.h:1.7.20.1
--- src/sys/sys/rndsource.h:1.7	Thu Apr 30 03:28:19 2020
+++ src/sys/sys/rndsource.h	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: rndsource.h,v 1.7 2020/04/30 03:28:19 riastradh Exp $	*/
+/*	$NetBSD: rndsource.h,v 1.7.20.1 2023/08/11 14:35:25 martin Exp $	*/
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -46,7 +46,7 @@ struct percpu;
 /*
  * struct rnd_delta_estimator
  *
- *	Unused.  Preserved for ABI compatibility.
+ *	State for the time-delta entropy estimation model.
 */
 typedef struct rnd_delta_estimator {
	uint64_t	x;
@@ -67,14 +67,14 @@ typedef struct rnd_delta_estimator {
 */
 struct krndsource {
	LIST_ENTRY(krndsource) list;	/* the linked list */
-	char		name[16];	/* device name */
-	rnd_delta_t	time_delta;	/* unused */
-	rnd_delta_t	value_delta;	/* unused */
-	uint32_t	total;		/* number of bits added while cold */
-	uint32_t	type;		/* type, RND_TYPE_* */
-	uint32_t	flags;		/* flags, RND_FLAG_* */
-	void		*state;		/* percpu (struct rndsource_cpu *) */
-	size_t		test_cnt;	/* unused */
+	char		name[16];	/* device name */
+	rnd_delta_t	time_delta;	/* time samples added while cold */
+	rnd_delta_t	value_delta;	/* value samples added while cold */
+	uint32_t	total;		/* number of bits added while cold */
+	uint32_t	type;		/* type, RND_TYPE_* */
+	uint32_t	flags;		/* flags, RND_FLAG_* */
+	void		*state;		/* percpu (struct rndsource_cpu *) */
+	size_t		test_cnt;	/* unused */
	void		(*get)(size_t, void *);	/* pool wants N bytes (badly) */
	void		*getarg;	/* argument to get-function */
	void		(*enable)(struct krndsource *, bool); /* unused */
@@ -94,6 +94,8 @@ void _rnd_add_uint64(struct krndsource *
 void rnd_add_uint32(struct krndsource *, uint32_t);
 void rnd_add_data(struct krndsource *, const void *, uint32_t,
    uint32_t);
+void rnd_add_data_intr(struct krndsource *, const void *, uint32_t,
+    uint32_t);
 void rnd_add_data_sync(struct krndsource *, const void *, uint32_t,
    uint32_t);
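[The get callback recorded in struct krndsource above ("pool wants N bytes (badly)") is how entropy_request reaches a driver when the pool runs low. A sketch of a HWRNG-style callback, continuing the hypothetical example_softc: example_read_rng is invented, rnd_add_data_sync is the real synchronous feed-back API, and the callback would typically be registered with rndsource_setcb(9) before attaching with RND_FLAG_HASCB:

static void
example_get(size_t nbytes, void *arg)
{
	struct example_softc *sc = arg;
	uint32_t v;

	while (nbytes) {
		if (!example_read_rng(sc, &v))	/* hypothetical HW read */
			break;
		/* Claim full entropy, as for a true HWRNG output. */
		rnd_add_data_sync(&sc->sc_rndsource, &v, sizeof v,
		    sizeof(v) * NBBY);
		nbytes -= MIN(nbytes, sizeof v);
	}
}

Sources like this are the "confident" ones (HWRNG, seed) that, after this commit, are the only contributors to the security-warning accounting, while timing samples only satisfy the best-effort sample count.]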
Index: src/tests/lib/libc/sys/t_getrandom.c
diff -u src/tests/lib/libc/sys/t_getrandom.c:1.4 src/tests/lib/libc/sys/t_getrandom.c:1.4.2.1
--- src/tests/lib/libc/sys/t_getrandom.c:1.4	Fri Mar 18 23:35:37 2022
+++ src/tests/lib/libc/sys/t_getrandom.c	Fri Aug 11 14:35:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: t_getrandom.c,v 1.4 2022/03/18 23:35:37 riastradh Exp $	*/
+/*	$NetBSD: t_getrandom.c,v 1.4.2.1 2023/08/11 14:35:25 martin Exp $	*/
 
 /*-
  * Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
 */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: t_getrandom.c,v 1.4 2022/03/18 23:35:37 riastradh Exp $");
+__RCSID("$NetBSD: t_getrandom.c,v 1.4.2.1 2023/08/11 14:35:25 martin Exp $");
 
 #include <sys/param.h>
 
@@ -94,7 +94,7 @@ ATF_TC_BODY(getrandom_default, tc)
 		ATF_CHECK(n >= (ssize_t)MIN(256, sizeof buf));
 		ATF_CHECK((size_t)n <= sizeof buf);
 		ATF_CHECK(memcmp(buf, zero24, 24) != 0);
-		ATF_CHECK(memcmp(buf + sizeof buf - 24, zero24, 24) != 0);
+		ATF_CHECK(memcmp(buf + n - 24, zero24, 24) != 0);
 	}
 	alarm(0);
 }
@@ -117,7 +117,7 @@ ATF_TC_BODY(getrandom_nonblock, tc)
 		ATF_CHECK(n >= (ssize_t)MIN(256, sizeof buf));
 		ATF_CHECK((size_t)n <= sizeof buf);
 		ATF_CHECK(memcmp(buf, zero24, 24) != 0);
-		ATF_CHECK(memcmp(buf + sizeof buf - 24, zero24, 24) != 0);
+		ATF_CHECK(memcmp(buf + n - 24, zero24, 24) != 0);
 	}
 }
 
@@ -137,7 +137,7 @@ ATF_TC_BODY(getrandom_insecure, tc)
 	ATF_CHECK(n >= (ssize_t)MIN(256, sizeof buf));
 	ATF_CHECK((size_t)n <= sizeof buf);
 	ATF_CHECK(memcmp(buf, zero24, 24) != 0);
-	ATF_CHECK(memcmp(buf + sizeof buf - 24, zero24, 24) != 0);
+	ATF_CHECK(memcmp(buf + n - 24, zero24, 24) != 0);
 }
 
 ATF_TC(getrandom_insecure_nonblock);
@@ -157,7 +157,7 @@ ATF_TC_BODY(getrandom_insecure_nonblock,
 	ATF_CHECK(n >= (ssize_t)MIN(256, sizeof buf));
 	ATF_CHECK((size_t)n <= sizeof buf);
 	ATF_CHECK(memcmp(buf, zero24, 24) != 0);
-	ATF_CHECK(memcmp(buf + sizeof buf - 24, zero24, 24) != 0);
+	ATF_CHECK(memcmp(buf + n - 24, zero24, 24) != 0);
 }
 
 ATF_TC(getrandom_random);
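[Rationale for the test fix above: getrandom(2) may return fewer bytes than requested, so only the first n bytes of the buffer are defined; comparing at buf + sizeof(buf) - 24 could examine bytes getrandom never wrote. The corrected checks look at the last 24 bytes of the data actually returned. Minimal correct usage of the interface:

#include <sys/random.h>

#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char buf[4096];
	ssize_t n;

	memset(buf, 0, sizeof buf);
	n = getrandom(buf, sizeof buf, 0);
	if (n == -1)
		err(1, "getrandom");

	/* Only buf[0..n-1] is meaningful; n may be < sizeof buf. */
	if (n > 0)
		printf("got %zd bytes; last filled byte = %02x\n",
		    n, buf[n - 1]);
	return 0;
}

Callers that need exactly len bytes must loop on short returns; the ATF tests deliberately accept any n of at least MIN(256, sizeof buf), which is what getrandom guarantees.]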