/*
 * Non-physical true random number generator based on timing jitter --
 * Jitter RNG standalone code.
 *
 * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023
 *
 * Design
 * ======
 *
 * See https://www.chronox.de/jent.html
 *
 * License
 * =======
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL2 are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * This Jitterentropy RNG is based on the jitterentropy library
 * version 3.4.0 provided at https://www.chronox.de/jent.html
 */

#ifdef __OPTIMIZE__
#error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c."
#endif

typedef unsigned long long	__u64;
typedef long long		__s64;
typedef unsigned int		__u32;
typedef unsigned char		u8;
#define NULL    ((void *) 0)

/* The entropy pool */
struct rand_data {
	/* SHA3-256 is used as conditioner */
#define DATA_SIZE_BITS 256
	/* all data values that are vital to maintain the security
	 * of the RNG are marked as SENSITIVE. A user must not
	 * access that information while the RNG executes its loops to
	 * calculate the next random value. */
	void *hash_state;		/* SENSITIVE hash state entropy pool */
	__u64 prev_time;		/* SENSITIVE Previous time stamp */
	__u64 last_delta;		/* SENSITIVE stuck test */
	__s64 last_delta2;		/* SENSITIVE stuck test */
	unsigned int osr;		/* Oversample rate */
#define JENT_MEMORY_BLOCKS 64
#define JENT_MEMORY_BLOCKSIZE 32
#define JENT_MEMORY_ACCESSLOOPS 128
#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE)
	unsigned char *mem;	/* Memory access location with size of
				 * memblocks * memblocksize */
	unsigned int memlocation; /* Pointer to byte in *mem */
	unsigned int memblocks;	/* Number of memory blocks in *mem */
	unsigned int memblocksize; /* Size of one memory block in bytes */
	unsigned int memaccessloops; /* Number of memory accesses per random
				      * bit generation */

	/* Repetition Count Test */
	unsigned int rct_count;		/* Number of stuck values */

	/* Intermittent health test failure threshold of 2^-30 */
#define JENT_RCT_CUTOFF		30	/* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF		325	/* Taken from SP800-90B sec 4.4.2 */
	/* Permanent health test failure threshold of 2^-60 */
#define JENT_RCT_CUTOFF_PERMANENT	60
#define JENT_APT_CUTOFF_PERMANENT	355
#define JENT_APT_WINDOW_SIZE	512	/* Data window size */
	/* LSB of time stamp to process */
#define JENT_APT_LSB		16
#define JENT_APT_WORD_MASK	(JENT_APT_LSB - 1)
	unsigned int apt_observations;	/* Number of collected observations */
	unsigned int apt_count;		/* APT counter */
	unsigned int apt_base;		/* APT base reference */
	unsigned int apt_base_set:1;	/* APT base reference set? */
};

/* Flags that can be used to initialize the RNG */
#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable the memory access noise
					   * source; saves JENT_MEMORY_SIZE
					   * bytes of RAM per entropy
					   * collector */

/* -- error codes for init function -- */
#define JENT_ENOTIME		1 /* Timer service not available */
#define JENT_ECOARSETIME	2 /* Timer too coarse for RNG */
#define JENT_ENOMONOTONIC	3 /* Timer is not monotonically increasing */
#define JENT_EVARVAR		5 /* Timer does not produce variations of
				   * variations (2nd derivative of time is
				   * zero). */
#define JENT_ESTUCK		8 /* Too many stuck results during init. */
#define JENT_EHEALTH		9 /* Health test failed during initialization */

/*
 * The output n bits can receive more than n bits of min entropy, of course,
 * but the fixed output of the conditioning function can only asymptotically
 * approach the output size bits of min entropy, not attain that bound. Random
 * maps will tend to have output collisions, which reduces the creditable
 * output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound).
 *
 * The value "64" is justified in Appendix A.4 of the current 90C draft,
 * and aligns with NIST's "epsilon" definition in this document, which is
 * that a string can be considered "full entropy" if you can bound the min
 * entropy in each bit of output to at least 1-epsilon, where epsilon is
 * required to be <= 2^(-32).
 */
#define JENT_ENTROPY_SAFETY_FACTOR	64
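
/*
 * Worked example (editorial note, assumed reasoning -- not upstream text):
 * in FIPS mode, jent_gen_entropy() below collects up to
 * (DATA_SIZE_BITS + JENT_ENTROPY_SAFETY_FACTOR) * osr = (256 + 64) * osr
 * non-stuck time deltas per 256-bit output block. Under the Jitter RNG's
 * standard assumption of at least 1/osr bit of min entropy per non-stuck
 * delta, the conditioner therefore receives at least 320 bits of min entropy
 * for each 256-bit block, which is the oversampling the epsilon bound above
 * asks for.
 */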

#include <linux/fips.h>
#include "jitterentropy.h"

/***************************************************************************
 * Adaptive Proportion Test
 *
 * This test complies with SP800-90B section 4.4.2.
 ***************************************************************************/

/*
 * Reset the APT counter
 *
 * @ec [in] Reference to entropy collector
 * @delta_masked [in] Time delta to use as the new APT base reference
 */
static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
{
	/* Reset APT counter */
	ec->apt_count = 0;
	ec->apt_base = delta_masked;
	ec->apt_observations = 0;
}

/*
 * Insert a new entropy event into APT
 *
 * @ec [in] Reference to entropy collector
 * @delta_masked [in] Masked time delta to process
 */
static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
{
	/* Initialize the base reference */
	if (!ec->apt_base_set) {
		ec->apt_base = delta_masked;
		ec->apt_base_set = 1;
		return;
	}

	if (delta_masked == ec->apt_base)
		ec->apt_count++;

	ec->apt_observations++;

	if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
		jent_apt_reset(ec, delta_masked);
}

/* APT health test failure detection */
static int jent_apt_permanent_failure(struct rand_data *ec)
{
	return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
}

static int jent_apt_failure(struct rand_data *ec)
{
	return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
}
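
/*
 * Illustrative example (editorial note, not upstream text): within one
 * window, the first delta becomes apt_base and apt_count tracks how often
 * exactly that value recurs among the following JENT_APT_WINDOW_SIZE = 512
 * deltas. If a broken timer produced, say, a constant delta of 100 ticks,
 * apt_count would climb past the intermittent cut-off of 325 and then the
 * permanent cut-off of 355 within a single window, and the health test would
 * flag the time stamps as too regular to credit entropy.
 */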

/***************************************************************************
 * Stuck Test and its use as Repetition Count Test
 *
 * The Jitter RNG uses an enhanced version of the Repetition Count Test
 * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
 * back-to-back values, the input to the RCT is the counting of the stuck
 * values during the generation of one Jitter RNG output block.
 *
 * The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8.
 *
 * During the counting operation, the Jitter RNG always compares the stuck
 * counter against the RCT cut-off value C. If the counter exceeds the allowed
 * cut-off value, the Jitter RNG output block will be calculated completely
 * but discarded at the end. The caller of the Jitter RNG is informed with an
 * error code.
 ***************************************************************************/

/*
 * Repetition Count Test as defined in SP800-90B section 4.4.1
 *
 * @ec [in] Reference to entropy collector
 * @stuck [in] Indicator whether the value is stuck
 */
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
	if (stuck) {
		ec->rct_count++;
	} else {
		/* Reset RCT */
		ec->rct_count = 0;
	}
}

static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX		(__u64)(~((__u64) 0))
	return (prev < next) ? (next - prev) :
			       (JENT_UINT64_MAX - prev + 1 + next);
}

/*
 * Stuck test by checking the:
 *	1st derivative of the jitter measurement (time delta)
 *	2nd derivative of the jitter measurement (delta of time deltas)
 *	3rd derivative of the jitter measurement (delta of delta of time deltas)
 *
 * All values must always be non-zero.
 *
 * @ec [in] Reference to entropy collector
 * @current_delta [in] Jitter time delta
 *
 * @return
 *	0 jitter measurement not stuck (good bit)
 *	1 jitter measurement stuck (reject bit)
 */
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
{
	__u64 delta2 = jent_delta(ec->last_delta, current_delta);
	__u64 delta3 = jent_delta(ec->last_delta2, delta2);

	ec->last_delta = current_delta;
	ec->last_delta2 = delta2;

	/*
	 * Insert the result of the comparison of two back-to-back time
	 * deltas.
	 */
	jent_apt_insert(ec, current_delta);

	if (!current_delta || !delta2 || !delta3) {
		/* RCT with a stuck bit */
		jent_rct_insert(ec, 1);
		return 1;
	}

	/* RCT with a non-stuck bit */
	jent_rct_insert(ec, 0);

	return 0;
}

/* RCT health test failure detection */
static int jent_rct_permanent_failure(struct rand_data *ec)
{
	return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
}

static int jent_rct_failure(struct rand_data *ec)
{
	return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
}

/* Report of health test failures */
static int jent_health_failure(struct rand_data *ec)
{
	return jent_rct_failure(ec) | jent_apt_failure(ec);
}

static int jent_permanent_health_failure(struct rand_data *ec)
{
	return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
}

/***************************************************************************
 * Noise sources
 ***************************************************************************/

/*
 * Update of the loop count used for the next round of
 * an entropy collection.
 *
 * Input:
 * @bits is the number of low bits of the timer to consider
 * @min defines the guaranteed minimum loop count: 2^min is added to the
 *	folded timer value at the end
 *
 * @return Newly calculated loop counter
 */
static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
{
	__u64 time = 0;
	__u64 shuffle = 0;
	unsigned int i = 0;
	unsigned int mask = (1<<bits) - 1;

	jent_get_nstime(&time);

	/*
	 * We fold the time value as much as possible to ensure that as many
	 * bits of the time stamp are included as possible.
	 */
	for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) {
		shuffle ^= time & mask;
		time = time >> bits;
	}

	/*
	 * We add a lower boundary value to ensure we have a minimum
	 * RNG loop count.
	 */
	return (shuffle + (1<<min));
}
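
/*
 * Example (editorial note, not upstream text): jent_memaccess() below calls
 * this with bits = 7 and min = 0. The mask is then 0x7f and the loop XORs
 * (256 + 6) / 7 = 37 seven-bit chunks of the time stamp into shuffle; only
 * the low ten chunks of a 64-bit counter can be non-zero, the remaining
 * iterations fold in zeroes. The return value therefore lies between 1 and
 * 128, keeping the memory access loop count small but data dependent.
 */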

/*
 * CPU Jitter noise source -- this is the noise source based on the CPU
 *			      execution time jitter
 *
 * This function injects the individual bits of the time value into the
 * entropy pool using a hash.
 *
 * ec [in] entropy collector
 * time [in] time stamp to be injected
 * stuck [in] Is the time stamp identified as stuck?
 *
 * Output:
 * updated hash context in the entropy collector or error code
 */
static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
#define SHA3_HASH_LOOP (1<<3)
	struct {
		int rct_count;
		unsigned int apt_observations;
		unsigned int apt_count;
		unsigned int apt_base;
	} addtl = {
		ec->rct_count,
		ec->apt_observations,
		ec->apt_count,
		ec->apt_base
	};

	return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
			      SHA3_HASH_LOOP, stuck);
}
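
/*
 * Editorial note (assumption, not upstream text): jent_hash_time() and
 * jent_read_random_block() are provided by the kernel glue code in
 * jitterentropy-kcapi.c. Conceptually, jent_hash_time() mixes the time delta
 * together with the health test snapshot in addtl into the SHA3-256 state
 * referenced by ec->hash_state; the stuck flag tells it that the delta must
 * not be credited with entropy.
 */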

/*
 * Memory Access noise source -- this is a noise source based on variations in
 *				 memory access times
 *
 * This function performs memory accesses which will add to the timing
 * variations due to an unknown amount of CPU wait states that need to be
 * added when accessing memory. The memory size should be larger than the L1
 * caches as outlined in the documentation and the associated testing.
 *
 * The L1 cache has a very high bandwidth, albeit its access rate is usually
 * slower than accessing CPU registers. Therefore, L1 accesses only add minimal
 * variations as the CPU hardly has to wait. Starting with L2, significant
 * variations are added because L2 typically does not belong to the CPU any more
 * and therefore a wider range of CPU wait states is necessary for accesses.
 * L3 and real memory accesses have an even wider range of wait states. However,
 * to reliably access either L3 or memory, the ec->mem memory must be quite
 * large which is usually not desirable.
 *
 * @ec [in] Reference to the entropy collector with the memory access data -- if
 *	    the reference to the memory block to be accessed is NULL, this noise
 *	    source is disabled
 * @loop_cnt [in] if a value not equal to 0 is set, use the given value as the
 *		  number of loops to perform the memory accesses
 */
static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
{
	unsigned int wrap = 0;
	__u64 i = 0;
#define MAX_ACC_LOOP_BIT 7
#define MIN_ACC_LOOP_BIT 0
	__u64 acc_loop_cnt =
		jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);

	if (NULL == ec || NULL == ec->mem)
		return;
	wrap = ec->memblocksize * ec->memblocks;

	/*
	 * testing purposes -- allow test app to set the counter, not
	 * needed during runtime
	 */
	if (loop_cnt)
		acc_loop_cnt = loop_cnt;

	for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
		unsigned char *tmpval = ec->mem + ec->memlocation;
		/*
		 * memory access: just add 1 to one byte,
		 * wrap at 255 -- memory access implies read
		 * from and write to memory location
		 */
		*tmpval = (*tmpval + 1) & 0xff;
		/*
		 * Addition of memblocksize - 1 to pointer
		 * with wrap around logic to ensure that every
		 * memory location is hit evenly
		 */
		ec->memlocation = ec->memlocation + ec->memblocksize - 1;
		ec->memlocation = ec->memlocation % wrap;
	}
}
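
/*
 * Editorial note (not upstream text): with the default configuration the
 * stride of memblocksize - 1 = 31 bytes is coprime to the wrap value of
 * JENT_MEMORY_SIZE = 2048 bytes, so the walk above touches every byte of
 * ec->mem exactly once before the access pattern repeats, which backs the
 * "hit evenly" claim in the comment.
 */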

/***************************************************************************
 * Start of entropy processing logic
 ***************************************************************************/
/*
 * This is the heart of the entropy generation: calculate time deltas and
 * use the CPU jitter in the time deltas. The jitter is injected into the
 * entropy pool.
 *
 * WARNING: ensure that ->prev_time is primed before using the output
 *	    of this function! This can be done by calling this function
 *	    and not using its result.
 *
 * @ec [in] Reference to entropy collector
 *
 * @return result of stuck test
 */
static int jent_measure_jitter(struct rand_data *ec)
{
	__u64 time = 0;
	__u64 current_delta = 0;
	int stuck;

	/* Invoke one noise source before time measurement to add variations */
	jent_memaccess(ec, 0);

	/*
	 * Get time stamp and calculate time delta to previous
	 * invocation to measure the timing variations
	 */
	jent_get_nstime(&time);
	current_delta = jent_delta(ec->prev_time, time);
	ec->prev_time = time;

	/* Check whether we have a stuck measurement. */
	stuck = jent_stuck(ec, current_delta);

	/* Now call the next noise source which also injects the data */
	if (jent_condition_data(ec, current_delta, stuck))
		stuck = 1;

	return stuck;
}

/*
 * Generator of one 256-bit block (DATA_SIZE_BITS) of random data
 * Function fills rand_data->hash_state
 *
 * @ec [in] Reference to entropy collector
 */
static void jent_gen_entropy(struct rand_data *ec)
{
	unsigned int k = 0, safety_factor = 0;

	if (fips_enabled)
		safety_factor = JENT_ENTROPY_SAFETY_FACTOR;

	/* priming of the ->prev_time value */
	jent_measure_jitter(ec);

	while (!jent_health_failure(ec)) {
		/* If a stuck measurement is received, repeat measurement */
		if (jent_measure_jitter(ec))
			continue;

		/*
		 * We multiply the loop value with ->osr to obtain the
		 * oversampling rate requested by the caller
		 */
		if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr))
			break;
	}
}

/*
 * Entry function: Obtain entropy for the caller.
 *
 * This function invokes the entropy gathering logic as often as needed to
 * generate as many bytes as requested by the caller. The entropy gathering
 * logic creates 256 bits (DATA_SIZE_BITS) per invocation.
 *
 * This function truncates the last entropy block output to the exact
 * size specified by the caller.
 *
 * @ec [in] Reference to entropy collector
 * @data [in] pointer to buffer for storing random data -- buffer must already
 *	      exist
 * @len [in] size of the buffer, specifying also the requested number of random
 *	     bytes
 *
 * @return 0 when request is fulfilled or an error
 *
 * The following error codes can occur:
 *	-1	entropy_collector is NULL or the generation failed
 *	-2	Intermittent health failure
 *	-3	Permanent health failure
 */
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
		      unsigned int len)
{
	unsigned char *p = data;

	if (!ec)
		return -1;

	while (len > 0) {
		unsigned int tocopy;

		jent_gen_entropy(ec);

		if (jent_permanent_health_failure(ec)) {
			/*
			 * At this point, the Jitter RNG instance is considered
			 * as a failed instance. There is no rerun of the
			 * startup test any more, because the caller
			 * is assumed to not further use this instance.
			 */
			return -3;
		} else if (jent_health_failure(ec)) {
			/*
			 * Perform startup health tests and return permanent
			 * error if it fails.
			 */
			if (jent_entropy_init(ec->hash_state))
				return -3;

			return -2;
		}

		if ((DATA_SIZE_BITS / 8) < len)
			tocopy = (DATA_SIZE_BITS / 8);
		else
			tocopy = len;
		if (jent_read_random_block(ec->hash_state, p, tocopy))
			return -1;

		len -= tocopy;
		p += tocopy;
	}

	return 0;
}
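
/*
 * Hypothetical usage sketch (editorial illustration only -- the real caller
 * lives in the kernel glue code jitterentropy-kcapi.c, which owns the
 * SHA3-256 hash_state and the required locking; the helper name and error
 * handling below are assumptions):
 */
#if 0
static int example_fill_buffer(void *hash_state, unsigned char *buf,
			       unsigned int len)
{
	struct rand_data *ec;
	int ret;

	/* Run the startup health and timer sanity tests once. */
	ret = jent_entropy_init(hash_state);
	if (ret)
		return ret;	/* one of the JENT_E* error codes */

	/* osr = 1, no flags: memory access noise source stays enabled. */
	ec = jent_entropy_collector_alloc(1, 0, hash_state);
	if (!ec)
		return -1;

	ret = jent_read_entropy(ec, buf, len);	/* 0, -1, -2 or -3 */
	jent_entropy_collector_free(ec);
	return ret;
}
#endif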

/***************************************************************************
 * Initialization logic
 ***************************************************************************/

struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
					       unsigned int flags,
					       void *hash_state)
{
	struct rand_data *entropy_collector;

	entropy_collector = jent_zalloc(sizeof(struct rand_data));
	if (!entropy_collector)
		return NULL;

	if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) {
		/* Allocate memory for adding variations based on memory
		 * access
		 */
		entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE);
		if (!entropy_collector->mem) {
			jent_zfree(entropy_collector);
			return NULL;
		}
		entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE;
		entropy_collector->memblocks = JENT_MEMORY_BLOCKS;
		entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
	}

	/* verify and set the oversampling rate */
	if (osr == 0)
		osr = 1; /* minimum sampling rate is 1 */
	entropy_collector->osr = osr;

	entropy_collector->hash_state = hash_state;

	/* fill the data pad with non-zero values */
	jent_gen_entropy(entropy_collector);

	return entropy_collector;
}

void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
	jent_zfree(entropy_collector->mem);
	entropy_collector->mem = NULL;
	jent_zfree(entropy_collector);
}

int jent_entropy_init(void *hash_state)
{
	int i;
	__u64 delta_sum = 0;
	__u64 old_delta = 0;
	unsigned int nonstuck = 0;
	int time_backwards = 0;
	int count_mod = 0;
	int count_stuck = 0;
	struct rand_data ec = { 0 };

	/* Required for RCT */
	ec.osr = 1;
	ec.hash_state = hash_state;

	/* We could perform statistical tests here, but the problem is
	 * that we only have a few loop counts to do testing. These
	 * loop counts may show some slight skew and we produce
	 * false positives.
	 *
	 * Moreover, only old systems show potentially problematic
	 * jitter entropy that could potentially be caught here. But
	 * the RNG is intended for hardware that is available or widely
	 * used, but not old systems that are long out of favor. Thus,
	 * no statistical tests.
	 */

	/*
	 * We could add a check for system capabilities such as clock_getres or
	 * check for CONFIG_X86_TSC, but it does not make much sense as the
	 * following sanity checks verify that we have a high-resolution
	 * timer.
	 */
	/*
	 * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
	 * definitely too little.
	 *
	 * SP800-90B requires at least 1024 initial test cycles.
	 */
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
	for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
		__u64 time = 0;
		__u64 time2 = 0;
		__u64 delta = 0;
		unsigned int lowdelta = 0;
		int stuck;

		/* Invoke core entropy collection logic */
		jent_get_nstime(&time);
		ec.prev_time = time;
		jent_condition_data(&ec, time, 0);
		jent_get_nstime(&time2);

		/* test whether timer works */
		if (!time || !time2)
			return JENT_ENOTIME;
		delta = jent_delta(time, time2);
		/*
		 * test whether timer is fine grained enough to provide
		 * delta even when called shortly after each other -- this
		 * implies that we also have a high resolution timer
		 */
		if (!delta)
			return JENT_ECOARSETIME;

		stuck = jent_stuck(&ec, delta);

		/*
		 * up to here we did not modify any variable that will be
		 * evaluated later, but we already performed some work. Thus we
		 * already have had an impact on the caches, branch prediction,
		 * etc. with the goal to clear it to get the worst case
		 * measurements.
		 */
		if (i < CLEARCACHE)
			continue;

		if (stuck)
			count_stuck++;
		else {
			nonstuck++;

			/*
			 * Ensure that the APT succeeded.
			 *
			 * With the check below that count_stuck must be less
			 * than 10% of the overall generated raw entropy values
			 * it is guaranteed that the APT is invoked at
			 * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times.
			 */
			if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
				jent_apt_reset(&ec,
					       delta & JENT_APT_WORD_MASK);
			}
		}

		/* Validate health test result */
		if (jent_health_failure(&ec))
			return JENT_EHEALTH;

		/* test whether we have an increasing timer */
		if (!(time2 > time))
			time_backwards++;

		/* use 32 bit value to ensure compilation on 32 bit arches */
		lowdelta = time2 - time;
		if (!(lowdelta % 100))
			count_mod++;

		/*
		 * ensure that we have a varying delta timer which is necessary
		 * for the calculation of entropy -- perform this check
		 * only after the first loop is executed as we need to prime
		 * the old_delta value
		 */
		if (delta > old_delta)
			delta_sum += (delta - old_delta);
		else
			delta_sum += (old_delta - delta);
		old_delta = delta;
	}

	/*
	 * we allow up to three times the time running backwards.
	 * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus,
	 * if such an operation just happens to interfere with our test, it
	 * should not fail. The value of 3 should cover the NTP case being
	 * performed during our test run.
	 */
	if (time_backwards > 3)
		return JENT_ENOMONOTONIC;

	/*
	 * Variations of deltas of time must on average be larger
	 * than 1 to ensure the entropy estimation
	 * implied with 1 is preserved
	 */
	if ((delta_sum) <= 1)
		return JENT_EVARVAR;

	/*
	 * Ensure that at least 10% of all time stamp deltas are not a multiple
	 * of 100 -- on some platforms, the counter increments in multiples of
	 * 100, but not always
	 */
	if ((TESTLOOPCOUNT/10 * 9) < count_mod)
		return JENT_ECOARSETIME;

	/*
	 * If we have more than 90% stuck results, then this Jitter RNG is
	 * likely to not work well.
	 */
	if ((TESTLOOPCOUNT/10 * 9) < count_stuck)
		return JENT_ESTUCK;

	return 0;
}