// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/page.h>

#include "ram_internal.h"

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start: First valid byte in the buffer.
 * @size: Number of valid bytes in the buffer.
 * @data: The contents of the buffer.
 */
struct persistent_ram_buffer {
	uint32_t sig;
	atomic_t start;
	atomic_t size;
	uint8_t data[];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

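/*
 * Compute the Reed-Solomon parity bytes for one block of buffer data and
 * store them in @ecc, using prz->ecc_info.par as scratch space.
 */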
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
				NULL, 0, NULL, 0, NULL);
}

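/*
 * Regenerate the parity for every ECC block touched by a write of @count
 * bytes at offset @start, trimming the final block if it runs past the
 * end of the data area.
 */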
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

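/*
 * Set up Reed-Solomon protection for the zone: fill in default ECC
 * parameters, carve the parity area out of the end of the data buffer,
 * allocate the decoder and its workspace, and check the header parity.
 */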
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len, ""
			"\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nECC: No errors detected\n");

	return ret;
}

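/*
 * Copy new data into the data area at offset @start and bring the
 * affected ECC blocks back in sync. Callers handle any wraparound by
 * splitting the write.
 */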
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;
	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

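/*
 * Snapshot the current buffer contents into prz->old_log, un-wrapping
 * the circular buffer so the copy is in linear (oldest-first) order.
 */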
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kvzalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

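/*
 * Append @count bytes to the circular buffer. Writes larger than the
 * buffer keep only the trailing prz->buffer_size bytes, and writes that
 * reach the end of the data area wrap around to the beginning.
 */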
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}

int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kvfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

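/*
 * Discard the buffer contents by resetting start and size, then refresh
 * the header ECC to match.
 */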
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

#define MEM_TYPE_WCOMBINE	0
#define MEM_TYPE_NONCACHED	1
#define MEM_TYPE_NORMAL		2

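/*
 * Map a physical range that is backed by struct pages (i.e. part of
 * system RAM) with vmap(), using the caching attributes selected by
 * @memtype.
 */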
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	/*
	 * VM_IOREMAP used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}

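/*
 * Map a physical range that is not part of system RAM: reserve it with
 * request_mem_region() and ioremap() it, write-combined when
 * MEM_TYPE_WCOMBINE was requested and uncached otherwise.
 */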
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity,
	 * there is no need to handle anything special like we do for the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}

static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

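/*
 * Initialize ECC and examine any pre-existing contents: a matching
 * signature with a sane start/size gets its old log saved, while a
 * missing or invalid buffer (or the PRZ_FLAG_ZAP_OLD flag) causes the
 * zone to be reset to empty.
 */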
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed %s\n", prz->label);
		return ret;
	}

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}

void persistent_ram_free(struct persistent_ram_zone **_prz)
{
	struct persistent_ram_zone *prz;

	if (!_prz)
		return;

	prz = *_prz;
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
	*_prz = NULL;
}

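/*
 * Allocate and map a persistent RAM zone at physical address @start,
 * initialize its ECC state, and recover or reset any prior contents.
 * Returns an ERR_PTR() on failure.
 */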
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = kstrdup(label, GFP_KERNEL);
	if (!prz->label)
		goto err;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(&prz);
	return ERR_PTR(ret);
}