// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MAX10 Board Management Controller Secure Update Driver
 *
 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
 *
 */
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct m10bmc_sec {
	struct device *dev;
	struct intel_m10bmc *m10bmc;
	struct fw_upload *fwl;
	char *fw_name;
	u32 fw_name_id;
	bool cancel_request;
};

static DEFINE_XARRAY_ALLOC(fw_upload_xa);

/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32
#define REH_SHA384_SIZE		48
#define REH_MAGIC		GENMASK(15, 0)
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)

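/*
 * Read and format a root entry hash for sysfs. The register word at
 * prog_addr holds a magic number and the hash length in bits; the hash
 * itself is bulk-read from reh_addr. A magic mismatch means no hash has
 * been programmed.
 */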
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
		     u32 prog_addr, u32 reh_addr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	int sha_num_bytes, i, ret, cnt = 0;
	u8 hash[REH_SHA384_SIZE];
	unsigned int stride;
	u32 magic;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
	if (ret)
		return ret;

	if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
		return sysfs_emit(buf, "hash not programmed\n");

	sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
	if ((sha_num_bytes % stride) ||
	    (sha_num_bytes != REH_SHA256_SIZE &&
	     sha_num_bytes != REH_SHA384_SIZE)) {
		dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
			sha_num_bytes);
		return -EINVAL;
	}

	ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
			       hash, sha_num_bytes / stride);
	if (ret) {
		dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
			reh_addr, sha_num_bytes / stride, ret);
		return ret;
	}

	for (i = 0; i < sha_num_bytes; i++)
		cnt += sprintf(buf + cnt, "%02x", hash[i]);
	cnt += sprintf(buf + cnt, "\n");

	return cnt;
}

#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);

#define CSK_BIT_LEN		128U
#define CSK_32ARRAY_SIZE	DIV_ROUND_UP(CSK_BIT_LEN, 32)

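/*
 * The 128-bit CSK cancellation vector is stored inverted in flash: a
 * cleared bit marks a canceled code signing key. Read it, complement
 * it, and print the canceled key IDs as a bit list.
 */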
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
	unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
	__le32 csk_le32[CSK_32ARRAY_SIZE];
	u32 csk32[CSK_32ARRAY_SIZE];
	int ret;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	if (size % stride) {
		dev_err(sec->dev,
			"CSK vector size (0x%x) not aligned to stride (0x%x)\n",
			size, stride);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
			       size / stride);
	if (ret) {
		dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
			addr, size / stride, ret);
		return ret;
	}

	for (i = 0; i < CSK_32ARRAY_SIZE; i++)
		csk32[i] = le32_to_cpu(csk_le32[i]);

	bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
	bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}

#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ return show_canceled_csk(dev, _addr, buf); } \
static DEVICE_ATTR_RO(_name##_canceled_csks)

#define CSK_VEC_OFFSET	0x34

DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);

#define FLASH_COUNT_SIZE 4096	/* count stored as inverted bit vector */

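/*
 * The flash update count is stored as an inverted bit vector: each
 * update clears one more bit, so the count is the number of zero bits.
 */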
static ssize_t flash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	unsigned int stride, num_bits;
	u8 *flash_buf;
	int cnt, ret;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	num_bits = FLASH_COUNT_SIZE * 8;

	if (FLASH_COUNT_SIZE % stride) {
		dev_err(sec->dev,
			"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
			FLASH_COUNT_SIZE, stride);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
	if (!flash_buf)
		return -ENOMEM;

	ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
			       flash_buf, FLASH_COUNT_SIZE / stride);
	if (ret) {
		dev_err(sec->dev,
			"failed to read flash count: %x cnt %x: %d\n",
			STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
		goto exit_free;
	}
	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);

exit_free:
	kfree(flash_buf);

	return ret ? : sysfs_emit(buf, "%u\n", cnt);
}
static DEVICE_ATTR_RO(flash_count);

static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};
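
/*
 * The attributes above appear under the device's "security" sysfs
 * group, e.g. (illustrative path; the device name varies by platform):
 *   /sys/bus/platform/devices/<device>/security/flash_count
 */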

static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
	u32 auth_result;

	dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);

	if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
		dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}

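/* Ensure no other RSU operation is in progress before starting one. */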
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
	    rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_BUSY;
	}

	return FW_UPLOAD_ERR_NONE;
}

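/*
 * The BMC firmware has acted on the RSU request once it clears
 * DRBL_RSU_REQUEST and either reports an early error status or advances
 * the progress field past IDLE/RSU_DONE.
 */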
static inline bool rsu_start_done(u32 doorbell)
{
	u32 status, progress;

	if (doorbell & DRBL_RSU_REQUEST)
		return false;

	status = rsu_stat(doorbell);
	if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
		return true;

	progress = rsu_prog(doorbell);
	if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
		return true;

	return false;
}

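/*
 * Raise the RSU request bit in the doorbell register and wait for the
 * BMC firmware to acknowledge it (see rsu_start_done() above).
 */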
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	u32 doorbell, status;
	int ret;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
				 DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				 DRBL_RSU_REQUEST |
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
				       doorbell,
				       rsu_start_done(doorbell),
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	status = rsu_stat(doorbell);
	if (status == RSU_STAT_WEAROUT) {
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

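/*
 * Poll until the BMC firmware leaves the PREPARE phase (presumably
 * erasing the staging area) and reports RSU_PROG_READY.
 */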
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

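/*
 * Signal that the staged image is fully written and wait for the BMC
 * firmware to pick it up (progress leaves the READY state).
 */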
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	u32 doorbell;
	int ret;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
				       doorbell,
				       rsu_prog(doorbell) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	switch (rsu_stat(doorbell)) {
	case RSU_STAT_NORMAL:
	case RSU_STAT_NIOS_OK:
	case RSU_STAT_USER_OK:
	case RSU_STAT_FACTORY_OK:
		break;
	default:
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

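/*
 * Returns 0 when the update has finished, -EAGAIN while authentication
 * or copying is still in progress, -EIO on a read failure, and -EINVAL
 * on an error status or unexpected progress value.
 */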
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
{
	if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
		return -EIO;

	switch (rsu_stat(*doorbell)) {
	case RSU_STAT_NORMAL:
	case RSU_STAT_NIOS_OK:
	case RSU_STAT_USER_OK:
	case RSU_STAT_FACTORY_OK:
		break;
	default:
		return -EINVAL;
	}

	switch (rsu_prog(*doorbell)) {
	case RSU_PROG_IDLE:
	case RSU_PROG_RSU_DONE:
		return 0;
	case RSU_PROG_AUTHENTICATING:
	case RSU_PROG_COPYING:
	case RSU_PROG_UPDATE_CANCEL:
	case RSU_PROG_PROGRAM_KEY_HASH:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

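/*
 * A cancellation is only possible while the BMC firmware is in the
 * READY state, i.e. while it is still waiting for image data.
 */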
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (rsu_prog(doorbell) != RSU_PROG_READY)
		return FW_UPLOAD_ERR_BUSY;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_ABORT_RSU));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	return FW_UPLOAD_ERR_CANCELED;
}

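/*
 * fw_upload_ops callbacks. The firmware upload core invokes prepare()
 * once, write() repeatedly until the whole image is transferred, and
 * then poll_complete(); cancel() may be called asynchronously at any
 * point. Userspace drives an update through the class device this
 * driver registers, e.g. /sys/class/firmware/secure-updateN/{loading,data}.
 */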
static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
					     const u8 *data, u32 size)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	u32 ret;

	sec->cancel_request = false;

	if (!size || size > M10BMC_STAGING_SIZE)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	ret = rsu_check_idle(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		return ret;

	ret = rsu_update_init(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		return ret;

	ret = rsu_prog_ready(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		return ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	return FW_UPLOAD_ERR_NONE;
}

#define WRITE_BLOCK_SIZE 0x4000	/* Default write-block size is 0x4000 bytes */

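/*
 * Copy one block of at most WRITE_BLOCK_SIZE bytes into the staging
 * area. The core calls back with an updated offset until the whole
 * image has been written.
 */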
static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
					   u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	u32 blk_size, doorbell, extra_offset;
	unsigned int stride, extra = 0;
	int ret;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	if (sec->cancel_request)
		return rsu_cancel(sec);

	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	ret = regmap_bulk_write(sec->m10bmc->regmap,
				M10BMC_STAGING_BASE + offset,
				(void *)data + offset,
				blk_size / stride);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	/*
	 * If blk_size is not aligned to stride, then handle the extra
	 * bytes with regmap_write.
	 */
	if (blk_size % stride) {
		extra_offset = offset + ALIGN_DOWN(blk_size, stride);
		memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
		ret = regmap_write(sec->m10bmc->regmap,
				   M10BMC_STAGING_BASE + extra_offset, extra);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}

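/*
 * Hand the staged image off to the BMC firmware and poll the doorbell
 * until authentication and copying complete or the timeout expires.
 */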
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	if (ret == -EAGAIN) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

/*
 * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
 * All other functions are called sequentially in a single thread. To avoid
 * contention on register accesses, m10bmc_sec_cancel() must only update
 * the cancel_request flag. Other functions will check this flag and handle
 * the cancel request synchronously.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	sec->cancel_request = true;
}

static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	(void)rsu_cancel(sec);
}

static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};

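/*
 * Allocate a unique ID from fw_upload_xa to construct a distinct
 * "secure-updateN" name for each device instance registered with the
 * firmware upload class.
 */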
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
	char buf[SEC_UPDATE_LEN_MAX];
	struct m10bmc_sec *sec;
	struct fw_upload *fwl;
	unsigned int len;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	sec->dev = &pdev->dev;
	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
	dev_set_drvdata(&pdev->dev, sec);

	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
		       xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
			sec->fw_name_id);
	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
	if (!sec->fw_name)
		return -ENOMEM;

	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
				       &m10bmc_ops, sec);
	if (IS_ERR(fwl)) {
		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
		kfree(sec->fw_name);
		xa_erase(&fw_upload_xa, sec->fw_name_id);
		return PTR_ERR(fwl);
	}

	sec->fwl = fwl;
	return 0;
}

static int m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);
	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);

	return 0;
}

static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
	},
	{
		.name = "d5005bmc-sec-update",
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);

static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");