// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

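/*
 * Offset in the NHI port config space where the mailbox registers
 * start; MAIL_DATA, MAIL_IN and MAIL_OUT below are dword offsets
 * relative to this base (see dma_port_alloc()).
 */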
#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)
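/*
 * Worked example of composing a MAIL_IN command word from the fields
 * above: a request to read a full 16 dwords starting at dword address
 * 0x10 of the active flash is
 *
 *	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
 *	in |= (0x10 << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
 *	in |= MAIL_IN_OP_REQUEST;
 *
 * A full MAIL_DATA_DWORDS transfer leaves the dwords field (bits
 * 27:24) zero; see dma_port_flash_read_block() below for the real
 * encoding.
 */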

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3

/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

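/*
 * dma_port_read() and dma_port_write() below are essentially open-coded
 * versions of tb_cfg_read()/tb_cfg_write() that install the relaxed
 * match/copy callbacks above, so that replies from a switch in safe
 * mode are accepted as well.
 */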
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Finds the DMA control port of a switch
 * @sw: Switch from which to find the DMA port
 *
 * Function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * the DMA port structure. Returns %NULL if the capability was not
 * found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}

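/*
 * A minimal usage sketch of this interface (illustrative only, error
 * handling abbreviated):
 *
 *	struct tb_dma_port *dma = dma_port_alloc(sw);
 *
 *	if (dma) {
 *		u8 data[64];
 *
 *		if (!dma_port_flash_read(dma, 0, data, sizeof(data)))
 *			...data now holds the start of the active region...
 *		dma_port_free(dma);
 *	}
 */

/*
 * The switch clears MAIL_IN_OP_REQUEST once it has consumed a mailbox
 * request, so completion is detected by polling the bit until it goes
 * to zero or @timeout milliseconds have passed.
 */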
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

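/*
 * Runs a single mailbox transaction: write the command word to
 * MAIL_IN, wait until the switch clears MAIL_IN_OP_REQUEST, then read
 * MAIL_OUT and convert its status field to an errno.
 */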
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
		int ret;

		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is
 * written using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
		int ret;

		memcpy(dma->buf + offset, buf, nbytes);

		/* Write the staged copy so that @offset is honored */
		ret = dma_port_flash_write_block(dma, address, dma->buf,
						 nbytes);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process
 * where the active and non-active areas are swapped at the end. The
 * caller should call dma_port_flash_update_auth_status() to get the
 * status of this command, because if the switch in question is the
 * root switch the Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

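/*
 * Illustrative end-to-end upgrade sequence built from the functions in
 * this file (a sketch only, not lifted from an actual caller):
 *
 *	dma_port_flash_write(dma, 0, image, image_size);
 *	dma_port_flash_update_auth(dma);
 *	...wait for the switch to come back...
 *	if (dma_port_flash_update_auth_status(dma, &status) > 0)
 *		...authentication failed, status holds the reason;
 *		   dma_port_power_cycle() below can be used to recover...
 */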

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}