// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

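/*
 * The mailbox doorbell is set by software to submit a command and is
 * cleared by the device when the command completes (CXL 2.0 8.2.8.4).
 */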
#define cxl_doorbell_busy(cxlds)                                                \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
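/* Note: despite the _MS suffix, the value above is expressed in jiffies. */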

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long, it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
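/*
 * Illustrative usage: the timeout can be set at load time, e.g.
 * "modprobe cxl_pci mbox_ready_timeout=120", or adjusted at runtime via
 * /sys/module/cxl_pci/parameters/mbox_ready_timeout (hence the 0644).
 */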

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}
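/*
 * Note the two distinct timeouts: the busy-wait above is bounded by
 * CXL_MAILBOX_TIMEOUT_MS per command, while initial mailbox readiness
 * at probe time is bounded by the mbox_ready_timeout module parameter.
 */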

#define cxl_err(dev, status, msg)                                               \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",                   \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",          \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                                      \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n",     \
			    (cmd)->opcode,                                       \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",          \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
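/*
 * Example of the resulting log line (illustrative): with msg "mailbox
 * timeout", an Identify command, and CXLMDEV_DEV_FATAL set,
 * cxl_cmd_err() emits:
 *
 *	"mailbox timeout (opcode: 0x4000), device state fatal"
 */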

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command in that it uses only
 * the registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4.
 * Memory devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}
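/*
 * Illustrative only: core-layer callers dispatch commands through the
 * ->mbox_send() op registered in cxl_pci_setup_mailbox() below, along
 * the lines of (field names per struct cxl_mbox_cmd in cxlmem.h):
 *
 *	struct cxl_mbox_identify id;
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxlds->mbox_send(cxlds, &cmd);
 *
 * A zero return only means the command completed; cmd.return_code must
 * still be checked against CXL_MBOX_CMD_RC_SUCCESS.
 */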

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance
	 * (think kexec), so do one doorbell wait up front so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft-limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}
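/*
 * For reference: per CXL 2.0 8.2.8.4.3 the capability field encodes the
 * payload size as 2^n bytes, so conforming devices fall between n = 8
 * (the 256B minimum enforced above) and n = 20 (the 1M cap above).
 */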

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	void __iomem *addr;
	int bar = map->barno;
	struct device *dev = &pdev->dev;
	resource_size_t offset = map->block_offset;

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar,
			&pdev->resource[bar], &offset);
		return -ENXIO;
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n",
		bar, &offset);

	map->base = addr + map->block_offset;
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
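	/* undo the block_offset adjustment applied in cxl_map_regblock() */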
	pci_iounmap(pdev, map->base - map->block_offset);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlds->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}
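/*
 * Note the probe/map split: cxl_setup_regs() maps a register block only
 * long enough to validate its layout and fill @map, then unmaps it;
 * cxl_map_regs() later establishes the long-lived runtime mappings.
 */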

static void cxl_pci_destroy_doe(void *mbs)
{
	xa_destroy(mbs);
}

static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 off = 0;

	xa_init(&cxlds->doe_mbs);
	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
		dev_err(dev, "Failed to create XArray for DOE mailboxes\n");
		return;
	}

	/*
	 * Mailbox creation is best effort. Higher layers must determine if
	 * the lack of a mailbox for their protocol is a device failure or not.
	 */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *doe_mb;

		doe_mb = pcim_doe_create_mb(pdev, off);
		if (IS_ERR(doe_mb)) {
			dev_err(dev, "Failed to create MB object for MB @ %x\n",
				off);
			continue;
		}

		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
				off);
			continue;
		}

		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
	}
}
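/*
 * Illustrative only: a consumer (e.g. CDAT table retrieval) would walk
 * the xarray to find a mailbox supporting its protocol, along the
 * lines of:
 *
 *	struct pci_doe_mb *doe_mb;
 *	unsigned long index;
 *
 *	xa_for_each(&cxlds->doe_mbs, index, doe_mb)
 *		if (pci_doe_supports_prot(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
 *					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
 *			return doe_mb;
 */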

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_regs(cxlds, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);

	devm_cxl_pci_create_doe(cxlds);

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices (class 05h/02h, prog-if 10h) */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.driver = {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);