// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI-exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if the device is not CXL enabled. While this
 * driver is focused on the PCI-specific aspects of a CXL device, it binds to
 * the specific CXL memory device class code, and therefore the implementation
 * of cxl_pci is focused on CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register it on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds)                                                \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
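/* Note: despite the _MS suffix, 2 * HZ is a jiffies count, i.e. two seconds. */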

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long; it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
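/*
 * Illustrative usage (not part of the driver): with the 0644 permissions
 * above, the timeout can be overridden at load time, e.g.
 * "modprobe cxl_pci mbox_ready_timeout=120", or inspected/adjusted later
 * via /sys/module/cxl_pci/parameters/mbox_ready_timeout. Runtime changes
 * only affect subsequent device probes.
 */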

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg)                                          \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",              \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",     \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                                 \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \
			    (cmd)->opcode,                                  \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",     \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command; it only uses the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the
 * primary mailbox to be OS controlled and the secondary mailbox to be used
 * by system firmware. This allows the OS and firmware to communicate with
 * the device without needing to coordinate with each other. The driver only
 * uses the primary mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}
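
/*
 * A minimal sketch of how a layer above might use the ->mbox_send() op
 * registered below, assuming an opcode constant such as CXL_MBOX_OP_IDENTIFY
 * from cxlmem.h and an output buffer "id" of matching size. Illustrative
 * only; the real consumers live in cxl_core:
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxlds->mbox_send(cxlds, &cmd);
 *
 * A zero return only means the hardware completed the command; callers must
 * still check cmd.return_code against CXL_MBOX_CMD_RC_SUCCESS.
 */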

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance,
	 * think kexec; do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}
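
/*
 * Worked example for the capability math above: the Payload Size field is
 * the log2 of the payload buffer, so a field value of 8 yields a 256 byte
 * mailbox (the minimum this driver accepts) and a value of 20 yields 1M
 * (the point at which the soft limit above caps it).
 */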

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	void __iomem *addr;
	int bar = map->barno;
	struct device *dev = &pdev->dev;
	resource_size_t offset = map->block_offset;

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar,
			&pdev->resource[bar], &offset);
		return -ENXIO;
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n",
		bar, &offset);

	map->base = addr + map->block_offset;
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	pci_iounmap(pdev, map->base - map->block_offset);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlds->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}

static void cxl_pci_destroy_doe(void *mbs)
{
	xa_destroy(mbs);
}

static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 off = 0;

	xa_init(&cxlds->doe_mbs);
	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
		dev_err(dev, "Failed to create XArray for DOEs\n");
		return;
	}

	/*
	 * Mailbox creation is best effort. Higher layers must determine if
	 * the lack of a mailbox for their protocol is a device failure or not.
	 */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *doe_mb;

		doe_mb = pcim_doe_create_mb(pdev, off);
		if (IS_ERR(doe_mb)) {
			dev_err(dev, "Failed to create MB object for MB @ %x\n",
				off);
			continue;
		}

		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
				off);
			continue;
		}

		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
	}
}
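
/*
 * A sketch of how a consumer might later look up one of the mailboxes
 * stashed above, assuming pci_doe_supports_prot() and a protocol constant
 * such as CXL_DOE_PROTOCOL_TABLE_ACCESS from cxlpci.h. Illustrative only;
 * the real lookup lives in cxl_core:
 *
 *	struct pci_doe_mb *doe_mb;
 *	unsigned long index;
 *
 *	xa_for_each(&cxlds->doe_mbs, index, doe_mb)
 *		if (pci_doe_supports_prot(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
 *					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
 *			return doe_mb;
 */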

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_regs(cxlds, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);

	devm_cxl_pci_create_doe(cxlds);

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
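
/*
 * For reference (values from the PCI ID headers): PCI_CLASS_MEMORY_CXL is
 * 0x0502 and CXL_MEMORY_PROGIF is 0x10, so the ~0 mask above matches the
 * full 24-bit class code 0x050210 and nothing else.
 */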

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.driver = {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);