// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#define CREATE_TRACE_POINTS
#include <trace/events/cxl.h>

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register it on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds) \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
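/*
 * Note: despite the _MS suffix, this expands to a jiffies interval (2
 * seconds); it is only ever compared against jiffies via time_after() in
 * cxl_pci_mbox_wait_for_doorbell() below.
 */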

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long; it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
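/*
 * Example: the default can be raised at load time, e.g.
 * "modprobe cxl_pci mbox_ready_timeout=120", or at runtime via
 * /sys/module/cxl_pci/parameters/mbox_ready_timeout (the 0644 mode above
 * makes the parameter writable).
 */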

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg) \
	dev_err_ratelimited(dev, msg ", device state %s%s\n", \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg) \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \
			    (cmd)->opcode, \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

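	/*
	 * Per CXL 2.0 8.2.8.4.5, the Command Register packs the opcode into
	 * bits [15:0] and the payload length into bits [36:16]; the 21-bit
	 * length field is why out_len is later sanity checked against the
	 * 1M spec maximum.
	 */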
	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}
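/*
 * A hypothetical usage sketch (not part of this file): callers populate a
 * struct cxl_mbox_cmd and dispatch it through the registered ->mbox_send()
 * operation, e.g.:
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.payload_out = &id,
 *		.size_out = sizeof(id),
 *	};
 *
 *	rc = cxlds->mbox_send(cxlds, &cmd);
 *	if (rc == 0 && cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
 *		rc = -ENXIO;
 *
 * In practice the cxl_core mailbox wrappers perform this dispatch and add
 * payload size validation on top of this transport.
 */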

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance,
	 * think kexec, do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct device *dev = &pdev->dev;

	map->base = ioremap(map->resource, map->max_size);
	if (!map->base) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	iounmap(map->base);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		if (!comp_map->ras.valid)
			dev_dbg(dev, "RAS registers not found\n");

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}

static void cxl_pci_destroy_doe(void *mbs)
{
	xa_destroy(mbs);
}

static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 off = 0;

	xa_init(&cxlds->doe_mbs);
	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
		dev_err(dev, "Failed to create XArray for DOE's\n");
		return;
	}

	/*
	 * Mailbox creation is best effort. Higher layers must determine if
	 * the lack of a mailbox for their protocol is a device failure or not.
	 */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *doe_mb;

		doe_mb = pcim_doe_create_mb(pdev, off);
		if (IS_ERR(doe_mb)) {
			dev_err(dev, "Failed to create MB object for MB @ %x\n",
				off);
			continue;
		}

		if (!pci_request_config_region_exclusive(pdev, off,
							 PCI_DOE_CAP_SIZEOF,
							 dev_name(dev)))
			pci_err(pdev, "Failed to exclude DOE registers\n");

		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
				off);
			continue;
		}

		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
	}
}

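/*
 * Terminology: an RCIEP is a Root Complex Integrated Endpoint, and an RCD
 * (Restricted CXL Device) is a CXL 1.1-style device that operates below a
 * Restricted CXL Host (RCH) rather than as a full CXL 2.0+ endpoint.
 */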
/*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD
 */
static bool is_cxl_restricted(struct pci_dev *pdev)
{
	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}

static void disable_aer(void *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);
	pci_set_drvdata(pdev, cxlds);

	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = map.resource;

	devm_cxl_pci_create_doe(cxlds);

	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (cxlds->regs.ras) {
		pci_enable_pcie_error_reporting(pdev);
		rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
		if (rc)
			return rc;
	}
	pci_save_state(pdev);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

/* CXL spec rev 3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(dev, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}

static void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(dev, status);
	}
}

static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected		= cxl_error_detected,
	.slot_reset		= cxl_slot_reset,
	.resume			= cxl_error_resume,
	.cor_error_detected	= cxl_cor_error_detected,
};

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.err_handler		= &cxl_error_handlers,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <asm-generic/unaligned.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#include "pmu.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register it on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds) \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long; it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg) \
	dev_err_ratelimited(dev, msg ", device state %s%s\n", \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg) \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \
			    (cmd)->opcode, \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/*
 * Threaded irq dev_ids must be globally unique. cxl_dev_id provides a unique
 * wrapper object for each irq within the same cxlds.
 */
struct cxl_dev_id {
	struct cxl_dev_state *cxlds;
};

static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
			   irq_handler_t thread_fn)
{
	struct device *dev = cxlds->dev;
	struct cxl_dev_id *dev_id;

	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
	if (!dev_id)
		return -ENOMEM;
	dev_id->cxlds = cxlds;

	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_SHARED | IRQF_ONESHOT, NULL,
					 dev_id);
}

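/*
 * The Background Command Status register reports a percentage complete for
 * the single background command that a mailbox can run at a time; a value
 * of 100 indicates the operation has finished (CXL 2.0 8.2.8.4.7).
 */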
static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
{
	u64 reg;

	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
}

static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
{
	u64 reg;
	u16 opcode;
	struct cxl_dev_id *dev_id = id;
	struct cxl_dev_state *cxlds = dev_id->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!cxl_mbox_background_complete(cxlds))
		return IRQ_NONE;

	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
	if (opcode == CXL_MBOX_OP_SANITIZE) {
		mutex_lock(&mds->mbox_mutex);
		if (mds->security.sanitize_node)
			mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
		mutex_unlock(&mds->mbox_mutex);
	} else {
		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
		rcuwait_wake_up(&mds->mbox_wait);
	}

	return IRQ_HANDLED;
}

/*
 * Sanitization operation polling mode.
 */
static void cxl_mbox_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);
	struct cxl_dev_state *cxlds = &mds->cxlds;

	mutex_lock(&mds->mbox_mutex);
	if (cxl_mbox_background_complete(cxlds)) {
		mds->security.poll_tmo_secs = 0;
		if (mds->security.sanitize_node)
			sysfs_notify_dirent(mds->security.sanitize_node);
		mds->security.sanitize_active = false;

		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
	} else {
		int timeout = mds->security.poll_tmo_secs + 10;

		mds->security.poll_tmo_secs = min(15 * 60, timeout);
		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
	}
	mutex_unlock(&mds->mbox_mutex);
}

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @mds: The memory device driver data
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&mds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	/*
	 * With sanitize polling, hardware might be done and the poller still
	 * not be in sync. Ensure no new command comes in until it is. Keep the
	 * hardware semantics and only allow device health status.
	 */
	if (mds->security.poll_tmo_secs > 0) {
		if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
			return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command: 0x%04x\n", mbox_cmd->opcode);
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	/*
	 * Handle the background command in a synchronous manner.
	 *
	 * All other mailbox commands will serialize/queue on the mbox_mutex,
	 * which we currently hold. Furthermore this also guarantees that
	 * cxl_mbox_background_complete() checks are safe amongst each other,
	 * in that no new bg operation can occur in between.
	 *
	 * Background operations are timesliced in accordance with the nature
	 * of the command. In the event of timeout, the mailbox state is
	 * indeterminate until the next successful command submission and the
	 * driver can get back in sync with the hardware state.
	 */
	if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
		u64 bg_status_reg;
		int i, timeout;

		/*
		 * Sanitization is a special case which monopolizes the device
		 * and cannot be timesliced. Handle asynchronously instead,
		 * and allow userspace to poll(2) for completion.
		 */
		if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
			if (mds->security.sanitize_active)
				return -EBUSY;

			/* give first timeout a second */
			timeout = 1;
			mds->security.poll_tmo_secs = timeout;
			mds->security.sanitize_active = true;
			schedule_delayed_work(&mds->security.poll_dwork,
					      timeout * HZ);
			dev_dbg(dev, "Sanitization operation started\n");
			goto success;
		}

		dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
			mbox_cmd->opcode);

		timeout = mbox_cmd->poll_interval_ms;
		for (i = 0; i < mbox_cmd->poll_count; i++) {
			if (rcuwait_wait_event_timeout(&mds->mbox_wait,
				       cxl_mbox_background_complete(cxlds),
				       TASK_UNINTERRUPTIBLE,
				       msecs_to_jiffies(timeout)) > 0)
				break;
		}

		if (!cxl_mbox_background_complete(cxlds)) {
			dev_err(dev, "timeout waiting for background (%d ms)\n",
				timeout * mbox_cmd->poll_count);
			return -ETIMEDOUT;
		}

		bg_status_reg = readq(cxlds->regs.mbox +
				      CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
		mbox_cmd->return_code =
			FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
				  bg_status_reg);
		dev_dbg(dev,
			"Mailbox background operation (0x%04x) completed\n",
			mbox_cmd->opcode);
	}

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

success:
	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n;

		n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
			     struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&mds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(mds, cmd);
	mutex_unlock(&mds->mbox_mutex);

	return rc;
}

static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	struct device *dev = cxlds->dev;
	unsigned long timeout;
	int irq, msgnum;
	u64 md_status;
	u32 ctrl;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(dev, md_status, "timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance,
	 * think kexec, do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	mds->mbox_send = cxl_pci_mbox_send;
	mds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
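	/*
	 * The capability field encodes the payload size as a power of 2:
	 * 2^8 (256B) is the spec minimum and 2^20 (1M) the maximum, which
	 * is what the clamp and the too-small check below enforce.
	 */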

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
	if (mds->payload_size < 256) {
		dev_err(dev, "Mailbox is too small (%zub)",
			mds->payload_size);
		return -ENXIO;
	}

	dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);

	rcuwait_init(&mds->mbox_wait);
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);

	/* background command interrupts are optional */
	if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
		return 0;

	msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
	if (irq < 0)
		return 0;

	if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
		return 0;

	dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
	/* enable background command mbox irq support */
	ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
	ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
	writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	return 0;
}

/*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD
 */
static bool is_cxl_restricted(struct pci_dev *pdev)
{
	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}

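/*
 * For an RCD there is no Register Locator DVSEC; instead, locate the
 * component registers through the Root Complex Register Block (RCRB)
 * advertised by the host bridge dport.
 */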
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
				  struct cxl_register_map *map)
{
	struct cxl_port *port;
	struct cxl_dport *dport;
	resource_size_t component_reg_phys;

	*map = (struct cxl_register_map) {
		.host = &pdev->dev,
		.resource = CXL_RESOURCE_NONE,
	};

	port = cxl_pci_find_port(pdev, &dport);
	if (!port)
		return -EPROBE_DEFER;

	component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);

	put_device(&port->dev);

	if (component_reg_phys == CXL_RESOURCE_NONE)
		return -ENXIO;

	map->resource = component_reg_phys;
	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;

	return 0;
}

static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			      struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);

	/*
	 * If the Register Locator DVSEC does not exist, check if it
	 * is an RCH and try to extract the Component Registers from
	 * an RCRB.
	 */
	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
		rc = cxl_rcrb_get_comp_regs(pdev, map);

	if (rc)
		return rc;

	return cxl_setup_regs(map);
}

static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 orig_val, val, mask;
	u16 cap;
	int rc;

	if (!cxlds->regs.ras) {
		dev_dbg(&pdev->dev, "No RAS registers.\n");
		return 0;
	}

	/* BIOS has PCIe AER error control */
	if (!pcie_aer_is_native(pdev))
		return 0;

	rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
	if (rc)
		return rc;

	if (cap & PCI_EXP_DEVCTL_URRE) {
		addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);

		mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
		       CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
		val = orig_val & ~mask;
		writel(val, addr);
		dev_dbg(&pdev->dev,
			"Uncorrectable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	if (cap & PCI_EXP_DEVCTL_CERE) {
		addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);
		val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
		writel(val, addr);
		dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	return 0;
}

static void free_event_buf(void *buf)
{
	kvfree(buf);
}

/*
 * There is a single buffer for reading event logs from the mailbox. All logs
 * share this buffer protected by the mds->event_log_lock.
 */
static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{
	struct cxl_get_event_payload *buf;

	buf = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mds->event.buf = buf;

	return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}

static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
	int nvecs;

	/*
	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
	 * not generate INTx messages if that function participates in
	 * CXL.cache or CXL.mem.
	 *
	 * Additionally pci_alloc_irq_vectors() handles calling
	 * pci_free_irq_vectors() automatically despite not being a pcim_*
	 * helper. See pci_setup_msi_context().
	 */
	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 1) {
		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
		return false;
	}
	return true;
}

static irqreturn_t cxl_event_thread(int irq, void *id)
{
	struct cxl_dev_id *dev_id = id;
	struct cxl_dev_state *cxlds = dev_id->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	u32 status;

	do {
		/*
		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
		 * ignore the reserved upper 32 bits
		 */
		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
		/* Ignore logs unknown to the driver */
		status &= CXLDEV_EVENT_STATUS_ALL;
		if (!status)
			break;
		cxl_mem_get_event_records(mds, status);
		cond_resched();
	} while (status);

	return IRQ_HANDLED;
}

static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int irq;

	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
		return -ENXIO;

	irq = pci_irq_vector(pdev,
			     FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
	if (irq < 0)
		return irq;

	return cxl_request_irq(cxlds, irq, cxl_event_thread);
}

static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
		.payload_out = policy,
		.size_out = sizeof(*policy),
	};
	int rc;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		dev_err(mds->cxlds.dev,
			"Failed to get event interrupt policy: %d", rc);

	return rc;
}

static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	*policy = (struct cxl_event_interrupt_policy) {
		.info_settings = CXL_INT_MSI_MSIX,
		.warn_settings = CXL_INT_MSI_MSIX,
		.failure_settings = CXL_INT_MSI_MSIX,
		.fatal_settings = CXL_INT_MSI_MSIX,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
		.payload_in = policy,
		.size_in = sizeof(*policy),
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		dev_err(mds->cxlds.dev, "Failed to set event interrupt policy: %d",
			rc);
		return rc;
	}

	/* Retrieve final interrupt settings */
	return cxl_event_get_int_policy(mds, policy);
}

static int cxl_event_irqsetup(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_event_interrupt_policy policy;
	int rc;

	rc = cxl_event_config_msgnums(mds, &policy);
	if (rc)
		return rc;

	rc = cxl_event_req_irq(cxlds, policy.info_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.warn_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.failure_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
		return rc;
	}

	return 0;
}

static bool cxl_event_int_is_fw(u8 setting)
{
	u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);

	return mode == CXL_INT_FW;
}

static int cxl_event_config(struct pci_host_bridge *host_bridge,
			    struct cxl_memdev_state *mds, bool irq_avail)
{
	struct cxl_event_interrupt_policy policy;
	int rc;

	/*
	 * When BIOS maintains CXL error reporting control, it will process
	 * event records. Only one agent can do so.
	 */
	if (!host_bridge->native_cxl_error)
		return 0;

	if (!irq_avail) {
		dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
		return 0;
	}

	rc = cxl_mem_alloc_event_buf(mds);
	if (rc)
		return rc;

	rc = cxl_event_get_int_policy(mds, &policy);
	if (rc)
		return rc;

	if (cxl_event_int_is_fw(policy.info_settings) ||
	    cxl_event_int_is_fw(policy.warn_settings) ||
	    cxl_event_int_is_fw(policy.failure_settings) ||
	    cxl_event_int_is_fw(policy.fatal_settings)) {
		dev_err(mds->cxlds.dev,
			"FW still in control of Event Logs despite _OSC settings\n");
		return -EBUSY;
	}

	rc = cxl_event_irqsetup(mds);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	int i, rc, pmu_count;
	bool irq_avail;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);

	mds = cxl_memdev_state_create(&pdev->dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);
	cxlds = &mds->cxlds;
	pci_set_drvdata(pdev, cxlds);

	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
				&cxlds->reg_map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
	else if (!cxlds->reg_map.component_map.ras.valid)
		dev_dbg(&pdev->dev, "RAS registers not found\n");

	rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
				    BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");

	rc = cxl_await_media_ready(cxlds);
	if (rc == 0)
		cxlds->media_ready = true;
	else
		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);

	irq_avail = cxl_alloc_irq_vectors(pdev);

	rc = cxl_pci_setup_mailbox(mds, irq_avail);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
	for (i = 0; i < pmu_count; i++) {
		struct cxl_pmu_regs pmu_regs;

		rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not find PMU regblock\n");
			break;
		}

		rc = cxl_map_pmu_regs(&map, &pmu_regs);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not map PMU regs\n");
			break;
		}

		rc = devm_cxl_pmu_add(cxlds->dev, &pmu_regs, cxlmd->id, i, CXL_PMU_MEMDEV);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not add PMU instance\n");
			break;
		}
	}

	rc = cxl_event_config(host_bridge, mds, irq_avail);
	if (rc)
		return rc;

	rc = cxl_pci_ras_unmask(pdev);
	if (rc)
		dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");

	pci_save_state(pdev);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}

static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected		= cxl_error_detected,
	.slot_reset		= cxl_slot_reset,
	.resume			= cxl_error_resume,
	.cor_error_detected	= cxl_cor_error_detected,
};

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.err_handler		= &cxl_error_handlers,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_pci_driver(cxl_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);