// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#define CREATE_TRACE_POINTS
#include <trace/events/cxl.h>

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if they are not CXL enabled. While this driver
 * is focused on the PCI-specific aspects of a CXL device, it binds to the
 * CXL memory device class code, and therefore the implementation of cxl_pci
 * centers on CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register it on the CXL bus.
 *  - Enumerate the device's register interface and map the registers.
 *  - Register the nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */
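
/*
 * For example, once bound, the resulting memory device typically appears as
 * /sys/bus/cxl/devices/memX on the CXL bus, alongside its PCI parent device;
 * the exact "memX" name is assigned at registration time.
 */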

#define cxl_doorbell_busy(cxlds)                                                \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long; it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
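
/*
 * Usage sketch: since this driver builds as the cxl_pci module, the timeout
 * can be raised at load time with "modprobe cxl_pci mbox_ready_timeout=120"
 * or on the kernel command line as "cxl_pci.mbox_ready_timeout=120". The
 * 0644 permissions above also expose it at runtime under
 * /sys/module/cxl_pci/parameters/.
 */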

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg)                                        \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",                  \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                               \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n",    \
			    (cmd)->opcode,                                     \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command that uses only the
 * registers defined by the mailbox capability ID (CXL 2.0 8.2.8.4). Memory
 * devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device
 * without needing to coordinate with each other. The driver only uses the
 * primary mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}
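
/*
 * Illustrative sketch (not part of this file): once cxl_pci_setup_mailbox()
 * below has installed cxlds->mbox_send, a caller in the core would fill a
 * struct cxl_mbox_cmd and dispatch it through that hook, e.g.:
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.payload_out = &id,
 *		.size_out = sizeof(id),
 *	};
 *
 *	rc = cxlds->mbox_send(cxlds, &cmd);
 *	if (rc == 0 && cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
 *		rc = -ENXIO;
 *
 * The opcode choice and error mapping above are assumptions for illustration;
 * per the kernel-doc above, a successful send still requires checking
 * cmd.return_code.
 */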

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance
	 * (think kexec), so do one doorbell wait here so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
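	/*
	 * The capability register encodes the payload size as a power-of-2
	 * exponent (e.g. a field value of 8 yields 256 bytes, 20 yields 1M),
	 * hence the shift below.
	 */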
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct device *dev = &pdev->dev;

	map->base = ioremap(map->resource, map->max_size);
	if (!map->base) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	iounmap(map->base);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		if (!comp_map->ras.valid)
			dev_dbg(dev, "RAS registers not found\n");

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}

static void cxl_pci_destroy_doe(void *mbs)
{
	xa_destroy(mbs);
}

static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 off = 0;

	xa_init(&cxlds->doe_mbs);
	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
		dev_err(dev, "Failed to create XArray for DOE's\n");
		return;
	}

	/*
	 * Mailbox creation is best effort.  Higher layers must determine if
	 * the lack of a mailbox for their protocol is a device failure or not.
	 */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *doe_mb;

		doe_mb = pcim_doe_create_mb(pdev, off);
		if (IS_ERR(doe_mb)) {
			dev_err(dev, "Failed to create MB object for MB @ %x\n",
				off);
			continue;
		}

		if (!pci_request_config_region_exclusive(pdev, off,
							 PCI_DOE_CAP_SIZEOF,
							 dev_name(dev)))
			pci_err(pdev, "Failed to exclude DOE registers\n");

		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
				off);
			continue;
		}

		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
	}
}
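
/*
 * Illustrative sketch (not part of this file): a consumer looking for a DOE
 * mailbox that speaks a particular protocol would walk the xarray populated
 * above, along the lines of:
 *
 *	struct pci_doe_mb *doe_mb;
 *	unsigned long index;
 *
 *	xa_for_each(&cxlds->doe_mbs, index, doe_mb)
 *		if (pci_doe_supports_prot(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
 *					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
 *			break;
 *
 * The protocol constant (CDAT table access) is an assumption for
 * illustration; the lookup helper actually used lives with the consumer code.
 */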

/*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD (Restricted CXL Device).
 */
static bool is_cxl_restricted(struct pci_dev *pdev)
{
	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}

static void disable_aer(void *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);
	pci_set_drvdata(pdev, cxlds);

	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = map.resource;

	devm_cxl_pci_create_doe(cxlds);

	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (cxlds->regs.ras) {
		pci_enable_pcie_error_reporting(pdev);
		rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
		if (rc)
			return rc;
	}
	pci_save_state(pdev);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices (class code 0x050210) */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

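		/*
		 * The first-error pointer in the control register is a bit
		 * index into the status register, so convert it to a mask.
		 */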
		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(dev, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable, dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}

static void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(dev, status);
	}
}

static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected	= cxl_error_detected,
	.slot_reset	= cxl_slot_reset,
	.resume		= cxl_error_resume,
	.cor_error_detected	= cxl_cor_error_detected,
};

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.err_handler		= &cxl_error_handlers,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);