   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright(c) 2020 Intel Corporation. */
   3
   4#include <linux/io-64-nonatomic-lo-hi.h>
   5#include <linux/firmware.h>
   6#include <linux/device.h>
   7#include <linux/slab.h>
   8#include <linux/idr.h>
   9#include <linux/pci.h>
  10#include <cxlmem.h>
  11#include "trace.h"
  12#include "core.h"
  13
  14static DECLARE_RWSEM(cxl_memdev_rwsem);
  15
  16/*
  17 * An entire PCI topology full of devices should be enough for any
  18 * config
  19 */
  20#define CXL_MEM_MAX_DEVS 65536
  21
  22static int cxl_mem_major;
  23static DEFINE_IDA(cxl_memdev_ida);
  24
  25static void cxl_memdev_release(struct device *dev)
  26{
  27	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  28
  29	ida_free(&cxl_memdev_ida, cxlmd->id);
  30	kfree(cxlmd);
  31}
  32
  33static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
  34				kgid_t *gid)
  35{
  36	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
  37}
  38
  39static ssize_t firmware_version_show(struct device *dev,
  40				     struct device_attribute *attr, char *buf)
  41{
  42	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  43	struct cxl_dev_state *cxlds = cxlmd->cxlds;
  44	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  45
  46	if (!mds)
  47		return sysfs_emit(buf, "\n");
  48	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
  49}
  50static DEVICE_ATTR_RO(firmware_version);
  51
  52static ssize_t payload_max_show(struct device *dev,
  53				struct device_attribute *attr, char *buf)
  54{
  55	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  56	struct cxl_dev_state *cxlds = cxlmd->cxlds;
  57	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  58
  59	if (!mds)
  60		return sysfs_emit(buf, "\n");
  61	return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
  62}
  63static DEVICE_ATTR_RO(payload_max);
  64
  65static ssize_t label_storage_size_show(struct device *dev,
  66				       struct device_attribute *attr, char *buf)
  67{
  68	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  69	struct cxl_dev_state *cxlds = cxlmd->cxlds;
  70	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  71
  72	if (!mds)
  73		return sysfs_emit(buf, "\n");
  74	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
  75}
  76static DEVICE_ATTR_RO(label_storage_size);
  77
  78static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
  79			     char *buf)
  80{
  81	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  82	struct cxl_dev_state *cxlds = cxlmd->cxlds;
  83	unsigned long long len = resource_size(&cxlds->ram_res);
  84
  85	return sysfs_emit(buf, "%#llx\n", len);
  86}
  87
  88static struct device_attribute dev_attr_ram_size =
  89	__ATTR(size, 0444, ram_size_show, NULL);
  90
  91static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
  92			      char *buf)
  93{
  94	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  95	struct cxl_dev_state *cxlds = cxlmd->cxlds;
  96	unsigned long long len = resource_size(&cxlds->pmem_res);
  97
  98	return sysfs_emit(buf, "%#llx\n", len);
  99}
 100
 101static struct device_attribute dev_attr_pmem_size =
 102	__ATTR(size, 0444, pmem_size_show, NULL);
 103
 104static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
 105			   char *buf)
 106{
 107	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 108	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 109
 110	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
 111}
 112static DEVICE_ATTR_RO(serial);
 113
 114static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
 115			      char *buf)
 116{
 117	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
 118}
 119static DEVICE_ATTR_RO(numa_node);
 120
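/*
 * Report the memdev's security state via sysfs. Emits "sanitize" while a
 * sanitize operation is in flight, otherwise one of "disabled", "frozen",
 * "locked" or "unlocked" derived from the cached mds->security.state flags.
 */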
 121static ssize_t security_state_show(struct device *dev,
 122				   struct device_attribute *attr,
 123				   char *buf)
 124{
 125	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 126	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 127	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
 128	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 129	unsigned long state = mds->security.state;
 130	int rc = 0;
 131
 132	/* sync with latest submission state */
 133	mutex_lock(&cxl_mbox->mbox_mutex);
 134	if (mds->security.sanitize_active)
 135		rc = sysfs_emit(buf, "sanitize\n");
 136	mutex_unlock(&cxl_mbox->mbox_mutex);
 137	if (rc)
 138		return rc;
 139
 140	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
 141		return sysfs_emit(buf, "disabled\n");
 142	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
 143	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
 144	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
 145		return sysfs_emit(buf, "frozen\n");
 146	if (state & CXL_PMEM_SEC_STATE_LOCKED)
 147		return sysfs_emit(buf, "locked\n");
 148	else
 149		return sysfs_emit(buf, "unlocked\n");
 150}
 151static struct device_attribute dev_attr_security_state =
 152	__ATTR(state, 0444, security_state_show, NULL);
 153
 154static ssize_t security_sanitize_store(struct device *dev,
 155				       struct device_attribute *attr,
 156				       const char *buf, size_t len)
 157{
 158	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 159	bool sanitize;
 160	ssize_t rc;
 161
 162	if (kstrtobool(buf, &sanitize) || !sanitize)
 163		return -EINVAL;
 164
 165	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
 166	if (rc)
 167		return rc;
 168
 169	return len;
 170}
 171static struct device_attribute dev_attr_security_sanitize =
 172	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
 173
 174static ssize_t security_erase_store(struct device *dev,
 175				    struct device_attribute *attr,
 176				    const char *buf, size_t len)
 177{
 178	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 179	ssize_t rc;
 180	bool erase;
 181
 182	if (kstrtobool(buf, &erase) || !erase)
 183		return -EINVAL;
 184
 185	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
 186	if (rc)
 187		return rc;
 188
 189	return len;
 190}
 191static struct device_attribute dev_attr_security_erase =
 192	__ATTR(erase, 0200, NULL, security_erase_store);
 193
 194static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
 195{
 196	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 197	u64 offset, length;
 198	int rc = 0;
 199
 200	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
 201	if (resource_size(&cxlds->pmem_res)) {
 202		offset = cxlds->pmem_res.start;
 203		length = resource_size(&cxlds->pmem_res);
 204		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
 205		if (rc)
 206			return rc;
 207	}
 208	if (resource_size(&cxlds->ram_res)) {
 209		offset = cxlds->ram_res.start;
 210		length = resource_size(&cxlds->ram_res);
 211		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
 212		/*
 213		 * Invalid Physical Address is not an error for
 214		 * volatile addresses. Device support is optional.
 215		 */
 216		if (rc == -EFAULT)
 217			rc = 0;
 218	}
 219	return rc;
 220}
 221
 222int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 223{
 224	struct cxl_port *port;
 225	int rc;
 226
 227	port = cxlmd->endpoint;
 228	if (!port || !is_cxl_endpoint(port))
 229		return -EINVAL;
 230
 231	rc = down_read_interruptible(&cxl_region_rwsem);
 232	if (rc)
 233		return rc;
 234
 235	rc = down_read_interruptible(&cxl_dpa_rwsem);
 236	if (rc) {
 237		up_read(&cxl_region_rwsem);
 238		return rc;
 239	}
 240
 241	if (cxl_num_decoders_committed(port) == 0) {
 242		/* No regions mapped to this memdev */
 243		rc = cxl_get_poison_by_memdev(cxlmd);
 244	} else {
 245		/* Regions mapped, collect poison by endpoint */
  246		rc = cxl_get_poison_by_endpoint(port);
 247	}
 248	up_read(&cxl_dpa_rwsem);
 249	up_read(&cxl_region_rwsem);
 250
 251	return rc;
 252}
 253EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");
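/*
 * A minimal usage sketch for cxl_trigger_poison_list(), assuming a
 * hypothetical debugfs attribute whose setter has already resolved the
 * target 'struct cxl_memdev *cxlmd':
 *
 *	static int poison_list_set(void *data, u64 val)
 *	{
 *		struct cxl_memdev *cxlmd = data;
 *
 *		if (!val)
 *			return -EINVAL;
 *		return cxl_trigger_poison_list(cxlmd);
 *	}
 *
 * The retrieved poison records are reported through the cxl_poison trace
 * event rather than returned to the caller.
 */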
 254
 255static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
 256{
 257	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 258
 259	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 260		return 0;
 261
 262	if (!resource_size(&cxlds->dpa_res)) {
 263		dev_dbg(cxlds->dev, "device has no dpa resource\n");
 264		return -EINVAL;
 265	}
 266	if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
 267		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
 268			dpa, &cxlds->dpa_res);
 269		return -EINVAL;
 270	}
 271	if (!IS_ALIGNED(dpa, 64)) {
 272		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
 273		return -EINVAL;
 274	}
 275
 276	return 0;
 277}
 278
 279int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 280{
 281	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
 282	struct cxl_mbox_inject_poison inject;
 283	struct cxl_poison_record record;
 284	struct cxl_mbox_cmd mbox_cmd;
 285	struct cxl_region *cxlr;
 286	int rc;
 287
 288	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 289		return 0;
 290
 291	rc = down_read_interruptible(&cxl_region_rwsem);
 292	if (rc)
 293		return rc;
 294
 295	rc = down_read_interruptible(&cxl_dpa_rwsem);
 296	if (rc) {
 297		up_read(&cxl_region_rwsem);
 298		return rc;
 299	}
 300
 301	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 302	if (rc)
 303		goto out;
 304
 305	inject.address = cpu_to_le64(dpa);
 306	mbox_cmd = (struct cxl_mbox_cmd) {
 307		.opcode = CXL_MBOX_OP_INJECT_POISON,
 308		.size_in = sizeof(inject),
 309		.payload_in = &inject,
 310	};
 311	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 312	if (rc)
 313		goto out;
 314
 315	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 316	if (cxlr)
 317		dev_warn_once(cxl_mbox->host,
 318			      "poison inject dpa:%#llx region: %s\n", dpa,
 319			      dev_name(&cxlr->dev));
 320
 321	record = (struct cxl_poison_record) {
 322		.address = cpu_to_le64(dpa),
 323		.length = cpu_to_le32(1),
 324	};
 325	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
 326out:
 327	up_read(&cxl_dpa_rwsem);
 328	up_read(&cxl_region_rwsem);
 329
 330	return rc;
 331}
 332EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
 333
 334int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 335{
 336	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
 337	struct cxl_mbox_clear_poison clear;
 338	struct cxl_poison_record record;
 339	struct cxl_mbox_cmd mbox_cmd;
 340	struct cxl_region *cxlr;
 341	int rc;
 342
 343	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 344		return 0;
 345
 346	rc = down_read_interruptible(&cxl_region_rwsem);
 347	if (rc)
 348		return rc;
 349
 350	rc = down_read_interruptible(&cxl_dpa_rwsem);
 351	if (rc) {
 352		up_read(&cxl_region_rwsem);
 353		return rc;
 354	}
 355
 356	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 357	if (rc)
 358		goto out;
 359
 360	/*
 361	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
 362	 * is defined to accept 64 bytes of write-data, along with the
 363	 * address to clear. This driver uses zeroes as write-data.
 364	 */
 365	clear = (struct cxl_mbox_clear_poison) {
 366		.address = cpu_to_le64(dpa)
 367	};
 368
 369	mbox_cmd = (struct cxl_mbox_cmd) {
 370		.opcode = CXL_MBOX_OP_CLEAR_POISON,
 371		.size_in = sizeof(clear),
 372		.payload_in = &clear,
 373	};
 374
 375	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 376	if (rc)
 377		goto out;
 378
 379	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 380	if (cxlr)
 381		dev_warn_once(cxl_mbox->host,
 382			      "poison clear dpa:%#llx region: %s\n", dpa,
 383			      dev_name(&cxlr->dev));
 384
 385	record = (struct cxl_poison_record) {
 386		.address = cpu_to_le64(dpa),
 387		.length = cpu_to_le32(1),
 388	};
 389	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
 390out:
 391	up_read(&cxl_dpa_rwsem);
 392	up_read(&cxl_region_rwsem);
 393
 394	return rc;
 395}
 396EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
 397
 398static struct attribute *cxl_memdev_attributes[] = {
 399	&dev_attr_serial.attr,
 400	&dev_attr_firmware_version.attr,
 401	&dev_attr_payload_max.attr,
 402	&dev_attr_label_storage_size.attr,
 403	&dev_attr_numa_node.attr,
 404	NULL,
 405};
 406
 407static ssize_t pmem_qos_class_show(struct device *dev,
 408				   struct device_attribute *attr, char *buf)
 409{
 410	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 411	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 412	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 413
 414	return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
 415}
 416
 417static struct device_attribute dev_attr_pmem_qos_class =
 418	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
 419
 420static struct attribute *cxl_memdev_pmem_attributes[] = {
 421	&dev_attr_pmem_size.attr,
 422	&dev_attr_pmem_qos_class.attr,
 423	NULL,
 424};
 425
 426static ssize_t ram_qos_class_show(struct device *dev,
 427				  struct device_attribute *attr, char *buf)
 428{
 429	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 430	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 431	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 432
 433	return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
 434}
 435
 436static struct device_attribute dev_attr_ram_qos_class =
 437	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
 438
 439static struct attribute *cxl_memdev_ram_attributes[] = {
 440	&dev_attr_ram_size.attr,
 441	&dev_attr_ram_qos_class.attr,
 442	NULL,
 443};
 444
 445static struct attribute *cxl_memdev_security_attributes[] = {
 446	&dev_attr_security_state.attr,
 447	&dev_attr_security_sanitize.attr,
 448	&dev_attr_security_erase.attr,
 449	NULL,
 450};
 451
 452static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
 453				  int n)
 454{
 455	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
 456		return 0;
 457	return a->mode;
 458}
 459
 460static struct attribute_group cxl_memdev_attribute_group = {
 461	.attrs = cxl_memdev_attributes,
 462	.is_visible = cxl_memdev_visible,
 463};
 464
 465static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
 466{
 467	struct device *dev = kobj_to_dev(kobj);
 468	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 469	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 470
 471	if (a == &dev_attr_ram_qos_class.attr)
 472		if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
 473			return 0;
 474
 475	return a->mode;
 476}
 477
 478static struct attribute_group cxl_memdev_ram_attribute_group = {
 479	.name = "ram",
 480	.attrs = cxl_memdev_ram_attributes,
 481	.is_visible = cxl_ram_visible,
 482};
 483
 484static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
 485{
 486	struct device *dev = kobj_to_dev(kobj);
 487	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 488	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 489
 490	if (a == &dev_attr_pmem_qos_class.attr)
 491		if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
 492			return 0;
 493
 494	return a->mode;
 495}
 496
 497static struct attribute_group cxl_memdev_pmem_attribute_group = {
 498	.name = "pmem",
 499	.attrs = cxl_memdev_pmem_attributes,
 500	.is_visible = cxl_pmem_visible,
 501};
 502
 503static umode_t cxl_memdev_security_visible(struct kobject *kobj,
 504					   struct attribute *a, int n)
 505{
 506	struct device *dev = kobj_to_dev(kobj);
 507	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 508	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 509
 510	if (a == &dev_attr_security_sanitize.attr &&
 511	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
 512		return 0;
 513
 514	if (a == &dev_attr_security_erase.attr &&
 515	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
 516		return 0;
 517
 518	return a->mode;
 519}
 520
 521static struct attribute_group cxl_memdev_security_attribute_group = {
 522	.name = "security",
 523	.attrs = cxl_memdev_security_attributes,
 524	.is_visible = cxl_memdev_security_visible,
 525};
 526
 527static const struct attribute_group *cxl_memdev_attribute_groups[] = {
 528	&cxl_memdev_attribute_group,
 529	&cxl_memdev_ram_attribute_group,
 530	&cxl_memdev_pmem_attribute_group,
 531	&cxl_memdev_security_attribute_group,
 532	NULL,
 533};
 534
 535void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
 536{
 537	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
 538	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
 539}
 540EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");
 541
 542static const struct device_type cxl_memdev_type = {
 543	.name = "cxl_memdev",
 544	.release = cxl_memdev_release,
 545	.devnode = cxl_memdev_devnode,
 546	.groups = cxl_memdev_attribute_groups,
 547};
 548
 549bool is_cxl_memdev(const struct device *dev)
 550{
 551	return dev->type == &cxl_memdev_type;
 552}
 553EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
 554
 555/**
 556 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 557 * @mds: The device state to operate on
 558 * @cmds: bitmap of commands to mark exclusive
 559 *
 560 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 561 * invocations of the ioctl path and then disable future execution of
 562 * commands with the command ids set in @cmds.
 563 */
 564void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 565				unsigned long *cmds)
 566{
 567	down_write(&cxl_memdev_rwsem);
 568	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 569		  CXL_MEM_COMMAND_ID_MAX);
 570	up_write(&cxl_memdev_rwsem);
 571}
 572EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
 573
 574/**
 575 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 576 * @mds: The device state to modify
 577 * @cmds: bitmap of commands to mark available for userspace
 578 */
 579void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 580				  unsigned long *cmds)
 581{
 582	down_write(&cxl_memdev_rwsem);
 583	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 584		      CXL_MEM_COMMAND_ID_MAX);
 585	up_write(&cxl_memdev_rwsem);
 586}
 587EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
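/*
 * A minimal sketch of how an in-kernel consumer might reserve a mailbox
 * command for its exclusive use, assuming it wants to claim Set LSA (the
 * nvdimm bridge follows this pattern for label updates):
 *
 *	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX) = { 0 };
 *
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *	set_exclusive_cxl_commands(mds, exclusive_cmds);
 *	...
 *	clear_exclusive_cxl_commands(mds, exclusive_cmds);
 *
 * While a command is marked exclusive the ioctl path refuses to submit it
 * on behalf of userspace.
 */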
 588
 589static void cxl_memdev_shutdown(struct device *dev)
 590{
 591	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 592
 593	down_write(&cxl_memdev_rwsem);
 594	cxlmd->cxlds = NULL;
 595	up_write(&cxl_memdev_rwsem);
 596}
 597
 598static void cxl_memdev_unregister(void *_cxlmd)
 599{
 600	struct cxl_memdev *cxlmd = _cxlmd;
 601	struct device *dev = &cxlmd->dev;
 602
 603	cdev_device_del(&cxlmd->cdev, dev);
 604	cxl_memdev_shutdown(dev);
 605	put_device(dev);
 606}
 607
 608static void detach_memdev(struct work_struct *work)
 609{
 610	struct cxl_memdev *cxlmd;
 611
 612	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
 613	device_release_driver(&cxlmd->dev);
 614	put_device(&cxlmd->dev);
 615}
 616
 617static struct lock_class_key cxl_memdev_key;
 618
 619static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
 620					   const struct file_operations *fops)
 621{
 622	struct cxl_memdev *cxlmd;
 623	struct device *dev;
 624	struct cdev *cdev;
 625	int rc;
 626
 627	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
 628	if (!cxlmd)
 629		return ERR_PTR(-ENOMEM);
 630
 631	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
 632	if (rc < 0)
 633		goto err;
 634	cxlmd->id = rc;
 635	cxlmd->depth = -1;
 636
 637	dev = &cxlmd->dev;
 638	device_initialize(dev);
 639	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
 640	dev->parent = cxlds->dev;
 641	dev->bus = &cxl_bus_type;
 642	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
 643	dev->type = &cxl_memdev_type;
 644	device_set_pm_not_required(dev);
 645	INIT_WORK(&cxlmd->detach_work, detach_memdev);
 646
 647	cdev = &cxlmd->cdev;
 648	cdev_init(cdev, fops);
 649	return cxlmd;
 650
 651err:
 652	kfree(cxlmd);
 653	return ERR_PTR(rc);
 654}
 655
 656static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
 657			       unsigned long arg)
 658{
 659	switch (cmd) {
 660	case CXL_MEM_QUERY_COMMANDS:
 661		return cxl_query_cmd(cxlmd, (void __user *)arg);
 662	case CXL_MEM_SEND_COMMAND:
 663		return cxl_send_cmd(cxlmd, (void __user *)arg);
 664	default:
 665		return -ENOTTY;
 666	}
 667}
 668
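/*
 * The ioctl path holds cxl_memdev_rwsem for read so that a racing
 * cxl_memdev_shutdown(), which clears cxlmd->cxlds under the write lock,
 * cannot tear the device state out from under an in-flight command.
 */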
 669static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
 670			     unsigned long arg)
 671{
 672	struct cxl_memdev *cxlmd = file->private_data;
 673	struct cxl_dev_state *cxlds;
 674	int rc = -ENXIO;
 675
 676	down_read(&cxl_memdev_rwsem);
 677	cxlds = cxlmd->cxlds;
 678	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
 679		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
 680	up_read(&cxl_memdev_rwsem);
 681
 682	return rc;
 683}
 684
 685static int cxl_memdev_open(struct inode *inode, struct file *file)
 686{
 687	struct cxl_memdev *cxlmd =
 688		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
 689
 690	get_device(&cxlmd->dev);
 691	file->private_data = cxlmd;
 692
 693	return 0;
 694}
 695
 696static int cxl_memdev_release_file(struct inode *inode, struct file *file)
 697{
 698	struct cxl_memdev *cxlmd =
 699		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
 700
 701	put_device(&cxlmd->dev);
 702
 703	return 0;
 704}
 705
 706/**
 707 * cxl_mem_get_fw_info - Get Firmware info
 708 * @mds: The device data for the operation
 709 *
 710 * Retrieve firmware info for the device specified.
 711 *
  712 * Return: 0 if no error, or the result of the mailbox command.
 713 *
 714 * See CXL-3.0 8.2.9.3.1 Get FW Info
 715 */
 716static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
 717{
 718	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 719	struct cxl_mbox_get_fw_info info;
 720	struct cxl_mbox_cmd mbox_cmd;
 721	int rc;
 722
 723	mbox_cmd = (struct cxl_mbox_cmd) {
 724		.opcode = CXL_MBOX_OP_GET_FW_INFO,
 725		.size_out = sizeof(info),
 726		.payload_out = &info,
 727	};
 728
 729	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 730	if (rc < 0)
 731		return rc;
 732
 733	mds->fw.num_slots = info.num_slots;
 734	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
 735				       info.slot_info);
 736
 737	return 0;
 738}
 739
 740/**
 741 * cxl_mem_activate_fw - Activate Firmware
 742 * @mds: The device data for the operation
 743 * @slot: slot number to activate
 744 *
 745 * Activate firmware in a given slot for the device specified.
 746 *
  747 * Return: 0 if no error, or the result of the mailbox command.
 748 *
 749 * See CXL-3.0 8.2.9.3.3 Activate FW
 750 */
 751static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
 752{
 753	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 754	struct cxl_mbox_activate_fw activate;
 755	struct cxl_mbox_cmd mbox_cmd;
 756
 757	if (slot == 0 || slot > mds->fw.num_slots)
 758		return -EINVAL;
 759
 760	mbox_cmd = (struct cxl_mbox_cmd) {
 761		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
 762		.size_in = sizeof(activate),
 763		.payload_in = &activate,
 764	};
 765
 766	/* Only offline activation supported for now */
 767	activate.action = CXL_FW_ACTIVATE_OFFLINE;
 768	activate.slot = slot;
 769
 770	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 771}
 772
 773/**
 774 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
 775 * @mds: The device data for the operation
 776 *
 777 * Abort an in-progress firmware transfer for the device specified.
 778 *
  779 * Return: 0 if no error, or the result of the mailbox command.
 780 *
 781 * See CXL-3.0 8.2.9.3.2 Transfer FW
 782 */
 783static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
 784{
 785	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 786	struct cxl_mbox_transfer_fw *transfer;
 787	struct cxl_mbox_cmd mbox_cmd;
 788	int rc;
 789
 790	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
 791	if (!transfer)
 792		return -ENOMEM;
 793
 794	/* Set a 1s poll interval and a total wait time of 30s */
 795	mbox_cmd = (struct cxl_mbox_cmd) {
 796		.opcode = CXL_MBOX_OP_TRANSFER_FW,
 797		.size_in = sizeof(*transfer),
 798		.payload_in = transfer,
 799		.poll_interval_ms = 1000,
 800		.poll_count = 30,
 801	};
 802
 803	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
 804
 805	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 806	kfree(transfer);
 807	return rc;
 808}
 809
 810static void cxl_fw_cleanup(struct fw_upload *fwl)
 811{
 812	struct cxl_memdev_state *mds = fwl->dd_handle;
 813
 814	mds->fw.next_slot = 0;
 815}
 816
 817static int cxl_fw_do_cancel(struct fw_upload *fwl)
 818{
 819	struct cxl_memdev_state *mds = fwl->dd_handle;
 820	struct cxl_dev_state *cxlds = &mds->cxlds;
 821	struct cxl_memdev *cxlmd = cxlds->cxlmd;
 822	int rc;
 823
 824	rc = cxl_mem_abort_fw_xfer(mds);
 825	if (rc < 0)
 826		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
 827
 828	return FW_UPLOAD_ERR_CANCELED;
 829}
 830
 831static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
 832					 u32 size)
 833{
 834	struct cxl_memdev_state *mds = fwl->dd_handle;
 835	struct cxl_mbox_transfer_fw *transfer;
 836	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 837
 838	if (!size)
 839		return FW_UPLOAD_ERR_INVALID_SIZE;
 840
 841	mds->fw.oneshot = struct_size(transfer, data, size) <
 842			    cxl_mbox->payload_size;
 843
 844	if (cxl_mem_get_fw_info(mds))
 845		return FW_UPLOAD_ERR_HW_ERROR;
 846
 847	/*
 848	 * So far no state has been changed, hence no other cleanup is
 849	 * necessary. Simply return the cancelled status.
 850	 */
 851	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 852		return FW_UPLOAD_ERR_CANCELED;
 853
 854	return FW_UPLOAD_ERR_NONE;
 855}
 856
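/*
 * Write one slice of a firmware image. The firmware-upload core calls this
 * repeatedly with an increasing @offset until the whole image has been
 * transferred. An image that fits in a single mailbox payload is sent with
 * the FULL action; otherwise the slices use INITIATE / CONTINUE / END, with
 * the destination slot named only on the FULL or END transfer. The new slot
 * is activated (offline activation only) once the final slice is written.
 */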
 857static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
 858				       u32 offset, u32 size, u32 *written)
 859{
 860	struct cxl_memdev_state *mds = fwl->dd_handle;
 861	struct cxl_dev_state *cxlds = &mds->cxlds;
 862	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
 863	struct cxl_memdev *cxlmd = cxlds->cxlmd;
 864	struct cxl_mbox_transfer_fw *transfer;
 865	struct cxl_mbox_cmd mbox_cmd;
 866	u32 cur_size, remaining;
 867	size_t size_in;
 868	int rc;
 869
 870	*written = 0;
 871
 872	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
 873	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
 874		dev_err(&cxlmd->dev,
 875			"misaligned offset for FW transfer slice (%u)\n",
 876			offset);
 877		return FW_UPLOAD_ERR_RW_ERROR;
 878	}
 879
  880	/*
  881	 * Pick the transfer size based on cxl_mbox->payload_size. @size must
  882	 * be 128-byte aligned, ->payload_size is a power of 2 starting at 256
  883	 * bytes, and sizeof(*transfer) is 128. These constraints imply that
  884	 * @cur_size will always be 128-byte aligned.
  885	 */
 886	cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
 887
 888	remaining = size - cur_size;
 889	size_in = struct_size(transfer, data, cur_size);
 890
 891	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 892		return cxl_fw_do_cancel(fwl);
 893
  894	/*
  895	 * Slot numbers are 1-indexed: pick the slot after cur_slot, wrapping
  896	 * back to slot 1 after the last one. For example, with num_slots == 3
  897	 * and cur_slot == 3, next_slot == (3 % 3) + 1 == 1.
  898	 */
 899	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
 900
 901	/* Do the transfer via mailbox cmd */
 902	transfer = kzalloc(size_in, GFP_KERNEL);
 903	if (!transfer)
 904		return FW_UPLOAD_ERR_RW_ERROR;
 905
 906	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
 907	memcpy(transfer->data, data + offset, cur_size);
 908	if (mds->fw.oneshot) {
 909		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
 910		transfer->slot = mds->fw.next_slot;
 911	} else {
 912		if (offset == 0) {
 913			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
 914		} else if (remaining == 0) {
 915			transfer->action = CXL_FW_TRANSFER_ACTION_END;
 916			transfer->slot = mds->fw.next_slot;
 917		} else {
 918			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
 919		}
 920	}
 921
 922	mbox_cmd = (struct cxl_mbox_cmd) {
 923		.opcode = CXL_MBOX_OP_TRANSFER_FW,
 924		.size_in = size_in,
 925		.payload_in = transfer,
 926		.poll_interval_ms = 1000,
 927		.poll_count = 30,
 928	};
 929
 930	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 931	if (rc < 0) {
 932		rc = FW_UPLOAD_ERR_RW_ERROR;
 933		goto out_free;
 934	}
 935
 936	*written = cur_size;
 937
 938	/* Activate FW if oneshot or if the last slice was written */
 939	if (mds->fw.oneshot || remaining == 0) {
 940		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
 941			mds->fw.next_slot);
 942		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
 943		if (rc < 0) {
 944			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
 945				rc);
 946			rc = FW_UPLOAD_ERR_HW_ERROR;
 947			goto out_free;
 948		}
 949	}
 950
 951	rc = FW_UPLOAD_ERR_NONE;
 952
 953out_free:
 954	kfree(transfer);
 955	return rc;
 956}
 957
 958static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
 959{
 960	struct cxl_memdev_state *mds = fwl->dd_handle;
 961
 962	/*
 963	 * cxl_internal_send_cmd() handles background operations synchronously.
 964	 * No need to wait for completions here - any errors would've been
 965	 * reported and handled during the ->write() call(s).
 966	 * Just check if a cancel request was received, and return success.
 967	 */
 968	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 969		return cxl_fw_do_cancel(fwl);
 970
 971	return FW_UPLOAD_ERR_NONE;
 972}
 973
 974static void cxl_fw_cancel(struct fw_upload *fwl)
 975{
 976	struct cxl_memdev_state *mds = fwl->dd_handle;
 977
 978	set_bit(CXL_FW_CANCEL, mds->fw.state);
 979}
 980
 981static const struct fw_upload_ops cxl_memdev_fw_ops = {
 982        .prepare = cxl_fw_prepare,
 983        .write = cxl_fw_write,
 984        .poll_complete = cxl_fw_poll_complete,
 985        .cancel = cxl_fw_cancel,
 986        .cleanup = cxl_fw_cleanup,
 987};
 988
 989static void cxl_remove_fw_upload(void *fwl)
 990{
 991	firmware_upload_unregister(fwl);
 992}
 993
 994int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
 995{
 996	struct cxl_dev_state *cxlds = &mds->cxlds;
 997	struct device *dev = &cxlds->cxlmd->dev;
 998	struct fw_upload *fwl;
 999
1000	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
1001		return 0;
1002
1003	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
1004				       &cxl_memdev_fw_ops, mds);
1005	if (IS_ERR(fwl))
1006		return PTR_ERR(fwl);
1007	return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
1008}
1009EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
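/*
 * A minimal sketch of the expected call site, assuming a host driver probe
 * path that has already created the memdev (variable names illustrative):
 *
 *	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
 *	if (IS_ERR(cxlmd))
 *		return PTR_ERR(cxlmd);
 *
 *	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
 *	if (rc)
 *		return rc;
 *
 * When the device does not advertise Get FW Info the helper is a no-op and
 * no firmware-upload interface is registered.
 */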
1010
1011static const struct file_operations cxl_memdev_fops = {
1012	.owner = THIS_MODULE,
1013	.unlocked_ioctl = cxl_memdev_ioctl,
1014	.open = cxl_memdev_open,
1015	.release = cxl_memdev_release_file,
1016	.compat_ioctl = compat_ptr_ioctl,
1017	.llseek = noop_llseek,
1018};
1019
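/*
 * Allocate and register a cxl_memdev: a "memN" device on the CXL bus plus
 * the /dev/cxl/memN character device backing the ioctl interface. Teardown
 * is devm-managed against @host via cxl_memdev_unregister().
 */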
1020struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
1021				       struct cxl_dev_state *cxlds)
1022{
1023	struct cxl_memdev *cxlmd;
1024	struct device *dev;
1025	struct cdev *cdev;
1026	int rc;
1027
1028	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
1029	if (IS_ERR(cxlmd))
1030		return cxlmd;
1031
1032	dev = &cxlmd->dev;
1033	rc = dev_set_name(dev, "mem%d", cxlmd->id);
1034	if (rc)
1035		goto err;
1036
1037	/*
1038	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
1039	 * needed as this is ordered with cdev_add() publishing the device.
1040	 */
1041	cxlmd->cxlds = cxlds;
1042	cxlds->cxlmd = cxlmd;
1043
1044	cdev = &cxlmd->cdev;
1045	rc = cdev_device_add(cdev, dev);
1046	if (rc)
1047		goto err;
1048
1049	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
1050	if (rc)
1051		return ERR_PTR(rc);
1052	return cxlmd;
1053
1054err:
 1055	/*
 1056	 * The cdev was briefly live; shut down any ioctl operations that
 1057	 * saw that state.
 1058	 */
1059	cxl_memdev_shutdown(dev);
1060	put_device(dev);
1061	return ERR_PTR(rc);
1062}
1063EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");
1064
1065static void sanitize_teardown_notifier(void *data)
1066{
1067	struct cxl_memdev_state *mds = data;
1068	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1069	struct kernfs_node *state;
1070
1071	/*
1072	 * Prevent new irq triggered invocations of the workqueue and
1073	 * flush inflight invocations.
1074	 */
1075	mutex_lock(&cxl_mbox->mbox_mutex);
1076	state = mds->security.sanitize_node;
1077	mds->security.sanitize_node = NULL;
1078	mutex_unlock(&cxl_mbox->mbox_mutex);
1079
1080	cancel_delayed_work_sync(&mds->security.poll_dwork);
1081	sysfs_put(state);
1082}
1083
1084int devm_cxl_sanitize_setup_notifier(struct device *host,
1085				     struct cxl_memdev *cxlmd)
1086{
1087	struct cxl_dev_state *cxlds = cxlmd->cxlds;
1088	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
1089	struct kernfs_node *sec;
1090
1091	if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
1092		return 0;
1093
1094	/*
1095	 * Note, the expectation is that @cxlmd would have failed to be
1096	 * created if these sysfs_get_dirent calls fail.
1097	 */
1098	sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
1099	if (!sec)
1100		return -ENOENT;
1101	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
1102	sysfs_put(sec);
1103	if (!mds->security.sanitize_node)
1104		return -ENOENT;
1105
1106	return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
1107}
1108EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");
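/*
 * Note that devm_cxl_sanitize_setup_notifier() looks up the "security/state"
 * sysfs dirent, so it can only succeed once devm_cxl_add_memdev() has
 * registered the memdev and its attribute groups.
 */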
1109
1110__init int cxl_memdev_init(void)
1111{
1112	dev_t devt;
1113	int rc;
1114
1115	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
1116	if (rc)
1117		return rc;
1118
1119	cxl_mem_major = MAJOR(devt);
1120
1121	return 0;
1122}
1123
1124void cxl_memdev_exit(void)
1125{
1126	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
1127}