   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
   3#include <linux/security.h>
   4#include <linux/debugfs.h>
   5#include <linux/ktime.h>
   6#include <linux/mutex.h>
   7#include <linux/unaligned.h>
   8#include <cxlpci.h>
   9#include <cxlmem.h>
  10#include <cxl.h>
  11
  12#include "core.h"
  13#include "trace.h"
  14
  15static bool cxl_raw_allow_all;
  16
  17/**
  18 * DOC: cxl mbox
  19 *
  20 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
  21 * implementation is used by the cxl_pci driver to initialize the device
  22 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
  23 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
  24 */
  25
  26#define cxl_for_each_cmd(cmd)                                                  \
  27	for ((cmd) = &cxl_mem_commands[0];                                     \
  28	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)
  29
  30#define CXL_CMD(_id, sin, sout, _flags)                                        \
  31	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
  32	.info =	{                                                              \
  33			.id = CXL_MEM_COMMAND_ID_##_id,                        \
  34			.size_in = sin,                                        \
  35			.size_out = sout,                                      \
  36		},                                                             \
  37	.opcode = CXL_MBOX_OP_##_id,                                           \
  38	.flags = _flags,                                                       \
  39	}
  40
  41#define CXL_VARIABLE_PAYLOAD	~0U
  42/*
  43 * This table defines the supported mailbox commands for the driver. This table
  44 * is made up of a UAPI structure. Non-negative values as parameters in the
  45 * table will be validated against the user's input. For example, if size_in is
  46 * 0, and the user passed in 1, it is an error.
  47 */
  48static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
  49	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
  50#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
  51	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
  52#endif
  53	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
  54	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
  55	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
  56	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
  57	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
  58	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
  59	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
  60	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
  61	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
  62	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
  63	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
  64	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
  65	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
  66	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
  67	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
  68	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
  69	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
  70};
  71
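/*
 * For illustration, the first entry in the table above,
 * CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), expands to the
 * designated initializer:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	},
 */
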
  72/*
  73 * Commands that RAW doesn't permit. The rationale for each:
  74 *
  75 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
  76 * coordination of transaction timeout values at the root bridge level.
  77 *
  78 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
  79 * and needs to be coordinated with HDM updates.
  80 *
  81 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
  82 * driver and any writes from userspace invalidate those contents.
  83 *
  84 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
  85 * to the device after it is marked clean; userspace cannot make that
  86 * assertion.
  87 *
  88 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
  89 * is kept up to date with patrol notifications and error management.
  90 *
  91 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
  92 * driver orchestration for safety.
  93 */
  94static u16 cxl_disabled_raw_commands[] = {
  95	CXL_MBOX_OP_ACTIVATE_FW,
  96	CXL_MBOX_OP_SET_PARTITION_INFO,
  97	CXL_MBOX_OP_SET_LSA,
  98	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
  99	CXL_MBOX_OP_SCAN_MEDIA,
 100	CXL_MBOX_OP_GET_SCAN_MEDIA,
 101	CXL_MBOX_OP_GET_POISON,
 102	CXL_MBOX_OP_INJECT_POISON,
 103	CXL_MBOX_OP_CLEAR_POISON,
 104};
 105
 106/*
 107 * Command sets that RAW doesn't permit. All opcodes in this set are
 108 * disabled because they pass plain text security payloads over the
 109 * user/kernel boundary. This functionality is intended to be wrapped
 110 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 111 */
 112static u8 security_command_sets[] = {
 113	0x44, /* Sanitize */
 114	0x45, /* Persistent Memory Data-at-rest Security */
 115	0x46, /* Security Passthrough */
 116};
 117
 118static bool cxl_is_security_command(u16 opcode)
 119{
 120	int i;
 121
 122	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
 123		if (security_command_sets[i] == (opcode >> 8))
 124			return true;
 125	return false;
 126}
 127
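/*
 * Example of the grouping above: security opcodes are banked by their high
 * byte, e.g. CXL_MBOX_OP_SANITIZE (0x4400) and CXL_MBOX_OP_UNLOCK (0x4503)
 * yield (opcode >> 8) values of 0x44 and 0x45 respectively, both of which
 * match entries in security_command_sets[].
 */
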
 128static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
 129					 u16 opcode)
 130{
 131	switch (opcode) {
 132	case CXL_MBOX_OP_SANITIZE:
 133		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
 134		break;
 135	case CXL_MBOX_OP_SECURE_ERASE:
 136		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
 137			security->enabled_cmds);
 138		break;
 139	case CXL_MBOX_OP_GET_SECURITY_STATE:
 140		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
 141			security->enabled_cmds);
 142		break;
 143	case CXL_MBOX_OP_SET_PASSPHRASE:
 144		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
 145			security->enabled_cmds);
 146		break;
 147	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
 148		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
 149			security->enabled_cmds);
 150		break;
 151	case CXL_MBOX_OP_UNLOCK:
 152		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
 153		break;
 154	case CXL_MBOX_OP_FREEZE_SECURITY:
 155		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
 156			security->enabled_cmds);
 157		break;
 158	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
 159		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
 160			security->enabled_cmds);
 161		break;
 162	default:
 163		break;
 164	}
 165}
 166
 167static bool cxl_is_poison_command(u16 opcode)
 168{
 169#define CXL_MBOX_OP_POISON_CMDS 0x43
 170
 171	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
 172		return true;
 173
 174	return false;
 175}
 176
 177static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
 178				       u16 opcode)
 179{
 180	switch (opcode) {
 181	case CXL_MBOX_OP_GET_POISON:
 182		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
 183		break;
 184	case CXL_MBOX_OP_INJECT_POISON:
 185		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
 186		break;
 187	case CXL_MBOX_OP_CLEAR_POISON:
 188		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
 189		break;
 190	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
 191		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
 192		break;
 193	case CXL_MBOX_OP_SCAN_MEDIA:
 194		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
 195		break;
 196	case CXL_MBOX_OP_GET_SCAN_MEDIA:
 197		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
 198		break;
 199	default:
 200		break;
 201	}
 202}
 203
 204static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
 205{
 206	struct cxl_mem_command *c;
 207
 208	cxl_for_each_cmd(c)
 209		if (c->opcode == opcode)
 210			return c;
 211
 212	return NULL;
 213}
 214
 215static const char *cxl_mem_opcode_to_name(u16 opcode)
 216{
 217	struct cxl_mem_command *c;
 218
 219	c = cxl_mem_find_command(opcode);
 220	if (!c)
 221		return NULL;
 222
 223	return cxl_command_names[c->info.id].name;
 224}
 225
 226/**
 227 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 228 * @cxl_mbox: CXL mailbox context
 229 * @mbox_cmd: initialized command to execute
 230 *
 231 * Context: Any context.
 232 * Return:
 233 *  * %0	- Success.
 234 *  * %-E2BIG	- Payload is too large for hardware.
 235 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 236 *  * %-EFAULT	- Hardware error occurred.
 237 *  * %-ENXIO	- Command completed, but device reported an error.
 238 *  * %-EIO	- Unexpected output size.
 239 *
 240 * Mailbox commands may execute successfully yet the device itself reported an
 241 * error. While this distinction can be useful for commands from userspace, the
 242 * kernel will only be able to use results when both are successful.
 243 */
 244int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
 245			  struct cxl_mbox_cmd *mbox_cmd)
 246{
 247	size_t out_size, min_out;
 248	int rc;
 249
 250	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
 251	    mbox_cmd->size_out > cxl_mbox->payload_size)
 252		return -E2BIG;
 253
 254	out_size = mbox_cmd->size_out;
 255	min_out = mbox_cmd->min_out;
 256	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
 257	/*
 258	 * EIO is reserved for a payload size mismatch and mbox_send()
 259	 * may not return this error.
 260	 */
 261	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
 262		return -ENXIO;
 263	if (rc)
 264		return rc;
 265
 266	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
 267	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
 268		return cxl_mbox_cmd_rc2errno(mbox_cmd);
 269
 270	if (!out_size)
 271		return 0;
 272
 273	/*
 274	 * Variable sized output needs to at least satisfy the caller's
 275	 * minimum if not the fully requested size.
 276	 */
 277	if (min_out == 0)
 278		min_out = out_size;
 279
 280	if (mbox_cmd->size_out < min_out)
 281		return -EIO;
 282	return 0;
 283}
 284EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
 285
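/*
 * A minimal kernel-internal usage sketch (illustrative, not part of the
 * driver): callers stack-allocate a struct cxl_mbox_cmd, point payload_out
 * at a fixed-size buffer, and treat a 0 return as "mailbox transport and
 * device both succeeded". The opcode and the 0x12 byte output size follow
 * the GET_HEALTH_INFO entry in cxl_mem_commands[] above.
 */
static int __maybe_unused example_get_health_info(struct cxl_mailbox *cxl_mbox)
{
	u8 health[0x12];
	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
		.size_out = sizeof(health),
		.payload_out = health,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
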
 286static bool cxl_mem_raw_command_allowed(u16 opcode)
 287{
 288	int i;
 289
 290	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
 291		return false;
 292
 293	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
 294		return false;
 295
 296	if (cxl_raw_allow_all)
 297		return true;
 298
 299	if (cxl_is_security_command(opcode))
 300		return false;
 301
 302	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
 303		if (cxl_disabled_raw_commands[i] == opcode)
 304			return false;
 305
 306	return true;
 307}
 308
 309/**
 310 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 311 * @opcode: The mailbox command opcode.
 312 * @payload_in: Pointer to the input payload passed in from user space.
 313 *
 314 * Return:
 315 *  * true	- payload_in passes check for @opcode.
 316 *  * false	- payload_in contains invalid or unsupported values.
 317 *
 318 * The driver may inspect payload contents before sending a mailbox
 319 * command from user space to the device. The intent is to reject
 320 * commands with input payloads that are known to be unsafe. This
 321 * check is not intended to replace the user's careful selection of
 322 * mailbox command parameters and makes no guarantee that the user
 323 * command will succeed, nor that it is appropriate.
 324 *
 325 * The specific checks are determined by the opcode.
 326 */
 327static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
 328{
 329	switch (opcode) {
 330	case CXL_MBOX_OP_SET_PARTITION_INFO: {
 331		struct cxl_mbox_set_partition_info *pi = payload_in;
 332
 333		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
 334			return false;
 335		break;
 336	}
 337	case CXL_MBOX_OP_CLEAR_LOG: {
 338		const uuid_t *uuid = (uuid_t *)payload_in;
 339
 340		/*
 341		 * Restrict the 'Clear log' action to only apply to
 342		 * Vendor debug logs.
 343		 */
 344		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
 345	}
 346	default:
 347		break;
 348	}
 349	return true;
 350}
 351
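/*
 * Concrete effect of the filter above (illustrative): a userspace
 * SET_PARTITION_INFO payload with CXL_SET_PARTITION_IMMEDIATE_FLAG set is
 * rejected, while the same payload without the flag (partitioning applied
 * at the next cold reset) is passed through to the device.
 */
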
 352static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
 353			     struct cxl_memdev_state *mds, u16 opcode,
 354			     size_t in_size, size_t out_size, u64 in_payload)
 355{
 356	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 357	*mbox = (struct cxl_mbox_cmd) {
 358		.opcode = opcode,
 359		.size_in = in_size,
 360	};
 361
 362	if (in_size) {
 363		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
 364						in_size);
 365		if (IS_ERR(mbox->payload_in))
 366			return PTR_ERR(mbox->payload_in);
 367
 368		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
 369			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
 370				cxl_mem_opcode_to_name(opcode));
 371			kvfree(mbox->payload_in);
 372			return -EBUSY;
 373		}
 374	}
 375
 376	/* Prepare to handle a full payload for variable sized output */
 377	if (out_size == CXL_VARIABLE_PAYLOAD)
 378		mbox->size_out = cxl_mbox->payload_size;
 379	else
 380		mbox->size_out = out_size;
 381
 382	if (mbox->size_out) {
 383		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
 384		if (!mbox->payload_out) {
 385			kvfree(mbox->payload_in);
 386			return -ENOMEM;
 387		}
 388	}
 389	return 0;
 390}
 391
 392static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
 393{
 394	kvfree(mbox->payload_in);
 395	kvfree(mbox->payload_out);
 396}
 397
 398static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 399			      const struct cxl_send_command *send_cmd,
 400			      struct cxl_memdev_state *mds)
 401{
 402	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 403
 404	if (send_cmd->raw.rsvd)
 405		return -EINVAL;
 406
 407	/*
 408	 * Unlike supported commands, the output size of RAW commands
 409	 * gets passed along without further checking, so it must be
 410	 * validated here.
 411	 */
 412	if (send_cmd->out.size > cxl_mbox->payload_size)
 413		return -EINVAL;
 414
 415	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
 416		return -EPERM;
 417
 418	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
 419
 420	*mem_cmd = (struct cxl_mem_command) {
 421		.info = {
 422			.id = CXL_MEM_COMMAND_ID_RAW,
 423			.size_in = send_cmd->in.size,
 424			.size_out = send_cmd->out.size,
 425		},
 426		.opcode = send_cmd->raw.opcode
 427	};
 428
 429	return 0;
 430}
 431
 432static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 433			  const struct cxl_send_command *send_cmd,
 434			  struct cxl_memdev_state *mds)
 435{
 436	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
 437	const struct cxl_command_info *info = &c->info;
 438
 439	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
 440		return -EINVAL;
 441
 442	if (send_cmd->rsvd)
 443		return -EINVAL;
 444
 445	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
 446		return -EINVAL;
 447
 448	/* Check that the command is enabled for hardware */
 449	if (!test_bit(info->id, mds->enabled_cmds))
 450		return -ENOTTY;
 451
 452	/* Check that the command is not claimed for exclusive kernel use */
 453	if (test_bit(info->id, mds->exclusive_cmds))
 454		return -EBUSY;
 455
 456	/* Check the input buffer is the expected size */
 457	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
 458	    (info->size_in != send_cmd->in.size))
 459		return -ENOMEM;
 460
 461	/* Check the output buffer is at least large enough */
 462	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
 463	    (send_cmd->out.size < info->size_out))
 464		return -ENOMEM;
 465
 466	*mem_cmd = (struct cxl_mem_command) {
 467		.info = {
 468			.id = info->id,
 469			.flags = info->flags,
 470			.size_in = send_cmd->in.size,
 471			.size_out = send_cmd->out.size,
 472		},
 473		.opcode = c->opcode
 474	};
 475
 476	return 0;
 477}
 478
 479/**
 480 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 481 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 482 * @mds: The driver data for the operation
 483 * @send_cmd: &struct cxl_send_command copied in from userspace.
 484 *
 485 * Return:
 486 *  * %0	- @out_cmd is ready to send.
 487 *  * %-ENOTTY	- Invalid command specified.
 488 *  * %-EINVAL	- Reserved fields or invalid values were used.
 489 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 490 *  * %-EPERM	- Attempted to use a protected command.
 491 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 492 *
 493 * The result of this command is a fully validated command in @mbox_cmd that is
 494 * safe to send to the hardware.
 495 */
 496static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
 497				      struct cxl_memdev_state *mds,
 498				      const struct cxl_send_command *send_cmd)
 499{
 500	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 501	struct cxl_mem_command mem_cmd;
 502	int rc;
 503
 504	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
 505		return -ENOTTY;
 506
 507	/*
 508	 * The user can never specify an input payload larger than what hardware
 509	 * supports, but output can be arbitrarily large (simply write out as
 510	 * much data as the hardware provides).
 511	 */
 512	if (send_cmd->in.size > cxl_mbox->payload_size)
 513		return -EINVAL;
 514
 515	/* Sanitize and construct a cxl_mem_command */
 516	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
 517		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
 518	else
 519		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
 520
 521	if (rc)
 522		return rc;
 523
 524	/* Sanitize and construct a cxl_mbox_cmd */
 525	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
 526				 mem_cmd.info.size_in, mem_cmd.info.size_out,
 527				 send_cmd->in.payload);
 528}
 529
 530int cxl_query_cmd(struct cxl_memdev *cxlmd,
 531		  struct cxl_mem_query_commands __user *q)
 532{
 533	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 534	struct device *dev = &cxlmd->dev;
 535	struct cxl_mem_command *cmd;
 536	u32 n_commands;
 537	int j = 0;
 538
 539	dev_dbg(dev, "Query IOCTL\n");
 540
 541	if (get_user(n_commands, &q->n_commands))
 542		return -EFAULT;
 543
 544	/* returns the total number if 0 elements are requested. */
 545	if (n_commands == 0)
 546		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
 547
 548	/*
 549	 * otherwise, return min(n_commands, total commands) cxl_command_info
 550	 * structures.
 551	 */
 552	cxl_for_each_cmd(cmd) {
 553		struct cxl_command_info info = cmd->info;
 554
 555		if (test_bit(info.id, mds->enabled_cmds))
 556			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
 557		if (test_bit(info.id, mds->exclusive_cmds))
 558			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
 559
 560		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
 561			return -EFAULT;
 562
 563		if (j == n_commands)
 564			break;
 565	}
 566
 567	return 0;
 568}
 569
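/*
 * Userspace view of the two-pass query protocol above; a hedged sketch
 * built against include/uapi/linux/cxl_mem.h (device path, allocation and
 * error handling are illustrative):
 *
 *	struct cxl_mem_query_commands *q;
 *	__u32 n;
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	q = calloc(1, sizeof(*q));
 *	q->n_commands = 0;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// pass 1: count only
 *	n = q->n_commands;
 *
 *	q = realloc(q, sizeof(*q) + n * sizeof(q->commands[0]));
 *	q->n_commands = n;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// pass 2: fill commands[]
 */
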
 570/**
 571 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 572 * @mds: The driver data for the operation
 573 * @mbox_cmd: The validated mailbox command.
 574 * @out_payload: Pointer to userspace's output payload.
 575 * @size_out: (Input) Max payload size to copy out.
 576 *            (Output) Payload size hardware generated.
 577 * @retval: Hardware generated return code from the operation.
 578 *
 579 * Return:
 580 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 581 *		  protocol completed successfully not that the operation itself
 582 *		  was successful.
 583 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 584 *  * %-EFAULT	- Something happened with copy_to/from_user.
 585 *  * %-EINTR	- Mailbox acquisition interrupted.
 586 *  * %-EXXX	- Transaction level failures.
 587 *
 588 * Dispatches a mailbox command on behalf of a userspace request.
 589 * The output payload is copied to userspace.
 590 *
 591 * See cxl_send_cmd().
 592 */
 593static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
 594					struct cxl_mbox_cmd *mbox_cmd,
 595					u64 out_payload, s32 *size_out,
 596					u32 *retval)
 597{
 598	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 599	struct device *dev = mds->cxlds.dev;
 600	int rc;
 601
 602	dev_dbg(dev,
 603		"Submitting %s command for user\n"
 604		"\topcode: %x\n"
 605		"\tsize: %zx\n",
 606		cxl_mem_opcode_to_name(mbox_cmd->opcode),
 607		mbox_cmd->opcode, mbox_cmd->size_in);
 608
 609	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
 610	if (rc)
 611		goto out;
 612
 613	/*
 614	 * @size_out contains the max size that's allowed to be written back out
 615	 * to userspace. While the hardware may have produced more output than
 616	 * this, the excess is ignored.
 617	 */
 618	if (mbox_cmd->size_out) {
 619		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
 620			      "Invalid return size\n");
 621		if (copy_to_user(u64_to_user_ptr(out_payload),
 622				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
 623			rc = -EFAULT;
 624			goto out;
 625		}
 626	}
 627
 628	*size_out = mbox_cmd->size_out;
 629	*retval = mbox_cmd->return_code;
 630
 631out:
 632	cxl_mbox_cmd_dtor(mbox_cmd);
 633	return rc;
 634}
 635
 636int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 637{
 638	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 639	struct device *dev = &cxlmd->dev;
 640	struct cxl_send_command send;
 641	struct cxl_mbox_cmd mbox_cmd;
 642	int rc;
 643
 644	dev_dbg(dev, "Send IOCTL\n");
 645
 646	if (copy_from_user(&send, s, sizeof(send)))
 647		return -EFAULT;
 648
 649	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
 650	if (rc)
 651		return rc;
 652
 653	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
 654					  &send.out.size, &send.retval);
 655	if (rc)
 656		return rc;
 657
 658	if (copy_to_user(s, &send, sizeof(send)))
 659		return -EFAULT;
 660
 661	return 0;
 662}
 663
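/*
 * Companion userspace sketch for the send path (illustrative; assumes the
 * fd and headers from the query sketch above, plus an id_buf sized for the
 * 0x43 byte IDENTIFY output):
 *
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &send) == 0) {
 *		// send.retval holds the device return code and
 *		// send.out.size the number of bytes written to id_buf
 *	}
 */
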
 664static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
 665			u32 *size, u8 *out)
 666{
 667	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 668	u32 remaining = *size;
 669	u32 offset = 0;
 670
 671	while (remaining) {
 672		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
 673		struct cxl_mbox_cmd mbox_cmd;
 674		struct cxl_mbox_get_log log;
 675		int rc;
 676
 677		log = (struct cxl_mbox_get_log) {
 678			.uuid = *uuid,
 679			.offset = cpu_to_le32(offset),
 680			.length = cpu_to_le32(xfer_size),
 681		};
 682
 683		mbox_cmd = (struct cxl_mbox_cmd) {
 684			.opcode = CXL_MBOX_OP_GET_LOG,
 685			.size_in = sizeof(log),
 686			.payload_in = &log,
 687			.size_out = xfer_size,
 688			.payload_out = out,
 689		};
 690
 691		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 692
 693		/*
 694		 * The output payload length that indicates the number
 695		 * of valid bytes can be smaller than the Log buffer
 696		 * size.
 697		 */
 698		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
 699			offset += mbox_cmd.size_out;
 700			break;
 701		}
 702
 703		if (rc < 0)
 704			return rc;
 705
 706		out += xfer_size;
 707		remaining -= xfer_size;
 708		offset += xfer_size;
 709	}
 710
 711	*size = offset;
 712
 713	return 0;
 714}
 715
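/*
 * Worked example of the chunked transfer above (illustrative): fetching a
 * 64KiB log through a 4KiB mailbox payload takes 16 Get Log commands, with
 * .offset advancing by 0x1000 per iteration; if the device returns a short
 * final chunk, *size is trimmed to the bytes actually received.
 */
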
 716/**
 717 * cxl_walk_cel() - Walk through the Command Effects Log.
 718 * @mds: The driver data for the operation
 719 * @size: Length of the Command Effects Log.
 720 * @cel: CEL
 721 *
 722 * Iterate over each entry in the CEL and determine if the driver supports the
 723 * command. If so, the command is enabled for the device and can be used later.
 724 */
 725static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 726{
 727	struct cxl_cel_entry *cel_entry;
 728	const int cel_entries = size / sizeof(*cel_entry);
 729	struct device *dev = mds->cxlds.dev;
 730	int i;
 731
 732	cel_entry = (struct cxl_cel_entry *) cel;
 733
 734	for (i = 0; i < cel_entries; i++) {
 735		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
 736		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
 737		int enabled = 0;
 738
 739		if (cmd) {
 740			set_bit(cmd->info.id, mds->enabled_cmds);
 741			enabled++;
 742		}
 743
 744		if (cxl_is_poison_command(opcode)) {
 745			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
 746			enabled++;
 747		}
 748
 749		if (cxl_is_security_command(opcode)) {
 750			cxl_set_security_cmd_enabled(&mds->security, opcode);
 751			enabled++;
 752		}
 753
 754		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
 755			enabled ? "enabled" : "unsupported by driver");
 756	}
 757}
 758
 759static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
 760{
 761	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 762	struct cxl_mbox_get_supported_logs *ret;
 763	struct cxl_mbox_cmd mbox_cmd;
 764	int rc;
 765
 766	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
 767	if (!ret)
 768		return ERR_PTR(-ENOMEM);
 769
 770	mbox_cmd = (struct cxl_mbox_cmd) {
 771		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
 772		.size_out = cxl_mbox->payload_size,
 773		.payload_out = ret,
 774		/* At least the record number field must be valid */
 775		.min_out = 2,
 776	};
 777	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 778	if (rc < 0) {
 779		kvfree(ret);
 780		return ERR_PTR(rc);
 781	}
 782
 783
 784	return ret;
 785}
 786
 787enum {
 788	CEL_UUID,
 789	VENDOR_DEBUG_UUID,
 790};
 791
 792/* See CXL 2.0 Table 170. Get Log Input Payload */
 793static const uuid_t log_uuid[] = {
 794	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
 795	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
 796};
 797
 798/**
 799 * cxl_enumerate_cmds() - Enumerate commands for a device.
 800 * @mds: The driver data for the operation
 801 *
 802 * Returns 0 if enumerate completed successfully.
 803 *
 804 * CXL devices have optional support for certain commands. This function will
 805 * determine the set of supported commands for the hardware and update the
 806 * enabled_cmds bitmap in the @mds.
 807 */
 808int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 809{
 810	struct cxl_mbox_get_supported_logs *gsl;
 811	struct device *dev = mds->cxlds.dev;
 812	struct cxl_mem_command *cmd;
 813	int i, rc;
 814
 815	gsl = cxl_get_gsl(mds);
 816	if (IS_ERR(gsl))
 817		return PTR_ERR(gsl);
 818
 819	rc = -ENOENT;
 820	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
 821		u32 size = le32_to_cpu(gsl->entry[i].size);
 822		uuid_t uuid = gsl->entry[i].uuid;
 823		u8 *log;
 824
 825		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
 826
 827		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
 828			continue;
 829
 830		log = kvmalloc(size, GFP_KERNEL);
 831		if (!log) {
 832			rc = -ENOMEM;
 833			goto out;
 834		}
 835
 836		rc = cxl_xfer_log(mds, &uuid, &size, log);
 837		if (rc) {
 838			kvfree(log);
 839			goto out;
 840		}
 841
 842		cxl_walk_cel(mds, size, log);
 843		kvfree(log);
 844
 845		/* In case CEL was bogus, enable some default commands. */
 846		cxl_for_each_cmd(cmd)
 847			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
 848				set_bit(cmd->info.id, mds->enabled_cmds);
 849
 850		/* Found the required CEL */
 851		rc = 0;
 852	}
 853out:
 854	kvfree(gsl);
 855	return rc;
 856}
 857EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
 858
 859void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 860			    enum cxl_event_log_type type,
 861			    enum cxl_event_type event_type,
 862			    const uuid_t *uuid, union cxl_event *evt)
 863{
 864	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
 865		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
 866		return;
 867	}
 868	if (event_type == CXL_CPER_EVENT_GENERIC) {
 869		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
 870		return;
 871	}
 872
 873	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
 874		u64 dpa, hpa = ULLONG_MAX;
 875		struct cxl_region *cxlr;
 876
 877		/*
 878		 * These trace points are annotated with HPA and region
 879		 * translations. Take topology mutation locks and lookup
 880		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
 881		 */
 882		guard(rwsem_read)(&cxl_region_rwsem);
 883		guard(rwsem_read)(&cxl_dpa_rwsem);
 884
 885		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
 886		cxlr = cxl_dpa_to_region(cxlmd, dpa);
 887		if (cxlr)
 888			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
 889
 890		if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
 891			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
 892						&evt->gen_media);
 893		else if (event_type == CXL_CPER_EVENT_DRAM)
 894			trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
 895	}
 896}
 897EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
 898
 899static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 900				     enum cxl_event_log_type type,
 901				     struct cxl_event_record_raw *record)
 902{
 903	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
 904	const uuid_t *uuid = &record->id;
 905
 906	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
 907		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
 908	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
 909		ev_type = CXL_CPER_EVENT_DRAM;
 910	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
 911		ev_type = CXL_CPER_EVENT_MEM_MODULE;
 912
 913	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
 914}
 915
 916static int cxl_clear_event_record(struct cxl_memdev_state *mds,
 917				  enum cxl_event_log_type log,
 918				  struct cxl_get_event_payload *get_pl)
 919{
 920	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 921	struct cxl_mbox_clear_event_payload *payload;
 922	u16 total = le16_to_cpu(get_pl->record_count);
 923	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
 924	size_t pl_size = struct_size(payload, handles, max_handles);
 925	struct cxl_mbox_cmd mbox_cmd;
 926	u16 cnt;
 927	int rc = 0;
 928	int i;
 929
 930	/* Payload size may limit the max handles */
 931	if (pl_size > cxl_mbox->payload_size) {
 932		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
 933			      sizeof(__le16);
 934		pl_size = struct_size(payload, handles, max_handles);
 935	}
 936
 937	payload = kvzalloc(pl_size, GFP_KERNEL);
 938	if (!payload)
 939		return -ENOMEM;
 940
 941	*payload = (struct cxl_mbox_clear_event_payload) {
 942		.event_log = log,
 943	};
 944
 945	mbox_cmd = (struct cxl_mbox_cmd) {
 946		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
 947		.payload_in = payload,
 948		.size_in = pl_size,
 949	};
 950
 951	/*
 952	 * Clear Event Records uses u8 for the handle cnt while Get Event
 953	 * Record can return up to 0xffff records.
 954	 */
 955	i = 0;
 956	for (cnt = 0; cnt < total; cnt++) {
 957		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
 958		struct cxl_event_generic *gen = &raw->event.generic;
 959
 960		payload->handles[i++] = gen->hdr.handle;
 961		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
 962			le16_to_cpu(payload->handles[i - 1]));
 963
 964		if (i == max_handles) {
 965			payload->nr_recs = i;
 966			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 967			if (rc)
 968				goto free_pl;
 969			i = 0;
 970		}
 971	}
 972
 973	/* Clear what is left if any */
 974	if (i) {
 975		payload->nr_recs = i;
 976		mbox_cmd.size_in = struct_size(payload, handles, i);
 977		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 978		if (rc)
 979			goto free_pl;
 980	}
 981
 982free_pl:
 983	kvfree(payload);
 984	return rc;
 985}
 986
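/*
 * Worked example of the batching math above, assuming the spec-minimum
 * 256 byte mailbox payload and a 6 byte fixed header preceding the
 * flexible handles[] array: max_handles = (256 - 6) / sizeof(__le16) = 125,
 * so clearing 300 returned records takes three Clear Event Records
 * commands (125 + 125 + 50 handles).
 */
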
 987static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
 988				    enum cxl_event_log_type type)
 989{
 990	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 991	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
 992	struct device *dev = mds->cxlds.dev;
 993	struct cxl_get_event_payload *payload;
 994	u8 log_type = type;
 995	u16 nr_rec;
 996
 997	mutex_lock(&mds->event.log_lock);
 998	payload = mds->event.buf;
 999
1000	do {
1001		int rc, i;
1002		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
1003			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
1004			.payload_in = &log_type,
1005			.size_in = sizeof(log_type),
1006			.payload_out = payload,
1007			.size_out = cxl_mbox->payload_size,
1008			.min_out = struct_size(payload, records, 0),
1009		};
1010
1011		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1012		if (rc) {
1013			dev_err_ratelimited(dev,
1014				"Event log '%d': Failed to query event records : %d",
1015				type, rc);
1016			break;
1017		}
1018
1019		nr_rec = le16_to_cpu(payload->record_count);
1020		if (!nr_rec)
1021			break;
1022
1023		for (i = 0; i < nr_rec; i++)
1024			__cxl_event_trace_record(cxlmd, type,
1025						 &payload->records[i]);
1026
1027		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
1028			trace_cxl_overflow(cxlmd, type, payload);
1029
1030		rc = cxl_clear_event_record(mds, type, payload);
1031		if (rc) {
1032			dev_err_ratelimited(dev,
1033				"Event log '%d': Failed to clear events : %d",
1034				type, rc);
1035			break;
1036		}
1037	} while (nr_rec);
1038
1039	mutex_unlock(&mds->event.log_lock);
1040}
1041
1042/**
1043 * cxl_mem_get_event_records - Get Event Records from the device
1044 * @mds: The driver data for the operation
1045 * @status: Event Status register value identifying which events are available.
1046 *
1047 * Retrieve all event records available on the device, report them as trace
1048 * events, and clear them.
1049 *
1050 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
1051 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
1052 */
1053void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
1054{
1055	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
1056
1057	if (status & CXLDEV_EVENT_STATUS_FATAL)
1058		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
1059	if (status & CXLDEV_EVENT_STATUS_FAIL)
1060		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
1061	if (status & CXLDEV_EVENT_STATUS_WARN)
1062		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
1063	if (status & CXLDEV_EVENT_STATUS_INFO)
1064		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
1065}
1066EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
1067
1068/**
1069 * cxl_mem_get_partition_info - Get partition info
1070 * @mds: The driver data for the operation
1071 *
1072 * Retrieve the current partition info for the device specified.  The active
1073 * values are the current capacity in bytes.  If not 0, the 'next' values are
1074 * the pending values, in bytes, which take effect on the next cold reset.
1075 *
1076 * Return: 0 if no error; otherwise the result of the mailbox command.
1077 *
1078 * See CXL @8.2.9.5.2.1 Get Partition Info
1079 */
1080static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
1081{
1082	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1083	struct cxl_mbox_get_partition_info pi;
1084	struct cxl_mbox_cmd mbox_cmd;
1085	int rc;
1086
1087	mbox_cmd = (struct cxl_mbox_cmd) {
1088		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
1089		.size_out = sizeof(pi),
1090		.payload_out = &pi,
1091	};
1092	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1093	if (rc)
1094		return rc;
1095
1096	mds->active_volatile_bytes =
1097		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
1098	mds->active_persistent_bytes =
1099		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
1100	mds->next_volatile_bytes =
1101		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
1102	mds->next_persistent_bytes =
1103		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
1104
1105	return 0;
1106}
1107
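/*
 * Worked example (illustrative): the *_cap fields are encoded in
 * CXL_CAPACITY_MULTIPLIER (256MB) units, so a device reporting an
 * active_volatile_cap of 0x40 exposes 0x40 * 256MB = 16GB of active
 * volatile capacity.
 */
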
1108/**
1109 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
1110 * @mds: The driver data for the operation
1111 *
1112 * Return: 0 if identify was executed successfully or media not ready.
1113 *
1114 * This will dispatch the identify command to the device and on success populate
1115 * structures to be exported to sysfs.
1116 */
1117int cxl_dev_state_identify(struct cxl_memdev_state *mds)
1118{
1119	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1120	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
1121	struct cxl_mbox_identify id;
1122	struct cxl_mbox_cmd mbox_cmd;
1123	u32 val;
1124	int rc;
1125
1126	if (!mds->cxlds.media_ready)
1127		return 0;
1128
1129	mbox_cmd = (struct cxl_mbox_cmd) {
1130		.opcode = CXL_MBOX_OP_IDENTIFY,
1131		.size_out = sizeof(id),
1132		.payload_out = &id,
1133	};
1134	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1135	if (rc < 0)
1136		return rc;
1137
1138	mds->total_bytes =
1139		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
1140	mds->volatile_only_bytes =
1141		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
1142	mds->persistent_only_bytes =
1143		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
1144	mds->partition_align_bytes =
1145		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
1146
1147	mds->lsa_size = le32_to_cpu(id.lsa_size);
1148	memcpy(mds->firmware_version, id.fw_revision,
1149	       sizeof(id.fw_revision));
1150
1151	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
1152		val = get_unaligned_le24(id.poison_list_max_mer);
1153		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
1154	}
1155
1156	return 0;
1157}
1158EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");
1159
1160static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
1161{
1162	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1163	int rc;
1164	u32 sec_out = 0;
1165	struct cxl_get_security_output {
1166		__le32 flags;
1167	} out;
1168	struct cxl_mbox_cmd sec_cmd = {
1169		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
1170		.payload_out = &out,
1171		.size_out = sizeof(out),
1172	};
1173	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
1174
1175	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
1176		return -EINVAL;
1177
1178	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
1179	if (rc < 0) {
1180		dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
1181		return rc;
1182	}
1183
1184	/*
1185	 * Prior to using these commands, any security applied to
1186	 * the user data areas of the device shall be DISABLED (or
1187	 * UNLOCKED for the secure erase case).
1188	 */
1189	sec_out = le32_to_cpu(out.flags);
1190	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
1191		return -EINVAL;
1192
1193	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
1194	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
1195		return -EINVAL;
1196
1197	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1198	if (rc < 0) {
1199		dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
1200		return rc;
1201	}
1202
1203	return 0;
1204}
1205
1206
1207/**
1208 * cxl_mem_sanitize() - Send a sanitization command to the device.
1209 * @cxlmd: The device for the operation
1210 * @cmd: The specific sanitization command opcode
1211 *
1212 * Return: 0 if the command was executed successfully, regardless of
1213 * whether or not the actual security operation is done in the background,
1214 * such as for the Sanitize case.
1215 * Error return values can be the result of the mailbox command, -EINVAL
1216 * when security requirements are not met or the context is invalid, or -EBUSY
1217 * if the sanitize operation is already in flight.
1218 *
1219 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
1220 */
1221int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
1222{
1223	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
1224	struct cxl_port  *endpoint;
1225	int rc;
1226
1227	/* synchronize with cxl_mem_probe() and decoder write operations */
1228	guard(device)(&cxlmd->dev);
1229	endpoint = cxlmd->endpoint;
1230	down_read(&cxl_region_rwsem);
1231	/*
1232	 * Require an endpoint to be safe; otherwise the driver cannot
1233	 * be sure that the device is unmapped.
1234	 */
1235	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
1236		rc = __cxl_mem_sanitize(mds, cmd);
1237	else
1238		rc = -EBUSY;
1239	up_read(&cxl_region_rwsem);
1240
1241	return rc;
1242}
1243
1244static int add_dpa_res(struct device *dev, struct resource *parent,
1245		       struct resource *res, resource_size_t start,
1246		       resource_size_t size, const char *type)
1247{
1248	int rc;
1249
1250	res->name = type;
1251	res->start = start;
1252	res->end = start + size - 1;
1253	res->flags = IORESOURCE_MEM;
1254	if (resource_size(res) == 0) {
1255		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
1256		return 0;
1257	}
1258	rc = request_resource(parent, res);
1259	if (rc) {
1260		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
1261			res, rc);
1262		return rc;
1263	}
1264
1265	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
1266
1267	return 0;
1268}
1269
1270int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
1271{
1272	struct cxl_dev_state *cxlds = &mds->cxlds;
1273	struct device *dev = cxlds->dev;
1274	int rc;
1275
1276	if (!cxlds->media_ready) {
1277		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
1278		cxlds->ram_res = DEFINE_RES_MEM(0, 0);
1279		cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
1280		return 0;
1281	}
1282
1283	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
1284
1285	if (mds->partition_align_bytes == 0) {
1286		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
1287				 mds->volatile_only_bytes, "ram");
1288		if (rc)
1289			return rc;
1290		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
1291				   mds->volatile_only_bytes,
1292				   mds->persistent_only_bytes, "pmem");
1293	}
1294
1295	rc = cxl_mem_get_partition_info(mds);
1296	if (rc) {
1297		dev_err(dev, "Failed to query partition information\n");
1298		return rc;
1299	}
1300
1301	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
1302			 mds->active_volatile_bytes, "ram");
1303	if (rc)
1304		return rc;
1305	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
1306			   mds->active_volatile_bytes,
1307			   mds->active_persistent_bytes, "pmem");
1308}
1309EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, "CXL");
1310
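/*
 * Resulting DPA resource tree for a hypothetical device with 16GB of
 * active volatile and 16GB of active persistent capacity:
 *
 *	dpa_res:  0x000000000-0x7ffffffff
 *	ram_res:  0x000000000-0x3ffffffff "ram"
 *	pmem_res: 0x400000000-0x7ffffffff "pmem"
 */
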
1311int cxl_set_timestamp(struct cxl_memdev_state *mds)
1312{
1313	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1314	struct cxl_mbox_cmd mbox_cmd;
1315	struct cxl_mbox_set_timestamp_in pi;
1316	int rc;
1317
1318	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
1319	mbox_cmd = (struct cxl_mbox_cmd) {
1320		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
1321		.size_in = sizeof(pi),
1322		.payload_in = &pi,
1323	};
1324
1325	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1326	/*
1327	 * Command is optional. Devices may have another way of providing
1328	 * a timestamp, or may return all 0s in timestamp fields.
1329	 * Don't report an error if this command isn't supported.
1330	 */
1331	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
1332		return rc;
1333
1334	return 0;
1335}
1336EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");
1337
1338int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
1339		       struct cxl_region *cxlr)
1340{
1341	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
1342	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
1343	struct cxl_mbox_poison_out *po;
1344	struct cxl_mbox_poison_in pi;
1345	int nr_records = 0;
1346	int rc;
1347
1348	rc = mutex_lock_interruptible(&mds->poison.lock);
1349	if (rc)
1350		return rc;
1351
1352	po = mds->poison.list_out;
1353	pi.offset = cpu_to_le64(offset);
1354	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
1355
1356	do {
1357		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
1358			.opcode = CXL_MBOX_OP_GET_POISON,
1359			.size_in = sizeof(pi),
1360			.payload_in = &pi,
1361			.size_out = cxl_mbox->payload_size,
1362			.payload_out = po,
1363			.min_out = struct_size(po, record, 0),
1364		};
1365
1366		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1367		if (rc)
1368			break;
1369
1370		for (int i = 0; i < le16_to_cpu(po->count); i++)
1371			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
1372					 po->flags, po->overflow_ts,
1373					 CXL_POISON_TRACE_LIST);
1374
1375		/* Protect against an uncleared _FLAG_MORE */
1376		nr_records = nr_records + le16_to_cpu(po->count);
1377		if (nr_records >= mds->poison.max_errors) {
1378			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
1379				nr_records);
1380			break;
1381		}
1382	} while (po->flags & CXL_POISON_FLAG_MORE);
1383
1384	mutex_unlock(&mds->poison.lock);
1385	return rc;
1386}
1387EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
1388
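/*
 * Note on units (illustrative): pi.length is encoded in
 * CXL_POISON_LEN_MULT (64 byte) units, so a caller passing len = 4096
 * asks the device to walk 4096 / 64 = 64 units of DPA starting at
 * @offset.
 */
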
1389static void free_poison_buf(void *buf)
1390{
1391	kvfree(buf);
1392}
1393
1394/* Get Poison List output buffer is protected by mds->poison.lock */
1395static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
1396{
1397	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1398
1399	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
1400	if (!mds->poison.list_out)
1401		return -ENOMEM;
1402
1403	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
1404					mds->poison.list_out);
1405}
1406
1407int cxl_poison_state_init(struct cxl_memdev_state *mds)
1408{
1409	int rc;
1410
1411	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
1412		return 0;
1413
1414	rc = cxl_poison_alloc_buf(mds);
1415	if (rc) {
1416		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
1417		return rc;
1418	}
1419
1420	mutex_init(&mds->poison.lock);
1421	return 0;
1422}
1423EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
1424
1425int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
1426{
1427	if (!cxl_mbox || !host)
1428		return -EINVAL;
1429
1430	cxl_mbox->host = host;
1431	mutex_init(&cxl_mbox->mbox_mutex);
1432	rcuwait_init(&cxl_mbox->mbox_wait);
1433
1434	return 0;
1435}
1436EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
1437
1438struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
1439{
1440	struct cxl_memdev_state *mds;
1441
1442	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
1443	if (!mds) {
1444		dev_err(dev, "No memory available\n");
1445		return ERR_PTR(-ENOMEM);
1446	}
1447
1448	mutex_init(&mds->event.log_lock);
1449	mds->cxlds.dev = dev;
1450	mds->cxlds.reg_map.host = dev;
1451	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
1452	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
1453	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
1454	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
1455
1456	return mds;
1457}
1458EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
1459
1460void __init cxl_mbox_init(void)
1461{
1462	struct dentry *mbox_debugfs;
1463
1464	mbox_debugfs = cxl_debugfs_create_dir("mbox");
1465	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
1466			    &cxl_raw_allow_all);
1467}
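/*
 * The raw_allow_all knob created above feeds cxl_mem_raw_command_allowed();
 * assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/cxl/mbox/raw_allow_all
 *
 * lifts the deny-list filtering of RAW opcodes (kernel lockdown checks
 * still apply).
 */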
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
   3#include <linux/security.h>
   4#include <linux/debugfs.h>
   5#include <linux/ktime.h>
   6#include <linux/mutex.h>
   7#include <asm/unaligned.h>
   8#include <cxlpci.h>
   9#include <cxlmem.h>
  10#include <cxl.h>
  11
  12#include "core.h"
  13#include "trace.h"
  14
  15static bool cxl_raw_allow_all;
  16
  17/**
  18 * DOC: cxl mbox
  19 *
  20 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
  21 * implementation is used by the cxl_pci driver to initialize the device
  22 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
  23 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
  24 */
  25
  26#define cxl_for_each_cmd(cmd)                                                  \
  27	for ((cmd) = &cxl_mem_commands[0];                                     \
  28	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)
  29
  30#define CXL_CMD(_id, sin, sout, _flags)                                        \
  31	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
  32	.info =	{                                                              \
  33			.id = CXL_MEM_COMMAND_ID_##_id,                        \
  34			.size_in = sin,                                        \
  35			.size_out = sout,                                      \
  36		},                                                             \
  37	.opcode = CXL_MBOX_OP_##_id,                                           \
  38	.flags = _flags,                                                       \
  39	}
  40
  41#define CXL_VARIABLE_PAYLOAD	~0U
  42/*
  43 * This table defines the supported mailbox commands for the driver. This table
  44 * is made up of a UAPI structure. Non-negative values as parameters in the
  45 * table will be validated against the user's input. For example, if size_in is
  46 * 0, and the user passed in 1, it is an error.
  47 */
  48static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
  49	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
  50#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
  51	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
  52#endif
  53	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
  54	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
  55	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
  56	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
  57	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
  58	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
 
 
 
  59	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
  60	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
  61	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
  62	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
  63	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
  64	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
  65	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
  66	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
  67};
  68
  69/*
  70 * Commands that RAW doesn't permit. The rationale for each:
  71 *
  72 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
  73 * coordination of transaction timeout values at the root bridge level.
  74 *
  75 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
  76 * and needs to be coordinated with HDM updates.
  77 *
  78 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
  79 * driver and any writes from userspace invalidates those contents.
  80 *
  81 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
  82 * to the device after it is marked clean, userspace can not make that
  83 * assertion.
  84 *
  85 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
  86 * is kept up to date with patrol notifications and error management.
  87 *
  88 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
  89 * driver orchestration for safety.
  90 */
  91static u16 cxl_disabled_raw_commands[] = {
  92	CXL_MBOX_OP_ACTIVATE_FW,
  93	CXL_MBOX_OP_SET_PARTITION_INFO,
  94	CXL_MBOX_OP_SET_LSA,
  95	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
  96	CXL_MBOX_OP_SCAN_MEDIA,
  97	CXL_MBOX_OP_GET_SCAN_MEDIA,
  98	CXL_MBOX_OP_GET_POISON,
  99	CXL_MBOX_OP_INJECT_POISON,
 100	CXL_MBOX_OP_CLEAR_POISON,
 101};
 102
 103/*
 104 * Command sets that RAW doesn't permit. All opcodes in this set are
 105 * disabled because they pass plain text security payloads over the
 106 * user/kernel boundary. This functionality is intended to be wrapped
 107 * behind the keys ABI which allows for encrypted payloads in the UAPI
 108 */
 109static u8 security_command_sets[] = {
 110	0x44, /* Sanitize */
 111	0x45, /* Persistent Memory Data-at-rest Security */
 112	0x46, /* Security Passthrough */
 113};
 114
 115static bool cxl_is_security_command(u16 opcode)
 116{
 117	int i;
 118
 119	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
 120		if (security_command_sets[i] == (opcode >> 8))
 121			return true;
 122	return false;
 123}
 124
 125static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
 126					 u16 opcode)
 127{
 128	switch (opcode) {
 129	case CXL_MBOX_OP_SANITIZE:
 130		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
 131		break;
 132	case CXL_MBOX_OP_SECURE_ERASE:
 133		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
 134			security->enabled_cmds);
 135		break;
 136	case CXL_MBOX_OP_GET_SECURITY_STATE:
 137		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
 138			security->enabled_cmds);
 139		break;
 140	case CXL_MBOX_OP_SET_PASSPHRASE:
 141		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
 142			security->enabled_cmds);
 143		break;
 144	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
 145		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
 146			security->enabled_cmds);
 147		break;
 148	case CXL_MBOX_OP_UNLOCK:
 149		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
 150		break;
 151	case CXL_MBOX_OP_FREEZE_SECURITY:
 152		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
 153			security->enabled_cmds);
 154		break;
 155	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
 156		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
 157			security->enabled_cmds);
 158		break;
 159	default:
 160		break;
 161	}
 162}
 163
 164static bool cxl_is_poison_command(u16 opcode)
 165{
 166#define CXL_MBOX_OP_POISON_CMDS 0x43
 167
 168	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
 169		return true;
 170
 171	return false;
 172}
 173
 174static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
 175				       u16 opcode)
 176{
 177	switch (opcode) {
 178	case CXL_MBOX_OP_GET_POISON:
 179		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
 180		break;
 181	case CXL_MBOX_OP_INJECT_POISON:
 182		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
 183		break;
 184	case CXL_MBOX_OP_CLEAR_POISON:
 185		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
 186		break;
 187	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
 188		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
 189		break;
 190	case CXL_MBOX_OP_SCAN_MEDIA:
 191		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
 192		break;
 193	case CXL_MBOX_OP_GET_SCAN_MEDIA:
 194		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
 195		break;
 196	default:
 197		break;
 198	}
 199}
 200
 201static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
 202{
 203	struct cxl_mem_command *c;
 204
 205	cxl_for_each_cmd(c)
 206		if (c->opcode == opcode)
 207			return c;
 208
 209	return NULL;
 210}
 211
 212static const char *cxl_mem_opcode_to_name(u16 opcode)
 213{
 214	struct cxl_mem_command *c;
 215
 216	c = cxl_mem_find_command(opcode);
 217	if (!c)
 218		return NULL;
 219
 220	return cxl_command_names[c->info.id].name;
 221}
 222
 223/**
 224 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 225 * @mds: The driver data for the operation
 226 * @mbox_cmd: initialized command to execute
 227 *
 228 * Context: Any context.
 229 * Return:
 230 *  * %>=0	- Number of bytes returned in @out.
 231 *  * %-E2BIG	- Payload is too large for hardware.
 232 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 233 *  * %-EFAULT	- Hardware error occurred.
 234 *  * %-ENXIO	- Command completed, but device reported an error.
 235 *  * %-EIO	- Unexpected output size.
 236 *
 237 * Mailbox commands may execute successfully yet the device itself reported an
 238 * error. While this distinction can be useful for commands from userspace, the
 239 * kernel will only be able to use results when both are successful.
 240 */
 241int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
 242			  struct cxl_mbox_cmd *mbox_cmd)
 243{
 244	size_t out_size, min_out;
 245	int rc;
 246
 247	if (mbox_cmd->size_in > mds->payload_size ||
 248	    mbox_cmd->size_out > mds->payload_size)
 249		return -E2BIG;
 250
 251	out_size = mbox_cmd->size_out;
 252	min_out = mbox_cmd->min_out;
 253	rc = mds->mbox_send(mds, mbox_cmd);
 254	/*
 255	 * EIO is reserved for a payload size mismatch and mbox_send()
 256	 * may not return this error.
 257	 */
 258	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
 259		return -ENXIO;
 260	if (rc)
 261		return rc;
 262
 263	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
 264	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
 265		return cxl_mbox_cmd_rc2errno(mbox_cmd);
 266
 267	if (!out_size)
 268		return 0;
 269
 270	/*
 271	 * Variable sized output needs to at least satisfy the caller's
 272	 * minimum if not the fully requested size.
 273	 */
 274	if (min_out == 0)
 275		min_out = out_size;
 276
 277	if (mbox_cmd->size_out < min_out)
 278		return -EIO;
 279	return 0;
 280}
 281EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
 282
 283static bool cxl_mem_raw_command_allowed(u16 opcode)
 284{
 285	int i;
 286
 287	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
 288		return false;
 289
 290	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
 291		return false;
 292
 293	if (cxl_raw_allow_all)
 294		return true;
 295
 296	if (cxl_is_security_command(opcode))
 297		return false;
 298
 299	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
 300		if (cxl_disabled_raw_commands[i] == opcode)
 301			return false;
 302
 303	return true;
 304}
 305
 306/**
 307 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 308 * @opcode: The mailbox command opcode.
 309 * @payload_in: Pointer to the input payload passed in from user space.
 310 *
 311 * Return:
 312 *  * true	- payload_in passes check for @opcode.
 313 *  * false	- payload_in contains invalid or unsupported values.
 314 *
 315 * The driver may inspect payload contents before sending a mailbox
 316 * command from user space to the device. The intent is to reject
 317 * commands with input payloads that are known to be unsafe. This
  318 * check is not intended to replace the user's careful selection of
 319 * mailbox command parameters and makes no guarantee that the user
 320 * command will succeed, nor that it is appropriate.
 321 *
 322 * The specific checks are determined by the opcode.
 323 */
 324static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
 325{
 326	switch (opcode) {
 327	case CXL_MBOX_OP_SET_PARTITION_INFO: {
 328		struct cxl_mbox_set_partition_info *pi = payload_in;
 329
 330		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
 331			return false;
 332		break;
 333	}
 334	default:
 335		break;
 336	}
 337	return true;
 338}
 339
 340static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
 341			     struct cxl_memdev_state *mds, u16 opcode,
 342			     size_t in_size, size_t out_size, u64 in_payload)
 343{
 344	*mbox = (struct cxl_mbox_cmd) {
 345		.opcode = opcode,
 346		.size_in = in_size,
 347	};
 348
 349	if (in_size) {
 350		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
 351						in_size);
 352		if (IS_ERR(mbox->payload_in))
 353			return PTR_ERR(mbox->payload_in);
 354
 355		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
 356			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
 357				cxl_mem_opcode_to_name(opcode));
 358			kvfree(mbox->payload_in);
 359			return -EBUSY;
 360		}
 361	}
 362
 363	/* Prepare to handle a full payload for variable sized output */
 364	if (out_size == CXL_VARIABLE_PAYLOAD)
 365		mbox->size_out = mds->payload_size;
 366	else
 367		mbox->size_out = out_size;
 368
 369	if (mbox->size_out) {
 370		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
 371		if (!mbox->payload_out) {
 372			kvfree(mbox->payload_in);
 373			return -ENOMEM;
 374		}
 375	}
 376	return 0;
 377}
 378
 379static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
 380{
 381	kvfree(mbox->payload_in);
 382	kvfree(mbox->payload_out);
 383}
 384
 385static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 386			      const struct cxl_send_command *send_cmd,
 387			      struct cxl_memdev_state *mds)
 388{
 389	if (send_cmd->raw.rsvd)
 390		return -EINVAL;
 391
 392	/*
 393	 * Unlike supported commands, the output size of RAW commands
 394	 * gets passed along without further checking, so it must be
 395	 * validated here.
 396	 */
 397	if (send_cmd->out.size > mds->payload_size)
 398		return -EINVAL;
 399
 400	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
 401		return -EPERM;
 402
 403	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
 404
 405	*mem_cmd = (struct cxl_mem_command) {
 406		.info = {
 407			.id = CXL_MEM_COMMAND_ID_RAW,
 408			.size_in = send_cmd->in.size,
 409			.size_out = send_cmd->out.size,
 410		},
 411		.opcode = send_cmd->raw.opcode
 412	};
 413
 414	return 0;
 415}
 416
 417static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 418			  const struct cxl_send_command *send_cmd,
 419			  struct cxl_memdev_state *mds)
 420{
 421	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
 422	const struct cxl_command_info *info = &c->info;
 423
 424	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
 425		return -EINVAL;
 426
 427	if (send_cmd->rsvd)
 428		return -EINVAL;
 429
 430	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
 431		return -EINVAL;
 432
 433	/* Check that the command is enabled for hardware */
 434	if (!test_bit(info->id, mds->enabled_cmds))
 435		return -ENOTTY;
 436
 437	/* Check that the command is not claimed for exclusive kernel use */
 438	if (test_bit(info->id, mds->exclusive_cmds))
 439		return -EBUSY;
 440
 441	/* Check the input buffer is the expected size */
 442	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
 443	    (info->size_in != send_cmd->in.size))
 444		return -ENOMEM;
 445
 446	/* Check the output buffer is at least large enough */
 447	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
 448	    (send_cmd->out.size < info->size_out))
 449		return -ENOMEM;
 450
 451	*mem_cmd = (struct cxl_mem_command) {
 452		.info = {
 453			.id = info->id,
 454			.flags = info->flags,
 455			.size_in = send_cmd->in.size,
 456			.size_out = send_cmd->out.size,
 457		},
 458		.opcode = c->opcode
 459	};
 460
 461	return 0;
 462}
 463
 464/**
 465 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 466 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 467 * @mds: The driver data for the operation
 468 * @send_cmd: &struct cxl_send_command copied in from userspace.
 469 *
 470 * Return:
  471 *  * %0	- @mbox_cmd is ready to send.
 472 *  * %-ENOTTY	- Invalid command specified.
 473 *  * %-EINVAL	- Reserved fields or invalid values were used.
 474 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 475 *  * %-EPERM	- Attempted to use a protected command.
 476 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 477 *
 478 * The result of this command is a fully validated command in @mbox_cmd that is
 479 * safe to send to the hardware.
 480 */
 481static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
 482				      struct cxl_memdev_state *mds,
 483				      const struct cxl_send_command *send_cmd)
 484{
 485	struct cxl_mem_command mem_cmd;
 486	int rc;
 487
 488	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
 489		return -ENOTTY;
 490
 491	/*
 492	 * The user can never specify an input payload larger than what hardware
 493	 * supports, but output can be arbitrarily large (simply write out as
 494	 * much data as the hardware provides).
 495	 */
 496	if (send_cmd->in.size > mds->payload_size)
 497		return -EINVAL;
 498
 499	/* Sanitize and construct a cxl_mem_command */
 500	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
 501		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
 502	else
 503		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
 504
 505	if (rc)
 506		return rc;
 507
 508	/* Sanitize and construct a cxl_mbox_cmd */
 509	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
 510				 mem_cmd.info.size_in, mem_cmd.info.size_out,
 511				 send_cmd->in.payload);
 512}
 513
 514int cxl_query_cmd(struct cxl_memdev *cxlmd,
 515		  struct cxl_mem_query_commands __user *q)
 516{
 517	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 518	struct device *dev = &cxlmd->dev;
 519	struct cxl_mem_command *cmd;
 520	u32 n_commands;
 521	int j = 0;
 522
 523	dev_dbg(dev, "Query IOCTL\n");
 524
 525	if (get_user(n_commands, &q->n_commands))
 526		return -EFAULT;
 527
 528	/* returns the total number if 0 elements are requested. */
 529	if (n_commands == 0)
 530		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
 531
 532	/*
  533	 * Otherwise, return min(n_commands, total commands) cxl_command_info
  534	 * structures.
 535	 */
 536	cxl_for_each_cmd(cmd) {
 537		struct cxl_command_info info = cmd->info;
 538
 539		if (test_bit(info.id, mds->enabled_cmds))
 540			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
 541		if (test_bit(info.id, mds->exclusive_cmds))
 542			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
 543
 544		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
 545			return -EFAULT;
 546
 547		if (j == n_commands)
 548			break;
 549	}
 550
 551	return 0;
 552}
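
/*
 * Example (illustrative sketch of the userspace side of the query
 * ioctl; the memdev path and the lack of error handling are
 * assumptions, not UAPI requirements):
 *
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *	struct cxl_mem_query_commands *q;
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe);
 *	q = calloc(1, sizeof(*q) +
 *		      probe.n_commands * sizeof(q->commands[0]));
 *	q->n_commands = probe.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 *
 * The first call, with n_commands == 0, reports the table size; the
 * second fills in one cxl_command_info per command.
 */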
 553
 554/**
 555 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 556 * @mds: The driver data for the operation
 557 * @mbox_cmd: The validated mailbox command.
 558 * @out_payload: Pointer to userspace's output payload.
 559 * @size_out: (Input) Max payload size to copy out.
 560 *            (Output) Payload size hardware generated.
 561 * @retval: Hardware generated return code from the operation.
 562 *
 563 * Return:
 564 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 565 *		  protocol completed successfully not that the operation itself
 566 *		  was successful.
 567 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 568 *  * %-EFAULT	- Something happened with copy_to/from_user.
 569 *  * %-EINTR	- Mailbox acquisition interrupted.
 570 *  * %-EXXX	- Transaction level failures.
 571 *
 572 * Dispatches a mailbox command on behalf of a userspace request.
 573 * The output payload is copied to userspace.
 574 *
 575 * See cxl_send_cmd().
 576 */
 577static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
 578					struct cxl_mbox_cmd *mbox_cmd,
 579					u64 out_payload, s32 *size_out,
 580					u32 *retval)
 581{
 582	struct device *dev = mds->cxlds.dev;
 583	int rc;
 584
 585	dev_dbg(dev,
 586		"Submitting %s command for user\n"
 587		"\topcode: %x\n"
 588		"\tsize: %zx\n",
 589		cxl_mem_opcode_to_name(mbox_cmd->opcode),
 590		mbox_cmd->opcode, mbox_cmd->size_in);
 591
 592	rc = mds->mbox_send(mds, mbox_cmd);
 593	if (rc)
 594		goto out;
 595
 596	/*
  597	 * @size_out contains the max size that's allowed to be written back out
  598	 * to userspace. If the hardware generated more output than this, the
  599	 * excess is ignored.
 600	 */
 601	if (mbox_cmd->size_out) {
 602		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
 603			      "Invalid return size\n");
 604		if (copy_to_user(u64_to_user_ptr(out_payload),
 605				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
 606			rc = -EFAULT;
 607			goto out;
 608		}
 609	}
 610
 611	*size_out = mbox_cmd->size_out;
 612	*retval = mbox_cmd->return_code;
 613
 614out:
 615	cxl_mbox_cmd_dtor(mbox_cmd);
 616	return rc;
 617}
 618
 619int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 620{
 621	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 622	struct device *dev = &cxlmd->dev;
 623	struct cxl_send_command send;
 624	struct cxl_mbox_cmd mbox_cmd;
 625	int rc;
 626
 627	dev_dbg(dev, "Send IOCTL\n");
 628
 629	if (copy_from_user(&send, s, sizeof(send)))
 630		return -EFAULT;
 631
 632	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
 633	if (rc)
 634		return rc;
 635
 636	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
 637					  &send.out.size, &send.retval);
 638	if (rc)
 639		return rc;
 640
 641	if (copy_to_user(s, &send, sizeof(send)))
 642		return -EFAULT;
 643
 644	return 0;
 645}
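
/*
 * Example (illustrative sketch of the userspace side; the buffer size
 * 0x12 mirrors the GET_HEALTH_INFO entry in cxl_mem_commands above,
 * and handle_health() is a placeholder):
 *
 *	unsigned char health[0x12];
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_GET_HEALTH_INFO,
 *		.out.size = sizeof(health),
 *		.out.payload = (__u64)(uintptr_t)health,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &send) == 0 && send.retval == 0)
 *		handle_health(health, send.out.size);
 *
 * Note that send.retval carries the device's return code separately
 * from the ioctl result, per handle_mailbox_cmd_from_user() above.
 */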
 646
 647static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
 648			u32 *size, u8 *out)
 649{
 650	u32 remaining = *size;
 651	u32 offset = 0;
 652
 653	while (remaining) {
 654		u32 xfer_size = min_t(u32, remaining, mds->payload_size);
 655		struct cxl_mbox_cmd mbox_cmd;
 656		struct cxl_mbox_get_log log;
 657		int rc;
 658
 659		log = (struct cxl_mbox_get_log) {
 660			.uuid = *uuid,
 661			.offset = cpu_to_le32(offset),
 662			.length = cpu_to_le32(xfer_size),
 663		};
 664
 665		mbox_cmd = (struct cxl_mbox_cmd) {
 666			.opcode = CXL_MBOX_OP_GET_LOG,
 667			.size_in = sizeof(log),
 668			.payload_in = &log,
 669			.size_out = xfer_size,
 670			.payload_out = out,
 671		};
 672
 673		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 674
 675		/*
 676		 * The output payload length that indicates the number
 677		 * of valid bytes can be smaller than the Log buffer
 678		 * size.
 679		 */
 680		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
 681			offset += mbox_cmd.size_out;
 682			break;
 683		}
 684
 685		if (rc < 0)
 686			return rc;
 687
 688		out += xfer_size;
 689		remaining -= xfer_size;
 690		offset += xfer_size;
 691	}
 692
 693	*size = offset;
 694
 695	return 0;
 696}
 697
 698/**
 699 * cxl_walk_cel() - Walk through the Command Effects Log.
 700 * @mds: The driver data for the operation
 701 * @size: Length of the Command Effects Log.
  702 * @cel: Buffer containing the Command Effects Log entries
 703 *
 704 * Iterate over each entry in the CEL and determine if the driver supports the
 705 * command. If so, the command is enabled for the device and can be used later.
 706 */
 707static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 708{
 709	struct cxl_cel_entry *cel_entry;
 710	const int cel_entries = size / sizeof(*cel_entry);
 711	struct device *dev = mds->cxlds.dev;
 712	int i;
 713
 714	cel_entry = (struct cxl_cel_entry *) cel;
 715
 716	for (i = 0; i < cel_entries; i++) {
 717		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
 718		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
 719		int enabled = 0;
 720
 721		if (cmd) {
 722			set_bit(cmd->info.id, mds->enabled_cmds);
 723			enabled++;
 724		}
 725
 726		if (cxl_is_poison_command(opcode)) {
 727			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
 728			enabled++;
 729		}
 730
 731		if (cxl_is_security_command(opcode)) {
 732			cxl_set_security_cmd_enabled(&mds->security, opcode);
 733			enabled++;
 734		}
 735
 736		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
 737			enabled ? "enabled" : "unsupported by driver");
 738	}
 739}
 740
 741static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
 742{
 743	struct cxl_mbox_get_supported_logs *ret;
 744	struct cxl_mbox_cmd mbox_cmd;
 745	int rc;
 746
 747	ret = kvmalloc(mds->payload_size, GFP_KERNEL);
 748	if (!ret)
 749		return ERR_PTR(-ENOMEM);
 750
 751	mbox_cmd = (struct cxl_mbox_cmd) {
 752		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
 753		.size_out = mds->payload_size,
 754		.payload_out = ret,
 755		/* At least the record number field must be valid */
 756		.min_out = 2,
 757	};
 758	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 759	if (rc < 0) {
 760		kvfree(ret);
 761		return ERR_PTR(rc);
 762	}
 763
 764
 765	return ret;
 766}
 767
 768enum {
 769	CEL_UUID,
 770	VENDOR_DEBUG_UUID,
 771};
 772
 773/* See CXL 2.0 Table 170. Get Log Input Payload */
 774static const uuid_t log_uuid[] = {
 775	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
 776	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
 777};
 778
 779/**
 780 * cxl_enumerate_cmds() - Enumerate commands for a device.
 781 * @mds: The driver data for the operation
 782 *
  783 * Return: 0 if enumeration completed successfully.
 784 *
 785 * CXL devices have optional support for certain commands. This function will
 786 * determine the set of supported commands for the hardware and update the
 787 * enabled_cmds bitmap in the @mds.
 788 */
 789int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 790{
 791	struct cxl_mbox_get_supported_logs *gsl;
 792	struct device *dev = mds->cxlds.dev;
 793	struct cxl_mem_command *cmd;
 794	int i, rc;
 795
 796	gsl = cxl_get_gsl(mds);
 797	if (IS_ERR(gsl))
 798		return PTR_ERR(gsl);
 799
 800	rc = -ENOENT;
 801	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
 802		u32 size = le32_to_cpu(gsl->entry[i].size);
 803		uuid_t uuid = gsl->entry[i].uuid;
 804		u8 *log;
 805
 806		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
 807
 808		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
 809			continue;
 810
 811		log = kvmalloc(size, GFP_KERNEL);
 812		if (!log) {
 813			rc = -ENOMEM;
 814			goto out;
 815		}
 816
 817		rc = cxl_xfer_log(mds, &uuid, &size, log);
 818		if (rc) {
 819			kvfree(log);
 820			goto out;
 821		}
 822
 823		cxl_walk_cel(mds, size, log);
 824		kvfree(log);
 825
 826		/* In case CEL was bogus, enable some default commands. */
 827		cxl_for_each_cmd(cmd)
 828			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
 829				set_bit(cmd->info.id, mds->enabled_cmds);
 830
 831		/* Found the required CEL */
 832		rc = 0;
 833	}
 834out:
 835	kvfree(gsl);
 836	return rc;
 837}
 838EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
 839
 840void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 841			    enum cxl_event_log_type type,
 842			    enum cxl_event_type event_type,
 843			    const uuid_t *uuid, union cxl_event *evt)
 844{
 845	if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
 846		trace_cxl_general_media(cxlmd, type, &evt->gen_media);
 847	else if (event_type == CXL_CPER_EVENT_DRAM)
 848		trace_cxl_dram(cxlmd, type, &evt->dram);
 849	else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
 850		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
 851	else
 852		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
 853}
 854EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
 855
 856static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 857				     enum cxl_event_log_type type,
 858				     struct cxl_event_record_raw *record)
 859{
 860	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
 861	const uuid_t *uuid = &record->id;
 862
 863	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
 864		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
 865	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
 866		ev_type = CXL_CPER_EVENT_DRAM;
 867	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
 868		ev_type = CXL_CPER_EVENT_MEM_MODULE;
 869
 870	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
 871}
 872
 873static int cxl_clear_event_record(struct cxl_memdev_state *mds,
 874				  enum cxl_event_log_type log,
 875				  struct cxl_get_event_payload *get_pl)
 876{
 877	struct cxl_mbox_clear_event_payload *payload;
 878	u16 total = le16_to_cpu(get_pl->record_count);
 879	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
 880	size_t pl_size = struct_size(payload, handles, max_handles);
 881	struct cxl_mbox_cmd mbox_cmd;
 882	u16 cnt;
 883	int rc = 0;
 884	int i;
 885
 886	/* Payload size may limit the max handles */
 887	if (pl_size > mds->payload_size) {
 888		max_handles = (mds->payload_size - sizeof(*payload)) /
 889			      sizeof(__le16);
 890		pl_size = struct_size(payload, handles, max_handles);
 891	}
 892
 893	payload = kvzalloc(pl_size, GFP_KERNEL);
 894	if (!payload)
 895		return -ENOMEM;
 896
 897	*payload = (struct cxl_mbox_clear_event_payload) {
 898		.event_log = log,
 899	};
 900
 901	mbox_cmd = (struct cxl_mbox_cmd) {
 902		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
 903		.payload_in = payload,
 904		.size_in = pl_size,
 905	};
 906
 907	/*
 908	 * Clear Event Records uses u8 for the handle cnt while Get Event
 909	 * Record can return up to 0xffff records.
 910	 */
 911	i = 0;
 912	for (cnt = 0; cnt < total; cnt++) {
 913		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
 914		struct cxl_event_generic *gen = &raw->event.generic;
 915
 916		payload->handles[i++] = gen->hdr.handle;
 917		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
  918			le16_to_cpu(payload->handles[i - 1]));
 919
 920		if (i == max_handles) {
 921			payload->nr_recs = i;
 922			rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 923			if (rc)
 924				goto free_pl;
 925			i = 0;
 926		}
 927	}
 928
 929	/* Clear what is left if any */
 930	if (i) {
 931		payload->nr_recs = i;
 932		mbox_cmd.size_in = struct_size(payload, handles, i);
 933		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 934		if (rc)
 935			goto free_pl;
 936	}
 937
 938free_pl:
 939	kvfree(payload);
 940	return rc;
 941}
 942
 943static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
 944				    enum cxl_event_log_type type)
 945{
 946	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
 947	struct device *dev = mds->cxlds.dev;
 948	struct cxl_get_event_payload *payload;
 949	struct cxl_mbox_cmd mbox_cmd;
 950	u8 log_type = type;
 951	u16 nr_rec;
 952
 953	mutex_lock(&mds->event.log_lock);
 954	payload = mds->event.buf;
 955
 956	mbox_cmd = (struct cxl_mbox_cmd) {
 957		.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
 958		.payload_in = &log_type,
 959		.size_in = sizeof(log_type),
 960		.payload_out = payload,
 961		.size_out = mds->payload_size,
 962		.min_out = struct_size(payload, records, 0),
 963	};
 964
 965	do {
 966		int rc, i;
 967
 968		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 969		if (rc) {
 970			dev_err_ratelimited(dev,
 971				"Event log '%d': Failed to query event records : %d",
 972				type, rc);
 973			break;
 974		}
 975
 976		nr_rec = le16_to_cpu(payload->record_count);
 977		if (!nr_rec)
 978			break;
 979
 980		for (i = 0; i < nr_rec; i++)
 981			__cxl_event_trace_record(cxlmd, type,
 982						 &payload->records[i]);
 983
 984		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
 985			trace_cxl_overflow(cxlmd, type, payload);
 986
 987		rc = cxl_clear_event_record(mds, type, payload);
 988		if (rc) {
 989			dev_err_ratelimited(dev,
 990				"Event log '%d': Failed to clear events : %d",
 991				type, rc);
 992			break;
 993		}
 994	} while (nr_rec);
 995
 996	mutex_unlock(&mds->event.log_lock);
 997}
 998
 999/**
 1000 * cxl_mem_get_event_records() - Get Event Records from the device
1001 * @mds: The driver data for the operation
1002 * @status: Event Status register value identifying which events are available.
1003 *
1004 * Retrieve all event records available on the device, report them as trace
1005 * events, and clear them.
1006 *
1007 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
1008 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
1009 */
1010void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
1011{
1012	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
1013
1014	if (status & CXLDEV_EVENT_STATUS_FATAL)
1015		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
1016	if (status & CXLDEV_EVENT_STATUS_FAIL)
1017		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
1018	if (status & CXLDEV_EVENT_STATUS_WARN)
1019		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
1020	if (status & CXLDEV_EVENT_STATUS_INFO)
1021		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
1022}
1023EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
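
/*
 * Example (illustrative sketch): the expected caller is a hardware
 * event interrupt handler that reads the Event Status register and
 * forwards the raw value, along the lines of the cxl_pci event thread:
 *
 *	u32 status = readl(cxlds->regs.status +
 *			   CXLDEV_DEV_EVENT_STATUS_OFFSET);
 *	cxl_mem_get_event_records(mds, status);
 */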
1024
1025/**
 1026 * cxl_mem_get_partition_info() - Get partition info
1027 * @mds: The driver data for the operation
1028 *
1029 * Retrieve the current partition info for the device specified.  The active
1030 * values are the current capacity in bytes.  If not 0, the 'next' values are
 1031 * the pending values, in bytes, which take effect on the next cold reset.
1032 *
 1033 * Return: 0 if no error; otherwise the result of the mailbox command.
1034 *
1035 * See CXL @8.2.9.5.2.1 Get Partition Info
1036 */
1037static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
1038{
1039	struct cxl_mbox_get_partition_info pi;
1040	struct cxl_mbox_cmd mbox_cmd;
1041	int rc;
1042
1043	mbox_cmd = (struct cxl_mbox_cmd) {
1044		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
1045		.size_out = sizeof(pi),
1046		.payload_out = &pi,
1047	};
1048	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
1049	if (rc)
1050		return rc;
1051
1052	mds->active_volatile_bytes =
1053		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
1054	mds->active_persistent_bytes =
1055		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
1056	mds->next_volatile_bytes =
1057		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
1058	mds->next_persistent_bytes =
 1059		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
1060
1061	return 0;
1062}
1063
1064/**
1065 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
1066 * @mds: The driver data for the operation
1067 *
1068 * Return: 0 if identify was executed successfully or media not ready.
1069 *
1070 * This will dispatch the identify command to the device and on success populate
1071 * structures to be exported to sysfs.
1072 */
1073int cxl_dev_state_identify(struct cxl_memdev_state *mds)
1074{
1075	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
1076	struct cxl_mbox_identify id;
1077	struct cxl_mbox_cmd mbox_cmd;
1078	u32 val;
1079	int rc;
1080
1081	if (!mds->cxlds.media_ready)
1082		return 0;
1083
1084	mbox_cmd = (struct cxl_mbox_cmd) {
1085		.opcode = CXL_MBOX_OP_IDENTIFY,
1086		.size_out = sizeof(id),
1087		.payload_out = &id,
1088	};
1089	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
1090	if (rc < 0)
1091		return rc;
1092
1093	mds->total_bytes =
1094		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
1095	mds->volatile_only_bytes =
1096		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
1097	mds->persistent_only_bytes =
1098		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
1099	mds->partition_align_bytes =
1100		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
1101
1102	mds->lsa_size = le32_to_cpu(id.lsa_size);
1103	memcpy(mds->firmware_version, id.fw_revision,
1104	       sizeof(id.fw_revision));
1105
1106	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
1107		val = get_unaligned_le24(id.poison_list_max_mer);
1108		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
1109	}
1110
1111	return 0;
1112}
1113EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
1114
1115static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
1116{
1117	int rc;
1118	u32 sec_out = 0;
1119	struct cxl_get_security_output {
1120		__le32 flags;
1121	} out;
1122	struct cxl_mbox_cmd sec_cmd = {
1123		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
1124		.payload_out = &out,
1125		.size_out = sizeof(out),
1126	};
1127	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
1128	struct cxl_dev_state *cxlds = &mds->cxlds;
1129
1130	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
1131		return -EINVAL;
1132
1133	rc = cxl_internal_send_cmd(mds, &sec_cmd);
1134	if (rc < 0) {
1135		dev_err(cxlds->dev, "Failed to get security state : %d", rc);
1136		return rc;
1137	}
1138
1139	/*
1140	 * Prior to using these commands, any security applied to
1141	 * the user data areas of the device shall be DISABLED (or
1142	 * UNLOCKED for secure erase case).
1143	 */
1144	sec_out = le32_to_cpu(out.flags);
1145	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
1146		return -EINVAL;
1147
1148	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
1149	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
1150		return -EINVAL;
1151
1152	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
1153	if (rc < 0) {
1154		dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
1155		return rc;
1156	}
1157
1158	return 0;
1159}
1160
1161
1162/**
1163 * cxl_mem_sanitize() - Send a sanitization command to the device.
1164 * @cxlmd: The device for the operation
1165 * @cmd: The specific sanitization command opcode
1166 *
1167 * Return: 0 if the command was executed successfully, regardless of
1168 * whether or not the actual security operation is done in the background,
1169 * such as for the Sanitize case.
 1170 * Error return values can be the result of the mailbox command, -EINVAL
 1171 * when security requirements are not met or the context is invalid, or
 1172 * -EBUSY if the sanitize operation is already in flight or memory is mapped.
1173 *
1174 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
1175 */
1176int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
1177{
1178	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 1179	struct cxl_port *endpoint;
1180	int rc;
1181
1182	/* synchronize with cxl_mem_probe() and decoder write operations */
1183	device_lock(&cxlmd->dev);
1184	endpoint = cxlmd->endpoint;
1185	down_read(&cxl_region_rwsem);
1186	/*
 1187	 * Require an endpoint with no committed decoders, otherwise the
 1188	 * driver cannot be sure that the device memory is unmapped.
1189	 */
1190	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
1191		rc = __cxl_mem_sanitize(mds, cmd);
1192	else
1193		rc = -EBUSY;
1194	up_read(&cxl_region_rwsem);
1195	device_unlock(&cxlmd->dev);
1196
1197	return rc;
1198}
1199
1200static int add_dpa_res(struct device *dev, struct resource *parent,
1201		       struct resource *res, resource_size_t start,
1202		       resource_size_t size, const char *type)
1203{
1204	int rc;
1205
1206	res->name = type;
1207	res->start = start;
1208	res->end = start + size - 1;
1209	res->flags = IORESOURCE_MEM;
1210	if (resource_size(res) == 0) {
1211		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
1212		return 0;
1213	}
1214	rc = request_resource(parent, res);
1215	if (rc) {
1216		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
1217			res, rc);
1218		return rc;
1219	}
1220
1221	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
1222
1223	return 0;
1224}
1225
1226int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
1227{
1228	struct cxl_dev_state *cxlds = &mds->cxlds;
1229	struct device *dev = cxlds->dev;
1230	int rc;
1231
1232	if (!cxlds->media_ready) {
1233		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
1234		cxlds->ram_res = DEFINE_RES_MEM(0, 0);
1235		cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
1236		return 0;
1237	}
1238
1239	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
1240
1241	if (mds->partition_align_bytes == 0) {
1242		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
1243				 mds->volatile_only_bytes, "ram");
1244		if (rc)
1245			return rc;
1246		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
1247				   mds->volatile_only_bytes,
1248				   mds->persistent_only_bytes, "pmem");
1249	}
1250
1251	rc = cxl_mem_get_partition_info(mds);
1252	if (rc) {
1253		dev_err(dev, "Failed to query partition information\n");
1254		return rc;
1255	}
1256
1257	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
1258			 mds->active_volatile_bytes, "ram");
1259	if (rc)
1260		return rc;
1261	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
1262			   mds->active_volatile_bytes,
1263			   mds->active_persistent_bytes, "pmem");
1264}
1265EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
1266
1267int cxl_set_timestamp(struct cxl_memdev_state *mds)
1268{
1269	struct cxl_mbox_cmd mbox_cmd;
1270	struct cxl_mbox_set_timestamp_in pi;
1271	int rc;
1272
1273	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
1274	mbox_cmd = (struct cxl_mbox_cmd) {
1275		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
1276		.size_in = sizeof(pi),
1277		.payload_in = &pi,
1278	};
1279
1280	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
1281	/*
1282	 * Command is optional. Devices may have another way of providing
1283	 * a timestamp, or may return all 0s in timestamp fields.
 1284	 * Don't report an error if this command isn't supported.
1285	 */
1286	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
1287		return rc;
1288
1289	return 0;
1290}
1291EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
1292
1293int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
1294		       struct cxl_region *cxlr)
1295{
1296	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
1297	struct cxl_mbox_poison_out *po;
1298	struct cxl_mbox_poison_in pi;
1299	struct cxl_mbox_cmd mbox_cmd;
1300	int nr_records = 0;
1301	int rc;
1302
1303	rc = mutex_lock_interruptible(&mds->poison.lock);
1304	if (rc)
1305		return rc;
1306
1307	po = mds->poison.list_out;
1308	pi.offset = cpu_to_le64(offset);
1309	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
1310
1311	mbox_cmd = (struct cxl_mbox_cmd) {
1312		.opcode = CXL_MBOX_OP_GET_POISON,
1313		.size_in = sizeof(pi),
1314		.payload_in = &pi,
1315		.size_out = mds->payload_size,
1316		.payload_out = po,
1317		.min_out = struct_size(po, record, 0),
1318	};
1319
1320	do {
1321		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
1322		if (rc)
1323			break;
1324
1325		for (int i = 0; i < le16_to_cpu(po->count); i++)
1326			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
1327					 po->flags, po->overflow_ts,
1328					 CXL_POISON_TRACE_LIST);
1329
1330		/* Protect against an uncleared _FLAG_MORE */
1331		nr_records = nr_records + le16_to_cpu(po->count);
1332		if (nr_records >= mds->poison.max_errors) {
1333			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
1334				nr_records);
1335			break;
1336		}
1337	} while (po->flags & CXL_POISON_FLAG_MORE);
1338
1339	mutex_unlock(&mds->poison.lock);
1340	return rc;
1341}
1342EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
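
/*
 * Example (illustrative, assuming the caller holds no conflicting
 * locks): walking poison for the whole device rather than a region
 * passes the full DPA span and a NULL region:
 *
 *	cxl_mem_get_poison(cxlmd, 0, resource_size(&cxlds->dpa_res), NULL);
 */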
1343
1344static void free_poison_buf(void *buf)
1345{
1346	kvfree(buf);
1347}
1348
1349/* Get Poison List output buffer is protected by mds->poison.lock */
1350static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
1351{
1352	mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
1353	if (!mds->poison.list_out)
1354		return -ENOMEM;
1355
1356	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
1357					mds->poison.list_out);
1358}
1359
1360int cxl_poison_state_init(struct cxl_memdev_state *mds)
1361{
1362	int rc;
1363
1364	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
1365		return 0;
1366
1367	rc = cxl_poison_alloc_buf(mds);
1368	if (rc) {
1369		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
1370		return rc;
1371	}
1372
1373	mutex_init(&mds->poison.lock);
1374	return 0;
1375}
1376EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
1377
1378struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
1379{
1380	struct cxl_memdev_state *mds;
1381
1382	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
1383	if (!mds) {
1384		dev_err(dev, "No memory available\n");
1385		return ERR_PTR(-ENOMEM);
1386	}
1387
1388	mutex_init(&mds->mbox_mutex);
1389	mutex_init(&mds->event.log_lock);
1390	mds->cxlds.dev = dev;
1391	mds->cxlds.reg_map.host = dev;
1392	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
1393	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
1394	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
1395	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
1396
1397	return mds;
1398}
1399EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
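
/*
 * Example (illustrative sketch of the expected probe-time sequence;
 * the canonical caller is the cxl_pci driver, error handling elided):
 *
 *	mds = cxl_memdev_state_create(dev);
 *	// ...wire up mds->mbox_send and mds->payload_size...
 *	rc = cxl_enumerate_cmds(mds);
 *	rc = cxl_set_timestamp(mds);
 *	rc = cxl_poison_state_init(mds);
 *	rc = cxl_dev_state_identify(mds);
 *	rc = cxl_mem_create_range_info(mds);
 */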
1400
1401void __init cxl_mbox_init(void)
1402{
1403	struct dentry *mbox_debugfs;
1404
1405	mbox_debugfs = cxl_debugfs_create_dir("mbox");
1406	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
1407			    &cxl_raw_allow_all);
1408}