// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                            \
	for ((cmd) = &cxl_mem_commands[0];                               \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                  \
	[CXL_MEM_COMMAND_ID_##_id] = {                                   \
		.info = {                                                \
			.id = CXL_MEM_COMMAND_ID_##_id,                  \
			.size_in = sin,                                  \
			.size_out = sout,                                \
		},                                                       \
		.opcode = CXL_MBOX_OP_##_id,                             \
		.flags = _flags,                                         \
	}
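
/*
 * For illustration, the first entry of the table below,
 * CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), expands to
 * roughly the following designated initializer (a sketch of the macro
 * expansion, not additional code):
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */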
37
38#define CXL_VARIABLE_PAYLOAD ~0U
39/*
40 * This table defines the supported mailbox commands for the driver. This table
41 * is made up of a UAPI structure. Non-negative values as parameters in the
42 * table will be validated against the user's input. For example, if size_in is
43 * 0, and the user passed in 1, it is an error.
44 */
45static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
46 CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
47#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
48 CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
49#endif
50 CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
51 CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
52 CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
53 CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
54 CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
55 CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
56 CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
57 CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
58 CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
59 CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
60 CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
61 CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
62 CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
63 CXL_CMD(INJECT_POISON, 0x8, 0, 0),
64 CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
65 CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
66 CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
67 CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
68};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver, and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, and userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
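
/*
 * CXL mailbox opcodes encode the command set in the upper byte and the
 * individual command in the lower byte, hence the (opcode >> 8)
 * comparison above. For example, a hypothetical opcode of 0x4401 falls
 * in command set 0x44 (Sanitize) and is treated as a security command.
 */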

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxlds: The device data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 * * %0 - Success.
 * * %-E2BIG - Payload is too large for hardware.
 * * %-EBUSY - Couldn't acquire exclusive mailbox access.
 * * %-EFAULT - Hardware error occurred.
 * * %-ENXIO - Command completed, but device reported an error.
 * * %-EIO - Unexpected output size.
 *
 * Mailbox commands may execute successfully and yet the device itself may
 * still report an error. While this distinction can be useful for commands
 * from userspace, the kernel will only be able to use results when both are
 * successful.
 */
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxlds->payload_size ||
	    mbox_cmd->size_out > cxlds->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
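
/*
 * A minimal sketch of the calling convention for cxl_internal_send_cmd();
 * see cxl_get_gsl() and cxl_dev_state_identify() below for real in-tree
 * users. The fetch_health_info() wrapper and the cxl_mbox_health_info
 * type are hypothetical, for illustration only:
 *
 *	static int fetch_health_info(struct cxl_dev_state *cxlds,
 *				     struct cxl_mbox_health_info *info)
 *	{
 *		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
 *			.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
 *			.size_out = sizeof(*info),
 *			.payload_out = info,
 *		};
 *
 *		return cxl_internal_send_cmd(cxlds, &mbox_cmd);
 *	}
 */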

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 * * true - payload_in passes check for @opcode.
 * * false - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_dev_state *cxlds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxlds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_dev_state *cxlds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxlds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_dev_state *cxlds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 * * %0 - @mbox_cmd is ready to send.
 * * %-ENOTTY - Invalid command specified.
 * * %-EINVAL - Reserved fields or invalid values were used.
 * * %-ENOMEM - Input or output buffer wasn't sized properly.
 * * %-EPERM - Attempted to use a protected command.
 * * %-EBUSY - Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_dev_state *cxlds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
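
/*
 * Userspace reaches cxl_query_cmd() via the CXL_MEM_QUERY_COMMANDS ioctl
 * on a memdev character device. A sketch of the expected two-call
 * pattern, with error handling elided (consult
 * include/uapi/linux/cxl_mem.h for the authoritative UAPI definitions):
 *
 *	struct cxl_mem_query_commands *q;
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	q = calloc(1, sizeof(*q));		// n_commands == 0
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills in the total count
 *
 *	q = realloc(q, sizeof(*q) +
 *			q->n_commands * sizeof(q->commands[0]));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills in the entries
 */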

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 * * %0 - Mailbox transaction succeeded. This implies the mailbox
 *        protocol completed successfully, not that the operation itself
 *        was successful.
 * * %-ENOMEM - Couldn't allocate a bounce buffer.
 * * %-EFAULT - Something happened with copy_to/from_user.
 * * %-EINTR - Mailbox acquisition interrupted.
 * * %-EXXX - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxlds->dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this, it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
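
/*
 * A sketch of driving cxl_send_cmd() from userspace via the
 * CXL_MEM_SEND_COMMAND ioctl, here for the fixed-size IDENTIFY command.
 * Field names are best-effort; consult include/uapi/linux/cxl_mem.h for
 * the authoritative UAPI definitions:
 *
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = 0x43,
 *		.out.payload = (__u64)(uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd);
 *	// On return, cmd.retval holds the device return code and
 *	// cmd.out.size the number of bytes written to buf.
 */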

static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}
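
/*
 * Worked example of the chunking above: with a 1 MiB mailbox payload
 * (cxlds->payload_size == SZ_1M) and a 2.5 MiB log, cxl_xfer_log()
 * issues three GET_LOG commands at offsets 0, 1 MiB, and 2 MiB, with
 * lengths of 1 MiB, 1 MiB, and 0.5 MiB respectively.
 */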

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer holding the Command Effects Log entries.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlds->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlds->enabled_cmds);
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxlds->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlds->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlds, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

/**
 * cxl_mem_get_partition_info() - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc)
		return rc;

	cxlds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0)
		return rc;

	cxlds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}
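
/*
 * Illustrative DPA resource tree that results from the calls below for a
 * device with 4GB of volatile and 4GB of persistent capacity (sizes and
 * layout are an assumption for the example):
 *
 *	cxlds->dpa_res:  0x000000000-0x1ffffffff
 *	  "ram":         0x000000000-0x0ffffffff
 *	  "pmem":        0x100000000-0x1ffffffff
 */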

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	int rc;

	cxlds->dpa_res =
		(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);

	if (cxlds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 cxlds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   cxlds->volatile_only_bytes,
				   cxlds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(cxlds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 cxlds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   cxlds->active_volatile_bytes,
			   cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
	if (!cxlds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlds->mbox_mutex);
	cxlds->dev = dev;

	return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
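
/*
 * A condensed sketch of how a transport driver such as cxl_pci ties
 * these exports together at probe time. Assigning mbox_send and
 * payload_size is transport-specific and shown only as an assumption
 * here (my_mbox_send is hypothetical):
 *
 *	cxlds = cxl_dev_state_create(dev);
 *	cxlds->mbox_send = my_mbox_send;
 *	cxlds->payload_size = ...;	// from the hardware capabilities
 *
 *	rc = cxl_enumerate_cmds(cxlds);
 *	rc = cxl_dev_state_identify(cxlds);
 *	rc = cxl_mem_create_range_info(cxlds);
 */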

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}