/* NOTE(review): extraction artifact ("Loading...") removed — not part of the source. */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel On Demand (Software Defined Silicon) driver
4 *
5 * Copyright (c) 2022, Intel Corporation.
6 * All Rights Reserved.
7 *
8 * Author: "David E. Box" <david.e.box@linux.intel.com>
9 */
10
11#include <linux/auxiliary_bus.h>
12#include <linux/bits.h>
13#include <linux/bitfield.h>
14#include <linux/device.h>
15#include <linux/iopoll.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/slab.h>
20#include <linux/sysfs.h>
21#include <linux/types.h>
22#include <linux/uaccess.h>
23
24#include "vsec.h"
25
/* Discovery-table access types (DT_ACCESS_TYPE field) */
#define ACCESS_TYPE_BARID		2
#define ACCESS_TYPE_LOCAL		3

#define SDSI_MIN_SIZE_DWORDS		276
#define SDSI_SIZE_MAILBOX		1024
#define SDSI_SIZE_REGS			80
#define SDSI_SIZE_CMD			sizeof(u64)

/*
 * Write messages are currently up to the size of the mailbox
 * while read messages are up to 4 times the size of the
 * mailbox, sent in packets
 */
#define SDSI_SIZE_WRITE_MSG		SDSI_SIZE_MAILBOX
#define SDSI_SIZE_READ_MSG		(SDSI_SIZE_MAILBOX * 4)

#define SDSI_ENABLED_FEATURES_OFFSET	16
#define SDSI_FEATURE_SDSI		BIT(3)
#define SDSI_FEATURE_METERING		BIT(26)

#define SDSI_SOCKET_ID_OFFSET		64
#define SDSI_SOCKET_ID			GENMASK(3, 0)

/* Hardware status codes reported in CTRL_STATUS */
#define SDSI_MBOX_CMD_SUCCESS		0x40
#define SDSI_MBOX_CMD_TIMEOUT		0x80

/*
 * Command completion timeout. Provisioning operations can take longer
 * than the original 2ms budget; use 500ms as in later revisions of this
 * driver so slow commands are not falsely reported as -ETIMEDOUT.
 */
#define MBOX_TIMEOUT_US			500000
#define MBOX_TIMEOUT_ACQUIRE_US		1000
#define MBOX_POLLING_PERIOD_US		100
#define MBOX_ACQUIRE_NUM_RETRIES	5
#define MBOX_ACQUIRE_RETRY_DELAY_MS	500
#define MBOX_MAX_PACKETS		4

#define MBOX_OWNER_NONE			0x00
#define MBOX_OWNER_INBAND		0x01

/* Fields of the 64-bit mailbox control register */
#define CTRL_RUN_BUSY			BIT(0)
#define CTRL_READ_WRITE			BIT(1)
#define CTRL_SOM			BIT(2)
#define CTRL_EOM			BIT(3)
#define CTRL_OWNER			GENMASK(5, 4)
#define CTRL_COMPLETE			BIT(6)
#define CTRL_READY			BIT(7)
#define CTRL_STATUS			GENMASK(15, 8)
#define CTRL_PACKET_SIZE		GENMASK(31, 16)
#define CTRL_MSG_SIZE			GENMASK(63, 48)

/* Discovery table layout */
#define DISC_TABLE_SIZE			12
#define DT_ACCESS_TYPE			GENMASK(3, 0)
#define DT_SIZE				GENMASK(27, 12)
#define DT_TBIR				GENMASK(2, 0)
#define DT_OFFSET(v)			((v) & GENMASK(31, 3))

/* Per-GUID register layout sizes */
#define SDSI_GUID_V1			0x006DD191
#define GUID_V1_CNTRL_SIZE		8
#define GUID_V1_REGS_SIZE		72
#define SDSI_GUID_V2			0xF210D9EF
#define GUID_V2_CNTRL_SIZE		16
#define GUID_V2_REGS_SIZE		80

/* Mailbox command identifiers (last qword of the payload) */
enum sdsi_command {
	SDSI_CMD_PROVISION_AKC		= 0x0004,
	SDSI_CMD_PROVISION_CAP		= 0x0008,
	SDSI_CMD_READ_STATE		= 0x0010,
	SDSI_CMD_READ_METER		= 0x0014,
};
92
/* Describes one mailbox transaction: command payload, optional return buffer. */
struct sdsi_mbox_info {
	u64 *payload;		/* qword-aligned command + message, payload[0] sent at acquire */
	void *buffer;		/* destination for read data (up to SDSI_SIZE_READ_MSG) */
	int size;		/* payload size in bytes, qword aligned */
};

/* On-device discovery table header (DISC_TABLE_SIZE bytes, read via MMIO). */
struct disc_table {
	u32 access_info;	/* DT_ACCESS_TYPE | DT_SIZE fields */
	u32 guid;		/* layout GUID (SDSI_GUID_V1/V2) */
	u32 offset;		/* DT_TBIR | DT_OFFSET fields */
};

/* Per-device driver state. */
struct sdsi_priv {
	struct mutex mb_lock;	/* Mailbox access lock */
	struct device *dev;
	void __iomem *control_addr;	/* mailbox control register */
	void __iomem *mbox_addr;	/* mailbox data window (control + control_size) */
	void __iomem *regs_addr;	/* register region (mbox + SDSI_SIZE_MAILBOX) */
	int control_size;
	int maibox_size;	/* NOTE(review): apparent typo for "mailbox_size"; unused in this view — confirm before renaming */
	int registers_size;
	u32 guid;
	u32 features;		/* SDSI_FEATURE_* bits read at probe */
};
117
118/* SDSi mailbox operations must be performed using 64bit mov instructions */
119static __always_inline void
120sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
121{
122 size_t count = count_bytes / sizeof(*to);
123 int i;
124
125 for (i = 0; i < count; i++)
126 writeq(from[i], &to[i]);
127}
128
129static __always_inline void
130sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
131{
132 size_t count = count_bytes / sizeof(*to);
133 int i;
134
135 for (i = 0; i < count; i++)
136 to[i] = readq(&from[i]);
137}
138
139static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
140{
141 u64 control = FIELD_PREP(CTRL_COMPLETE, 1);
142
143 lockdep_assert_held(&priv->mb_lock);
144 writeq(control, priv->control_addr);
145}
146
147static int sdsi_status_to_errno(u32 status)
148{
149 switch (status) {
150 case SDSI_MBOX_CMD_SUCCESS:
151 return 0;
152 case SDSI_MBOX_CMD_TIMEOUT:
153 return -ETIMEDOUT;
154 default:
155 return -EIO;
156 }
157}
158
159static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
160 size_t *data_size)
161{
162 struct device *dev = priv->dev;
163 u32 total, loop, eom, status, message_size;
164 u64 control;
165 int ret;
166
167 lockdep_assert_held(&priv->mb_lock);
168
169 /* Format and send the read command */
170 control = FIELD_PREP(CTRL_EOM, 1) |
171 FIELD_PREP(CTRL_SOM, 1) |
172 FIELD_PREP(CTRL_RUN_BUSY, 1) |
173 FIELD_PREP(CTRL_PACKET_SIZE, info->size);
174 writeq(control, priv->control_addr);
175
176 /* For reads, data sizes that are larger than the mailbox size are read in packets. */
177 total = 0;
178 loop = 0;
179 do {
180 void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
181 u32 packet_size;
182
183 /* Poll on ready bit */
184 ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
185 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
186 if (ret)
187 break;
188
189 eom = FIELD_GET(CTRL_EOM, control);
190 status = FIELD_GET(CTRL_STATUS, control);
191 packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
192 message_size = FIELD_GET(CTRL_MSG_SIZE, control);
193
194 ret = sdsi_status_to_errno(status);
195 if (ret)
196 break;
197
198 /* Only the last packet can be less than the mailbox size. */
199 if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
200 dev_err(dev, "Invalid packet size\n");
201 ret = -EPROTO;
202 break;
203 }
204
205 if (packet_size > SDSI_SIZE_MAILBOX) {
206 dev_err(dev, "Packet size too large\n");
207 ret = -EPROTO;
208 break;
209 }
210
211 sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
212
213 total += packet_size;
214
215 sdsi_complete_transaction(priv);
216 } while (!eom && ++loop < MBOX_MAX_PACKETS);
217
218 if (ret) {
219 sdsi_complete_transaction(priv);
220 return ret;
221 }
222
223 if (!eom) {
224 dev_err(dev, "Exceeded read attempts\n");
225 return -EPROTO;
226 }
227
228 /* Message size check is only valid for multi-packet transfers */
229 if (loop && total != message_size)
230 dev_warn(dev, "Read count %u differs from expected count %u\n",
231 total, message_size);
232
233 *data_size = total;
234
235 return 0;
236}
237
238static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
239{
240 u64 control;
241 u32 status;
242 int ret;
243
244 lockdep_assert_held(&priv->mb_lock);
245
246 /* Write rest of the payload */
247 sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
248 info->size - SDSI_SIZE_CMD);
249
250 /* Format and send the write command */
251 control = FIELD_PREP(CTRL_EOM, 1) |
252 FIELD_PREP(CTRL_SOM, 1) |
253 FIELD_PREP(CTRL_RUN_BUSY, 1) |
254 FIELD_PREP(CTRL_READ_WRITE, 1) |
255 FIELD_PREP(CTRL_PACKET_SIZE, info->size);
256 writeq(control, priv->control_addr);
257
258 /* Poll on ready bit */
259 ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
260 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
261
262 if (ret)
263 goto release_mbox;
264
265 status = FIELD_GET(CTRL_STATUS, control);
266 ret = sdsi_status_to_errno(status);
267
268release_mbox:
269 sdsi_complete_transaction(priv);
270
271 return ret;
272}
273
274static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
275{
276 u64 control;
277 u32 owner;
278 int ret, retries = 0;
279
280 lockdep_assert_held(&priv->mb_lock);
281
282 /* Check mailbox is available */
283 control = readq(priv->control_addr);
284 owner = FIELD_GET(CTRL_OWNER, control);
285 if (owner != MBOX_OWNER_NONE)
286 return -EBUSY;
287
288 /*
289 * If there has been no recent transaction and no one owns the mailbox,
290 * we should acquire it in under 1ms. However, if we've accessed it
291 * recently it may take up to 2.1 seconds to acquire it again.
292 */
293 do {
294 /* Write first qword of payload */
295 writeq(info->payload[0], priv->mbox_addr);
296
297 /* Check for ownership */
298 ret = readq_poll_timeout(priv->control_addr, control,
299 FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
300 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
301
302 if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
303 retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
304 msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
305 continue;
306 }
307
308 /* Either we got it or someone else did. */
309 break;
310 } while (true);
311
312 return ret;
313}
314
315static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
316{
317 int ret;
318
319 lockdep_assert_held(&priv->mb_lock);
320
321 ret = sdsi_mbox_acquire(priv, info);
322 if (ret)
323 return ret;
324
325 return sdsi_mbox_cmd_write(priv, info);
326}
327
328static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
329{
330 int ret;
331
332 lockdep_assert_held(&priv->mb_lock);
333
334 ret = sdsi_mbox_acquire(priv, info);
335 if (ret)
336 return ret;
337
338 return sdsi_mbox_cmd_read(priv, info, data_size);
339}
340
341static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
342 enum sdsi_command command)
343{
344 struct sdsi_mbox_info info;
345 int ret;
346
347 if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
348 return -EOVERFLOW;
349
350 /* Qword aligned message + command qword */
351 info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
352
353 info.payload = kzalloc(info.size, GFP_KERNEL);
354 if (!info.payload)
355 return -ENOMEM;
356
357 /* Copy message to payload buffer */
358 memcpy(info.payload, buf, count);
359
360 /* Command is last qword of payload buffer */
361 info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;
362
363 ret = mutex_lock_interruptible(&priv->mb_lock);
364 if (ret)
365 goto free_payload;
366 ret = sdsi_mbox_write(priv, &info);
367 mutex_unlock(&priv->mb_lock);
368
369free_payload:
370 kfree(info.payload);
371
372 if (ret)
373 return ret;
374
375 return count;
376}
377
378static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
379 struct bin_attribute *attr, char *buf, loff_t off,
380 size_t count)
381{
382 struct device *dev = kobj_to_dev(kobj);
383 struct sdsi_priv *priv = dev_get_drvdata(dev);
384
385 if (off)
386 return -ESPIPE;
387
388 return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
389}
390static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
391
392static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
393 struct bin_attribute *attr, char *buf, loff_t off,
394 size_t count)
395{
396 struct device *dev = kobj_to_dev(kobj);
397 struct sdsi_priv *priv = dev_get_drvdata(dev);
398
399 if (off)
400 return -ESPIPE;
401
402 return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
403}
404static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
405
406static ssize_t
407certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
408 size_t count)
409{
410 struct sdsi_mbox_info info;
411 size_t size;
412 int ret;
413
414 if (off)
415 return 0;
416
417 /* Buffer for return data */
418 info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
419 if (!info.buffer)
420 return -ENOMEM;
421
422 info.payload = &command;
423 info.size = sizeof(command);
424
425 ret = mutex_lock_interruptible(&priv->mb_lock);
426 if (ret)
427 goto free_buffer;
428 ret = sdsi_mbox_read(priv, &info, &size);
429 mutex_unlock(&priv->mb_lock);
430 if (ret < 0)
431 goto free_buffer;
432
433 if (size > count)
434 size = count;
435
436 memcpy(buf, info.buffer, size);
437
438free_buffer:
439 kfree(info.buffer);
440
441 if (ret)
442 return ret;
443
444 return size;
445}
446
447static ssize_t
448state_certificate_read(struct file *filp, struct kobject *kobj,
449 struct bin_attribute *attr, char *buf, loff_t off,
450 size_t count)
451{
452 struct device *dev = kobj_to_dev(kobj);
453 struct sdsi_priv *priv = dev_get_drvdata(dev);
454
455 return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count);
456}
457static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
458
459static ssize_t
460meter_certificate_read(struct file *filp, struct kobject *kobj,
461 struct bin_attribute *attr, char *buf, loff_t off,
462 size_t count)
463{
464 struct device *dev = kobj_to_dev(kobj);
465 struct sdsi_priv *priv = dev_get_drvdata(dev);
466
467 return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count);
468}
469static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
470
471static ssize_t registers_read(struct file *filp, struct kobject *kobj,
472 struct bin_attribute *attr, char *buf, loff_t off,
473 size_t count)
474{
475 struct device *dev = kobj_to_dev(kobj);
476 struct sdsi_priv *priv = dev_get_drvdata(dev);
477 void __iomem *addr = priv->regs_addr;
478 int size = priv->registers_size;
479
480 /*
481 * The check below is performed by the sysfs caller based on the static
482 * file size. But this may be greater than the actual size which is based
483 * on the GUID. So check here again based on actual size before reading.
484 */
485 if (off >= size)
486 return 0;
487
488 if (off + count > size)
489 count = size - off;
490
491 memcpy_fromio(buf, addr + off, count);
492
493 return count;
494}
495static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
496
497static struct bin_attribute *sdsi_bin_attrs[] = {
498 &bin_attr_registers,
499 &bin_attr_state_certificate,
500 &bin_attr_meter_certificate,
501 &bin_attr_provision_akc,
502 &bin_attr_provision_cap,
503 NULL
504};
505
506static umode_t
507sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
508{
509 struct device *dev = kobj_to_dev(kobj);
510 struct sdsi_priv *priv = dev_get_drvdata(dev);
511
512 /* Registers file is always readable if the device is present */
513 if (attr == &bin_attr_registers)
514 return attr->attr.mode;
515
516 /* All other attributes not visible if BIOS has not enabled On Demand */
517 if (!(priv->features & SDSI_FEATURE_SDSI))
518 return 0;
519
520 if (attr == &bin_attr_meter_certificate)
521 return (priv->features & SDSI_FEATURE_METERING) ?
522 attr->attr.mode : 0;
523
524 return attr->attr.mode;
525}
526
527static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
528{
529 struct sdsi_priv *priv = dev_get_drvdata(dev);
530
531 return sysfs_emit(buf, "0x%x\n", priv->guid);
532}
533static DEVICE_ATTR_RO(guid);
534
535static struct attribute *sdsi_attrs[] = {
536 &dev_attr_guid.attr,
537 NULL
538};
539
540static const struct attribute_group sdsi_group = {
541 .attrs = sdsi_attrs,
542 .bin_attrs = sdsi_bin_attrs,
543 .is_bin_visible = sdsi_battr_is_visible,
544};
545__ATTRIBUTE_GROUPS(sdsi);
546
547static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
548{
549 switch (table->guid) {
550 case SDSI_GUID_V1:
551 priv->control_size = GUID_V1_CNTRL_SIZE;
552 priv->registers_size = GUID_V1_REGS_SIZE;
553 break;
554 case SDSI_GUID_V2:
555 priv->control_size = GUID_V2_CNTRL_SIZE;
556 priv->registers_size = GUID_V2_REGS_SIZE;
557 break;
558 default:
559 dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
560 return -EINVAL;
561 }
562 return 0;
563}
564
565static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
566 struct disc_table *disc_table, struct resource *disc_res)
567{
568 u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
569 u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);
570 u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
571 u32 offset = DT_OFFSET(disc_table->offset);
572 struct resource res = {};
573
574 /* Starting location of SDSi MMIO region based on access type */
575 switch (access_type) {
576 case ACCESS_TYPE_LOCAL:
577 if (tbir) {
578 dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
579 tbir, access_type);
580 return -EINVAL;
581 }
582
583 /*
584 * For access_type LOCAL, the base address is as follows:
585 * base address = end of discovery region + base offset + 1
586 */
587 res.start = disc_res->end + offset + 1;
588 break;
589
590 case ACCESS_TYPE_BARID:
591 res.start = pci_resource_start(parent, tbir) + offset;
592 break;
593
594 default:
595 dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
596 return -EINVAL;
597 }
598
599 res.end = res.start + size * sizeof(u32) - 1;
600 res.flags = IORESOURCE_MEM;
601
602 priv->control_addr = devm_ioremap_resource(priv->dev, &res);
603 if (IS_ERR(priv->control_addr))
604 return PTR_ERR(priv->control_addr);
605
606 priv->mbox_addr = priv->control_addr + priv->control_size;
607 priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;
608
609 priv->features = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);
610
611 return 0;
612}
613
614static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
615{
616 struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
617 struct disc_table disc_table;
618 struct resource *disc_res;
619 void __iomem *disc_addr;
620 struct sdsi_priv *priv;
621 int ret;
622
623 priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
624 if (!priv)
625 return -ENOMEM;
626
627 priv->dev = &auxdev->dev;
628 mutex_init(&priv->mb_lock);
629 auxiliary_set_drvdata(auxdev, priv);
630
631 /* Get the SDSi discovery table */
632 disc_res = &intel_cap_dev->resource[0];
633 disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
634 if (IS_ERR(disc_addr))
635 return PTR_ERR(disc_addr);
636
637 memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);
638
639 priv->guid = disc_table.guid;
640
641 /* Get guid based layout info */
642 ret = sdsi_get_layout(priv, &disc_table);
643 if (ret)
644 return ret;
645
646 /* Map the SDSi mailbox registers */
647 ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
648 if (ret)
649 return ret;
650
651 return 0;
652}
653
654static const struct auxiliary_device_id sdsi_aux_id_table[] = {
655 { .name = "intel_vsec.sdsi" },
656 {}
657};
658MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);
659
660static struct auxiliary_driver sdsi_aux_driver = {
661 .driver = {
662 .dev_groups = sdsi_groups,
663 },
664 .id_table = sdsi_aux_id_table,
665 .probe = sdsi_probe,
666 /* No remove. All resources are handled under devm */
667};
668module_auxiliary_driver(sdsi_aux_driver);
669
670MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
671MODULE_DESCRIPTION("Intel On Demand (SDSi) driver");
672MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel On Demand (Software Defined Silicon) driver
4 *
5 * Copyright (c) 2022, Intel Corporation.
6 * All Rights Reserved.
7 *
8 * Author: "David E. Box" <david.e.box@linux.intel.com>
9 */
10
11#include <linux/auxiliary_bus.h>
12#include <linux/bits.h>
13#include <linux/bitfield.h>
14#include <linux/device.h>
15#include <linux/intel_vsec.h>
16#include <linux/iopoll.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/overflow.h>
20#include <linux/pci.h>
21#include <linux/slab.h>
22#include <linux/sysfs.h>
23#include <linux/types.h>
24#include <linux/uaccess.h>
25
/* Discovery-table access types (DT_ACCESS_TYPE field) */
#define ACCESS_TYPE_BARID		2
#define ACCESS_TYPE_LOCAL		3

#define SDSI_MIN_SIZE_DWORDS		276
#define SDSI_SIZE_MAILBOX		1024
#define SDSI_SIZE_REGS			80
#define SDSI_SIZE_CMD			sizeof(u64)

/*
 * Write messages are currently up to the size of the mailbox
 * while read messages are up to 4 times the size of the
 * mailbox, sent in packets
 */
#define SDSI_SIZE_WRITE_MSG		SDSI_SIZE_MAILBOX
#define SDSI_SIZE_READ_MSG		(SDSI_SIZE_MAILBOX * 4)

#define SDSI_ENABLED_FEATURES_OFFSET	16
#define SDSI_FEATURE_SDSI		BIT(3)
#define SDSI_FEATURE_METERING		BIT(26)

#define SDSI_SOCKET_ID_OFFSET		64
#define SDSI_SOCKET_ID			GENMASK(3, 0)

/* Hardware status codes reported in CTRL_STATUS */
#define SDSI_MBOX_CMD_SUCCESS		0x40
#define SDSI_MBOX_CMD_TIMEOUT		0x80

/* 500ms budget: provisioning commands can be slow */
#define MBOX_TIMEOUT_US			500000
#define MBOX_TIMEOUT_ACQUIRE_US		1000
#define MBOX_POLLING_PERIOD_US		100
#define MBOX_ACQUIRE_NUM_RETRIES	5
#define MBOX_ACQUIRE_RETRY_DELAY_MS	500
#define MBOX_MAX_PACKETS		4

#define MBOX_OWNER_NONE			0x00
#define MBOX_OWNER_INBAND		0x01

/* Fields of the 64-bit mailbox control register */
#define CTRL_RUN_BUSY			BIT(0)
#define CTRL_READ_WRITE			BIT(1)
#define CTRL_SOM			BIT(2)
#define CTRL_EOM			BIT(3)
#define CTRL_OWNER			GENMASK(5, 4)
#define CTRL_COMPLETE			BIT(6)
#define CTRL_READY			BIT(7)
#define CTRL_INBAND_LOCK		BIT(32)
#define CTRL_METER_ENABLE_DRAM		BIT(33)
#define CTRL_STATUS			GENMASK(15, 8)
#define CTRL_PACKET_SIZE		GENMASK(31, 16)
#define CTRL_MSG_SIZE			GENMASK(63, 48)

/* Discovery table layout */
#define DISC_TABLE_SIZE			12
#define DT_ACCESS_TYPE			GENMASK(3, 0)
#define DT_SIZE				GENMASK(27, 12)
#define DT_TBIR				GENMASK(2, 0)
#define DT_OFFSET(v)			((v) & GENMASK(31, 3))

/* Per-GUID register layout sizes */
#define SDSI_GUID_V1			0x006DD191
#define GUID_V1_CNTRL_SIZE		8
#define GUID_V1_REGS_SIZE		72
#define SDSI_GUID_V2			0xF210D9EF
#define GUID_V2_CNTRL_SIZE		16
#define GUID_V2_REGS_SIZE		80

/* Mailbox command identifiers (last qword of the payload) */
enum sdsi_command {
	SDSI_CMD_PROVISION_AKC		= 0x0004,
	SDSI_CMD_PROVISION_CAP		= 0x0008,
	SDSI_CMD_READ_STATE		= 0x0010,
	SDSI_CMD_READ_METER		= 0x0014,
};
94
/* Describes one mailbox transaction: payload, optional return buffer, extra control bits. */
struct sdsi_mbox_info {
	u64 *payload;		/* qword-aligned command + message, payload[0] sent at acquire */
	void *buffer;		/* destination for read data; NULL when no data is expected */
	u64 control_flags;	/* extra CTRL_* bits OR'd into the command (e.g. CTRL_METER_ENABLE_DRAM) */
	int size;		/* payload size in bytes, qword aligned */
};

/* On-device discovery table header (DISC_TABLE_SIZE bytes, read via MMIO). */
struct disc_table {
	u32 access_info;	/* DT_ACCESS_TYPE | DT_SIZE fields */
	u32 guid;		/* layout GUID (SDSI_GUID_V1/V2) */
	u32 offset;		/* DT_TBIR | DT_OFFSET fields */
};

/* Per-device driver state. */
struct sdsi_priv {
	struct mutex mb_lock;	/* Mailbox access lock */
	struct device *dev;
	void __iomem *control_addr;	/* mailbox control register */
	void __iomem *mbox_addr;	/* mailbox data window (control + control_size) */
	void __iomem *regs_addr;	/* register region (mbox + SDSI_SIZE_MAILBOX) */
	int control_size;
	int maibox_size;	/* NOTE(review): apparent typo for "mailbox_size"; unused in this view — confirm before renaming */
	int registers_size;
	u32 guid;
	u32 features;		/* SDSI_FEATURE_* bits read at probe */
};
120
121/* SDSi mailbox operations must be performed using 64bit mov instructions */
122static __always_inline void
123sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
124{
125 size_t count = count_bytes / sizeof(*to);
126 int i;
127
128 for (i = 0; i < count; i++)
129 writeq(from[i], &to[i]);
130}
131
132static __always_inline void
133sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
134{
135 size_t count = count_bytes / sizeof(*to);
136 int i;
137
138 for (i = 0; i < count; i++)
139 to[i] = readq(&from[i]);
140}
141
142static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
143{
144 u64 control = FIELD_PREP(CTRL_COMPLETE, 1);
145
146 lockdep_assert_held(&priv->mb_lock);
147 writeq(control, priv->control_addr);
148}
149
150static int sdsi_status_to_errno(u32 status)
151{
152 switch (status) {
153 case SDSI_MBOX_CMD_SUCCESS:
154 return 0;
155 case SDSI_MBOX_CMD_TIMEOUT:
156 return -ETIMEDOUT;
157 default:
158 return -EIO;
159 }
160}
161
162static int sdsi_mbox_poll(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
163 size_t *data_size)
164{
165 struct device *dev = priv->dev;
166 u32 total, loop, eom, status, message_size;
167 u64 control;
168 int ret;
169
170 lockdep_assert_held(&priv->mb_lock);
171
172 /* For reads, data sizes that are larger than the mailbox size are read in packets. */
173 total = 0;
174 loop = 0;
175 do {
176 u32 packet_size;
177
178 /* Poll on ready bit */
179 ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
180 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
181 if (ret)
182 break;
183
184 eom = FIELD_GET(CTRL_EOM, control);
185 status = FIELD_GET(CTRL_STATUS, control);
186 packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
187 message_size = FIELD_GET(CTRL_MSG_SIZE, control);
188
189 ret = sdsi_status_to_errno(status);
190 if (ret)
191 break;
192
193 if (!packet_size) {
194 sdsi_complete_transaction(priv);
195 break;
196 }
197
198 /* Only the last packet can be less than the mailbox size. */
199 if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
200 dev_err(dev, "Invalid packet size\n");
201 ret = -EPROTO;
202 break;
203 }
204
205 if (packet_size > SDSI_SIZE_MAILBOX) {
206 dev_err(dev, "Packet size too large\n");
207 ret = -EPROTO;
208 break;
209 }
210
211 if (info->buffer) {
212 void *buf = info->buffer + array_size(SDSI_SIZE_MAILBOX, loop);
213
214 sdsi_memcpy64_fromio(buf, priv->mbox_addr,
215 round_up(packet_size, SDSI_SIZE_CMD));
216 total += packet_size;
217 }
218
219 sdsi_complete_transaction(priv);
220 } while (!eom && ++loop < MBOX_MAX_PACKETS);
221
222 if (ret) {
223 sdsi_complete_transaction(priv);
224 return ret;
225 }
226
227 if (!eom) {
228 dev_err(dev, "Exceeded read attempts\n");
229 return -EPROTO;
230 }
231
232 /* Message size check is only valid for multi-packet transfers */
233 if (loop && total != message_size)
234 dev_warn(dev, "Read count %u differs from expected count %u\n",
235 total, message_size);
236
237 if (data_size)
238 *data_size = total;
239
240 return 0;
241}
242
243static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
244 size_t *data_size)
245{
246 u64 control;
247
248 lockdep_assert_held(&priv->mb_lock);
249
250 /* Format and send the read command */
251 control = FIELD_PREP(CTRL_EOM, 1) |
252 FIELD_PREP(CTRL_SOM, 1) |
253 FIELD_PREP(CTRL_RUN_BUSY, 1) |
254 FIELD_PREP(CTRL_PACKET_SIZE, info->size) |
255 info->control_flags;
256 writeq(control, priv->control_addr);
257
258 return sdsi_mbox_poll(priv, info, data_size);
259}
260
261static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
262 size_t *data_size)
263{
264 u64 control;
265
266 lockdep_assert_held(&priv->mb_lock);
267
268 /* Write rest of the payload */
269 sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
270 info->size - SDSI_SIZE_CMD);
271
272 /* Format and send the write command */
273 control = FIELD_PREP(CTRL_EOM, 1) |
274 FIELD_PREP(CTRL_SOM, 1) |
275 FIELD_PREP(CTRL_RUN_BUSY, 1) |
276 FIELD_PREP(CTRL_READ_WRITE, 1) |
277 FIELD_PREP(CTRL_MSG_SIZE, info->size) |
278 FIELD_PREP(CTRL_PACKET_SIZE, info->size);
279 writeq(control, priv->control_addr);
280
281 return sdsi_mbox_poll(priv, info, data_size);
282}
283
284static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
285{
286 u64 control;
287 u32 owner;
288 int ret, retries = 0;
289
290 lockdep_assert_held(&priv->mb_lock);
291
292 /* Check mailbox is available */
293 control = readq(priv->control_addr);
294 owner = FIELD_GET(CTRL_OWNER, control);
295 if (owner != MBOX_OWNER_NONE)
296 return -EBUSY;
297
298 /*
299 * If there has been no recent transaction and no one owns the mailbox,
300 * we should acquire it in under 1ms. However, if we've accessed it
301 * recently it may take up to 2.1 seconds to acquire it again.
302 */
303 do {
304 /* Write first qword of payload */
305 writeq(info->payload[0], priv->mbox_addr);
306
307 /* Check for ownership */
308 ret = readq_poll_timeout(priv->control_addr, control,
309 FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
310 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
311
312 if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
313 retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
314 msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
315 continue;
316 }
317
318 /* Either we got it or someone else did. */
319 break;
320 } while (true);
321
322 return ret;
323}
324
325static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
326 size_t *data_size)
327{
328 int ret;
329
330 lockdep_assert_held(&priv->mb_lock);
331
332 ret = sdsi_mbox_acquire(priv, info);
333 if (ret)
334 return ret;
335
336 return sdsi_mbox_cmd_write(priv, info, data_size);
337}
338
339static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
340{
341 int ret;
342
343 lockdep_assert_held(&priv->mb_lock);
344
345 ret = sdsi_mbox_acquire(priv, info);
346 if (ret)
347 return ret;
348
349 return sdsi_mbox_cmd_read(priv, info, data_size);
350}
351
352static bool sdsi_ib_locked(struct sdsi_priv *priv)
353{
354 return !!FIELD_GET(CTRL_INBAND_LOCK, readq(priv->control_addr));
355}
356
357static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
358 enum sdsi_command command)
359{
360 struct sdsi_mbox_info info = {};
361 int ret;
362
363 if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
364 return -EOVERFLOW;
365
366 /* Make sure In-band lock is not set */
367 if (sdsi_ib_locked(priv))
368 return -EPERM;
369
370 /* Qword aligned message + command qword */
371 info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
372
373 info.payload = kzalloc(info.size, GFP_KERNEL);
374 if (!info.payload)
375 return -ENOMEM;
376
377 /* Copy message to payload buffer */
378 memcpy(info.payload, buf, count);
379
380 /* Command is last qword of payload buffer */
381 info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;
382
383 ret = mutex_lock_interruptible(&priv->mb_lock);
384 if (ret)
385 goto free_payload;
386
387 ret = sdsi_mbox_write(priv, &info, NULL);
388
389 mutex_unlock(&priv->mb_lock);
390
391free_payload:
392 kfree(info.payload);
393
394 if (ret)
395 return ret;
396
397 return count;
398}
399
400static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
401 struct bin_attribute *attr, char *buf, loff_t off,
402 size_t count)
403{
404 struct device *dev = kobj_to_dev(kobj);
405 struct sdsi_priv *priv = dev_get_drvdata(dev);
406
407 if (off)
408 return -ESPIPE;
409
410 return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
411}
412static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
413
414static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
415 struct bin_attribute *attr, char *buf, loff_t off,
416 size_t count)
417{
418 struct device *dev = kobj_to_dev(kobj);
419 struct sdsi_priv *priv = dev_get_drvdata(dev);
420
421 if (off)
422 return -ESPIPE;
423
424 return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
425}
426static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
427
428static ssize_t
429certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
430 char *buf, loff_t off, size_t count)
431{
432 struct sdsi_mbox_info info = {};
433 size_t size;
434 int ret;
435
436 if (off)
437 return 0;
438
439 /* Buffer for return data */
440 info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
441 if (!info.buffer)
442 return -ENOMEM;
443
444 info.payload = &command;
445 info.size = sizeof(command);
446 info.control_flags = control_flags;
447
448 ret = mutex_lock_interruptible(&priv->mb_lock);
449 if (ret)
450 goto free_buffer;
451 ret = sdsi_mbox_read(priv, &info, &size);
452 mutex_unlock(&priv->mb_lock);
453 if (ret < 0)
454 goto free_buffer;
455
456 if (size > count)
457 size = count;
458
459 memcpy(buf, info.buffer, size);
460
461free_buffer:
462 kfree(info.buffer);
463
464 if (ret)
465 return ret;
466
467 return size;
468}
469
470static ssize_t
471state_certificate_read(struct file *filp, struct kobject *kobj,
472 struct bin_attribute *attr, char *buf, loff_t off,
473 size_t count)
474{
475 struct device *dev = kobj_to_dev(kobj);
476 struct sdsi_priv *priv = dev_get_drvdata(dev);
477
478 return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
479}
480static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
481
482static ssize_t
483meter_certificate_read(struct file *filp, struct kobject *kobj,
484 struct bin_attribute *attr, char *buf, loff_t off,
485 size_t count)
486{
487 struct device *dev = kobj_to_dev(kobj);
488 struct sdsi_priv *priv = dev_get_drvdata(dev);
489
490 return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
491}
492static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
493
494static ssize_t
495meter_current_read(struct file *filp, struct kobject *kobj,
496 struct bin_attribute *attr, char *buf, loff_t off,
497 size_t count)
498{
499 struct device *dev = kobj_to_dev(kobj);
500 struct sdsi_priv *priv = dev_get_drvdata(dev);
501
502 return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
503 priv, buf, off, count);
504}
505static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
506
507static ssize_t registers_read(struct file *filp, struct kobject *kobj,
508 struct bin_attribute *attr, char *buf, loff_t off,
509 size_t count)
510{
511 struct device *dev = kobj_to_dev(kobj);
512 struct sdsi_priv *priv = dev_get_drvdata(dev);
513 void __iomem *addr = priv->regs_addr;
514 int size = priv->registers_size;
515
516 /*
517 * The check below is performed by the sysfs caller based on the static
518 * file size. But this may be greater than the actual size which is based
519 * on the GUID. So check here again based on actual size before reading.
520 */
521 if (off >= size)
522 return 0;
523
524 if (off + count > size)
525 count = size - off;
526
527 memcpy_fromio(buf, addr + off, count);
528
529 return count;
530}
531static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
532
/*
 * Binary sysfs files for the device. Visibility of individual entries is
 * filtered at runtime by sdsi_battr_is_visible() based on priv->features.
 */
static struct bin_attribute *sdsi_bin_attrs[] = {
	&bin_attr_registers,
	&bin_attr_state_certificate,
	&bin_attr_meter_certificate,
	&bin_attr_meter_current,
	&bin_attr_provision_akc,
	&bin_attr_provision_cap,
	NULL
};
542
543static umode_t
544sdsi_battr_is_visible(struct kobject *kobj, const struct bin_attribute *attr, int n)
545{
546 struct device *dev = kobj_to_dev(kobj);
547 struct sdsi_priv *priv = dev_get_drvdata(dev);
548
549 /* Registers file is always readable if the device is present */
550 if (attr == &bin_attr_registers)
551 return attr->attr.mode;
552
553 /* All other attributes not visible if BIOS has not enabled On Demand */
554 if (!(priv->features & SDSI_FEATURE_SDSI))
555 return 0;
556
557 if (attr == &bin_attr_meter_certificate || attr == &bin_attr_meter_current)
558 return (priv->features & SDSI_FEATURE_METERING) ?
559 attr->attr.mode : 0;
560
561 return attr->attr.mode;
562}
563
564static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
565{
566 struct sdsi_priv *priv = dev_get_drvdata(dev);
567
568 return sysfs_emit(buf, "0x%x\n", priv->guid);
569}
570static DEVICE_ATTR_RO(guid);
571
/* Plain (non-binary) sysfs attributes for the device */
static struct attribute *sdsi_attrs[] = {
	&dev_attr_guid.attr,
	NULL
};
576
/* Attribute group registered via the driver's dev_groups pointer */
static const struct attribute_group sdsi_group = {
	.attrs = sdsi_attrs,
	.bin_attrs = sdsi_bin_attrs,
	.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);
583
584static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
585{
586 switch (table->guid) {
587 case SDSI_GUID_V1:
588 priv->control_size = GUID_V1_CNTRL_SIZE;
589 priv->registers_size = GUID_V1_REGS_SIZE;
590 break;
591 case SDSI_GUID_V2:
592 priv->control_size = GUID_V2_CNTRL_SIZE;
593 priv->registers_size = GUID_V2_REGS_SIZE;
594 break;
595 default:
596 dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
597 return -EINVAL;
598 }
599 return 0;
600}
601
/*
 * Locate and map the SDSi MMIO region described by the discovery table,
 * then derive the control, mailbox and registers sub-region pointers and
 * latch the enabled-features word. Returns 0 on success or a negative
 * errno.
 */
static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
				   struct disc_table *disc_table, struct resource *disc_res)
{
	u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
	u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);	/* in dwords */
	u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
	u32 offset = DT_OFFSET(disc_table->offset);
	struct resource res = {};

	/* Starting location of SDSi MMIO region based on access type */
	switch (access_type) {
	case ACCESS_TYPE_LOCAL:
		/* LOCAL access only supports BAR index 0 */
		if (tbir) {
			dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
				tbir, access_type);
			return -EINVAL;
		}

		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset + 1
		 */
		res.start = disc_res->end + offset + 1;
		break;

	case ACCESS_TYPE_BARID:
		/* Region lives at an offset inside the parent PCI device's BAR */
		res.start = pci_resource_start(parent, tbir) + offset;
		break;

	default:
		dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
		return -EINVAL;
	}

	/* size is a dword count; convert to a byte-addressed, inclusive end */
	res.end = res.start + size * sizeof(u32) - 1;
	res.flags = IORESOURCE_MEM;

	priv->control_addr = devm_ioremap_resource(priv->dev, &res);
	if (IS_ERR(priv->control_addr))
		return PTR_ERR(priv->control_addr);

	/* Layout: control region, then mailbox, then registers */
	priv->mbox_addr = priv->control_addr + priv->control_size;
	priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;

	priv->features = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);

	return 0;
}
650
/*
 * Probe: read the discovery table from the vsec resource, determine the
 * GUID-based layout, and map the mailbox registers. All resources are
 * devm-managed, so there is no remove callback.
 */
static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
	struct disc_table disc_table;
	struct resource *disc_res;
	void __iomem *disc_addr;
	struct sdsi_priv *priv;
	int ret;

	priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &auxdev->dev;
	/* Serializes mailbox transactions (taken by the sysfs read paths) */
	mutex_init(&priv->mb_lock);
	auxiliary_set_drvdata(auxdev, priv);

	/* Get the SDSi discovery table */
	disc_res = &intel_cap_dev->resource[0];
	disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
	if (IS_ERR(disc_addr))
		return PTR_ERR(disc_addr);

	memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);

	priv->guid = disc_table.guid;

	/* Get guid based layout info */
	ret = sdsi_get_layout(priv, &disc_table);
	if (ret)
		return ret;

	/* Map the SDSi mailbox registers */
	ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
	if (ret)
		return ret;

	return 0;
}
690
/* Matches the "sdsi" auxiliary device created by the intel_vsec driver */
static const struct auxiliary_device_id sdsi_aux_id_table[] = {
	{ .name = "intel_vsec.sdsi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);
696
/* Auxiliary driver; sysfs groups are attached via dev_groups at bind time */
static struct auxiliary_driver sdsi_aux_driver = {
	.driver = {
		.dev_groups = sdsi_groups,
	},
	.id_table = sdsi_aux_id_table,
	.probe = sdsi_probe,
	/* No remove. All resources are handled under devm */
};
module_auxiliary_driver(sdsi_aux_driver);
706
707MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
708MODULE_DESCRIPTION("Intel On Demand (SDSi) driver");
709MODULE_LICENSE("GPL");