// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

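/* The AFU master character device keeps its struct cxl_afu in drvdata */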
#define to_afu_chardev_m(d) dev_get_drvdata(d)

/********* Adapter attributes **********************************************/

static ssize_t caia_version_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
                         adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
                               struct device_attribute *attr,
                               char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (adapter->user_image_loaded)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

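/*
 * In native (hypervisor) mode, re-check whether the PSL timebase is in sync
 * with the core timebase before reporting: the two are considered
 * synchronized when the delta converts to less than 16 usecs.
 */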
static ssize_t psl_timebase_synced_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);
        u64 psl_tb, delta;

        /* Recompute the status only in native mode */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                psl_tb = adapter->native->sl_ops->timebase_read(adapter);
                delta = abs(mftb() - psl_tb);

                /* CORE TB and PSL TB difference <= 16usecs ? */
                adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
                pr_devel("PSL timebase %s - delta: 0x%016llx\n",
                         (tb_to_ns(delta) < 16000) ? "synchronized" :
                         "not synchronized", tb_to_ns(delta));
        }
        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

static ssize_t tunneled_ops_supported_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}

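/*
 * Adapter reset trigger: writing 1 resets the adapter only if no contexts
 * are attached (the context lock must be taken first); writing -1 forces a
 * reset regardless of attached contexts.
 */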
static ssize_t reset_adapter_store(struct device *device,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || (val != 1 && val != -1))
                return -EINVAL;

        /*
         * Try to take the context lock, which is only possible while no
         * contexts are attached to the adapter. Once taken, it also
         * prevents any new context from being activated.
         */
        if (val == 1) {
                rc = cxl_adapter_context_lock(adapter);
                if (rc)
                        goto out;

                rc = cxl_ops->adapter_reset(adapter);
                /* In case the reset failed, release the context lock */
                if (rc)
                        cxl_adapter_context_unlock(adapter);

        } else if (val == -1) {
                /* Perform a forced adapter reset */
                rc = cxl_ops->adapter_reset(adapter);
        }

out:
        return rc ? rc : count;
}

static ssize_t load_image_on_perst_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (!adapter->perst_loads_image)
                return scnprintf(buf, PAGE_SIZE, "none\n");

        if (adapter->perst_select_user)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t load_image_on_perst_store(struct device *device,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;

        if (!strncmp(buf, "none", 4))
                adapter->perst_loads_image = false;
        else if (!strncmp(buf, "user", 4)) {
                adapter->perst_select_user = true;
                adapter->perst_loads_image = true;
        } else if (!strncmp(buf, "factory", 7)) {
                adapter->perst_select_user = false;
                adapter->perst_loads_image = true;
        } else
                return -EINVAL;

        if ((rc = cxl_update_image_control(adapter)))
                return rc;

        return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
                                             struct device_attribute *attr,
                                             char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
                                              struct device_attribute *attr,
                                              const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || !(val == 1 || val == 0))
                return -EINVAL;

        adapter->perst_same_image = (val == 1);
        return count;
}

static struct device_attribute adapter_attrs[] = {
        __ATTR_RO(caia_version),
        __ATTR_RO(psl_revision),
        __ATTR_RO(base_image),
        __ATTR_RO(image_loaded),
        __ATTR_RO(psl_timebase_synced),
        __ATTR_RO(tunneled_ops_supported),
        __ATTR_RW(load_image_on_perst),
        __ATTR_RW(perst_reloads_same_image),
        __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};


/********* AFU master specific attributes **********************************/

static ssize_t mmio_size_show_master(struct device *device,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
        __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
        __ATTR_RO(pp_mmio_off),
        __ATTR_RO(pp_mmio_len),
};


/********* AFU attributes **************************************************/

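/*
 * Per-process MMIO size: if the AFU provides a per-process problem state
 * area, report its size, otherwise fall back to the adapter-wide problem
 * state size.
 */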
static ssize_t mmio_size_show(struct device *device,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->pp_size)
                return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int rc;

        /* Not safe to reset if it is currently in use */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr)) {
                rc = -EBUSY;
                goto err;
        }

        if ((rc = cxl_ops->afu_reset(afu)))
                goto err;

        rc = count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t irqs_min_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

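/*
 * The new irqs_max must cover at least the AFU's per-process minimum and
 * stay within the adapter-wide limit (bare metal) or the per-AFU limit
 * imposed by pHyp (guest mode).
 */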
static ssize_t irqs_max_store(struct device *device,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        ssize_t ret;
        int irqs_max;

        ret = sscanf(buf, "%i", &irqs_max);
        if (ret != 1)
                return -EINVAL;

        if (irqs_max < afu->pp_irqs)
                return -EINVAL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (irqs_max > afu->adapter->user_irqs)
                        return -EINVAL;
        } else {
                /* pHyp sets a per-AFU limit */
                if (irqs_max > afu->guest->max_ints)
                        return -EINVAL;
        }

        afu->irqs_max = irqs_max;
        return count;
}

static ssize_t modes_supported_show(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        char *p = buf, *end = buf + PAGE_SIZE;

        if (afu->modes_supported & CXL_MODE_DEDICATED)
                p += scnprintf(p, end - p, "dedicated_process\n");
        if (afu->modes_supported & CXL_MODE_DIRECTED)
                p += scnprintf(p, end - p, "afu_directed\n");
        return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        switch (afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
        case CXL_PREFAULT_ALL:
                return scnprintf(buf, PAGE_SIZE, "all\n");
        default:
                return scnprintf(buf, PAGE_SIZE, "none\n");
        }
}

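/*
 * Prefault modes other than "none" rely on the hash MMU; when the radix MMU
 * is enabled they are rejected.
 */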
static ssize_t prefault_mode_store(struct device *device,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        enum prefault_modes mode = -1;

        if (!strncmp(buf, "none", 4))
                mode = CXL_PREFAULT_NONE;
        else {
                if (!radix_enabled()) {

                        /* only allowed when not in radix mode */
                        if (!strncmp(buf, "work_element_descriptor", 23))
                                mode = CXL_PREFAULT_WED;
                        if (!strncmp(buf, "all", 3))
                                mode = CXL_PREFAULT_ALL;
                } else {
                        dev_err(device, "Cannot prefault with radix enabled\n");
                }
        }

        if (mode == -1)
                return -EINVAL;

        afu->prefault_mode = mode;
        return count;
}

static ssize_t mode_show(struct device *device,
                         struct device_attribute *attr,
                         char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->current_mode == CXL_MODE_DEDICATED)
                return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
        if (afu->current_mode == CXL_MODE_DIRECTED)
                return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
        return scnprintf(buf, PAGE_SIZE, "none\n");
}

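/*
 * Changing the AFU mode is only allowed while no contexts are attached.
 * The current mode is cleared under contexts_lock so no new context can
 * slip in, then the old mode is deactivated and the new one activated
 * outside the lock.
 */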
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int old_mode, mode = -1;
        int rc = -EBUSY;

        /* can't change this if we have a user */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr))
                goto err;

        if (!strncmp(buf, "dedicated_process", 17))
                mode = CXL_MODE_DEDICATED;
        if (!strncmp(buf, "afu_directed", 12))
                mode = CXL_MODE_DIRECTED;
        if (!strncmp(buf, "none", 4))
                mode = 0;

        if (mode == -1) {
                rc = -EINVAL;
                goto err;
        }

        /*
         * afu_deactivate_mode needs to be done outside the lock, so prevent
         * other contexts from coming in before we are ready:
         */
        old_mode = afu->current_mode;
        afu->current_mode = 0;
        afu->num_procs = 0;

        mutex_unlock(&afu->contexts_lock);

        if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
                return rc;
        if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
                return rc;

        return count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t api_version_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

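/*
 * Backs the "afu_err_buff" binary attribute: reads are forwarded to the
 * backend's AFU error buffer accessor.
 */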
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
                           struct bin_attribute *bin_attr, char *buf,
                           loff_t off, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

        return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
        __ATTR_RO(mmio_size),
        __ATTR_RO(irqs_min),
        __ATTR_RW(irqs_max),
        __ATTR_RO(modes_supported),
        __ATTR_RW(mode),
        __ATTR_RW(prefault_mode),
        __ATTR_RO(api_version),
        __ATTR_RO(api_version_compatible),
        __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

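/*
 * Create the adapter attributes that the active backend (native or guest)
 * supports; on failure, remove the ones already created.
 */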
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS)) {
                        if ((rc = device_create_file(&adapter->dev, dev_attr)))
                                goto err;
                }
        }
        return 0;
err:
        for (i--; i >= 0; i--) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
        return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
}

struct afu_config_record {
        struct kobject kobj;
        struct bin_attribute config_attr;
        struct list_head list;
        int cr;
        u16 device;
        u16 vendor;
        u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

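/*
 * Read the AFU's config record through aligned 64-bit accesses, copying the
 * requested byte range into the caller's buffer; ranges the backend fails to
 * read are returned as all-ones.
 */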
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
{
        struct afu_config_record *cr = to_cr(kobj);
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

        u64 i, j, val, rc;

        for (i = 0; i < count;) {
                rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
                if (rc)
                        val = ~0ULL;
                for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
                        buf[i] = (val >> (j * 8)) & 0xff;
        }

        return count;
}

static struct kobj_attribute vendor_attribute =
        __ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
        __ATTR_RO(device);
static struct kobj_attribute class_attribute =
        __ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
        &vendor_attribute.attr,
        &device_attribute.attr,
        &class_attribute.attr,
        NULL,
};
ATTRIBUTE_GROUPS(afu_cr);

static void release_afu_config_record(struct kobject *kobj)
{
        struct afu_config_record *cr = to_cr(kobj);

        kfree(cr);
}

static const struct kobj_type afu_config_record_type = {
        .sysfs_ops = &kobj_sysfs_ops,
        .release = release_afu_config_record,
        .default_groups = afu_cr_groups,
};

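/*
 * Allocate a config record, cache its vendor/device/class IDs from the AFU
 * descriptor, and expose it as a crN kobject under the AFU device with a
 * root-readable raw "config" binary file.
 */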
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
        struct afu_config_record *cr;
        int rc;

        cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cr = cr_idx;

        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
        if (rc)
                goto err;
        cr->class >>= 8;

        /*
         * Export the raw, PCIe-like AFU config record. For now this is
         * readable only by root - we can expand that later to be readable
         * by non-root and maybe even writable provided we have a good
         * use case. Once we support exposing AFUs through a virtual PHB
         * they will get that for free from Linux's PCI infrastructure,
         * but until then it's not clear that we need it for anything,
         * since the main use case is just identifying AFUs, which can be
         * done via the vendor, device and class attributes.
         */
        sysfs_bin_attr_init(&cr->config_attr);
        cr->config_attr.attr.name = "config";
        cr->config_attr.attr.mode = S_IRUSR;
        cr->config_attr.size = afu->crs_len;
        cr->config_attr.read = afu_read_config;

        rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
                                  &afu->dev.kobj, "cr%i", cr->cr);
        if (rc)
                goto err1;

        rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
        if (rc)
                goto err1;

        rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
        if (rc)
                goto err2;

        return cr;
err2:
        sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
        kobject_put(&cr->kobj);
        return ERR_PTR(rc);
err:
        kfree(cr);
        return ERR_PTR(rc);
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr, *tmp;
        int i;

        /* remove the err buffer bin attribute */
        if (afu->eb_len)
                device_remove_bin_file(&afu->dev, &afu->attr_eb);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }

        list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
                sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
                kobject_put(&cr->kobj);
        }
}

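/*
 * Register everything an AFU exposes in sysfs: the supported device
 * attributes, an optional "afu_err_buff" binary file when the AFU has an
 * error buffer, and one crN directory per config record. On failure,
 * anything already created is torn down again.
 */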
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr;
        int i, rc;

        INIT_LIST_HEAD(&afu->crs);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS)) {
                        if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
                                goto err;
                }
        }

        /* conditionally create the binary file for the error info buffer */
        if (afu->eb_len) {
                sysfs_attr_init(&afu->attr_eb.attr);

                afu->attr_eb.attr.name = "afu_err_buff";
                afu->attr_eb.attr.mode = S_IRUGO;
                afu->attr_eb.size = afu->eb_len;
                afu->attr_eb.read = afu_eb_read;

                rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
                if (rc) {
                        dev_err(&afu->dev,
                                "Unable to create eb attr for the afu. Err(%d)\n",
                                rc);
                        goto err;
                }
        }

        for (i = 0; i < afu->crs_num; i++) {
                cr = cxl_sysfs_afu_new_cr(afu, i);
                if (IS_ERR(cr)) {
                        rc = PTR_ERR(cr);
                        goto err1;
                }
                list_add(&cr->list, &afu->crs);
        }

        return 0;

err1:
        cxl_sysfs_afu_remove(afu);
        return rc;
err:
        /* reset eb_len as we haven't created the bin attr */
        afu->eb_len = 0;

        for (i--; i >= 0; i--) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }
        return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS)) {
                        if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
                                goto err;
                }
        }

        return 0;

err:
        for (i--; i >= 0; i--) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
        return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
}