1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3#include <linux/device.h>
4#include <linux/module.h>
5#include <linux/slab.h>
6
7#include "cxlmem.h"
8#include "cxlpci.h"
9
/**
 * DOC: cxl port
 *
 * The port driver enumerates dports via PCI and scans for HDM
 * (Host-managed-Device-Memory) decoder resources via the
 * @component_reg_phys value passed in by the agent that registered the
 * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
 * is responsible for tearing down the driver context of immediate
 * descendant ports. The locking for this is validated by
 * CONFIG_PROVE_CXL_LOCKING.
 *
 * The primary service this driver provides is presenting APIs to other
 * drivers to utilize the decoders, and indicating to userspace (via bind
 * status) the connectivity of the CXL.mem protocol throughout the
 * PCIe topology.
 */

27
/* devm release action: queue detach of the endpoint's memdev */
static void schedule_detach(void *cxlmd)
{
	struct cxl_memdev *mem = cxlmd;

	schedule_cxl_memdev_detach(mem);
}
32
33static int cxl_port_probe(struct device *dev)
34{
35 struct cxl_port *port = to_cxl_port(dev);
36 struct cxl_hdm *cxlhdm;
37 int rc;
38
39
40 if (!is_cxl_endpoint(port)) {
41 rc = devm_cxl_port_enumerate_dports(port);
42 if (rc < 0)
43 return rc;
44 if (rc == 1)
45 return devm_cxl_add_passthrough_decoder(port);
46 }
47
48 cxlhdm = devm_cxl_setup_hdm(port);
49 if (IS_ERR(cxlhdm))
50 return PTR_ERR(cxlhdm);
51
52 if (is_cxl_endpoint(port)) {
53 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
54 struct cxl_dev_state *cxlds = cxlmd->cxlds;
55
56 /* Cache the data early to ensure is_visible() works */
57 read_cdat_data(port);
58
59 get_device(&cxlmd->dev);
60 rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
61 if (rc)
62 return rc;
63
64 rc = cxl_hdm_decode_init(cxlds, cxlhdm);
65 if (rc)
66 return rc;
67
68 rc = cxl_await_media_ready(cxlds);
69 if (rc) {
70 dev_err(dev, "Media not active (%d)\n", rc);
71 return rc;
72 }
73 }
74
75 rc = devm_cxl_enumerate_decoders(cxlhdm);
76 if (rc) {
77 dev_err(dev, "Couldn't enumerate decoders (%d)\n", rc);
78 return rc;
79 }
80
81 return 0;
82}
83
84static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
85 struct bin_attribute *bin_attr, char *buf,
86 loff_t offset, size_t count)
87{
88 struct device *dev = kobj_to_dev(kobj);
89 struct cxl_port *port = to_cxl_port(dev);
90
91 if (!port->cdat_available)
92 return -ENXIO;
93
94 if (!port->cdat.table)
95 return 0;
96
97 return memory_read_from_buffer(buf, count, &offset,
98 port->cdat.table,
99 port->cdat.length);
100}
101
/* Root-only readable binary attribute; size 0 means "unknown length" */
static BIN_ATTR_ADMIN_RO(CDAT, 0);
103
104static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
105 struct bin_attribute *attr, int i)
106{
107 struct device *dev = kobj_to_dev(kobj);
108 struct cxl_port *port = to_cxl_port(dev);
109
110 if ((attr == &bin_attr_CDAT) && port->cdat_available)
111 return attr->attr.mode;
112
113 return 0;
114}
115
/* NULL-terminated list of binary attributes for the CDAT group */
static struct bin_attribute *cxl_cdat_bin_attributes[] = {
	&bin_attr_CDAT,
	NULL,
};

static struct attribute_group cxl_cdat_attribute_group = {
	.bin_attrs = cxl_cdat_bin_attributes,
	.is_bin_visible = cxl_port_bin_attr_is_visible,
};

/* Groups attached to every bound cxl_port device via .dev_groups */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_cdat_attribute_group,
	NULL,
};
130
static struct cxl_driver cxl_port_driver = {
	.name = "cxl_port",
	.probe = cxl_port_probe,
	.id = CXL_DEVICE_PORT,
	.drv = {
		.dev_groups = cxl_port_attribute_groups,
	},
};

/* Tie module init/exit to CXL bus driver (un)registration */
module_cxl_driver(cxl_port_driver);
MODULE_LICENSE("GPL v2");
/* Required to link against symbols exported in the CXL namespace */
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3#include <linux/device.h>
4#include <linux/module.h>
5#include <linux/slab.h>
6
7#include "cxlmem.h"
8#include "cxlpci.h"
9
/**
 * DOC: cxl port
 *
 * The port driver enumerates dports via PCI and scans for HDM
 * (Host-managed-Device-Memory) decoder resources via the
 * @component_reg_phys value passed in by the agent that registered the
 * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
 * is responsible for tearing down the driver context of immediate
 * descendant ports. The locking for this is validated by
 * CONFIG_PROVE_CXL_LOCKING.
 *
 * The primary service this driver provides is presenting APIs to other
 * drivers to utilize the decoders, and indicating to userspace (via bind
 * status) the connectivity of the CXL.mem protocol throughout the
 * PCIe topology.
 */

27
/* devm release action: queue detach of the endpoint's memdev */
static void schedule_detach(void *cxlmd)
{
	struct cxl_memdev *mem = cxlmd;

	schedule_cxl_memdev_detach(mem);
}
32
33static int discover_region(struct device *dev, void *root)
34{
35 struct cxl_endpoint_decoder *cxled;
36 int rc;
37
38 if (!is_endpoint_decoder(dev))
39 return 0;
40
41 cxled = to_cxl_endpoint_decoder(dev);
42 if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
43 return 0;
44
45 if (cxled->state != CXL_DECODER_STATE_AUTO)
46 return 0;
47
48 /*
49 * Region enumeration is opportunistic, if this add-event fails,
50 * continue to the next endpoint decoder.
51 */
52 rc = cxl_add_to_region(root, cxled);
53 if (rc)
54 dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
55 cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
56
57 return 0;
58}
59
/*
 * Probe a switch (or host-bridge) port: enumerate downstream ports, parse
 * CDAT, and set up HDM decoders, falling back to a passthrough decoder
 * when a single-dport port lacks an HDM decoder capability.
 */
static int cxl_switch_port_probe(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;
	int rc;

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);

	/* rc is the number of dports found on success */
	rc = devm_cxl_port_enumerate_dports(port);
	if (rc < 0)
		return rc;

	cxl_switch_parse_cdat(port);

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	/* Only -ENODEV ("no capability") may fall through to passthrough */
	if (PTR_ERR(cxlhdm) != -ENODEV) {
		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	/* A single-dport port may legitimately omit the HDM capability */
	if (rc == 1) {
		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
		return devm_cxl_add_passthrough_decoder(port);
	}

	dev_err(&port->dev, "HDM decoder capability not found\n");
	return -ENXIO;
}
91
/*
 * Probe an endpoint port: decode DVSEC ranges, map HDM decoders, cache
 * CDAT, pin the memdev until this port is destroyed, and opportunistically
 * assemble regions from decoders committed by platform firmware.
 */
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	struct cxl_port *root;
	int rc;

	/* Gather DVSEC range-register state before HDM decoder setup */
	rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm)) {
		/* unlike switches, endpoints must have HDM decoders */
		if (PTR_ERR(cxlhdm) == -ENODEV)
			dev_err(&port->dev, "HDM decoder registers not found\n");
		return PTR_ERR(cxlhdm);
	}

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);
	cxl_endpoint_parse_cdat(port);

	/* Hold the memdev; released via the schedule_detach devm action */
	get_device(&cxlmd->dev);
	rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
	if (rc)
		return rc;

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
	if (rc)
		return rc;

	/*
	 * This can't fail in practice as CXL root exit unregisters all
	 * descendant ports and that in turn synchronizes with cxl_port_probe()
	 */
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	root = &cxl_root->port;

	/*
	 * Now that all endpoint decoders are successfully enumerated, try to
	 * assemble regions from committed decoders
	 */
	device_for_each_child(&port->dev, root, discover_region);

	return 0;
}
145
/* Driver probe entry: dispatch on endpoint vs. switch port type */
static int cxl_port_probe(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);

	if (!is_cxl_endpoint(port))
		return cxl_switch_port_probe(port);

	return cxl_endpoint_port_probe(port);
}
154
155static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
156 struct bin_attribute *bin_attr, char *buf,
157 loff_t offset, size_t count)
158{
159 struct device *dev = kobj_to_dev(kobj);
160 struct cxl_port *port = to_cxl_port(dev);
161
162 if (!port->cdat_available)
163 return -ENXIO;
164
165 if (!port->cdat.table)
166 return 0;
167
168 return memory_read_from_buffer(buf, count, &offset,
169 port->cdat.table,
170 port->cdat.length);
171}
172
/* Root-only readable binary attribute; size 0 means "unknown length" */
static BIN_ATTR_ADMIN_RO(CDAT, 0);
174
175static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
176 const struct bin_attribute *attr, int i)
177{
178 struct device *dev = kobj_to_dev(kobj);
179 struct cxl_port *port = to_cxl_port(dev);
180
181 if ((attr == &bin_attr_CDAT) && port->cdat_available)
182 return attr->attr.mode;
183
184 return 0;
185}
186
/* NULL-terminated list of binary attributes for the CDAT group */
static struct bin_attribute *cxl_cdat_bin_attributes[] = {
	&bin_attr_CDAT,
	NULL,
};

static struct attribute_group cxl_cdat_attribute_group = {
	.bin_attrs = cxl_cdat_bin_attributes,
	.is_bin_visible = cxl_port_bin_attr_is_visible,
};

/* Groups attached to every bound cxl_port device via .dev_groups */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_cdat_attribute_group,
	NULL,
};
201
/* CXL bus driver matching CXL_DEVICE_PORT devices */
static struct cxl_driver cxl_port_driver = {
	.name = "cxl_port",
	.probe = cxl_port_probe,
	.id = CXL_DEVICE_PORT,
	.drv = {
		.dev_groups = cxl_port_attribute_groups,
	},
};
210
/* Register the port driver with the CXL bus */
static int __init cxl_port_init(void)
{
	return cxl_driver_register(&cxl_port_driver);
}
/*
 * Be ready to immediately enable ports emitted by the platform CXL root
 * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
 */
subsys_initcall(cxl_port_init);
220
/* Unregister the port driver on module unload */
static void __exit cxl_port_exit(void)
{
	cxl_driver_unregister(&cxl_port_driver);
}
module_exit(cxl_port_exit);
226
MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
/* Required to link against symbols exported in the CXL namespace */
MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);