1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright 2014 IBM Corp.
4 */
5
6#include <linux/pci.h>
7#include <misc/cxl.h>
8#include "cxl.h"
9
/*
 * vPHB probe mode: always probe config space directly; there is no
 * firmware-assisted probing for the AFU's virtual PHB.
 */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
14
/* MSI/MSI-X is not supported on the vPHB; refuse any allocation. */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
19
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}
27
/*
 * Called by the PCI core before enabling a device on the vPHB.
 *
 * Refuses to enable the device while the adapter link is down.  Otherwise
 * sets the DMA offset, allocates a default cxl context for the device and
 * stores it in archdata, then checks and enables the AFU.  Returns true
 * only when the AFU was successfully enabled.
 */
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things too. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}
55
/*
 * Release the default context allocated by cxl_pci_enable_device_hook().
 * A context that a driver has STARTED must not be torn down here — warn
 * and bail instead.
 */
static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}
69
/* No-op: secondary bus reset is not implemented for the vPHB. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
74
75static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
76{
77 return (bus << 8) + devfn;
78}
79
80static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
81{
82 struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
83
84 return phb ? phb->private_data : NULL;
85}
86
/* Drop the reader reference taken by cxl_afu_configured_get(). */
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
91
/*
 * Take a reader reference on the AFU's configured state.  Returns false
 * once the AFU has been marked deconfigured (state gone negative), so
 * config accesses can no longer race with teardown.
 */
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}
96
/*
 * Translate (bus, devfn) into an AFU configuration record index and
 * validate it against the number of records the AFU exposes.
 *
 * NOTE(review): the bound check uses '>' so record == crs_num passes;
 * confirm whether records are 0- or 1-indexed relative to crs_num.
 */
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}
109
110static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
111 int offset, int len, u32 *val)
112{
113 int rc, record;
114 struct cxl_afu *afu;
115 u8 val8;
116 u16 val16;
117 u32 val32;
118
119 afu = pci_bus_to_afu(bus);
120 /* Grab a reader lock on afu. */
121 if (afu == NULL || !cxl_afu_configured_get(afu))
122 return PCIBIOS_DEVICE_NOT_FOUND;
123
124 rc = cxl_pcie_config_info(bus, devfn, afu, &record);
125 if (rc)
126 goto out;
127
128 switch (len) {
129 case 1:
130 rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
131 *val = val8;
132 break;
133 case 2:
134 rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
135 *val = val16;
136 break;
137 case 4:
138 rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
139 *val = val32;
140 break;
141 default:
142 WARN_ON(1);
143 }
144
145out:
146 cxl_afu_configured_put(afu);
147 return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
148}
149
150static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
151 int offset, int len, u32 val)
152{
153 int rc, record;
154 struct cxl_afu *afu;
155
156 afu = pci_bus_to_afu(bus);
157 /* Grab a reader lock on afu. */
158 if (afu == NULL || !cxl_afu_configured_get(afu))
159 return PCIBIOS_DEVICE_NOT_FOUND;
160
161 rc = cxl_pcie_config_info(bus, devfn, afu, &record);
162 if (rc)
163 goto out;
164
165 switch (len) {
166 case 1:
167 rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
168 break;
169 case 2:
170 rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
171 break;
172 case 4:
173 rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
174 break;
175 default:
176 WARN_ON(1);
177 }
178
179out:
180 cxl_afu_configured_put(afu);
181 return rc ? PCIBIOS_SET_FAILED : 0;
182}
183
/* Config-space accessors for devices sitting on the virtual PHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
189
190
/* powerpc PHB callbacks for the virtual PHB fronting an AFU. */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};
201
202int cxl_pci_vphb_add(struct cxl_afu *afu)
203{
204 struct pci_controller *phb;
205 struct device_node *vphb_dn;
206 struct device *parent;
207
208 /*
209 * If there are no AFU configuration records we won't have anything to
210 * expose under the vPHB, so skip creating one, returning success since
211 * this is still a valid case. This will also opt us out of EEH
212 * handling since we won't have anything special to do if there are no
213 * kernel drivers attached to the vPHB, and EEH handling is not yet
214 * supported in the peer model.
215 */
216 if (!afu->crs_num)
217 return 0;
218
219 /* The parent device is the adapter. Reuse the device node of
220 * the adapter.
221 * We don't seem to care what device node is used for the vPHB,
222 * but tools such as lsvpd walk up the device parents looking
223 * for a valid location code, so we might as well show devices
224 * attached to the adapter as being located on that adapter.
225 */
226 parent = afu->adapter->dev.parent;
227 vphb_dn = parent->of_node;
228
229 /* Alloc and setup PHB data structure */
230 phb = pcibios_alloc_controller(vphb_dn);
231 if (!phb)
232 return -ENODEV;
233
234 /* Setup parent in sysfs */
235 phb->parent = parent;
236
237 /* Setup the PHB using arch provided callback */
238 phb->ops = &cxl_pcie_pci_ops;
239 phb->cfg_addr = NULL;
240 phb->cfg_data = NULL;
241 phb->private_data = afu;
242 phb->controller_ops = cxl_pci_controller_ops;
243
244 /* Scan the bus */
245 pcibios_scan_phb(phb);
246 if (phb->bus == NULL)
247 return -ENXIO;
248
249 /* Set release hook on root bus */
250 pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
251 pcibios_free_controller_deferred,
252 (void *) phb);
253
254 /* Claim resources. This might need some rework as well depending
255 * whether we are doing probe-only or not, like assigning unassigned
256 * resources etc...
257 */
258 pcibios_claim_one_bus(phb->bus);
259
260 /* Add probed PCI devices to the device model */
261 pci_bus_add_devices(phb->bus);
262
263 afu->phb = phb;
264
265 return 0;
266}
267
/*
 * Tear down the virtual PHB created by cxl_pci_vphb_add().  Safe to call
 * when no vPHB exists (e.g. the AFU had no configuration records).  The
 * controller itself is freed later by the host-bridge release hook.
 */
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}
285
286bool cxl_pci_is_vphb_device(struct pci_dev *dev)
287{
288 struct pci_controller *phb;
289
290 phb = pci_bus_to_host(dev->bus);
291
292 return (phb->ops == &cxl_pcie_pci_ops);
293}
294
295struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
296{
297 struct pci_controller *phb;
298
299 phb = pci_bus_to_host(dev->bus);
300
301 return (struct cxl_afu *)phb->private_data;
302}
303EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
304
/* AFU configuration record number corresponding to @dev. */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright 2014 IBM Corp.
4 */
5
6#include <linux/pci.h>
7#include <misc/cxl.h>
8#include "cxl.h"
9
/*
 * vPHB probe mode: always probe config space directly; there is no
 * firmware-assisted probing for the AFU's virtual PHB.
 */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
14
/* MSI/MSI-X is not supported on the vPHB; refuse any allocation. */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
19
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}
27
/*
 * Called by the PCI core before enabling a device on the vPHB.
 *
 * Refuses to enable the device while the adapter link is down.  Otherwise
 * sets the DMA offset, allocates a default cxl context for the device and
 * stores it in archdata, then checks and enables the AFU.  Returns true
 * only when the AFU was successfully enabled.
 */
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things too. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}
55
/*
 * Release the default context allocated by cxl_pci_enable_device_hook().
 * A context that a driver has STARTED must not be torn down here — warn
 * and bail instead.
 */
static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}
69
/*
 * Minimum alignment for bridge windows on the vPHB: 1 byte, i.e. no
 * constraint — the vPHB has no real MMIO windows to align.
 */
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
75
/* No-op: secondary bus reset is not implemented for the vPHB. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
80
81static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
82{
83 return (bus << 8) + devfn;
84}
85
86static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
87{
88 struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
89
90 return phb ? phb->private_data : NULL;
91}
92
/* Drop the reader reference taken by cxl_afu_configured_get(). */
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
97
/*
 * Take a reader reference on the AFU's configured state.  Returns false
 * once the AFU has been marked deconfigured (state gone negative), so
 * config accesses can no longer race with teardown.
 */
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}
102
/*
 * Translate (bus, devfn) into an AFU configuration record index and
 * validate it against the number of records the AFU exposes.
 *
 * NOTE(review): the bound check uses '>' so record == crs_num passes;
 * confirm whether records are 0- or 1-indexed relative to crs_num.
 */
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}
115
116static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
117 int offset, int len, u32 *val)
118{
119 int rc, record;
120 struct cxl_afu *afu;
121 u8 val8;
122 u16 val16;
123 u32 val32;
124
125 afu = pci_bus_to_afu(bus);
126 /* Grab a reader lock on afu. */
127 if (afu == NULL || !cxl_afu_configured_get(afu))
128 return PCIBIOS_DEVICE_NOT_FOUND;
129
130 rc = cxl_pcie_config_info(bus, devfn, afu, &record);
131 if (rc)
132 goto out;
133
134 switch (len) {
135 case 1:
136 rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
137 *val = val8;
138 break;
139 case 2:
140 rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
141 *val = val16;
142 break;
143 case 4:
144 rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
145 *val = val32;
146 break;
147 default:
148 WARN_ON(1);
149 }
150
151out:
152 cxl_afu_configured_put(afu);
153 return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
154}
155
156static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
157 int offset, int len, u32 val)
158{
159 int rc, record;
160 struct cxl_afu *afu;
161
162 afu = pci_bus_to_afu(bus);
163 /* Grab a reader lock on afu. */
164 if (afu == NULL || !cxl_afu_configured_get(afu))
165 return PCIBIOS_DEVICE_NOT_FOUND;
166
167 rc = cxl_pcie_config_info(bus, devfn, afu, &record);
168 if (rc)
169 goto out;
170
171 switch (len) {
172 case 1:
173 rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
174 break;
175 case 2:
176 rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
177 break;
178 case 4:
179 rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
180 break;
181 default:
182 WARN_ON(1);
183 }
184
185out:
186 cxl_afu_configured_put(afu);
187 return rc ? PCIBIOS_SET_FAILED : 0;
188}
189
/* Config-space accessors for devices sitting on the virtual PHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
195
196
/* powerpc PHB callbacks for the virtual PHB fronting an AFU. */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};
208
209int cxl_pci_vphb_add(struct cxl_afu *afu)
210{
211 struct pci_controller *phb;
212 struct device_node *vphb_dn;
213 struct device *parent;
214
215 /*
216 * If there are no AFU configuration records we won't have anything to
217 * expose under the vPHB, so skip creating one, returning success since
218 * this is still a valid case. This will also opt us out of EEH
219 * handling since we won't have anything special to do if there are no
220 * kernel drivers attached to the vPHB, and EEH handling is not yet
221 * supported in the peer model.
222 */
223 if (!afu->crs_num)
224 return 0;
225
226 /* The parent device is the adapter. Reuse the device node of
227 * the adapter.
228 * We don't seem to care what device node is used for the vPHB,
229 * but tools such as lsvpd walk up the device parents looking
230 * for a valid location code, so we might as well show devices
231 * attached to the adapter as being located on that adapter.
232 */
233 parent = afu->adapter->dev.parent;
234 vphb_dn = parent->of_node;
235
236 /* Alloc and setup PHB data structure */
237 phb = pcibios_alloc_controller(vphb_dn);
238 if (!phb)
239 return -ENODEV;
240
241 /* Setup parent in sysfs */
242 phb->parent = parent;
243
244 /* Setup the PHB using arch provided callback */
245 phb->ops = &cxl_pcie_pci_ops;
246 phb->cfg_addr = NULL;
247 phb->cfg_data = NULL;
248 phb->private_data = afu;
249 phb->controller_ops = cxl_pci_controller_ops;
250
251 /* Scan the bus */
252 pcibios_scan_phb(phb);
253 if (phb->bus == NULL)
254 return -ENXIO;
255
256 /* Set release hook on root bus */
257 pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
258 pcibios_free_controller_deferred,
259 (void *) phb);
260
261 /* Claim resources. This might need some rework as well depending
262 * whether we are doing probe-only or not, like assigning unassigned
263 * resources etc...
264 */
265 pcibios_claim_one_bus(phb->bus);
266
267 /* Add probed PCI devices to the device model */
268 pci_bus_add_devices(phb->bus);
269
270 afu->phb = phb;
271
272 return 0;
273}
274
/*
 * Tear down the virtual PHB created by cxl_pci_vphb_add().  Safe to call
 * when no vPHB exists (e.g. the AFU had no configuration records).  The
 * controller itself is freed later by the host-bridge release hook.
 */
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}
292
293bool cxl_pci_is_vphb_device(struct pci_dev *dev)
294{
295 struct pci_controller *phb;
296
297 phb = pci_bus_to_host(dev->bus);
298
299 return (phb->ops == &cxl_pcie_pci_ops);
300}
301
302struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
303{
304 struct pci_controller *phb;
305
306 phb = pci_bus_to_host(dev->bus);
307
308 return (struct cxl_afu *)phb->private_data;
309}
310EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
311
/* AFU configuration record number corresponding to @dev. */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);