// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

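/*
 * Register @cxld with the CXL core and tie its lifetime to @port so that it
 * is unregistered automatically when the port is destroyed.
 */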
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

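/*
 * Cache the decoder count, target count, and supported interleave address
 * bits advertised by the HDM Decoder Capability register.
 */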
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

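/*
 * Probe the component register block for an HDM Decoder capability and map
 * just that capability for ongoing use.
 */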
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return -ENXIO;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

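/**
 * cxl_dpa_debug - dump a device's device physical address (DPA) allocations
 * @file: seq_file to print into
 * @cxlds: device state hosting the DPA resource tree
 *
 * Walks the partition resources and their child allocations while holding
 * @cxl_dpa_rwsem for read.
 */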
void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

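/*
 * Reserve @len bytes of device physical address space at @base for @cxled,
 * along with any @skipped gap that precedes it, and derive the decoder mode
 * from the partition that the allocation lands in.
 */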
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
				resource_size_t base, resource_size_t len,
				resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

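/*
 * Read-side accessors for an endpoint decoder's DPA allocation: the size of
 * the current allocation and its starting address (-1 when nothing is
 * allocated).
 */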
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

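/*
 * Release a decoder's DPA allocation provided it is idle: not assigned to a
 * region, not enabled in hardware, and the most recently allocated decoder
 * (allocations are torn down in reverse order).
 */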
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

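/*
 * Set the partition type (ram or pmem) that a future DPA allocation for
 * @cxled should target, provided the decoder is not already enabled and the
 * device has capacity of that type.
 */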
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

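/*
 * Allocate @size bytes from the free space at the end of the partition
 * selected by @cxled->mode. Allocations from the pmem partition account for
 * a skip over any unallocated ram capacity that precedes them.
 */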
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}

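/*
 * Pack up to 8 downstream port ids into the 64-bit target list register
 * image, one byte per interleave way.
 */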
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account
 * for clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			/* drop the read lock before bailing to the error path */
			up_read(&cxl_dpa_rwsem);
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}

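/*
 * Initialize a decoder object from its HDM decoder register instance: read
 * back the committed HPA range, interleave settings, and target list or DPA
 * skip, and reserve the corresponding DPA for endpoint decoders.
 */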
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (is_endpoint_decoder(&cxld->dev))
		cxled = to_cxl_endpoint_decoder(&cxld->dev);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!cxled) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;
	return 0;
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i, committed;
	u64 dpa_base = 0;
	u32 ctrl;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
		if (rc) {
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder to port\n");
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);