Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Resource Director Technology(RDT)
4 * - Cache Allocation code.
5 *
6 * Copyright (C) 2016 Intel Corporation
7 *
8 * Authors:
9 * Fenghua Yu <fenghua.yu@intel.com>
10 * Tony Luck <tony.luck@intel.com>
11 * Vikas Shivappa <vikas.shivappa@intel.com>
12 *
13 * More information about RDT be found in the Intel (R) x86 Architecture
14 * Software Developer Manual June 2016, volume 3, section 17.17.
15 */
16
17#define pr_fmt(fmt) "resctrl: " fmt
18
19#include <linux/cpu.h>
20#include <linux/slab.h>
21#include <linux/err.h>
22#include <linux/cacheinfo.h>
23#include <linux/cpuhotplug.h>
24
25#include <asm/intel-family.h>
26#include <asm/resctrl.h>
27#include "internal.h"
28
29/*
30 * rdt_domain structures are kfree()d when their last CPU goes offline,
31 * and allocated when the first CPU in a new domain comes online.
32 * The rdt_resource's domain list is updated when this happens. Readers of
33 * the domain list must either take cpus_read_lock(), or rely on an RCU
34 * read-side critical section, to avoid observing concurrent modification.
35 * All writers take this mutex:
36 */
37static DEFINE_MUTEX(domain_list_lock);
38
39/*
40 * The cached resctrl_pqr_state is strictly per CPU and can never be
41 * updated from a remote CPU. Functions which modify the state
42 * are called with interrupts disabled and no preemption, which
43 * is sufficient for the protection.
44 */
45DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
46
47/*
48 * Used to store the max resource name width and max resource data width
49 * to display the schemata in a tabular format
50 */
51int max_name_width, max_data_width;
52
53/*
54 * Global boolean for rdt_alloc which is true if any
55 * resource allocation is enabled.
56 */
57bool rdt_alloc_capable;
58
59static void
60mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
61 struct rdt_resource *r);
62static void
63cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
64static void
65mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
66 struct rdt_resource *r);
67
68#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
69
70struct rdt_hw_resource rdt_resources_all[] = {
71 [RDT_RESOURCE_L3] =
72 {
73 .r_resctrl = {
74 .rid = RDT_RESOURCE_L3,
75 .name = "L3",
76 .cache_level = 3,
77 .domains = domain_init(RDT_RESOURCE_L3),
78 .parse_ctrlval = parse_cbm,
79 .format_str = "%d=%0*x",
80 .fflags = RFTYPE_RES_CACHE,
81 },
82 .msr_base = MSR_IA32_L3_CBM_BASE,
83 .msr_update = cat_wrmsr,
84 },
85 [RDT_RESOURCE_L2] =
86 {
87 .r_resctrl = {
88 .rid = RDT_RESOURCE_L2,
89 .name = "L2",
90 .cache_level = 2,
91 .domains = domain_init(RDT_RESOURCE_L2),
92 .parse_ctrlval = parse_cbm,
93 .format_str = "%d=%0*x",
94 .fflags = RFTYPE_RES_CACHE,
95 },
96 .msr_base = MSR_IA32_L2_CBM_BASE,
97 .msr_update = cat_wrmsr,
98 },
99 [RDT_RESOURCE_MBA] =
100 {
101 .r_resctrl = {
102 .rid = RDT_RESOURCE_MBA,
103 .name = "MB",
104 .cache_level = 3,
105 .domains = domain_init(RDT_RESOURCE_MBA),
106 .parse_ctrlval = parse_bw,
107 .format_str = "%d=%*u",
108 .fflags = RFTYPE_RES_MB,
109 },
110 },
111 [RDT_RESOURCE_SMBA] =
112 {
113 .r_resctrl = {
114 .rid = RDT_RESOURCE_SMBA,
115 .name = "SMBA",
116 .cache_level = 3,
117 .domains = domain_init(RDT_RESOURCE_SMBA),
118 .parse_ctrlval = parse_bw,
119 .format_str = "%d=%*u",
120 .fflags = RFTYPE_RES_MB,
121 },
122 },
123};
124
125/*
126 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
127 * as they do not have CPUID enumeration support for Cache allocation.
128 * The check for Vendor/Family/Model is not enough to guarantee that
129 * the MSRs won't #GP fault because only the following SKUs support
130 * CAT:
131 * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
132 * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
133 * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
134 * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
135 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
136 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
137 *
138 * Probe by trying to write the first of the L3 cache mask registers
139 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
140 * is always 20 on hsw server parts. The minimum cache bitmask length
141 * allowed for HSW server is always 2 bits. Hardcode all of them.
142 */
143static inline void cache_alloc_hsw_probe(void)
144{
145 struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
146 struct rdt_resource *r = &hw_res->r_resctrl;
147 u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;
148
149 if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
150 return;
151
152 rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
153
154 /* If all the bits were set in MSR, return success */
155 if (l3_cbm_0 != max_cbm)
156 return;
157
158 hw_res->num_closid = 4;
159 r->default_ctrl = max_cbm;
160 r->cache.cbm_len = 20;
161 r->cache.shareable_bits = 0xc0000;
162 r->cache.min_cbm_bits = 2;
163 r->cache.arch_has_sparse_bitmasks = false;
164 r->alloc_capable = true;
165
166 rdt_alloc_capable = true;
167}
168
169bool is_mba_sc(struct rdt_resource *r)
170{
171 if (!r)
172 return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
173
174 /*
175 * The software controller support is only applicable to MBA resource.
176 * Make sure to check for resource type.
177 */
178 if (r->rid != RDT_RESOURCE_MBA)
179 return false;
180
181 return r->membw.mba_sc;
182}
183
184/*
185 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
186 * exposed to user interface and the h/w understandable delay values.
187 *
188 * The non-linear delay values have the granularity of power of two
189 * and also the h/w does not guarantee a curve for configured delay
190 * values vs. actual b/w enforced.
191 * Hence we need a mapping that is pre calibrated so the user can
192 * express the memory b/w as a percentage value.
193 */
194static inline bool rdt_get_mb_table(struct rdt_resource *r)
195{
196 /*
197 * There are no Intel SKUs as of now to support non-linear delay.
198 */
199 pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
200 boot_cpu_data.x86, boot_cpu_data.x86_model);
201
202 return false;
203}
204
205static bool __get_mem_config_intel(struct rdt_resource *r)
206{
207 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
208 union cpuid_0x10_3_eax eax;
209 union cpuid_0x10_x_edx edx;
210 u32 ebx, ecx, max_delay;
211
212 cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
213 hw_res->num_closid = edx.split.cos_max + 1;
214 max_delay = eax.split.max_delay + 1;
215 r->default_ctrl = MAX_MBA_BW;
216 r->membw.arch_needs_linear = true;
217 if (ecx & MBA_IS_LINEAR) {
218 r->membw.delay_linear = true;
219 r->membw.min_bw = MAX_MBA_BW - max_delay;
220 r->membw.bw_gran = MAX_MBA_BW - max_delay;
221 } else {
222 if (!rdt_get_mb_table(r))
223 return false;
224 r->membw.arch_needs_linear = false;
225 }
226 r->data_width = 3;
227
228 if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
229 r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
230 else
231 r->membw.throttle_mode = THREAD_THROTTLE_MAX;
232 thread_throttle_mode_init();
233
234 r->alloc_capable = true;
235
236 return true;
237}
238
239static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
240{
241 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
242 u32 eax, ebx, ecx, edx, subleaf;
243
244 /*
245 * Query CPUID_Fn80000020_EDX_x01 for MBA and
246 * CPUID_Fn80000020_EDX_x02 for SMBA
247 */
248 subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;
249
250 cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx);
251 hw_res->num_closid = edx + 1;
252 r->default_ctrl = 1 << eax;
253
254 /* AMD does not use delay */
255 r->membw.delay_linear = false;
256 r->membw.arch_needs_linear = false;
257
258 /*
259 * AMD does not use memory delay throttle model to control
260 * the allocation like Intel does.
261 */
262 r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
263 r->membw.min_bw = 0;
264 r->membw.bw_gran = 1;
265 /* Max value is 2048, Data width should be 4 in decimal */
266 r->data_width = 4;
267
268 r->alloc_capable = true;
269
270 return true;
271}
272
273static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
274{
275 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
276 union cpuid_0x10_1_eax eax;
277 union cpuid_0x10_x_ecx ecx;
278 union cpuid_0x10_x_edx edx;
279 u32 ebx;
280
281 cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
282 hw_res->num_closid = edx.split.cos_max + 1;
283 r->cache.cbm_len = eax.split.cbm_len + 1;
284 r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
285 r->cache.shareable_bits = ebx & r->default_ctrl;
286 r->data_width = (r->cache.cbm_len + 3) / 4;
287 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
288 r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
289 r->alloc_capable = true;
290}
291
292static void rdt_get_cdp_config(int level)
293{
294 /*
295 * By default, CDP is disabled. CDP can be enabled by mount parameter
296 * "cdp" during resctrl file system mount time.
297 */
298 rdt_resources_all[level].cdp_enabled = false;
299 rdt_resources_all[level].r_resctrl.cdp_capable = true;
300}
301
302static void rdt_get_cdp_l3_config(void)
303{
304 rdt_get_cdp_config(RDT_RESOURCE_L3);
305}
306
307static void rdt_get_cdp_l2_config(void)
308{
309 rdt_get_cdp_config(RDT_RESOURCE_L2);
310}
311
312static void
313mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
314{
315 unsigned int i;
316 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
317 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
318
319 for (i = m->low; i < m->high; i++)
320 wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
321}
322
323/*
324 * Map the memory b/w percentage value to delay values
325 * that can be written to QOS_MSRs.
326 * There are currently no SKUs which support non linear delay values.
327 */
328static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
329{
330 if (r->membw.delay_linear)
331 return MAX_MBA_BW - bw;
332
333 pr_warn_once("Non Linear delay-bw map not supported but queried\n");
334 return r->default_ctrl;
335}
336
337static void
338mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
339 struct rdt_resource *r)
340{
341 unsigned int i;
342 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
343 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
344
345 /* Write the delay values for mba. */
346 for (i = m->low; i < m->high; i++)
347 wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
348}
349
350static void
351cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
352{
353 unsigned int i;
354 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
355 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
356
357 for (i = m->low; i < m->high; i++)
358 wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
359}
360
361struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
362{
363 struct rdt_domain *d;
364
365 list_for_each_entry(d, &r->domains, list) {
366 /* Find the domain that contains this CPU */
367 if (cpumask_test_cpu(cpu, &d->cpu_mask))
368 return d;
369 }
370
371 return NULL;
372}
373
374u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
375{
376 return resctrl_to_arch_res(r)->num_closid;
377}
378
379void rdt_ctrl_update(void *arg)
380{
381 struct msr_param *m = arg;
382 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
383 struct rdt_resource *r = m->res;
384 int cpu = smp_processor_id();
385 struct rdt_domain *d;
386
387 d = get_domain_from_cpu(cpu, r);
388 if (d) {
389 hw_res->msr_update(d, m, r);
390 return;
391 }
392 pr_warn_once("cpu %d not found in any domain for resource %s\n",
393 cpu, r->name);
394}
395
396/*
397 * rdt_find_domain - Find a domain in a resource that matches input resource id
398 *
399 * Search resource r's domain list to find the resource id. If the resource
400 * id is found in a domain, return the domain. Otherwise, if requested by
401 * caller, return the first domain whose id is bigger than the input id.
402 * The domain list is sorted by id in ascending order.
403 */
404struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
405 struct list_head **pos)
406{
407 struct rdt_domain *d;
408 struct list_head *l;
409
410 if (id < 0)
411 return ERR_PTR(-ENODEV);
412
413 list_for_each(l, &r->domains) {
414 d = list_entry(l, struct rdt_domain, list);
415 /* When id is found, return its domain. */
416 if (id == d->id)
417 return d;
418 /* Stop searching when finding id's position in sorted list. */
419 if (id < d->id)
420 break;
421 }
422
423 if (pos)
424 *pos = l;
425
426 return NULL;
427}
428
429static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
430{
431 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
432 int i;
433
434 /*
435 * Initialize the Control MSRs to having no control.
436 * For Cache Allocation: Set all bits in cbm
437 * For Memory Allocation: Set b/w requested to 100%
438 */
439 for (i = 0; i < hw_res->num_closid; i++, dc++)
440 *dc = r->default_ctrl;
441}
442
443static void domain_free(struct rdt_hw_domain *hw_dom)
444{
445 kfree(hw_dom->arch_mbm_total);
446 kfree(hw_dom->arch_mbm_local);
447 kfree(hw_dom->ctrl_val);
448 kfree(hw_dom);
449}
450
451static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
452{
453 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
454 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
455 struct msr_param m;
456 u32 *dc;
457
458 dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
459 GFP_KERNEL);
460 if (!dc)
461 return -ENOMEM;
462
463 hw_dom->ctrl_val = dc;
464 setup_default_ctrlval(r, dc);
465
466 m.low = 0;
467 m.high = hw_res->num_closid;
468 hw_res->msr_update(d, &m, r);
469 return 0;
470}
471
472/**
473 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
474 * @num_rmid: The size of the MBM counter array
475 * @hw_dom: The domain that owns the allocated arrays
476 */
477static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
478{
479 size_t tsize;
480
481 if (is_mbm_total_enabled()) {
482 tsize = sizeof(*hw_dom->arch_mbm_total);
483 hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
484 if (!hw_dom->arch_mbm_total)
485 return -ENOMEM;
486 }
487 if (is_mbm_local_enabled()) {
488 tsize = sizeof(*hw_dom->arch_mbm_local);
489 hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
490 if (!hw_dom->arch_mbm_local) {
491 kfree(hw_dom->arch_mbm_total);
492 hw_dom->arch_mbm_total = NULL;
493 return -ENOMEM;
494 }
495 }
496
497 return 0;
498}
499
500/*
501 * domain_add_cpu - Add a cpu to a resource's domain list.
502 *
503 * If an existing domain in the resource r's domain list matches the cpu's
504 * resource id, add the cpu in the domain.
505 *
506 * Otherwise, a new domain is allocated and inserted into the right position
507 * in the domain list sorted by id in ascending order.
508 *
509 * The order in the domain list is visible to users when we print entries
510 * in the schemata file and schemata input is validated to have the same order
511 * as this list.
512 */
513static void domain_add_cpu(int cpu, struct rdt_resource *r)
514{
515 int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
516 struct list_head *add_pos = NULL;
517 struct rdt_hw_domain *hw_dom;
518 struct rdt_domain *d;
519 int err;
520
521 lockdep_assert_held(&domain_list_lock);
522
523 d = rdt_find_domain(r, id, &add_pos);
524 if (IS_ERR(d)) {
525 pr_warn("Couldn't find cache id for CPU %d\n", cpu);
526 return;
527 }
528
529 if (d) {
530 cpumask_set_cpu(cpu, &d->cpu_mask);
531 if (r->cache.arch_has_per_cpu_cfg)
532 rdt_domain_reconfigure_cdp(r);
533 return;
534 }
535
536 hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
537 if (!hw_dom)
538 return;
539
540 d = &hw_dom->d_resctrl;
541 d->id = id;
542 cpumask_set_cpu(cpu, &d->cpu_mask);
543
544 rdt_domain_reconfigure_cdp(r);
545
546 if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
547 domain_free(hw_dom);
548 return;
549 }
550
551 if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
552 domain_free(hw_dom);
553 return;
554 }
555
556 list_add_tail_rcu(&d->list, add_pos);
557
558 err = resctrl_online_domain(r, d);
559 if (err) {
560 list_del_rcu(&d->list);
561 synchronize_rcu();
562 domain_free(hw_dom);
563 }
564}
565
566static void domain_remove_cpu(int cpu, struct rdt_resource *r)
567{
568 int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
569 struct rdt_hw_domain *hw_dom;
570 struct rdt_domain *d;
571
572 lockdep_assert_held(&domain_list_lock);
573
574 d = rdt_find_domain(r, id, NULL);
575 if (IS_ERR_OR_NULL(d)) {
576 pr_warn("Couldn't find cache id for CPU %d\n", cpu);
577 return;
578 }
579 hw_dom = resctrl_to_arch_dom(d);
580
581 cpumask_clear_cpu(cpu, &d->cpu_mask);
582 if (cpumask_empty(&d->cpu_mask)) {
583 resctrl_offline_domain(r, d);
584 list_del_rcu(&d->list);
585 synchronize_rcu();
586
587 /*
588 * rdt_domain "d" is going to be freed below, so clear
589 * its pointer from pseudo_lock_region struct.
590 */
591 if (d->plr)
592 d->plr->d = NULL;
593 domain_free(hw_dom);
594
595 return;
596 }
597}
598
599static void clear_closid_rmid(int cpu)
600{
601 struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
602
603 state->default_closid = RESCTRL_RESERVED_CLOSID;
604 state->default_rmid = RESCTRL_RESERVED_RMID;
605 state->cur_closid = RESCTRL_RESERVED_CLOSID;
606 state->cur_rmid = RESCTRL_RESERVED_RMID;
607 wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID,
608 RESCTRL_RESERVED_CLOSID);
609}
610
611static int resctrl_arch_online_cpu(unsigned int cpu)
612{
613 struct rdt_resource *r;
614
615 mutex_lock(&domain_list_lock);
616 for_each_capable_rdt_resource(r)
617 domain_add_cpu(cpu, r);
618 mutex_unlock(&domain_list_lock);
619
620 clear_closid_rmid(cpu);
621 resctrl_online_cpu(cpu);
622
623 return 0;
624}
625
626static int resctrl_arch_offline_cpu(unsigned int cpu)
627{
628 struct rdt_resource *r;
629
630 resctrl_offline_cpu(cpu);
631
632 mutex_lock(&domain_list_lock);
633 for_each_capable_rdt_resource(r)
634 domain_remove_cpu(cpu, r);
635 mutex_unlock(&domain_list_lock);
636
637 clear_closid_rmid(cpu);
638
639 return 0;
640}
641
642/*
643 * Choose a width for the resource name and resource data based on the
644 * resource that has widest name and cbm.
645 */
646static __init void rdt_init_padding(void)
647{
648 struct rdt_resource *r;
649
650 for_each_alloc_capable_rdt_resource(r) {
651 if (r->data_width > max_data_width)
652 max_data_width = r->data_width;
653 }
654}
655
656enum {
657 RDT_FLAG_CMT,
658 RDT_FLAG_MBM_TOTAL,
659 RDT_FLAG_MBM_LOCAL,
660 RDT_FLAG_L3_CAT,
661 RDT_FLAG_L3_CDP,
662 RDT_FLAG_L2_CAT,
663 RDT_FLAG_L2_CDP,
664 RDT_FLAG_MBA,
665 RDT_FLAG_SMBA,
666 RDT_FLAG_BMEC,
667};
668
669#define RDT_OPT(idx, n, f) \
670[idx] = { \
671 .name = n, \
672 .flag = f \
673}
674
675struct rdt_options {
676 char *name;
677 int flag;
678 bool force_off, force_on;
679};
680
681static struct rdt_options rdt_options[] __initdata = {
682 RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
683 RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
684 RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
685 RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
686 RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
687 RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
688 RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
689 RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
690 RDT_OPT(RDT_FLAG_SMBA, "smba", X86_FEATURE_SMBA),
691 RDT_OPT(RDT_FLAG_BMEC, "bmec", X86_FEATURE_BMEC),
692};
693#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
694
695static int __init set_rdt_options(char *str)
696{
697 struct rdt_options *o;
698 bool force_off;
699 char *tok;
700
701 if (*str == '=')
702 str++;
703 while ((tok = strsep(&str, ",")) != NULL) {
704 force_off = *tok == '!';
705 if (force_off)
706 tok++;
707 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
708 if (strcmp(tok, o->name) == 0) {
709 if (force_off)
710 o->force_off = true;
711 else
712 o->force_on = true;
713 break;
714 }
715 }
716 }
717 return 1;
718}
719__setup("rdt", set_rdt_options);
720
721bool __init rdt_cpu_has(int flag)
722{
723 bool ret = boot_cpu_has(flag);
724 struct rdt_options *o;
725
726 if (!ret)
727 return ret;
728
729 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
730 if (flag == o->flag) {
731 if (o->force_off)
732 ret = false;
733 if (o->force_on)
734 ret = true;
735 break;
736 }
737 }
738 return ret;
739}
740
741static __init bool get_mem_config(void)
742{
743 struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
744
745 if (!rdt_cpu_has(X86_FEATURE_MBA))
746 return false;
747
748 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
749 return __get_mem_config_intel(&hw_res->r_resctrl);
750 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
751 return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
752
753 return false;
754}
755
756static __init bool get_slow_mem_config(void)
757{
758 struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA];
759
760 if (!rdt_cpu_has(X86_FEATURE_SMBA))
761 return false;
762
763 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
764 return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
765
766 return false;
767}
768
769static __init bool get_rdt_alloc_resources(void)
770{
771 struct rdt_resource *r;
772 bool ret = false;
773
774 if (rdt_alloc_capable)
775 return true;
776
777 if (!boot_cpu_has(X86_FEATURE_RDT_A))
778 return false;
779
780 if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
781 r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
782 rdt_get_cache_alloc_cfg(1, r);
783 if (rdt_cpu_has(X86_FEATURE_CDP_L3))
784 rdt_get_cdp_l3_config();
785 ret = true;
786 }
787 if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
788 /* CPUID 0x10.2 fields are same format at 0x10.1 */
789 r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
790 rdt_get_cache_alloc_cfg(2, r);
791 if (rdt_cpu_has(X86_FEATURE_CDP_L2))
792 rdt_get_cdp_l2_config();
793 ret = true;
794 }
795
796 if (get_mem_config())
797 ret = true;
798
799 if (get_slow_mem_config())
800 ret = true;
801
802 return ret;
803}
804
805static __init bool get_rdt_mon_resources(void)
806{
807 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
808
809 if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
810 rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
811 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
812 rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
813 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
814 rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
815
816 if (!rdt_mon_features)
817 return false;
818
819 return !rdt_get_mon_l3_config(r);
820}
821
822static __init void __check_quirks_intel(void)
823{
824 switch (boot_cpu_data.x86_model) {
825 case INTEL_FAM6_HASWELL_X:
826 if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
827 cache_alloc_hsw_probe();
828 break;
829 case INTEL_FAM6_SKYLAKE_X:
830 if (boot_cpu_data.x86_stepping <= 4)
831 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
832 else
833 set_rdt_options("!l3cat");
834 fallthrough;
835 case INTEL_FAM6_BROADWELL_X:
836 intel_rdt_mbm_apply_quirk();
837 break;
838 }
839}
840
841static __init void check_quirks(void)
842{
843 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
844 __check_quirks_intel();
845}
846
847static __init bool get_rdt_resources(void)
848{
849 rdt_alloc_capable = get_rdt_alloc_resources();
850 rdt_mon_capable = get_rdt_mon_resources();
851
852 return (rdt_mon_capable || rdt_alloc_capable);
853}
854
855static __init void rdt_init_res_defs_intel(void)
856{
857 struct rdt_hw_resource *hw_res;
858 struct rdt_resource *r;
859
860 for_each_rdt_resource(r) {
861 hw_res = resctrl_to_arch_res(r);
862
863 if (r->rid == RDT_RESOURCE_L3 ||
864 r->rid == RDT_RESOURCE_L2) {
865 r->cache.arch_has_per_cpu_cfg = false;
866 r->cache.min_cbm_bits = 1;
867 } else if (r->rid == RDT_RESOURCE_MBA) {
868 hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
869 hw_res->msr_update = mba_wrmsr_intel;
870 }
871 }
872}
873
874static __init void rdt_init_res_defs_amd(void)
875{
876 struct rdt_hw_resource *hw_res;
877 struct rdt_resource *r;
878
879 for_each_rdt_resource(r) {
880 hw_res = resctrl_to_arch_res(r);
881
882 if (r->rid == RDT_RESOURCE_L3 ||
883 r->rid == RDT_RESOURCE_L2) {
884 r->cache.arch_has_sparse_bitmasks = true;
885 r->cache.arch_has_per_cpu_cfg = true;
886 r->cache.min_cbm_bits = 0;
887 } else if (r->rid == RDT_RESOURCE_MBA) {
888 hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
889 hw_res->msr_update = mba_wrmsr_amd;
890 } else if (r->rid == RDT_RESOURCE_SMBA) {
891 hw_res->msr_base = MSR_IA32_SMBA_BW_BASE;
892 hw_res->msr_update = mba_wrmsr_amd;
893 }
894 }
895}
896
897static __init void rdt_init_res_defs(void)
898{
899 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
900 rdt_init_res_defs_intel();
901 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
902 rdt_init_res_defs_amd();
903}
904
905static enum cpuhp_state rdt_online;
906
907/* Runs once on the BSP during boot. */
908void resctrl_cpu_detect(struct cpuinfo_x86 *c)
909{
910 if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
911 c->x86_cache_max_rmid = -1;
912 c->x86_cache_occ_scale = -1;
913 c->x86_cache_mbm_width_offset = -1;
914 return;
915 }
916
917 /* will be overridden if occupancy monitoring exists */
918 c->x86_cache_max_rmid = cpuid_ebx(0xf);
919
920 if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
921 cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
922 cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
923 u32 eax, ebx, ecx, edx;
924
925 /* QoS sub-leaf, EAX=0Fh, ECX=1 */
926 cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
927
928 c->x86_cache_max_rmid = ecx;
929 c->x86_cache_occ_scale = ebx;
930 c->x86_cache_mbm_width_offset = eax & 0xff;
931
932 if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
933 c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
934 }
935}
936
937static int __init resctrl_late_init(void)
938{
939 struct rdt_resource *r;
940 int state, ret;
941
942 /*
943 * Initialize functions(or definitions) that are different
944 * between vendors here.
945 */
946 rdt_init_res_defs();
947
948 check_quirks();
949
950 if (!get_rdt_resources())
951 return -ENODEV;
952
953 rdt_init_padding();
954
955 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
956 "x86/resctrl/cat:online:",
957 resctrl_arch_online_cpu,
958 resctrl_arch_offline_cpu);
959 if (state < 0)
960 return state;
961
962 ret = rdtgroup_init();
963 if (ret) {
964 cpuhp_remove_state(state);
965 return ret;
966 }
967 rdt_online = state;
968
969 for_each_alloc_capable_rdt_resource(r)
970 pr_info("%s allocation detected\n", r->name);
971
972 for_each_mon_capable_rdt_resource(r)
973 pr_info("%s monitoring detected\n", r->name);
974
975 return 0;
976}
977
978late_initcall(resctrl_late_init);
979
980static void __exit resctrl_exit(void)
981{
982 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
983
984 cpuhp_remove_state(rdt_online);
985
986 rdtgroup_exit();
987
988 if (r->mon_capable)
989 rdt_put_mon_l3_config();
990}
991
992__exitcall(resctrl_exit);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Resource Director Technology(RDT)
4 * - Cache Allocation code.
5 *
6 * Copyright (C) 2016 Intel Corporation
7 *
8 * Authors:
9 * Fenghua Yu <fenghua.yu@intel.com>
10 * Tony Luck <tony.luck@intel.com>
11 * Vikas Shivappa <vikas.shivappa@intel.com>
12 *
13 * More information about RDT be found in the Intel (R) x86 Architecture
14 * Software Developer Manual June 2016, volume 3, section 17.17.
15 */
16
17#define pr_fmt(fmt) "resctrl: " fmt
18
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/cacheinfo.h>
22#include <linux/cpuhotplug.h>
23
24#include <asm/intel-family.h>
25#include <asm/resctrl_sched.h>
26#include "internal.h"
27
28/* Mutex to protect rdtgroup access. */
29DEFINE_MUTEX(rdtgroup_mutex);
30
31/*
32 * The cached resctrl_pqr_state is strictly per CPU and can never be
33 * updated from a remote CPU. Functions which modify the state
34 * are called with interrupts disabled and no preemption, which
35 * is sufficient for the protection.
36 */
37DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
38
39/*
40 * Used to store the max resource name width and max resource data width
41 * to display the schemata in a tabular format
42 */
43int max_name_width, max_data_width;
44
45/*
46 * Global boolean for rdt_alloc which is true if any
47 * resource allocation is enabled.
48 */
49bool rdt_alloc_capable;
50
51static void
52mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
53 struct rdt_resource *r);
54static void
55cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
56static void
57mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
58 struct rdt_resource *r);
59
60#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
61
62struct rdt_resource rdt_resources_all[] = {
63 [RDT_RESOURCE_L3] =
64 {
65 .rid = RDT_RESOURCE_L3,
66 .name = "L3",
67 .domains = domain_init(RDT_RESOURCE_L3),
68 .msr_base = MSR_IA32_L3_CBM_BASE,
69 .msr_update = cat_wrmsr,
70 .cache_level = 3,
71 .cache = {
72 .min_cbm_bits = 1,
73 .cbm_idx_mult = 1,
74 .cbm_idx_offset = 0,
75 },
76 .parse_ctrlval = parse_cbm,
77 .format_str = "%d=%0*x",
78 .fflags = RFTYPE_RES_CACHE,
79 },
80 [RDT_RESOURCE_L3DATA] =
81 {
82 .rid = RDT_RESOURCE_L3DATA,
83 .name = "L3DATA",
84 .domains = domain_init(RDT_RESOURCE_L3DATA),
85 .msr_base = MSR_IA32_L3_CBM_BASE,
86 .msr_update = cat_wrmsr,
87 .cache_level = 3,
88 .cache = {
89 .min_cbm_bits = 1,
90 .cbm_idx_mult = 2,
91 .cbm_idx_offset = 0,
92 },
93 .parse_ctrlval = parse_cbm,
94 .format_str = "%d=%0*x",
95 .fflags = RFTYPE_RES_CACHE,
96 },
97 [RDT_RESOURCE_L3CODE] =
98 {
99 .rid = RDT_RESOURCE_L3CODE,
100 .name = "L3CODE",
101 .domains = domain_init(RDT_RESOURCE_L3CODE),
102 .msr_base = MSR_IA32_L3_CBM_BASE,
103 .msr_update = cat_wrmsr,
104 .cache_level = 3,
105 .cache = {
106 .min_cbm_bits = 1,
107 .cbm_idx_mult = 2,
108 .cbm_idx_offset = 1,
109 },
110 .parse_ctrlval = parse_cbm,
111 .format_str = "%d=%0*x",
112 .fflags = RFTYPE_RES_CACHE,
113 },
114 [RDT_RESOURCE_L2] =
115 {
116 .rid = RDT_RESOURCE_L2,
117 .name = "L2",
118 .domains = domain_init(RDT_RESOURCE_L2),
119 .msr_base = MSR_IA32_L2_CBM_BASE,
120 .msr_update = cat_wrmsr,
121 .cache_level = 2,
122 .cache = {
123 .min_cbm_bits = 1,
124 .cbm_idx_mult = 1,
125 .cbm_idx_offset = 0,
126 },
127 .parse_ctrlval = parse_cbm,
128 .format_str = "%d=%0*x",
129 .fflags = RFTYPE_RES_CACHE,
130 },
131 [RDT_RESOURCE_L2DATA] =
132 {
133 .rid = RDT_RESOURCE_L2DATA,
134 .name = "L2DATA",
135 .domains = domain_init(RDT_RESOURCE_L2DATA),
136 .msr_base = MSR_IA32_L2_CBM_BASE,
137 .msr_update = cat_wrmsr,
138 .cache_level = 2,
139 .cache = {
140 .min_cbm_bits = 1,
141 .cbm_idx_mult = 2,
142 .cbm_idx_offset = 0,
143 },
144 .parse_ctrlval = parse_cbm,
145 .format_str = "%d=%0*x",
146 .fflags = RFTYPE_RES_CACHE,
147 },
148 [RDT_RESOURCE_L2CODE] =
149 {
150 .rid = RDT_RESOURCE_L2CODE,
151 .name = "L2CODE",
152 .domains = domain_init(RDT_RESOURCE_L2CODE),
153 .msr_base = MSR_IA32_L2_CBM_BASE,
154 .msr_update = cat_wrmsr,
155 .cache_level = 2,
156 .cache = {
157 .min_cbm_bits = 1,
158 .cbm_idx_mult = 2,
159 .cbm_idx_offset = 1,
160 },
161 .parse_ctrlval = parse_cbm,
162 .format_str = "%d=%0*x",
163 .fflags = RFTYPE_RES_CACHE,
164 },
165 [RDT_RESOURCE_MBA] =
166 {
167 .rid = RDT_RESOURCE_MBA,
168 .name = "MB",
169 .domains = domain_init(RDT_RESOURCE_MBA),
170 .cache_level = 3,
171 .format_str = "%d=%*u",
172 .fflags = RFTYPE_RES_MB,
173 },
174};
175
176static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
177{
178 return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
179}
180
181/*
182 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
183 * as they do not have CPUID enumeration support for Cache allocation.
184 * The check for Vendor/Family/Model is not enough to guarantee that
185 * the MSRs won't #GP fault because only the following SKUs support
186 * CAT:
187 * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
188 * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
189 * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
190 * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
191 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
192 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
193 *
194 * Probe by trying to write the first of the L3 cach mask registers
195 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
196 * is always 20 on hsw server parts. The minimum cache bitmask length
197 * allowed for HSW server is always 2 bits. Hardcode all of them.
198 */
199static inline void cache_alloc_hsw_probe(void)
200{
201 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
202 u32 l, h, max_cbm = BIT_MASK(20) - 1;
203
204 if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
205 return;
206
207 rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
208
209 /* If all the bits were set in MSR, return success */
210 if (l != max_cbm)
211 return;
212
213 r->num_closid = 4;
214 r->default_ctrl = max_cbm;
215 r->cache.cbm_len = 20;
216 r->cache.shareable_bits = 0xc0000;
217 r->cache.min_cbm_bits = 2;
218 r->alloc_capable = true;
219 r->alloc_enabled = true;
220
221 rdt_alloc_capable = true;
222}
223
224bool is_mba_sc(struct rdt_resource *r)
225{
226 if (!r)
227 return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
228
229 return r->membw.mba_sc;
230}
231
232/*
233 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
234 * exposed to user interface and the h/w understandable delay values.
235 *
236 * The non-linear delay values have the granularity of power of two
237 * and also the h/w does not guarantee a curve for configured delay
238 * values vs. actual b/w enforced.
239 * Hence we need a mapping that is pre calibrated so the user can
240 * express the memory b/w as a percentage value.
241 */
242static inline bool rdt_get_mb_table(struct rdt_resource *r)
243{
244 /*
245 * There are no Intel SKUs as of now to support non-linear delay.
246 */
247 pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
248 boot_cpu_data.x86, boot_cpu_data.x86_model);
249
250 return false;
251}
252
253static bool __get_mem_config_intel(struct rdt_resource *r)
254{
255 union cpuid_0x10_3_eax eax;
256 union cpuid_0x10_x_edx edx;
257 u32 ebx, ecx;
258
259 cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
260 r->num_closid = edx.split.cos_max + 1;
261 r->membw.max_delay = eax.split.max_delay + 1;
262 r->default_ctrl = MAX_MBA_BW;
263 if (ecx & MBA_IS_LINEAR) {
264 r->membw.delay_linear = true;
265 r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
266 r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
267 } else {
268 if (!rdt_get_mb_table(r))
269 return false;
270 }
271 r->data_width = 3;
272
273 r->alloc_capable = true;
274 r->alloc_enabled = true;
275
276 return true;
277}
278
279static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
280{
281 union cpuid_0x10_3_eax eax;
282 union cpuid_0x10_x_edx edx;
283 u32 ebx, ecx;
284
285 cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
286 r->num_closid = edx.split.cos_max + 1;
287 r->default_ctrl = MAX_MBA_BW_AMD;
288
289 /* AMD does not use delay */
290 r->membw.delay_linear = false;
291
292 r->membw.min_bw = 0;
293 r->membw.bw_gran = 1;
294 /* Max value is 2048, Data width should be 4 in decimal */
295 r->data_width = 4;
296
297 r->alloc_capable = true;
298 r->alloc_enabled = true;
299
300 return true;
301}
302
303static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
304{
305 union cpuid_0x10_1_eax eax;
306 union cpuid_0x10_x_edx edx;
307 u32 ebx, ecx;
308
309 cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
310 r->num_closid = edx.split.cos_max + 1;
311 r->cache.cbm_len = eax.split.cbm_len + 1;
312 r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
313 r->cache.shareable_bits = ebx & r->default_ctrl;
314 r->data_width = (r->cache.cbm_len + 3) / 4;
315 r->alloc_capable = true;
316 r->alloc_enabled = true;
317}
318
319static void rdt_get_cdp_config(int level, int type)
320{
321 struct rdt_resource *r_l = &rdt_resources_all[level];
322 struct rdt_resource *r = &rdt_resources_all[type];
323
324 r->num_closid = r_l->num_closid / 2;
325 r->cache.cbm_len = r_l->cache.cbm_len;
326 r->default_ctrl = r_l->default_ctrl;
327 r->cache.shareable_bits = r_l->cache.shareable_bits;
328 r->data_width = (r->cache.cbm_len + 3) / 4;
329 r->alloc_capable = true;
330 /*
331 * By default, CDP is disabled. CDP can be enabled by mount parameter
332 * "cdp" during resctrl file system mount time.
333 */
334 r->alloc_enabled = false;
335}
336
337static void rdt_get_cdp_l3_config(void)
338{
339 rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
340 rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
341}
342
343static void rdt_get_cdp_l2_config(void)
344{
345 rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
346 rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
347}
348
349static int get_cache_id(int cpu, int level)
350{
351 struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
352 int i;
353
354 for (i = 0; i < ci->num_leaves; i++) {
355 if (ci->info_list[i].level == level)
356 return ci->info_list[i].id;
357 }
358
359 return -1;
360}
361
362static void
363mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
364{
365 unsigned int i;
366
367 for (i = m->low; i < m->high; i++)
368 wrmsrl(r->msr_base + i, d->ctrl_val[i]);
369}
370
371/*
372 * Map the memory b/w percentage value to delay values
373 * that can be written to QOS_MSRs.
374 * There are currently no SKUs which support non linear delay values.
375 */
376u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
377{
378 if (r->membw.delay_linear)
379 return MAX_MBA_BW - bw;
380
381 pr_warn_once("Non Linear delay-bw map not supported but queried\n");
382 return r->default_ctrl;
383}
384
385static void
386mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
387 struct rdt_resource *r)
388{
389 unsigned int i;
390
391 /* Write the delay values for mba. */
392 for (i = m->low; i < m->high; i++)
393 wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
394}
395
396static void
397cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
398{
399 unsigned int i;
400
401 for (i = m->low; i < m->high; i++)
402 wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
403}
404
405struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
406{
407 struct rdt_domain *d;
408
409 list_for_each_entry(d, &r->domains, list) {
410 /* Find the domain that contains this CPU */
411 if (cpumask_test_cpu(cpu, &d->cpu_mask))
412 return d;
413 }
414
415 return NULL;
416}
417
418void rdt_ctrl_update(void *arg)
419{
420 struct msr_param *m = arg;
421 struct rdt_resource *r = m->res;
422 int cpu = smp_processor_id();
423 struct rdt_domain *d;
424
425 d = get_domain_from_cpu(cpu, r);
426 if (d) {
427 r->msr_update(d, m, r);
428 return;
429 }
430 pr_warn_once("cpu %d not found in any domain for resource %s\n",
431 cpu, r->name);
432}
433
434/*
435 * rdt_find_domain - Find a domain in a resource that matches input resource id
436 *
437 * Search resource r's domain list to find the resource id. If the resource
438 * id is found in a domain, return the domain. Otherwise, if requested by
439 * caller, return the first domain whose id is bigger than the input id.
440 * The domain list is sorted by id in ascending order.
441 */
442struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
443 struct list_head **pos)
444{
445 struct rdt_domain *d;
446 struct list_head *l;
447
448 if (id < 0)
449 return ERR_PTR(-ENODEV);
450
451 list_for_each(l, &r->domains) {
452 d = list_entry(l, struct rdt_domain, list);
453 /* When id is found, return its domain. */
454 if (id == d->id)
455 return d;
456 /* Stop searching when finding id's position in sorted list. */
457 if (id < d->id)
458 break;
459 }
460
461 if (pos)
462 *pos = l;
463
464 return NULL;
465}
466
467void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
468{
469 int i;
470
471 /*
472 * Initialize the Control MSRs to having no control.
473 * For Cache Allocation: Set all bits in cbm
474 * For Memory Allocation: Set b/w requested to 100%
475 * and the bandwidth in MBps to U32_MAX
476 */
477 for (i = 0; i < r->num_closid; i++, dc++, dm++) {
478 *dc = r->default_ctrl;
479 *dm = MBA_MAX_MBPS;
480 }
481}
482
483static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
484{
485 struct msr_param m;
486 u32 *dc, *dm;
487
488 dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
489 if (!dc)
490 return -ENOMEM;
491
492 dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
493 if (!dm) {
494 kfree(dc);
495 return -ENOMEM;
496 }
497
498 d->ctrl_val = dc;
499 d->mbps_val = dm;
500 setup_default_ctrlval(r, dc, dm);
501
502 m.low = 0;
503 m.high = r->num_closid;
504 r->msr_update(d, &m, r);
505 return 0;
506}
507
508static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
509{
510 size_t tsize;
511
512 if (is_llc_occupancy_enabled()) {
513 d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
514 if (!d->rmid_busy_llc)
515 return -ENOMEM;
516 INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
517 }
518 if (is_mbm_total_enabled()) {
519 tsize = sizeof(*d->mbm_total);
520 d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
521 if (!d->mbm_total) {
522 bitmap_free(d->rmid_busy_llc);
523 return -ENOMEM;
524 }
525 }
526 if (is_mbm_local_enabled()) {
527 tsize = sizeof(*d->mbm_local);
528 d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
529 if (!d->mbm_local) {
530 bitmap_free(d->rmid_busy_llc);
531 kfree(d->mbm_total);
532 return -ENOMEM;
533 }
534 }
535
536 if (is_mbm_enabled()) {
537 INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
538 mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
539 }
540
541 return 0;
542}
543
544/*
545 * domain_add_cpu - Add a cpu to a resource's domain list.
546 *
547 * If an existing domain in the resource r's domain list matches the cpu's
548 * resource id, add the cpu in the domain.
549 *
550 * Otherwise, a new domain is allocated and inserted into the right position
551 * in the domain list sorted by id in ascending order.
552 *
553 * The order in the domain list is visible to users when we print entries
554 * in the schemata file and schemata input is validated to have the same order
555 * as this list.
556 */
557static void domain_add_cpu(int cpu, struct rdt_resource *r)
558{
559 int id = get_cache_id(cpu, r->cache_level);
560 struct list_head *add_pos = NULL;
561 struct rdt_domain *d;
562
563 d = rdt_find_domain(r, id, &add_pos);
564 if (IS_ERR(d)) {
565 pr_warn("Could't find cache id for cpu %d\n", cpu);
566 return;
567 }
568
569 if (d) {
570 cpumask_set_cpu(cpu, &d->cpu_mask);
571 return;
572 }
573
574 d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
575 if (!d)
576 return;
577
578 d->id = id;
579 cpumask_set_cpu(cpu, &d->cpu_mask);
580
581 if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
582 kfree(d);
583 return;
584 }
585
586 if (r->mon_capable && domain_setup_mon_state(r, d)) {
587 kfree(d);
588 return;
589 }
590
591 list_add_tail(&d->list, add_pos);
592
593 /*
594 * If resctrl is mounted, add
595 * per domain monitor data directories.
596 */
597 if (static_branch_unlikely(&rdt_mon_enable_key))
598 mkdir_mondata_subdir_allrdtgrp(r, d);
599}
600
601static void domain_remove_cpu(int cpu, struct rdt_resource *r)
602{
603 int id = get_cache_id(cpu, r->cache_level);
604 struct rdt_domain *d;
605
606 d = rdt_find_domain(r, id, NULL);
607 if (IS_ERR_OR_NULL(d)) {
608 pr_warn("Could't find cache id for cpu %d\n", cpu);
609 return;
610 }
611
612 cpumask_clear_cpu(cpu, &d->cpu_mask);
613 if (cpumask_empty(&d->cpu_mask)) {
614 /*
615 * If resctrl is mounted, remove all the
616 * per domain monitor data directories.
617 */
618 if (static_branch_unlikely(&rdt_mon_enable_key))
619 rmdir_mondata_subdir_allrdtgrp(r, d->id);
620 list_del(&d->list);
621 if (is_mbm_enabled())
622 cancel_delayed_work(&d->mbm_over);
623 if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
624 /*
625 * When a package is going down, forcefully
626 * decrement rmid->ebusy. There is no way to know
627 * that the L3 was flushed and hence may lead to
628 * incorrect counts in rare scenarios, but leaving
629 * the RMID as busy creates RMID leaks if the
630 * package never comes back.
631 */
632 __check_limbo(d, true);
633 cancel_delayed_work(&d->cqm_limbo);
634 }
635
636 /*
637 * rdt_domain "d" is going to be freed below, so clear
638 * its pointer from pseudo_lock_region struct.
639 */
640 if (d->plr)
641 d->plr->d = NULL;
642
643 kfree(d->ctrl_val);
644 kfree(d->mbps_val);
645 bitmap_free(d->rmid_busy_llc);
646 kfree(d->mbm_total);
647 kfree(d->mbm_local);
648 kfree(d);
649 return;
650 }
651
652 if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
653 if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
654 cancel_delayed_work(&d->mbm_over);
655 mbm_setup_overflow_handler(d, 0);
656 }
657 if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
658 has_busy_rmid(r, d)) {
659 cancel_delayed_work(&d->cqm_limbo);
660 cqm_setup_limbo_handler(d, 0);
661 }
662 }
663}
664
665static void clear_closid_rmid(int cpu)
666{
667 struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
668
669 state->default_closid = 0;
670 state->default_rmid = 0;
671 state->cur_closid = 0;
672 state->cur_rmid = 0;
673 wrmsr(IA32_PQR_ASSOC, 0, 0);
674}
675
676static int resctrl_online_cpu(unsigned int cpu)
677{
678 struct rdt_resource *r;
679
680 mutex_lock(&rdtgroup_mutex);
681 for_each_capable_rdt_resource(r)
682 domain_add_cpu(cpu, r);
683 /* The cpu is set in default rdtgroup after online. */
684 cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
685 clear_closid_rmid(cpu);
686 mutex_unlock(&rdtgroup_mutex);
687
688 return 0;
689}
690
691static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
692{
693 struct rdtgroup *cr;
694
695 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
696 if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
697 break;
698 }
699 }
700}
701
702static int resctrl_offline_cpu(unsigned int cpu)
703{
704 struct rdtgroup *rdtgrp;
705 struct rdt_resource *r;
706
707 mutex_lock(&rdtgroup_mutex);
708 for_each_capable_rdt_resource(r)
709 domain_remove_cpu(cpu, r);
710 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
711 if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
712 clear_childcpus(rdtgrp, cpu);
713 break;
714 }
715 }
716 clear_closid_rmid(cpu);
717 mutex_unlock(&rdtgroup_mutex);
718
719 return 0;
720}
721
722/*
723 * Choose a width for the resource name and resource data based on the
724 * resource that has widest name and cbm.
725 */
726static __init void rdt_init_padding(void)
727{
728 struct rdt_resource *r;
729 int cl;
730
731 for_each_alloc_capable_rdt_resource(r) {
732 cl = strlen(r->name);
733 if (cl > max_name_width)
734 max_name_width = cl;
735
736 if (r->data_width > max_data_width)
737 max_data_width = r->data_width;
738 }
739}
740
741enum {
742 RDT_FLAG_CMT,
743 RDT_FLAG_MBM_TOTAL,
744 RDT_FLAG_MBM_LOCAL,
745 RDT_FLAG_L3_CAT,
746 RDT_FLAG_L3_CDP,
747 RDT_FLAG_L2_CAT,
748 RDT_FLAG_L2_CDP,
749 RDT_FLAG_MBA,
750};
751
752#define RDT_OPT(idx, n, f) \
753[idx] = { \
754 .name = n, \
755 .flag = f \
756}
757
758struct rdt_options {
759 char *name;
760 int flag;
761 bool force_off, force_on;
762};
763
764static struct rdt_options rdt_options[] __initdata = {
765 RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
766 RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
767 RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
768 RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
769 RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
770 RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
771 RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
772 RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
773};
774#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
775
776static int __init set_rdt_options(char *str)
777{
778 struct rdt_options *o;
779 bool force_off;
780 char *tok;
781
782 if (*str == '=')
783 str++;
784 while ((tok = strsep(&str, ",")) != NULL) {
785 force_off = *tok == '!';
786 if (force_off)
787 tok++;
788 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
789 if (strcmp(tok, o->name) == 0) {
790 if (force_off)
791 o->force_off = true;
792 else
793 o->force_on = true;
794 break;
795 }
796 }
797 }
798 return 1;
799}
800__setup("rdt", set_rdt_options);
801
802static bool __init rdt_cpu_has(int flag)
803{
804 bool ret = boot_cpu_has(flag);
805 struct rdt_options *o;
806
807 if (!ret)
808 return ret;
809
810 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
811 if (flag == o->flag) {
812 if (o->force_off)
813 ret = false;
814 if (o->force_on)
815 ret = true;
816 break;
817 }
818 }
819 return ret;
820}
821
822static __init bool get_mem_config(void)
823{
824 if (!rdt_cpu_has(X86_FEATURE_MBA))
825 return false;
826
827 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
828 return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
829 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
830 return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
831
832 return false;
833}
834
835static __init bool get_rdt_alloc_resources(void)
836{
837 bool ret = false;
838
839 if (rdt_alloc_capable)
840 return true;
841
842 if (!boot_cpu_has(X86_FEATURE_RDT_A))
843 return false;
844
845 if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
846 rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
847 if (rdt_cpu_has(X86_FEATURE_CDP_L3))
848 rdt_get_cdp_l3_config();
849 ret = true;
850 }
851 if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
852 /* CPUID 0x10.2 fields are same format at 0x10.1 */
853 rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
854 if (rdt_cpu_has(X86_FEATURE_CDP_L2))
855 rdt_get_cdp_l2_config();
856 ret = true;
857 }
858
859 if (get_mem_config())
860 ret = true;
861
862 return ret;
863}
864
865static __init bool get_rdt_mon_resources(void)
866{
867 if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
868 rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
869 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
870 rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
871 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
872 rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
873
874 if (!rdt_mon_features)
875 return false;
876
877 return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
878}
879
880static __init void __check_quirks_intel(void)
881{
882 switch (boot_cpu_data.x86_model) {
883 case INTEL_FAM6_HASWELL_X:
884 if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
885 cache_alloc_hsw_probe();
886 break;
887 case INTEL_FAM6_SKYLAKE_X:
888 if (boot_cpu_data.x86_stepping <= 4)
889 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
890 else
891 set_rdt_options("!l3cat");
892 }
893}
894
895static __init void check_quirks(void)
896{
897 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
898 __check_quirks_intel();
899}
900
901static __init bool get_rdt_resources(void)
902{
903 rdt_alloc_capable = get_rdt_alloc_resources();
904 rdt_mon_capable = get_rdt_mon_resources();
905
906 return (rdt_mon_capable || rdt_alloc_capable);
907}
908
909static __init void rdt_init_res_defs_intel(void)
910{
911 struct rdt_resource *r;
912
913 for_each_rdt_resource(r) {
914 if (r->rid == RDT_RESOURCE_L3 ||
915 r->rid == RDT_RESOURCE_L3DATA ||
916 r->rid == RDT_RESOURCE_L3CODE ||
917 r->rid == RDT_RESOURCE_L2 ||
918 r->rid == RDT_RESOURCE_L2DATA ||
919 r->rid == RDT_RESOURCE_L2CODE)
920 r->cbm_validate = cbm_validate_intel;
921 else if (r->rid == RDT_RESOURCE_MBA) {
922 r->msr_base = MSR_IA32_MBA_THRTL_BASE;
923 r->msr_update = mba_wrmsr_intel;
924 r->parse_ctrlval = parse_bw_intel;
925 }
926 }
927}
928
929static __init void rdt_init_res_defs_amd(void)
930{
931 struct rdt_resource *r;
932
933 for_each_rdt_resource(r) {
934 if (r->rid == RDT_RESOURCE_L3 ||
935 r->rid == RDT_RESOURCE_L3DATA ||
936 r->rid == RDT_RESOURCE_L3CODE ||
937 r->rid == RDT_RESOURCE_L2 ||
938 r->rid == RDT_RESOURCE_L2DATA ||
939 r->rid == RDT_RESOURCE_L2CODE)
940 r->cbm_validate = cbm_validate_amd;
941 else if (r->rid == RDT_RESOURCE_MBA) {
942 r->msr_base = MSR_IA32_MBA_BW_BASE;
943 r->msr_update = mba_wrmsr_amd;
944 r->parse_ctrlval = parse_bw_amd;
945 }
946 }
947}
948
949static __init void rdt_init_res_defs(void)
950{
951 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
952 rdt_init_res_defs_intel();
953 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
954 rdt_init_res_defs_amd();
955}
956
957static enum cpuhp_state rdt_online;
958
959static int __init resctrl_late_init(void)
960{
961 struct rdt_resource *r;
962 int state, ret;
963
964 /*
965 * Initialize functions(or definitions) that are different
966 * between vendors here.
967 */
968 rdt_init_res_defs();
969
970 check_quirks();
971
972 if (!get_rdt_resources())
973 return -ENODEV;
974
975 rdt_init_padding();
976
977 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
978 "x86/resctrl/cat:online:",
979 resctrl_online_cpu, resctrl_offline_cpu);
980 if (state < 0)
981 return state;
982
983 ret = rdtgroup_init();
984 if (ret) {
985 cpuhp_remove_state(state);
986 return ret;
987 }
988 rdt_online = state;
989
990 for_each_alloc_capable_rdt_resource(r)
991 pr_info("%s allocation detected\n", r->name);
992
993 for_each_mon_capable_rdt_resource(r)
994 pr_info("%s monitoring detected\n", r->name);
995
996 return 0;
997}
998
999late_initcall(resctrl_late_init);
1000
1001static void __exit resctrl_exit(void)
1002{
1003 cpuhp_remove_state(rdt_online);
1004 rdtgroup_exit();
1005}
1006
1007__exitcall(resctrl_exit);