v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Resource Director Technology(RDT)
   4 * - Cache Allocation code.
   5 *
   6 * Copyright (C) 2016 Intel Corporation
   7 *
   8 * Authors:
   9 *    Fenghua Yu <fenghua.yu@intel.com>
  10 *    Tony Luck <tony.luck@intel.com>
  11 *    Vikas Shivappa <vikas.shivappa@intel.com>
  12 *
  13 * More information about RDT can be found in the Intel (R) x86 Architecture
  14 * Software Developer Manual June 2016, volume 3, section 17.17.
  15 */
  16
  17#define pr_fmt(fmt)	"resctrl: " fmt
  18
  19#include <linux/slab.h>
  20#include <linux/err.h>
  21#include <linux/cacheinfo.h>
  22#include <linux/cpuhotplug.h>
  23
  24#include <asm/intel-family.h>
  25#include <asm/resctrl.h>
  26#include "internal.h"
  27
  28/* Mutex to protect rdtgroup access. */
  29DEFINE_MUTEX(rdtgroup_mutex);
  30
  31/*
  32 * The cached resctrl_pqr_state is strictly per CPU and can never be
  33 * updated from a remote CPU. Functions which modify the state
  34 * are called with interrupts disabled and no preemption, which
  35 * is sufficient for the protection.
  36 */
  37DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
  38
  39/*
  40 * Used to store the max resource name width and max resource data width
  41 * to display the schemata in a tabular format
  42 */
  43int max_name_width, max_data_width;
  44
  45/*
  46 * Global boolean for rdt_alloc which is true if any
  47 * resource allocation is enabled.
  48 */
  49bool rdt_alloc_capable;
  50
  51static void
  52mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
  53		struct rdt_resource *r);
  54static void
  55cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
  56static void
  57mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
  58	      struct rdt_resource *r);
  59
  60#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
  61
  62struct rdt_resource rdt_resources_all[] = {
  63	[RDT_RESOURCE_L3] =
  64	{
  65		.rid			= RDT_RESOURCE_L3,
  66		.name			= "L3",
  67		.domains		= domain_init(RDT_RESOURCE_L3),
  68		.msr_base		= MSR_IA32_L3_CBM_BASE,
  69		.msr_update		= cat_wrmsr,
  70		.cache_level		= 3,
  71		.cache = {
  72			.min_cbm_bits	= 1,
  73			.cbm_idx_mult	= 1,
  74			.cbm_idx_offset	= 0,
  75		},
  76		.parse_ctrlval		= parse_cbm,
  77		.format_str		= "%d=%0*x",
  78		.fflags			= RFTYPE_RES_CACHE,
  79	},
  80	[RDT_RESOURCE_L3DATA] =
  81	{
  82		.rid			= RDT_RESOURCE_L3DATA,
  83		.name			= "L3DATA",
  84		.domains		= domain_init(RDT_RESOURCE_L3DATA),
  85		.msr_base		= MSR_IA32_L3_CBM_BASE,
  86		.msr_update		= cat_wrmsr,
  87		.cache_level		= 3,
  88		.cache = {
  89			.min_cbm_bits	= 1,
  90			.cbm_idx_mult	= 2,
  91			.cbm_idx_offset	= 0,
  92		},
  93		.parse_ctrlval		= parse_cbm,
  94		.format_str		= "%d=%0*x",
  95		.fflags			= RFTYPE_RES_CACHE,
  96	},
  97	[RDT_RESOURCE_L3CODE] =
  98	{
  99		.rid			= RDT_RESOURCE_L3CODE,
 100		.name			= "L3CODE",
 101		.domains		= domain_init(RDT_RESOURCE_L3CODE),
 102		.msr_base		= MSR_IA32_L3_CBM_BASE,
 103		.msr_update		= cat_wrmsr,
 104		.cache_level		= 3,
 105		.cache = {
 106			.min_cbm_bits	= 1,
 107			.cbm_idx_mult	= 2,
 108			.cbm_idx_offset	= 1,
 109		},
 110		.parse_ctrlval		= parse_cbm,
 111		.format_str		= "%d=%0*x",
 112		.fflags			= RFTYPE_RES_CACHE,
 113	},
 114	[RDT_RESOURCE_L2] =
 115	{
 116		.rid			= RDT_RESOURCE_L2,
 117		.name			= "L2",
 118		.domains		= domain_init(RDT_RESOURCE_L2),
 119		.msr_base		= MSR_IA32_L2_CBM_BASE,
 120		.msr_update		= cat_wrmsr,
 121		.cache_level		= 2,
 122		.cache = {
 123			.min_cbm_bits	= 1,
 124			.cbm_idx_mult	= 1,
 125			.cbm_idx_offset	= 0,
 126		},
 127		.parse_ctrlval		= parse_cbm,
 128		.format_str		= "%d=%0*x",
 129		.fflags			= RFTYPE_RES_CACHE,
 130	},
 131	[RDT_RESOURCE_L2DATA] =
 132	{
 133		.rid			= RDT_RESOURCE_L2DATA,
 134		.name			= "L2DATA",
 135		.domains		= domain_init(RDT_RESOURCE_L2DATA),
 136		.msr_base		= MSR_IA32_L2_CBM_BASE,
 137		.msr_update		= cat_wrmsr,
 138		.cache_level		= 2,
 139		.cache = {
 140			.min_cbm_bits	= 1,
 141			.cbm_idx_mult	= 2,
 142			.cbm_idx_offset	= 0,
 143		},
 144		.parse_ctrlval		= parse_cbm,
 145		.format_str		= "%d=%0*x",
 146		.fflags			= RFTYPE_RES_CACHE,
 147	},
 148	[RDT_RESOURCE_L2CODE] =
 149	{
 150		.rid			= RDT_RESOURCE_L2CODE,
 151		.name			= "L2CODE",
 152		.domains		= domain_init(RDT_RESOURCE_L2CODE),
 153		.msr_base		= MSR_IA32_L2_CBM_BASE,
 154		.msr_update		= cat_wrmsr,
 155		.cache_level		= 2,
 156		.cache = {
 157			.min_cbm_bits	= 1,
 158			.cbm_idx_mult	= 2,
 159			.cbm_idx_offset	= 1,
 160		},
 161		.parse_ctrlval		= parse_cbm,
 162		.format_str		= "%d=%0*x",
 163		.fflags			= RFTYPE_RES_CACHE,
 164	},
 165	[RDT_RESOURCE_MBA] =
 166	{
 167		.rid			= RDT_RESOURCE_MBA,
 168		.name			= "MB",
 169		.domains		= domain_init(RDT_RESOURCE_MBA),
 170		.cache_level		= 3,
 171		.parse_ctrlval		= parse_bw,
 172		.format_str		= "%d=%*u",
 173		.fflags			= RFTYPE_RES_MB,
 174	},
 175};
 176
 177static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
 178{
 179	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
 180}
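/*
 * Illustrative note (added for clarity, not part of the original file):
 * with CDP, the L3DATA/L3CODE entries above use cbm_idx_mult = 2 and
 * cbm_idx_offset = 0/1, so cbm_idx() maps one CLOSID to a pair of adjacent
 * mask MSRs. For example, CLOSID 3 writes its data mask to
 * MSR_IA32_L3_CBM_BASE + 6 and its code mask to MSR_IA32_L3_CBM_BASE + 7.
 */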
 181
 182/*
 183 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 184 * as they do not have CPUID enumeration support for Cache allocation.
 185 * The check for Vendor/Family/Model is not enough to guarantee that
 186 * the MSRs won't #GP fault because only the following SKUs support
 187 * CAT:
 188 *	Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
 189 *	Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
 190 *	Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
 191 *	Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
 192 *	Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
 193 *	Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
 194 *
 195 * Probe by trying to write the first of the L3 cache mask registers
 196 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 197 * is always 20 on hsw server parts. The minimum cache bitmask length
 198 * allowed for HSW server is always 2 bits. Hardcode all of them.
 199 */
 200static inline void cache_alloc_hsw_probe(void)
 201{
 202	struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
 203	u32 l, h, max_cbm = BIT_MASK(20) - 1;
 204
 205	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
 206		return;
 207
 208	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
 209
 210	/* If all the bits were set in MSR, return success */
 211	if (l != max_cbm)
 212		return;
 213
 214	r->num_closid = 4;
 215	r->default_ctrl = max_cbm;
 216	r->cache.cbm_len = 20;
 217	r->cache.shareable_bits = 0xc0000;
 218	r->cache.min_cbm_bits = 2;
 219	r->alloc_capable = true;
 220	r->alloc_enabled = true;
 221
 222	rdt_alloc_capable = true;
 223}
 224
 225bool is_mba_sc(struct rdt_resource *r)
 226{
 227	if (!r)
 228		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
 229
 230	return r->membw.mba_sc;
 231}
 232
 233/*
 234 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
 235 * exposed to user interface and the h/w understandable delay values.
 236 *
 237 * The non-linear delay values have the granularity of power of two
 238 * and also the h/w does not guarantee a curve for configured delay
 239 * values vs. actual b/w enforced.
 240 * Hence we need a mapping that is pre calibrated so the user can
 241 * express the memory b/w as a percentage value.
 242 */
 243static inline bool rdt_get_mb_table(struct rdt_resource *r)
 244{
 245	/*
 246	 * There are no Intel SKUs as of now to support non-linear delay.
 247	 */
 248	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
 249		boot_cpu_data.x86, boot_cpu_data.x86_model);
 250
 251	return false;
 252}
 253
 254static bool __get_mem_config_intel(struct rdt_resource *r)
 255{
 256	union cpuid_0x10_3_eax eax;
 257	union cpuid_0x10_x_edx edx;
 258	u32 ebx, ecx, max_delay;
 259
 260	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
 261	r->num_closid = edx.split.cos_max + 1;
 262	max_delay = eax.split.max_delay + 1;
 263	r->default_ctrl = MAX_MBA_BW;
 264	r->membw.arch_needs_linear = true;
 265	if (ecx & MBA_IS_LINEAR) {
 266		r->membw.delay_linear = true;
 267		r->membw.min_bw = MAX_MBA_BW - max_delay;
 268		r->membw.bw_gran = MAX_MBA_BW - max_delay;
 269	} else {
 270		if (!rdt_get_mb_table(r))
 271			return false;
 272		r->membw.arch_needs_linear = false;
 273	}
 274	r->data_width = 3;
 275
 276	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
 277		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
 278	else
 279		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
 280	thread_throttle_mode_init();
 281
 282	r->alloc_capable = true;
 283	r->alloc_enabled = true;
 284
 285	return true;
 286}
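/*
 * Worked example (hypothetical CPUID values, added for clarity; assumes
 * MAX_MBA_BW is 100): if MBA is linear and eax.split.max_delay reads 89,
 * max_delay becomes 90, so min_bw = bw_gran = 100 - 90 = 10, i.e. bandwidth
 * can be requested in 10% steps from 10% up to the default of 100%.
 */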
 287
 288static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 289{
 290	union cpuid_0x10_3_eax eax;
 291	union cpuid_0x10_x_edx edx;
 292	u32 ebx, ecx;
 293
 294	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
 295	r->num_closid = edx.split.cos_max + 1;
 296	r->default_ctrl = MAX_MBA_BW_AMD;
 297
 298	/* AMD does not use delay */
 299	r->membw.delay_linear = false;
 300	r->membw.arch_needs_linear = false;
 301
 302	/*
 303	 * AMD does not use memory delay throttle model to control
 304	 * the allocation like Intel does.
 305	 */
 306	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
 307	r->membw.min_bw = 0;
 308	r->membw.bw_gran = 1;
 309	/* Max value is 2048, Data width should be 4 in decimal */
 310	r->data_width = 4;
 311
 312	r->alloc_capable = true;
 313	r->alloc_enabled = true;
 314
 315	return true;
 316}
 317
 318static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 319{
 320	union cpuid_0x10_1_eax eax;
 321	union cpuid_0x10_x_edx edx;
 322	u32 ebx, ecx;
 323
 324	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
 325	r->num_closid = edx.split.cos_max + 1;
 326	r->cache.cbm_len = eax.split.cbm_len + 1;
 327	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
 328	r->cache.shareable_bits = ebx & r->default_ctrl;
 329	r->data_width = (r->cache.cbm_len + 3) / 4;
 330	r->alloc_capable = true;
 331	r->alloc_enabled = true;
 332}
 333
 334static void rdt_get_cdp_config(int level, int type)
 335{
 336	struct rdt_resource *r_l = &rdt_resources_all[level];
 337	struct rdt_resource *r = &rdt_resources_all[type];
 338
 339	r->num_closid = r_l->num_closid / 2;
 340	r->cache.cbm_len = r_l->cache.cbm_len;
 341	r->default_ctrl = r_l->default_ctrl;
 342	r->cache.shareable_bits = r_l->cache.shareable_bits;
 343	r->data_width = (r->cache.cbm_len + 3) / 4;
 344	r->alloc_capable = true;
 345	/*
 346	 * By default, CDP is disabled. CDP can be enabled by mount parameter
 347	 * "cdp" during resctrl file system mount time.
 348	 */
 349	r->alloc_enabled = false;
 350}
 351
 352static void rdt_get_cdp_l3_config(void)
 353{
 354	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
 355	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
 356}
 357
 358static void rdt_get_cdp_l2_config(void)
 359{
 360	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
 361	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
 362}
 363
 364static void
 365mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 366{
 367	unsigned int i;
 368
 369	for (i = m->low; i < m->high; i++)
 370		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
 371}
 372
 373/*
 374 * Map the memory b/w percentage value to delay values
 375 * that can be written to QOS_MSRs.
 376 * There are currently no SKUs which support non linear delay values.
 377 */
 378u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 379{
 380	if (r->membw.delay_linear)
 381		return MAX_MBA_BW - bw;
 382
 383	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
 384	return r->default_ctrl;
 385}
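/*
 * Example (illustrative, assuming MAX_MBA_BW is 100): on a linear-delay SKU
 * a schemata request of 70% memory bandwidth is written to the QOS MSR as a
 * delay value of 100 - 70 = 30; on non-linear SKUs the resource's
 * default_ctrl is written instead.
 */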
 386
 387static void
 388mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
 389		struct rdt_resource *r)
 390{
 391	unsigned int i;
 392
 393	/*  Write the delay values for mba. */
 394	for (i = m->low; i < m->high; i++)
 395		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
 396}
 397
 398static void
 399cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 400{
 401	unsigned int i;
 402
 403	for (i = m->low; i < m->high; i++)
 404		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
 405}
 406
 407struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
 408{
 409	struct rdt_domain *d;
 410
 411	list_for_each_entry(d, &r->domains, list) {
 412		/* Find the domain that contains this CPU */
 413		if (cpumask_test_cpu(cpu, &d->cpu_mask))
 414			return d;
 415	}
 416
 417	return NULL;
 418}
 419
 420void rdt_ctrl_update(void *arg)
 421{
 422	struct msr_param *m = arg;
 423	struct rdt_resource *r = m->res;
 424	int cpu = smp_processor_id();
 425	struct rdt_domain *d;
 426
 427	d = get_domain_from_cpu(cpu, r);
 428	if (d) {
 429		r->msr_update(d, m, r);
 430		return;
 431	}
 432	pr_warn_once("cpu %d not found in any domain for resource %s\n",
 433		     cpu, r->name);
 434}
 435
 436/*
 437 * rdt_find_domain - Find a domain in a resource that matches input resource id
 438 *
 439 * Search resource r's domain list to find the resource id. If the resource
 440 * id is found in a domain, return the domain. Otherwise, if requested by
 441 * caller, return the first domain whose id is bigger than the input id.
 442 * The domain list is sorted by id in ascending order.
 443 */
 444struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 445				   struct list_head **pos)
 446{
 447	struct rdt_domain *d;
 448	struct list_head *l;
 449
 450	if (id < 0)
 451		return ERR_PTR(-ENODEV);
 452
 453	list_for_each(l, &r->domains) {
 454		d = list_entry(l, struct rdt_domain, list);
 455		/* When id is found, return its domain. */
 456		if (id == d->id)
 457			return d;
 458		/* Stop searching when finding id's position in sorted list. */
 459		if (id < d->id)
 460			break;
 461	}
 462
 463	if (pos)
 464		*pos = l;
 465
 466	return NULL;
 467}
 468
 469void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 470{
 471	int i;
 472
 473	/*
 474	 * Initialize the Control MSRs to having no control.
 475	 * For Cache Allocation: Set all bits in cbm
 476	 * For Memory Allocation: Set b/w requested to 100%
 477	 * and the bandwidth in MBps to U32_MAX
 478	 */
 479	for (i = 0; i < r->num_closid; i++, dc++, dm++) {
 480		*dc = r->default_ctrl;
 481		*dm = MBA_MAX_MBPS;
 482	}
 483}
 484
 485static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 486{
 487	struct msr_param m;
 488	u32 *dc, *dm;
 489
 490	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
 491	if (!dc)
 492		return -ENOMEM;
 493
 494	dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
 495	if (!dm) {
 496		kfree(dc);
 497		return -ENOMEM;
 498	}
 499
 500	d->ctrl_val = dc;
 501	d->mbps_val = dm;
 502	setup_default_ctrlval(r, dc, dm);
 503
 504	m.low = 0;
 505	m.high = r->num_closid;
 506	r->msr_update(d, &m, r);
 507	return 0;
 508}
 509
 510static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 511{
 512	size_t tsize;
 513
 514	if (is_llc_occupancy_enabled()) {
 515		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
 516		if (!d->rmid_busy_llc)
 517			return -ENOMEM;
 518		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
 519	}
 520	if (is_mbm_total_enabled()) {
 521		tsize = sizeof(*d->mbm_total);
 522		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
 523		if (!d->mbm_total) {
 524			bitmap_free(d->rmid_busy_llc);
 525			return -ENOMEM;
 526		}
 527	}
 528	if (is_mbm_local_enabled()) {
 529		tsize = sizeof(*d->mbm_local);
 530		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
 531		if (!d->mbm_local) {
 532			bitmap_free(d->rmid_busy_llc);
 533			kfree(d->mbm_total);
 534			return -ENOMEM;
 535		}
 536	}
 537
 538	if (is_mbm_enabled()) {
 539		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
 540		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
 541	}
 542
 543	return 0;
 544}
 545
 546/*
 547 * domain_add_cpu - Add a cpu to a resource's domain list.
 548 *
 549 * If an existing domain in the resource r's domain list matches the cpu's
 550 * resource id, add the cpu in the domain.
 551 *
 552 * Otherwise, a new domain is allocated and inserted into the right position
 553 * in the domain list sorted by id in ascending order.
 554 *
 555 * The order in the domain list is visible to users when we print entries
 556 * in the schemata file and schemata input is validated to have the same order
 557 * as this list.
 558 */
 559static void domain_add_cpu(int cpu, struct rdt_resource *r)
 560{
 561	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
 562	struct list_head *add_pos = NULL;
 563	struct rdt_domain *d;
 564
 565	d = rdt_find_domain(r, id, &add_pos);
 566	if (IS_ERR(d)) {
 567		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
 568		return;
 569	}
 570
 571	if (d) {
 572		cpumask_set_cpu(cpu, &d->cpu_mask);
 573		if (r->cache.arch_has_per_cpu_cfg)
 574			rdt_domain_reconfigure_cdp(r);
 575		return;
 576	}
 577
 578	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
 579	if (!d)
 580		return;
 581
 582	d->id = id;
 583	cpumask_set_cpu(cpu, &d->cpu_mask);
 584
 585	rdt_domain_reconfigure_cdp(r);
 586
 587	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
 588		kfree(d);
 589		return;
 590	}
 591
 592	if (r->mon_capable && domain_setup_mon_state(r, d)) {
 593		kfree(d->ctrl_val);
 594		kfree(d->mbps_val);
 595		kfree(d);
 596		return;
 597	}
 598
 599	list_add_tail(&d->list, add_pos);
 600
 601	/*
 602	 * If resctrl is mounted, add
 603	 * per domain monitor data directories.
 604	 */
 605	if (static_branch_unlikely(&rdt_mon_enable_key))
 606		mkdir_mondata_subdir_allrdtgrp(r, d);
 607}
 608
 609static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 610{
 611	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
 612	struct rdt_domain *d;
 613
 614	d = rdt_find_domain(r, id, NULL);
 615	if (IS_ERR_OR_NULL(d)) {
 616		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
 617		return;
 618	}
 619
 620	cpumask_clear_cpu(cpu, &d->cpu_mask);
 621	if (cpumask_empty(&d->cpu_mask)) {
 622		/*
 623		 * If resctrl is mounted, remove all the
 624		 * per domain monitor data directories.
 625		 */
 626		if (static_branch_unlikely(&rdt_mon_enable_key))
 627			rmdir_mondata_subdir_allrdtgrp(r, d->id);
 628		list_del(&d->list);
 629		if (r->mon_capable && is_mbm_enabled())
 630			cancel_delayed_work(&d->mbm_over);
 631		if (is_llc_occupancy_enabled() &&  has_busy_rmid(r, d)) {
 632			/*
 633			 * When a package is going down, forcefully
 634			 * decrement rmid->ebusy. There is no way to know
 635			 * that the L3 was flushed and hence may lead to
 636			 * incorrect counts in rare scenarios, but leaving
 637			 * the RMID as busy creates RMID leaks if the
 638			 * package never comes back.
 639			 */
 640			__check_limbo(d, true);
 641			cancel_delayed_work(&d->cqm_limbo);
 642		}
 643
 644		/*
 645		 * rdt_domain "d" is going to be freed below, so clear
 646		 * its pointer from pseudo_lock_region struct.
 647		 */
 648		if (d->plr)
 649			d->plr->d = NULL;
 650
 651		kfree(d->ctrl_val);
 652		kfree(d->mbps_val);
 653		bitmap_free(d->rmid_busy_llc);
 654		kfree(d->mbm_total);
 655		kfree(d->mbm_local);
 656		kfree(d);
 657		return;
 658	}
 659
 660	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
 661		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
 662			cancel_delayed_work(&d->mbm_over);
 663			mbm_setup_overflow_handler(d, 0);
 664		}
 665		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
 666		    has_busy_rmid(r, d)) {
 667			cancel_delayed_work(&d->cqm_limbo);
 668			cqm_setup_limbo_handler(d, 0);
 669		}
 670	}
 671}
 672
 673static void clear_closid_rmid(int cpu)
 674{
 675	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 676
 677	state->default_closid = 0;
 678	state->default_rmid = 0;
 679	state->cur_closid = 0;
 680	state->cur_rmid = 0;
 681	wrmsr(IA32_PQR_ASSOC, 0, 0);
 682}
 683
 684static int resctrl_online_cpu(unsigned int cpu)
 685{
 686	struct rdt_resource *r;
 687
 688	mutex_lock(&rdtgroup_mutex);
 689	for_each_capable_rdt_resource(r)
 690		domain_add_cpu(cpu, r);
 691	/* The cpu is set in default rdtgroup after online. */
 692	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
 693	clear_closid_rmid(cpu);
 694	mutex_unlock(&rdtgroup_mutex);
 695
 696	return 0;
 697}
 698
 699static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 700{
 701	struct rdtgroup *cr;
 702
 703	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
 704		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
 705			break;
 706		}
 707	}
 708}
 709
 710static int resctrl_offline_cpu(unsigned int cpu)
 711{
 712	struct rdtgroup *rdtgrp;
 713	struct rdt_resource *r;
 714
 715	mutex_lock(&rdtgroup_mutex);
 716	for_each_capable_rdt_resource(r)
 717		domain_remove_cpu(cpu, r);
 718	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
 719		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
 720			clear_childcpus(rdtgrp, cpu);
 721			break;
 722		}
 723	}
 724	clear_closid_rmid(cpu);
 725	mutex_unlock(&rdtgroup_mutex);
 726
 727	return 0;
 728}
 729
 730/*
 731 * Choose a width for the resource name and resource data based on the
 732 * resource that has widest name and cbm.
 733 */
 734static __init void rdt_init_padding(void)
 735{
 736	struct rdt_resource *r;
 737	int cl;
 738
 739	for_each_alloc_capable_rdt_resource(r) {
 740		cl = strlen(r->name);
 741		if (cl > max_name_width)
 742			max_name_width = cl;
 743
 744		if (r->data_width > max_data_width)
 745			max_data_width = r->data_width;
 746	}
 747}
 748
 749enum {
 750	RDT_FLAG_CMT,
 751	RDT_FLAG_MBM_TOTAL,
 752	RDT_FLAG_MBM_LOCAL,
 753	RDT_FLAG_L3_CAT,
 754	RDT_FLAG_L3_CDP,
 755	RDT_FLAG_L2_CAT,
 756	RDT_FLAG_L2_CDP,
 757	RDT_FLAG_MBA,
 758};
 759
 760#define RDT_OPT(idx, n, f)	\
 761[idx] = {			\
 762	.name = n,		\
 763	.flag = f		\
 764}
 765
 766struct rdt_options {
 767	char	*name;
 768	int	flag;
 769	bool	force_off, force_on;
 770};
 771
 772static struct rdt_options rdt_options[]  __initdata = {
 773	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
 774	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
 775	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
 776	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
 777	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
 778	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
 779	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
 780	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
 781};
 782#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
 783
 784static int __init set_rdt_options(char *str)
 785{
 786	struct rdt_options *o;
 787	bool force_off;
 788	char *tok;
 789
 790	if (*str == '=')
 791		str++;
 792	while ((tok = strsep(&str, ",")) != NULL) {
 793		force_off = *tok == '!';
 794		if (force_off)
 795			tok++;
 796		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
 797			if (strcmp(tok, o->name) == 0) {
 798				if (force_off)
 799					o->force_off = true;
 800				else
 801					o->force_on = true;
 802				break;
 803			}
 804		}
 805	}
 806	return 1;
 807}
 808__setup("rdt", set_rdt_options);
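/*
 * Example usage (illustrative): booting with "rdt=!l3cat,mba" sets force_off
 * for L3 CAT and force_on for MBA, overriding what CPUID enumeration would
 * otherwise report in rdt_cpu_has().
 */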
 809
 810static bool __init rdt_cpu_has(int flag)
 811{
 812	bool ret = boot_cpu_has(flag);
 813	struct rdt_options *o;
 814
 815	if (!ret)
 816		return ret;
 817
 818	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
 819		if (flag == o->flag) {
 820			if (o->force_off)
 821				ret = false;
 822			if (o->force_on)
 823				ret = true;
 824			break;
 825		}
 826	}
 827	return ret;
 828}
 829
 830static __init bool get_mem_config(void)
 831{
 832	if (!rdt_cpu_has(X86_FEATURE_MBA))
 833		return false;
 834
 835	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 836		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
 837	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 838		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
 839
 840	return false;
 841}
 842
 843static __init bool get_rdt_alloc_resources(void)
 844{
 845	bool ret = false;
 846
 847	if (rdt_alloc_capable)
 848		return true;
 849
 850	if (!boot_cpu_has(X86_FEATURE_RDT_A))
 851		return false;
 852
 853	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
 854		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
 855		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
 856			rdt_get_cdp_l3_config();
 857		ret = true;
 858	}
 859	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
  860		/* CPUID 0x10.2 fields are same format as 0x10.1 */
 861		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
 862		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
 863			rdt_get_cdp_l2_config();
 864		ret = true;
 865	}
 866
 867	if (get_mem_config())
 868		ret = true;
 869
 870	return ret;
 871}
 872
 873static __init bool get_rdt_mon_resources(void)
 874{
 875	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
 876		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
 877	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
 878		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
 879	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
 880		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
 881
 882	if (!rdt_mon_features)
 883		return false;
 884
 885	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
 886}
 887
 888static __init void __check_quirks_intel(void)
 889{
 890	switch (boot_cpu_data.x86_model) {
 891	case INTEL_FAM6_HASWELL_X:
 892		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
 893			cache_alloc_hsw_probe();
 894		break;
 895	case INTEL_FAM6_SKYLAKE_X:
 896		if (boot_cpu_data.x86_stepping <= 4)
 897			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
 898		else
 899			set_rdt_options("!l3cat");
 900		fallthrough;
 901	case INTEL_FAM6_BROADWELL_X:
 902		intel_rdt_mbm_apply_quirk();
 903		break;
 904	}
 905}
 906
 907static __init void check_quirks(void)
 908{
 909	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 910		__check_quirks_intel();
 911}
 912
 913static __init bool get_rdt_resources(void)
 914{
 915	rdt_alloc_capable = get_rdt_alloc_resources();
 916	rdt_mon_capable = get_rdt_mon_resources();
 917
 918	return (rdt_mon_capable || rdt_alloc_capable);
 919}
 920
 921static __init void rdt_init_res_defs_intel(void)
 922{
 923	struct rdt_resource *r;
 924
 925	for_each_rdt_resource(r) {
 926		if (r->rid == RDT_RESOURCE_L3 ||
 927		    r->rid == RDT_RESOURCE_L3DATA ||
 928		    r->rid == RDT_RESOURCE_L3CODE ||
 929		    r->rid == RDT_RESOURCE_L2 ||
 930		    r->rid == RDT_RESOURCE_L2DATA ||
 931		    r->rid == RDT_RESOURCE_L2CODE) {
 932			r->cache.arch_has_sparse_bitmaps = false;
 933			r->cache.arch_has_empty_bitmaps = false;
 934			r->cache.arch_has_per_cpu_cfg = false;
 935		} else if (r->rid == RDT_RESOURCE_MBA) {
 936			r->msr_base = MSR_IA32_MBA_THRTL_BASE;
 937			r->msr_update = mba_wrmsr_intel;
 938		}
 939	}
 940}
 941
 942static __init void rdt_init_res_defs_amd(void)
 943{
 944	struct rdt_resource *r;
 945
 946	for_each_rdt_resource(r) {
 947		if (r->rid == RDT_RESOURCE_L3 ||
 948		    r->rid == RDT_RESOURCE_L3DATA ||
 949		    r->rid == RDT_RESOURCE_L3CODE ||
 950		    r->rid == RDT_RESOURCE_L2 ||
 951		    r->rid == RDT_RESOURCE_L2DATA ||
 952		    r->rid == RDT_RESOURCE_L2CODE) {
 953			r->cache.arch_has_sparse_bitmaps = true;
 954			r->cache.arch_has_empty_bitmaps = true;
 955			r->cache.arch_has_per_cpu_cfg = true;
 956		} else if (r->rid == RDT_RESOURCE_MBA) {
 957			r->msr_base = MSR_IA32_MBA_BW_BASE;
 958			r->msr_update = mba_wrmsr_amd;
 959		}
 960	}
 961}
 962
 963static __init void rdt_init_res_defs(void)
 964{
 965	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 966		rdt_init_res_defs_intel();
 967	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 968		rdt_init_res_defs_amd();
 969}
 970
 971static enum cpuhp_state rdt_online;
 972
 973/* Runs once on the BSP during boot. */
 974void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 975{
 976	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
 977		c->x86_cache_max_rmid  = -1;
 978		c->x86_cache_occ_scale = -1;
 979		c->x86_cache_mbm_width_offset = -1;
 980		return;
 981	}
 982
 983	/* will be overridden if occupancy monitoring exists */
 984	c->x86_cache_max_rmid = cpuid_ebx(0xf);
 985
 986	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
 987	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
 988	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
 989		u32 eax, ebx, ecx, edx;
 990
 991		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
 992		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
 993
 994		c->x86_cache_max_rmid  = ecx;
 995		c->x86_cache_occ_scale = ebx;
 996		c->x86_cache_mbm_width_offset = eax & 0xff;
 997
 998		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
 999			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
1000	}
1001}
1002
1003static int __init resctrl_late_init(void)
1004{
1005	struct rdt_resource *r;
1006	int state, ret;
1007
1008	/*
1009	 * Initialize functions(or definitions) that are different
1010	 * between vendors here.
1011	 */
1012	rdt_init_res_defs();
1013
1014	check_quirks();
1015
1016	if (!get_rdt_resources())
1017		return -ENODEV;
1018
1019	rdt_init_padding();
1020
1021	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
1022				  "x86/resctrl/cat:online:",
1023				  resctrl_online_cpu, resctrl_offline_cpu);
1024	if (state < 0)
1025		return state;
1026
1027	ret = rdtgroup_init();
1028	if (ret) {
1029		cpuhp_remove_state(state);
1030		return ret;
1031	}
1032	rdt_online = state;
1033
1034	for_each_alloc_capable_rdt_resource(r)
1035		pr_info("%s allocation detected\n", r->name);
1036
1037	for_each_mon_capable_rdt_resource(r)
1038		pr_info("%s monitoring detected\n", r->name);
1039
1040	return 0;
1041}
1042
1043late_initcall(resctrl_late_init);
1044
1045static void __exit resctrl_exit(void)
1046{
1047	cpuhp_remove_state(rdt_online);
1048	rdtgroup_exit();
1049}
1050
1051__exitcall(resctrl_exit);
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Cache Allocation code.
  5 *
  6 * Copyright (C) 2016 Intel Corporation
  7 *
  8 * Authors:
  9 *    Fenghua Yu <fenghua.yu@intel.com>
 10 *    Tony Luck <tony.luck@intel.com>
 11 *    Vikas Shivappa <vikas.shivappa@intel.com>
 12 *
  13 * More information about RDT can be found in the Intel (R) x86 Architecture
 14 * Software Developer Manual June 2016, volume 3, section 17.17.
 15 */
 16
 17#define pr_fmt(fmt)	"resctrl: " fmt
 18
 19#include <linux/slab.h>
 20#include <linux/err.h>
 21#include <linux/cacheinfo.h>
 22#include <linux/cpuhotplug.h>
 23
 24#include <asm/intel-family.h>
 25#include <asm/resctrl.h>
 26#include "internal.h"
 27
 28/* Mutex to protect rdtgroup access. */
 29DEFINE_MUTEX(rdtgroup_mutex);
 30
 31/*
 32 * The cached resctrl_pqr_state is strictly per CPU and can never be
 33 * updated from a remote CPU. Functions which modify the state
 34 * are called with interrupts disabled and no preemption, which
 35 * is sufficient for the protection.
 36 */
 37DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 38
 39/*
 40 * Used to store the max resource name width and max resource data width
 41 * to display the schemata in a tabular format
 42 */
 43int max_name_width, max_data_width;
 44
 45/*
 46 * Global boolean for rdt_alloc which is true if any
 47 * resource allocation is enabled.
 48 */
 49bool rdt_alloc_capable;
 50
 51static void
 52mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
 53		struct rdt_resource *r);
 54static void
 55cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
 56static void
 57mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
 58	      struct rdt_resource *r);
 59
 60#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
 61
 62struct rdt_hw_resource rdt_resources_all[] = {
 63	[RDT_RESOURCE_L3] =
 64	{
 65		.r_resctrl = {
 66			.rid			= RDT_RESOURCE_L3,
 67			.name			= "L3",
 68			.cache_level		= 3,
 69			.domains		= domain_init(RDT_RESOURCE_L3),
 70			.parse_ctrlval		= parse_cbm,
 71			.format_str		= "%d=%0*x",
 72			.fflags			= RFTYPE_RES_CACHE,
 73		},
 74		.msr_base		= MSR_IA32_L3_CBM_BASE,
 75		.msr_update		= cat_wrmsr,
 76	},
 77	[RDT_RESOURCE_L2] =
 78	{
 79		.r_resctrl = {
 80			.rid			= RDT_RESOURCE_L2,
 81			.name			= "L2",
 82			.cache_level		= 2,
 83			.domains		= domain_init(RDT_RESOURCE_L2),
 84			.parse_ctrlval		= parse_cbm,
 85			.format_str		= "%d=%0*x",
 86			.fflags			= RFTYPE_RES_CACHE,
 87		},
 88		.msr_base		= MSR_IA32_L2_CBM_BASE,
 89		.msr_update		= cat_wrmsr,
 90	},
 91	[RDT_RESOURCE_MBA] =
 92	{
 93		.r_resctrl = {
 94			.rid			= RDT_RESOURCE_MBA,
 95			.name			= "MB",
 96			.cache_level		= 3,
 97			.domains		= domain_init(RDT_RESOURCE_MBA),
 98			.parse_ctrlval		= parse_bw,
 99			.format_str		= "%d=%*u",
100			.fflags			= RFTYPE_RES_MB,
101		},
102	},
103};
104
105/*
106 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
107 * as they do not have CPUID enumeration support for Cache allocation.
108 * The check for Vendor/Family/Model is not enough to guarantee that
109 * the MSRs won't #GP fault because only the following SKUs support
110 * CAT:
111 *	Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
112 *	Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
113 *	Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
114 *	Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
115 *	Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
116 *	Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
117 *
118 * Probe by trying to write the first of the L3 cache mask registers
119 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
120 * is always 20 on hsw server parts. The minimum cache bitmask length
121 * allowed for HSW server is always 2 bits. Hardcode all of them.
122 */
123static inline void cache_alloc_hsw_probe(void)
124{
125	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
126	struct rdt_resource *r  = &hw_res->r_resctrl;
127	u32 l, h, max_cbm = BIT_MASK(20) - 1;
128
129	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
130		return;
131
132	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
133
134	/* If all the bits were set in MSR, return success */
135	if (l != max_cbm)
136		return;
137
138	hw_res->num_closid = 4;
139	r->default_ctrl = max_cbm;
140	r->cache.cbm_len = 20;
141	r->cache.shareable_bits = 0xc0000;
142	r->cache.min_cbm_bits = 2;
143	r->alloc_capable = true;
144
145	rdt_alloc_capable = true;
146}
147
148bool is_mba_sc(struct rdt_resource *r)
149{
150	if (!r)
151		return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
152
153	return r->membw.mba_sc;
154}
155
156/*
157 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
158 * exposed to user interface and the h/w understandable delay values.
159 *
160 * The non-linear delay values have the granularity of power of two
161 * and also the h/w does not guarantee a curve for configured delay
162 * values vs. actual b/w enforced.
163 * Hence we need a mapping that is pre calibrated so the user can
164 * express the memory b/w as a percentage value.
165 */
166static inline bool rdt_get_mb_table(struct rdt_resource *r)
167{
168	/*
169	 * There are no Intel SKUs as of now to support non-linear delay.
170	 */
171	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
172		boot_cpu_data.x86, boot_cpu_data.x86_model);
173
174	return false;
175}
176
177static bool __get_mem_config_intel(struct rdt_resource *r)
178{
179	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
180	union cpuid_0x10_3_eax eax;
181	union cpuid_0x10_x_edx edx;
182	u32 ebx, ecx, max_delay;
183
184	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
185	hw_res->num_closid = edx.split.cos_max + 1;
186	max_delay = eax.split.max_delay + 1;
187	r->default_ctrl = MAX_MBA_BW;
188	r->membw.arch_needs_linear = true;
189	if (ecx & MBA_IS_LINEAR) {
190		r->membw.delay_linear = true;
191		r->membw.min_bw = MAX_MBA_BW - max_delay;
192		r->membw.bw_gran = MAX_MBA_BW - max_delay;
193	} else {
194		if (!rdt_get_mb_table(r))
195			return false;
196		r->membw.arch_needs_linear = false;
197	}
198	r->data_width = 3;
199
200	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
201		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
202	else
203		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
204	thread_throttle_mode_init();
205
206	r->alloc_capable = true;
207
208	return true;
209}
210
211static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
212{
213	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
214	union cpuid_0x10_3_eax eax;
215	union cpuid_0x10_x_edx edx;
216	u32 ebx, ecx;
217
218	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
219	hw_res->num_closid = edx.split.cos_max + 1;
220	r->default_ctrl = MAX_MBA_BW_AMD;
221
222	/* AMD does not use delay */
223	r->membw.delay_linear = false;
224	r->membw.arch_needs_linear = false;
225
226	/*
227	 * AMD does not use memory delay throttle model to control
228	 * the allocation like Intel does.
229	 */
230	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
231	r->membw.min_bw = 0;
232	r->membw.bw_gran = 1;
233	/* Max value is 2048, Data width should be 4 in decimal */
234	r->data_width = 4;
235
236	r->alloc_capable = true;
237
238	return true;
239}
240
241static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
242{
243	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
244	union cpuid_0x10_1_eax eax;
245	union cpuid_0x10_x_edx edx;
246	u32 ebx, ecx;
247
248	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
249	hw_res->num_closid = edx.split.cos_max + 1;
250	r->cache.cbm_len = eax.split.cbm_len + 1;
251	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
252	r->cache.shareable_bits = ebx & r->default_ctrl;
253	r->data_width = (r->cache.cbm_len + 3) / 4;
254	r->alloc_capable = true;
255}
256
257static void rdt_get_cdp_config(int level)
258{
259	/*
260	 * By default, CDP is disabled. CDP can be enabled by mount parameter
261	 * "cdp" during resctrl file system mount time.
262	 */
263	rdt_resources_all[level].cdp_enabled = false;
264	rdt_resources_all[level].r_resctrl.cdp_capable = true;
265}
266
267static void rdt_get_cdp_l3_config(void)
268{
269	rdt_get_cdp_config(RDT_RESOURCE_L3);
270}
271
272static void rdt_get_cdp_l2_config(void)
273{
274	rdt_get_cdp_config(RDT_RESOURCE_L2);
275}
276
277static void
278mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
279{
280	unsigned int i;
281	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
282	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
283
284	for (i = m->low; i < m->high; i++)
285		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
286}
287
288/*
289 * Map the memory b/w percentage value to delay values
290 * that can be written to QOS_MSRs.
291 * There are currently no SKUs which support non linear delay values.
292 */
293static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
294{
295	if (r->membw.delay_linear)
296		return MAX_MBA_BW - bw;
297
298	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
299	return r->default_ctrl;
300}
301
302static void
303mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
304		struct rdt_resource *r)
305{
306	unsigned int i;
307	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
308	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
309
310	/*  Write the delay values for mba. */
311	for (i = m->low; i < m->high; i++)
312		wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
313}
314
315static void
316cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
317{
318	unsigned int i;
319	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
320	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
321
322	for (i = m->low; i < m->high; i++)
323		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
324}
325
326struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
327{
328	struct rdt_domain *d;
329
330	list_for_each_entry(d, &r->domains, list) {
331		/* Find the domain that contains this CPU */
332		if (cpumask_test_cpu(cpu, &d->cpu_mask))
333			return d;
334	}
335
336	return NULL;
337}
338
339u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
340{
341	return resctrl_to_arch_res(r)->num_closid;
342}
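/*
 * Note (added for clarity, not part of the original file): in this version
 * the resource description is split into the filesystem-visible
 * struct rdt_resource and the architecture-private struct rdt_hw_resource
 * that embeds it as .r_resctrl. resctrl_to_arch_res() is assumed to recover
 * the container, roughly:
 *
 *	static inline struct rdt_hw_resource *
 *	resctrl_to_arch_res(struct rdt_resource *r)
 *	{
 *		return container_of(r, struct rdt_hw_resource, r_resctrl);
 *	}
 */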
343
344void rdt_ctrl_update(void *arg)
345{
346	struct msr_param *m = arg;
347	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
348	struct rdt_resource *r = m->res;
349	int cpu = smp_processor_id();
350	struct rdt_domain *d;
351
352	d = get_domain_from_cpu(cpu, r);
353	if (d) {
354		hw_res->msr_update(d, m, r);
355		return;
356	}
357	pr_warn_once("cpu %d not found in any domain for resource %s\n",
358		     cpu, r->name);
359}
360
361/*
362 * rdt_find_domain - Find a domain in a resource that matches input resource id
363 *
364 * Search resource r's domain list to find the resource id. If the resource
365 * id is found in a domain, return the domain. Otherwise, if requested by
366 * caller, return the first domain whose id is bigger than the input id.
367 * The domain list is sorted by id in ascending order.
368 */
369struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
370				   struct list_head **pos)
371{
372	struct rdt_domain *d;
373	struct list_head *l;
374
375	if (id < 0)
376		return ERR_PTR(-ENODEV);
377
378	list_for_each(l, &r->domains) {
379		d = list_entry(l, struct rdt_domain, list);
380		/* When id is found, return its domain. */
381		if (id == d->id)
382			return d;
383		/* Stop searching when finding id's position in sorted list. */
384		if (id < d->id)
385			break;
386	}
387
388	if (pos)
389		*pos = l;
390
391	return NULL;
392}
393
394static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
395{
396	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
397	int i;
398
399	/*
400	 * Initialize the Control MSRs to having no control.
401	 * For Cache Allocation: Set all bits in cbm
402	 * For Memory Allocation: Set b/w requested to 100%
403	 */
404	for (i = 0; i < hw_res->num_closid; i++, dc++)
405		*dc = r->default_ctrl;
406}
407
408static void domain_free(struct rdt_hw_domain *hw_dom)
409{
410	kfree(hw_dom->arch_mbm_total);
411	kfree(hw_dom->arch_mbm_local);
412	kfree(hw_dom->ctrl_val);
413	kfree(hw_dom);
414}
415
416static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
417{
418	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
419	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
420	struct msr_param m;
421	u32 *dc;
422
423	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
424			   GFP_KERNEL);
425	if (!dc)
426		return -ENOMEM;
427
428	hw_dom->ctrl_val = dc;
429	setup_default_ctrlval(r, dc);
430
431	m.low = 0;
432	m.high = hw_res->num_closid;
433	hw_res->msr_update(d, &m, r);
434	return 0;
435}
436
437/**
438 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
439 * @num_rmid:	The size of the MBM counter array
440 * @hw_dom:	The domain that owns the allocated arrays
441 */
442static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
443{
444	size_t tsize;
445
446	if (is_mbm_total_enabled()) {
447		tsize = sizeof(*hw_dom->arch_mbm_total);
448		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
449		if (!hw_dom->arch_mbm_total)
450			return -ENOMEM;
451	}
452	if (is_mbm_local_enabled()) {
453		tsize = sizeof(*hw_dom->arch_mbm_local);
454		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
455		if (!hw_dom->arch_mbm_local) {
456			kfree(hw_dom->arch_mbm_total);
457			hw_dom->arch_mbm_total = NULL;
458			return -ENOMEM;
459		}
460	}
461
462	return 0;
463}
464
465/*
466 * domain_add_cpu - Add a cpu to a resource's domain list.
467 *
468 * If an existing domain in the resource r's domain list matches the cpu's
469 * resource id, add the cpu in the domain.
470 *
471 * Otherwise, a new domain is allocated and inserted into the right position
472 * in the domain list sorted by id in ascending order.
473 *
474 * The order in the domain list is visible to users when we print entries
475 * in the schemata file and schemata input is validated to have the same order
476 * as this list.
477 */
478static void domain_add_cpu(int cpu, struct rdt_resource *r)
479{
480	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
481	struct list_head *add_pos = NULL;
482	struct rdt_hw_domain *hw_dom;
483	struct rdt_domain *d;
484	int err;
485
486	d = rdt_find_domain(r, id, &add_pos);
487	if (IS_ERR(d)) {
488		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
489		return;
490	}
491
492	if (d) {
493		cpumask_set_cpu(cpu, &d->cpu_mask);
494		if (r->cache.arch_has_per_cpu_cfg)
495			rdt_domain_reconfigure_cdp(r);
496		return;
497	}
498
499	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
500	if (!hw_dom)
501		return;
502
503	d = &hw_dom->d_resctrl;
504	d->id = id;
505	cpumask_set_cpu(cpu, &d->cpu_mask);
506
507	rdt_domain_reconfigure_cdp(r);
508
509	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
510		domain_free(hw_dom);
511		return;
512	}
513
514	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
515		domain_free(hw_dom);
516		return;
517	}
518
519	list_add_tail(&d->list, add_pos);
520
521	err = resctrl_online_domain(r, d);
522	if (err) {
523		list_del(&d->list);
524		domain_free(hw_dom);
525	}
526}
527
528static void domain_remove_cpu(int cpu, struct rdt_resource *r)
529{
530	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
531	struct rdt_hw_domain *hw_dom;
532	struct rdt_domain *d;
533
534	d = rdt_find_domain(r, id, NULL);
535	if (IS_ERR_OR_NULL(d)) {
536		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
537		return;
538	}
539	hw_dom = resctrl_to_arch_dom(d);
540
541	cpumask_clear_cpu(cpu, &d->cpu_mask);
542	if (cpumask_empty(&d->cpu_mask)) {
543		resctrl_offline_domain(r, d);
544		list_del(&d->list);
545
546		/*
547		 * rdt_domain "d" is going to be freed below, so clear
548		 * its pointer from pseudo_lock_region struct.
549		 */
550		if (d->plr)
551			d->plr->d = NULL;
552		domain_free(hw_dom);
553
554		return;
555	}
556
557	if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
558		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
559			cancel_delayed_work(&d->mbm_over);
560			mbm_setup_overflow_handler(d, 0);
561		}
562		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
563		    has_busy_rmid(r, d)) {
564			cancel_delayed_work(&d->cqm_limbo);
565			cqm_setup_limbo_handler(d, 0);
566		}
567	}
568}
569
570static void clear_closid_rmid(int cpu)
571{
572	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
573
574	state->default_closid = 0;
575	state->default_rmid = 0;
576	state->cur_closid = 0;
577	state->cur_rmid = 0;
578	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
579}
580
581static int resctrl_online_cpu(unsigned int cpu)
582{
583	struct rdt_resource *r;
584
585	mutex_lock(&rdtgroup_mutex);
586	for_each_capable_rdt_resource(r)
587		domain_add_cpu(cpu, r);
588	/* The cpu is set in default rdtgroup after online. */
589	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
590	clear_closid_rmid(cpu);
591	mutex_unlock(&rdtgroup_mutex);
592
593	return 0;
594}
595
596static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
597{
598	struct rdtgroup *cr;
599
600	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
601		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
602			break;
603		}
604	}
605}
606
607static int resctrl_offline_cpu(unsigned int cpu)
608{
609	struct rdtgroup *rdtgrp;
610	struct rdt_resource *r;
611
612	mutex_lock(&rdtgroup_mutex);
613	for_each_capable_rdt_resource(r)
614		domain_remove_cpu(cpu, r);
615	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
616		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
617			clear_childcpus(rdtgrp, cpu);
618			break;
619		}
620	}
621	clear_closid_rmid(cpu);
622	mutex_unlock(&rdtgroup_mutex);
623
624	return 0;
625}
626
627/*
628 * Choose a width for the resource name and resource data based on the
629 * resource that has widest name and cbm.
630 */
631static __init void rdt_init_padding(void)
632{
633	struct rdt_resource *r;
634
635	for_each_alloc_capable_rdt_resource(r) {
636		if (r->data_width > max_data_width)
637			max_data_width = r->data_width;
638	}
639}
640
641enum {
642	RDT_FLAG_CMT,
643	RDT_FLAG_MBM_TOTAL,
644	RDT_FLAG_MBM_LOCAL,
645	RDT_FLAG_L3_CAT,
646	RDT_FLAG_L3_CDP,
647	RDT_FLAG_L2_CAT,
648	RDT_FLAG_L2_CDP,
649	RDT_FLAG_MBA,
650};
651
652#define RDT_OPT(idx, n, f)	\
653[idx] = {			\
654	.name = n,		\
655	.flag = f		\
656}
657
658struct rdt_options {
659	char	*name;
660	int	flag;
661	bool	force_off, force_on;
662};
663
664static struct rdt_options rdt_options[]  __initdata = {
665	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
666	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
667	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
668	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
669	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
670	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
671	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
672	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
673};
674#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
675
676static int __init set_rdt_options(char *str)
677{
678	struct rdt_options *o;
679	bool force_off;
680	char *tok;
681
682	if (*str == '=')
683		str++;
684	while ((tok = strsep(&str, ",")) != NULL) {
685		force_off = *tok == '!';
686		if (force_off)
687			tok++;
688		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
689			if (strcmp(tok, o->name) == 0) {
690				if (force_off)
691					o->force_off = true;
692				else
693					o->force_on = true;
694				break;
695			}
696		}
697	}
698	return 1;
699}
700__setup("rdt", set_rdt_options);
701
702static bool __init rdt_cpu_has(int flag)
703{
704	bool ret = boot_cpu_has(flag);
705	struct rdt_options *o;
706
707	if (!ret)
708		return ret;
709
710	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
711		if (flag == o->flag) {
712			if (o->force_off)
713				ret = false;
714			if (o->force_on)
715				ret = true;
716			break;
717		}
718	}
719	return ret;
720}
721
722static __init bool get_mem_config(void)
723{
724	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
725
726	if (!rdt_cpu_has(X86_FEATURE_MBA))
727		return false;
728
729	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
730		return __get_mem_config_intel(&hw_res->r_resctrl);
731	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
732		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
733
734	return false;
735}
736
737static __init bool get_rdt_alloc_resources(void)
738{
739	struct rdt_resource *r;
740	bool ret = false;
741
742	if (rdt_alloc_capable)
743		return true;
744
745	if (!boot_cpu_has(X86_FEATURE_RDT_A))
746		return false;
747
748	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
749		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
750		rdt_get_cache_alloc_cfg(1, r);
751		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
752			rdt_get_cdp_l3_config();
753		ret = true;
754	}
755	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
 756		/* CPUID 0x10.2 fields are same format as 0x10.1 */
757		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
758		rdt_get_cache_alloc_cfg(2, r);
759		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
760			rdt_get_cdp_l2_config();
761		ret = true;
762	}
763
764	if (get_mem_config())
765		ret = true;
766
767	return ret;
768}
769
770static __init bool get_rdt_mon_resources(void)
771{
772	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
773
774	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
775		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
776	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
777		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
778	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
779		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
780
781	if (!rdt_mon_features)
782		return false;
783
784	return !rdt_get_mon_l3_config(r);
785}
786
787static __init void __check_quirks_intel(void)
788{
789	switch (boot_cpu_data.x86_model) {
790	case INTEL_FAM6_HASWELL_X:
791		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
792			cache_alloc_hsw_probe();
793		break;
794	case INTEL_FAM6_SKYLAKE_X:
795		if (boot_cpu_data.x86_stepping <= 4)
796			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
797		else
798			set_rdt_options("!l3cat");
799		fallthrough;
800	case INTEL_FAM6_BROADWELL_X:
801		intel_rdt_mbm_apply_quirk();
802		break;
803	}
804}
805
806static __init void check_quirks(void)
807{
808	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
809		__check_quirks_intel();
810}
811
812static __init bool get_rdt_resources(void)
813{
814	rdt_alloc_capable = get_rdt_alloc_resources();
815	rdt_mon_capable = get_rdt_mon_resources();
816
817	return (rdt_mon_capable || rdt_alloc_capable);
818}
819
820static __init void rdt_init_res_defs_intel(void)
821{
822	struct rdt_hw_resource *hw_res;
823	struct rdt_resource *r;
824
825	for_each_rdt_resource(r) {
826		hw_res = resctrl_to_arch_res(r);
827
828		if (r->rid == RDT_RESOURCE_L3 ||
829		    r->rid == RDT_RESOURCE_L2) {
830			r->cache.arch_has_sparse_bitmaps = false;
831			r->cache.arch_has_per_cpu_cfg = false;
832			r->cache.min_cbm_bits = 1;
833		} else if (r->rid == RDT_RESOURCE_MBA) {
834			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
835			hw_res->msr_update = mba_wrmsr_intel;
836		}
837	}
838}
839
840static __init void rdt_init_res_defs_amd(void)
841{
842	struct rdt_hw_resource *hw_res;
843	struct rdt_resource *r;
844
845	for_each_rdt_resource(r) {
846		hw_res = resctrl_to_arch_res(r);
847
848		if (r->rid == RDT_RESOURCE_L3 ||
849		    r->rid == RDT_RESOURCE_L2) {
850			r->cache.arch_has_sparse_bitmaps = true;
851			r->cache.arch_has_per_cpu_cfg = true;
852			r->cache.min_cbm_bits = 0;
853		} else if (r->rid == RDT_RESOURCE_MBA) {
854			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
855			hw_res->msr_update = mba_wrmsr_amd;
856		}
857	}
858}
859
860static __init void rdt_init_res_defs(void)
861{
862	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
863		rdt_init_res_defs_intel();
864	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
865		rdt_init_res_defs_amd();
866}
867
868static enum cpuhp_state rdt_online;
869
870/* Runs once on the BSP during boot. */
871void resctrl_cpu_detect(struct cpuinfo_x86 *c)
872{
873	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
874		c->x86_cache_max_rmid  = -1;
875		c->x86_cache_occ_scale = -1;
876		c->x86_cache_mbm_width_offset = -1;
877		return;
878	}
879
880	/* will be overridden if occupancy monitoring exists */
881	c->x86_cache_max_rmid = cpuid_ebx(0xf);
882
883	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
884	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
885	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
886		u32 eax, ebx, ecx, edx;
887
888		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
889		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
890
891		c->x86_cache_max_rmid  = ecx;
892		c->x86_cache_occ_scale = ebx;
893		c->x86_cache_mbm_width_offset = eax & 0xff;
894
895		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
896			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
897	}
898}
899
900static int __init resctrl_late_init(void)
901{
902	struct rdt_resource *r;
903	int state, ret;
904
905	/*
906	 * Initialize functions(or definitions) that are different
907	 * between vendors here.
908	 */
909	rdt_init_res_defs();
910
911	check_quirks();
912
913	if (!get_rdt_resources())
914		return -ENODEV;
915
916	rdt_init_padding();
917
918	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
919				  "x86/resctrl/cat:online:",
920				  resctrl_online_cpu, resctrl_offline_cpu);
921	if (state < 0)
922		return state;
923
924	ret = rdtgroup_init();
925	if (ret) {
926		cpuhp_remove_state(state);
927		return ret;
928	}
929	rdt_online = state;
930
931	for_each_alloc_capable_rdt_resource(r)
932		pr_info("%s allocation detected\n", r->name);
933
934	for_each_mon_capable_rdt_resource(r)
935		pr_info("%s monitoring detected\n", r->name);
936
937	return 0;
938}
939
940late_initcall(resctrl_late_init);
941
942static void __exit resctrl_exit(void)
943{
944	cpuhp_remove_state(rdt_online);
945	rdtgroup_exit();
946}
947
948__exitcall(resctrl_exit);