/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MAX_MBA_BW	100u
#define MBA_IS_LINEAR	0x4

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)

struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid			= RDT_RESOURCE_L3,
		.name			= "L3",
		.domains		= domain_init(RDT_RESOURCE_L3),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid			= RDT_RESOURCE_L3DATA,
		.name			= "L3DATA",
		.domains		= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid			= RDT_RESOURCE_L3CODE,
		.name			= "L3CODE",
		.domains		= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base		= IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid			= RDT_RESOURCE_L2,
		.name			= "L2",
		.domains		= domain_init(RDT_RESOURCE_L2),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid			= RDT_RESOURCE_L2DATA,
		.name			= "L2DATA",
		.domains		= domain_init(RDT_RESOURCE_L2DATA),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid			= RDT_RESOURCE_L2CODE,
		.name			= "L2CODE",
		.domains		= domain_init(RDT_RESOURCE_L2CODE),
		.msr_base		= IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
		.cache_level		= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval		= parse_cbm,
		.format_str		= "%d=%0*x",
		.fflags			= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid			= RDT_RESOURCE_MBA,
		.name			= "MB",
		.domains		= domain_init(RDT_RESOURCE_MBA),
		.msr_base		= IA32_MBA_THRTL_BASE,
		.msr_update		= mba_wrmsr,
		.cache_level		= 3,
		.parse_ctrlval		= parse_bw,
		.format_str		= "%d=%*d",
		.fflags			= RFTYPE_RES_MB,
	},
};

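/*
 * Map a CLOSID to the index of its control MSR for resource @r. For CDP
 * resources the code/data halves share the parent cache's CBM MSR range,
 * so the index is scaled by cbm_idx_mult and shifted by cbm_idx_offset.
 */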
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
 *	Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
 *	Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
 *	Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSIDs is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If not all the bits stuck, the probe failed and CAT is unavailable */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}

/*
 * rdt_get_mb_table() - get a mapping between the bandwidth (b/w) percentage
 * values exposed to the user interface and the h/w understandable delay
 * values.
 *
 * The non-linear delay values have a granularity of powers of two and the
 * h/w does not guarantee a curve for configured delay values vs. actual
 * b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can express
 * the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

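/*
 * Read MBA properties from CPUID leaf 0x10, subleaf 3 and derive the
 * minimum bandwidth and granularity for the linear-delay case.
 */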
static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}
	r->data_width = 3;

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}

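/*
 * Read CAT properties for a cache resource from CPUID leaf 0x10,
 * subleaf @idx: number of CLOSIDs, capacity bitmask length and the
 * bits shared with other agents.
 */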
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}

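/*
 * With CDP the CLOSID space of the parent cache resource is split in half
 * between the CODE and DATA resources; everything else is inherited from
 * the parent.
 */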
static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}

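/* Return the id of the cache instance at @level that @cpu belongs to. */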
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

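/* Write the cache bitmasks for CLOSIDs in [m->low, m->high) for this domain. */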
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

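/* Runs on the target CPU: update the control MSRs of the domain it belongs to. */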
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(id);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

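/*
 * Allocate the per-domain array of control values, initialize every CLOSID
 * to the default (all cache bits set, or 100% memory b/w) and write the
 * values to the hardware MSRs.
 */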
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc;
	int i;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	d->ctrl_val = dc;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100
	 */
	for (i = 0; i < r->num_closid; i++, dc++)
		*dc = r->default_ctrl;

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);
	return 0;
}

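/*
 * Allocate the per-domain monitoring state: the limbo bitmap for RMIDs with
 * residual LLC occupancy and the MBM counter arrays, and set up the limbo
 * and overflow workers.
 */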
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d->ctrl_val);
		kfree(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

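/* Reset the cached PQR state and the IA32_PQR_ASSOC MSR to the default CLOSID/RMID. */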
static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

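/* Remove @cpu from whichever monitoring child group of @r it was assigned to. */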
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
			break;
		}
	}
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)

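/*
 * Parse the "rdt=" kernel command line parameter: a comma separated list of
 * option names from rdt_options[], each optionally prefixed with '!' to
 * force the feature off instead of on, e.g. "rdt=cmt,!l3cat".
 */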
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);

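/*
 * Like boot_cpu_has(), but applies any force off/on overrides from the
 * "rdt=" command line option for features the CPU reports.
 */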
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

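/*
 * Detect the allocation features (L3/L2 CAT, CDP, MBA) that are present and
 * not force-disabled, and fill in the corresponding rdt_resources_all[]
 * entries. Returns true if at least one allocation resource is usable.
 */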
static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}
	return ret;
}

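/*
 * Detect the monitoring events (LLC occupancy, total/local MBM) that are
 * present and not force-disabled, then configure L3 monitoring.
 */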
static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}

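/*
 * Model specific quirks: Haswell server has no CPUID enumeration for CAT and
 * is probed instead; on early Skylake server steppings CMT, MBM and L3 CAT
 * are forced off.
 */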
static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
	}
}

static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);