v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Monitoring code
  5 *
  6 * Copyright (C) 2017 Intel Corporation
  7 *
  8 * Author:
  9 *    Vikas Shivappa <vikas.shivappa@intel.com>
 10 *
 11 * This replaces the cqm.c based on perf but we reuse a lot of
 12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 13 *
 14 * More information about RDT can be found in the Intel (R) x86 Architecture
 15 * Software Developer Manual June 2016, volume 3, section 17.17.
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <asm/cpu_device_id.h>
 21#include "internal.h"
 22
 23struct rmid_entry {
 24	u32				rmid;
 25	int				busy;
 26	struct list_head		list;
 27};
 28
 29/**
 30 * @rmid_free_lru    A least recently used list of free RMIDs
 31 *     These RMIDs are guaranteed to have an occupancy less than the
 32 *     threshold occupancy
 33 */
 34static LIST_HEAD(rmid_free_lru);
 35
 36/**
 37 * @rmid_limbo_count     count of currently unused but (potentially)
 38 *     dirty RMIDs.
 39 *     This counts RMIDs that no one is currently using but that
 40 *     may have an occupancy value > resctrl_cqm_threshold. User can change
 41 *     the threshold occupancy value.
 42 */
 43static unsigned int rmid_limbo_count;
 44
 45/**
 46 * @rmid_entry - The entry in the limbo and free lists.
 47 */
 48static struct rmid_entry	*rmid_ptrs;
 49
 50/*
 51 * Global boolean for rdt_monitor which is true if any
 52 * resource monitoring is enabled.
 53 */
 54bool rdt_mon_capable;
 55
 56/*
 57 * Global to indicate which monitoring events are enabled.
 58 */
 59unsigned int rdt_mon_features;
 60
 61/*
 62 * This is the threshold cache occupancy at which we will consider an
 63 * RMID available for re-allocation.
 64 */
 65unsigned int resctrl_cqm_threshold;
 66
 67static inline struct rmid_entry *__rmid_entry(u32 rmid)
 68{
 69	struct rmid_entry *entry;
 70
 71	entry = &rmid_ptrs[rmid];
 72	WARN_ON(entry->rmid != rmid);
 73
 74	return entry;
 75}
 76
 77static u64 __rmid_read(u32 rmid, u32 eventid)
 78{
 79	u64 val;
 80
 81	/*
 82	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
 83	 * with a valid event code for supported resource type and the bits
 84	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
 85	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
 86	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
 87	 * are error bits.
 88	 */
 89	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
 90	rdmsrl(MSR_IA32_QM_CTR, val);
 91
 92	return val;
 93}
 94
 95static bool rmid_dirty(struct rmid_entry *entry)
 96{
 97	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 98
 99	return val >= resctrl_cqm_threshold;
100}
101
102/*
103 * Check the RMIDs that are marked as busy for this domain. If the
104 * reported LLC occupancy is below the threshold clear the busy bit and
105 * decrement the count. If the busy count gets to zero on an RMID, we
106 * free the RMID
107 */
108void __check_limbo(struct rdt_domain *d, bool force_free)
109{
110	struct rmid_entry *entry;
111	struct rdt_resource *r;
112	u32 crmid = 1, nrmid;
113
114	r = &rdt_resources_all[RDT_RESOURCE_L3];
115
116	/*
117	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
118	 * are marked as busy for occupancy < threshold. If the occupancy
119	 * is less than the threshold decrement the busy counter of the
120	 * RMID and move it to the free list when the counter reaches 0.
121	 */
122	for (;;) {
123		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
124		if (nrmid >= r->num_rmid)
125			break;
126
127		entry = __rmid_entry(nrmid);
128		if (force_free || !rmid_dirty(entry)) {
129			clear_bit(entry->rmid, d->rmid_busy_llc);
130			if (!--entry->busy) {
131				rmid_limbo_count--;
132				list_add_tail(&entry->list, &rmid_free_lru);
133			}
134		}
135		crmid = nrmid + 1;
136	}
137}
138
139bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
140{
141	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
142}
143
144/*
145 * As of now the RMIDs allocation is global.
146 * However we keep track of which packages the RMIDs
147 * are used to optimize the limbo list management.
148 */
149int alloc_rmid(void)
150{
151	struct rmid_entry *entry;
152
153	lockdep_assert_held(&rdtgroup_mutex);
154
155	if (list_empty(&rmid_free_lru))
156		return rmid_limbo_count ? -EBUSY : -ENOSPC;
157
158	entry = list_first_entry(&rmid_free_lru,
159				 struct rmid_entry, list);
160	list_del(&entry->list);
161
162	return entry->rmid;
163}
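A short usage sketch of the return convention above (hypothetical caller, not the actual resctrl call site), assuming rdtgroup_mutex is already held as alloc_rmid() requires:

	rmid = alloc_rmid();
	if (rmid < 0)
		return rmid;	/* -EBUSY: limbo RMIDs may drain, retry later;
				 * -ENOSPC: every RMID is genuinely in use */
	rdtgrp->mon.rmid = rmid;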
164
165static void add_rmid_to_limbo(struct rmid_entry *entry)
166{
167	struct rdt_resource *r;
168	struct rdt_domain *d;
169	int cpu;
170	u64 val;
171
172	r = &rdt_resources_all[RDT_RESOURCE_L3];
173
174	entry->busy = 0;
175	cpu = get_cpu();
176	list_for_each_entry(d, &r->domains, list) {
177		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
178			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
179			if (val <= resctrl_cqm_threshold)
180				continue;
181		}
182
183		/*
184		 * For the first limbo RMID in the domain,
185		 * set up the limbo worker.
186		 */
187		if (!has_busy_rmid(r, d))
188			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
189		set_bit(entry->rmid, d->rmid_busy_llc);
190		entry->busy++;
191	}
192	put_cpu();
193
194	if (entry->busy)
195		rmid_limbo_count++;
196	else
197		list_add_tail(&entry->list, &rmid_free_lru);
198}
199
200void free_rmid(u32 rmid)
201{
202	struct rmid_entry *entry;
203
204	if (!rmid)
205		return;
206
207	lockdep_assert_held(&rdtgroup_mutex);
208
209	entry = __rmid_entry(rmid);
210
211	if (is_llc_occupancy_enabled())
212		add_rmid_to_limbo(entry);
213	else
214		list_add_tail(&entry->list, &rmid_free_lru);
215}
216
217static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
218{
219	u64 shift = 64 - MBM_CNTR_WIDTH, chunks;
220
221	chunks = (cur_msr << shift) - (prev_msr << shift);
222	return chunks >>= shift;
223}
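The double shift in mbm_overflow_count() discards the bits above MBM_CNTR_WIDTH so that unsigned arithmetic still produces the right delta when the hardware counter wraps between two reads. A worked example, assuming a 24-bit counter width purely for illustration:

	/*
	 * prev_msr = 0xfffff0, cur_msr = 0x000010 (the counter wrapped once)
	 *
	 *   shift  = 64 - 24 = 40
	 *   chunks = (0x000010 << 40) - (0xfffff0 << 40)	(mod 2^64)
	 *   chunks >> 40 = 0x20
	 *
	 * which matches the true delta across the wrap:
	 * (0x1000000 - 0xfffff0) + 0x10 = 0x20 chunks.
	 */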
224
225static int __mon_event_count(u32 rmid, struct rmid_read *rr)
226{
227	struct mbm_state *m;
228	u64 chunks, tval;
229
230	tval = __rmid_read(rmid, rr->evtid);
231	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
232		rr->val = tval;
233		return -EINVAL;
234	}
235	switch (rr->evtid) {
236	case QOS_L3_OCCUP_EVENT_ID:
237		rr->val += tval;
238		return 0;
239	case QOS_L3_MBM_TOTAL_EVENT_ID:
240		m = &rr->d->mbm_total[rmid];
241		break;
242	case QOS_L3_MBM_LOCAL_EVENT_ID:
243		m = &rr->d->mbm_local[rmid];
244		break;
245	default:
246		/*
247		 * Code would never reach here because
248		 * an invalid event id would fail the __rmid_read.
249		 */
250		return -EINVAL;
251	}
252
253	if (rr->first) {
254		memset(m, 0, sizeof(struct mbm_state));
255		m->prev_bw_msr = m->prev_msr = tval;
256		return 0;
257	}
258
259	chunks = mbm_overflow_count(m->prev_msr, tval);
260	m->chunks += chunks;
261	m->prev_msr = tval;
262
263	rr->val += m->chunks;
264	return 0;
265}
266
267/*
268 * Supporting function to calculate the memory bandwidth
269 * and delta bandwidth in MBps.
270 */
271static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
272{
273	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
274	struct mbm_state *m = &rr->d->mbm_local[rmid];
275	u64 tval, cur_bw, chunks;
276
277	tval = __rmid_read(rmid, rr->evtid);
278	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
279		return;
280
281	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
282	m->chunks_bw += chunks;
283	m->chunks = m->chunks_bw;
284	cur_bw = (chunks * r->mon_scale) >> 20;
285
286	if (m->delta_comp)
287		m->delta_bw = abs(cur_bw - m->prev_bw);
288	m->delta_comp = false;
289	m->prev_bw = cur_bw;
290	m->prev_bw_msr = tval;
291}
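Because mbm_bw_count() is driven once per second by the MBM overflow worker, the chunk delta of the last interval, scaled to bytes and shifted down by 20 bits, is directly a figure in MBps. A worked example, using 65536 bytes per chunk as an illustrative mon_scale value:

	/*
	 * chunks seen in the last second = 8192, r->mon_scale = 65536
	 *
	 *   cur_bw = (8192 * 65536) >> 20 = 536870912 >> 20 = 512 MBps
	 */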
292
293/*
294 * This is called via IPI to read the CQM/MBM counters
295 * on a domain.
296 */
297void mon_event_count(void *info)
298{
299	struct rdtgroup *rdtgrp, *entry;
300	struct rmid_read *rr = info;
301	struct list_head *head;
302
303	rdtgrp = rr->rgrp;
304
305	if (__mon_event_count(rdtgrp->mon.rmid, rr))
306		return;
307
308	/*
309	 * For Ctrl groups read data from child monitor groups.
310	 */
311	head = &rdtgrp->mon.crdtgrp_list;
312
313	if (rdtgrp->type == RDTCTRL_GROUP) {
314		list_for_each_entry(entry, head, mon.crdtgrp_list) {
315			if (__mon_event_count(entry->mon.rmid, rr))
316				return;
317		}
318	}
319}
320
321/*
322 * Feedback loop for MBA software controller (mba_sc)
323 *
324 * mba_sc is a feedback loop where we periodically read MBM counters and
325 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
326 * that:
327 *
 328 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
329 *
330 * This uses the MBM counters to measure the bandwidth and MBA throttle
331 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
332 * fact that resctrl rdtgroups have both monitoring and control.
333 *
334 * The frequency of the checks is 1s and we just tag along the MBM overflow
335 * timer. Having 1s interval makes the calculation of bandwidth simpler.
336 *
337 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 338 * be a need to increase the bandwidth to avoid unnecessarily restricting
339 * the L2 <-> L3 traffic.
340 *
 341 * Since MBA controls the L2 external bandwidth whereas MBM measures the
342 * L3 external bandwidth the following sequence could lead to such a
343 * situation.
344 *
345 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
346 * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
347 * after some time rdtgroup has mostly L2 <-> L3 traffic.
348 *
349 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
350 * throttle MSRs already have low percentage values.  To avoid
351 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
352 */
353static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
354{
355	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
356	struct mbm_state *pmbm_data, *cmbm_data;
357	u32 cur_bw, delta_bw, user_bw;
358	struct rdt_resource *r_mba;
359	struct rdt_domain *dom_mba;
360	struct list_head *head;
361	struct rdtgroup *entry;
362
363	if (!is_mbm_local_enabled())
364		return;
365
366	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
367	closid = rgrp->closid;
368	rmid = rgrp->mon.rmid;
369	pmbm_data = &dom_mbm->mbm_local[rmid];
370
371	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
372	if (!dom_mba) {
373		pr_warn_once("Failure to get domain for MBA update\n");
374		return;
375	}
376
377	cur_bw = pmbm_data->prev_bw;
378	user_bw = dom_mba->mbps_val[closid];
379	delta_bw = pmbm_data->delta_bw;
380	cur_msr_val = dom_mba->ctrl_val[closid];
381
382	/*
383	 * For Ctrl groups read data from child monitor groups.
384	 */
385	head = &rgrp->mon.crdtgrp_list;
386	list_for_each_entry(entry, head, mon.crdtgrp_list) {
387		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
388		cur_bw += cmbm_data->prev_bw;
389		delta_bw += cmbm_data->delta_bw;
390	}
391
392	/*
393	 * Scale up/down the bandwidth linearly for the ctrl group.  The
394	 * bandwidth step is the bandwidth granularity specified by the
395	 * hardware.
396	 *
397	 * The delta_bw is used when increasing the bandwidth so that we
398	 * don't alternately increase and decrease the control values
399	 * continuously.
400	 *
401	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
402	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
403	 * switching between 90 and 110 continuously if we only check
404	 * cur_bw < user_bw.
405	 */
406	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
407		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
408	} else if (cur_msr_val < MAX_MBA_BW &&
409		   (user_bw > (cur_bw + delta_bw))) {
410		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
411	} else {
412		return;
413	}
414
415	cur_msr = r_mba->msr_base + closid;
416	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
417	dom_mba->ctrl_val[closid] = new_msr_val;
418
419	/*
420	 * Delta values are updated dynamically package wise for each
421	 * rdtgrp every time the throttle MSR changes value.
422	 *
423	 * This is because (1) the increase in bandwidth is not perfectly
424	 * linear and only "approximately" linear even when the hardware
425	 * says it is linear. (2) Also, since MBA is a core specific
426	 * mechanism, the delta values vary based on number of cores used
427	 * by the rdtgrp.
428	 */
429	pmbm_data->delta_comp = true;
430	list_for_each_entry(entry, head, mon.crdtgrp_list) {
431		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
432		cmbm_data->delta_comp = true;
433	}
434}
435
436static void mbm_update(struct rdt_domain *d, int rmid)
437{
438	struct rmid_read rr;
439
440	rr.first = false;
441	rr.d = d;
442
443	/*
444	 * This is protected from concurrent reads from user
445	 * as both the user and we hold the global mutex.
446	 */
447	if (is_mbm_total_enabled()) {
448		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
449		__mon_event_count(rmid, &rr);
450	}
451	if (is_mbm_local_enabled()) {
452		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
453
454		/*
455		 * Call the MBA software controller only for the
456		 * control groups and when user has enabled
457		 * the software controller explicitly.
458		 */
459		if (!is_mba_sc(NULL))
460			__mon_event_count(rmid, &rr);
461		else
462			mbm_bw_count(rmid, &rr);
463	}
464}
465
466/*
467 * Handler to scan the limbo list and move the RMIDs whose
468 * occupancy < threshold_occupancy to the free list.
469 */
470void cqm_handle_limbo(struct work_struct *work)
471{
472	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
473	int cpu = smp_processor_id();
474	struct rdt_resource *r;
475	struct rdt_domain *d;
476
477	mutex_lock(&rdtgroup_mutex);
478
479	r = &rdt_resources_all[RDT_RESOURCE_L3];
480	d = get_domain_from_cpu(cpu, r);
481
482	if (!d) {
483		pr_warn_once("Failure to get domain for limbo worker\n");
484		goto out_unlock;
485	}
486
487	__check_limbo(d, false);
488
489	if (has_busy_rmid(r, d))
490		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
491
492out_unlock:
493	mutex_unlock(&rdtgroup_mutex);
494}
495
496void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
497{
498	unsigned long delay = msecs_to_jiffies(delay_ms);
499	int cpu;
500
501	cpu = cpumask_any(&dom->cpu_mask);
502	dom->cqm_work_cpu = cpu;
503
504	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
505}
506
507void mbm_handle_overflow(struct work_struct *work)
508{
509	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
510	struct rdtgroup *prgrp, *crgrp;
511	int cpu = smp_processor_id();
512	struct list_head *head;
513	struct rdt_domain *d;
514
515	mutex_lock(&rdtgroup_mutex);
516
517	if (!static_branch_likely(&rdt_enable_key))
518		goto out_unlock;
519
520	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
521	if (!d)
522		goto out_unlock;
523
524	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
525		mbm_update(d, prgrp->mon.rmid);
526
527		head = &prgrp->mon.crdtgrp_list;
528		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
529			mbm_update(d, crgrp->mon.rmid);
530
531		if (is_mba_sc(NULL))
532			update_mba_bw(prgrp, d);
533	}
534
535	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
536
537out_unlock:
538	mutex_unlock(&rdtgroup_mutex);
539}
540
541void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
542{
543	unsigned long delay = msecs_to_jiffies(delay_ms);
544	int cpu;
545
546	if (!static_branch_likely(&rdt_enable_key))
547		return;
548	cpu = cpumask_any(&dom->cpu_mask);
549	dom->mbm_work_cpu = cpu;
550	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
551}
552
553static int dom_data_init(struct rdt_resource *r)
554{
555	struct rmid_entry *entry = NULL;
556	int i, nr_rmids;
557
558	nr_rmids = r->num_rmid;
559	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
560	if (!rmid_ptrs)
561		return -ENOMEM;
562
563	for (i = 0; i < nr_rmids; i++) {
564		entry = &rmid_ptrs[i];
565		INIT_LIST_HEAD(&entry->list);
566
567		entry->rmid = i;
568		list_add_tail(&entry->list, &rmid_free_lru);
569	}
570
571	/*
572	 * RMID 0 is special and is always allocated. It's used for all
573	 * tasks that are not monitored.
574	 */
575	entry = __rmid_entry(0);
576	list_del(&entry->list);
577
578	return 0;
579}
580
581static struct mon_evt llc_occupancy_event = {
582	.name		= "llc_occupancy",
583	.evtid		= QOS_L3_OCCUP_EVENT_ID,
584};
585
586static struct mon_evt mbm_total_event = {
587	.name		= "mbm_total_bytes",
588	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
589};
590
591static struct mon_evt mbm_local_event = {
592	.name		= "mbm_local_bytes",
593	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
594};
595
596/*
597 * Initialize the event list for the resource.
598 *
599 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
600 * because as per the SDM the total and local memory bandwidth
601 * are enumerated as part of L3 monitoring.
602 */
603static void l3_mon_evt_init(struct rdt_resource *r)
604{
605	INIT_LIST_HEAD(&r->evt_list);
606
607	if (is_llc_occupancy_enabled())
608		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
609	if (is_mbm_total_enabled())
610		list_add_tail(&mbm_total_event.list, &r->evt_list);
611	if (is_mbm_local_enabled())
612		list_add_tail(&mbm_local_event.list, &r->evt_list);
613}
614
615int rdt_get_mon_l3_config(struct rdt_resource *r)
616{
617	unsigned int cl_size = boot_cpu_data.x86_cache_size;
618	int ret;
619
620	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
621	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
622
623	/*
624	 * A reasonable upper limit on the max threshold is the number
625	 * of lines tagged per RMID if all RMIDs have the same number of
626	 * lines tagged in the LLC.
627	 *
628	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
629	 */
630	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
631
632	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
633	resctrl_cqm_threshold /= r->mon_scale;
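	/*
	 * Worked example for the comment above (a mon_scale of 65536 bytes
	 * per chunk is illustrative only):
	 *
	 *   35MB LLC -> cl_size = 35840 KB
	 *   56 RMIDs -> 35840 * 1024 / 56 = 655360 bytes (640 KB) per RMID,
	 *   i.e. ~1.8% of the LLC; dividing by a mon_scale of 65536 then
	 *   yields a threshold of 10 hardware chunks.
	 */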
634
635	ret = dom_data_init(r);
636	if (ret)
637		return ret;
638
639	l3_mon_evt_init(r);
640
641	r->mon_capable = true;
642	r->mon_enabled = true;
643
644	return 0;
645}
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Monitoring code
  5 *
  6 * Copyright (C) 2017 Intel Corporation
  7 *
  8 * Author:
  9 *    Vikas Shivappa <vikas.shivappa@intel.com>
 10 *
 11 * This replaces the cqm.c based on perf but we reuse a lot of
 12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 13 *
 14 * More information about RDT can be found in the Intel (R) x86 Architecture
 15 * Software Developer Manual June 2016, volume 3, section 17.17.
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/sizes.h>
 20#include <linux/slab.h>
 21
 22#include <asm/cpu_device_id.h>
 23#include <asm/resctrl.h>
 24
 25#include "internal.h"
 26
 27struct rmid_entry {
 28	u32				rmid;
 29	int				busy;
 30	struct list_head		list;
 31};
 32
 33/*
 34 * @rmid_free_lru - A least recently used list of free RMIDs
 35 *     These RMIDs are guaranteed to have an occupancy less than the
 36 *     threshold occupancy
 37 */
 38static LIST_HEAD(rmid_free_lru);
 39
 40/*
 41 * @rmid_limbo_count - count of currently unused but (potentially)
 42 *     dirty RMIDs.
 43 *     This counts RMIDs that no one is currently using but that
 44 *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
 45 *     change the threshold occupancy value.
 46 */
 47static unsigned int rmid_limbo_count;
 48
 49/*
 50 * @rmid_entry - The entry in the limbo and free lists.
 51 */
 52static struct rmid_entry	*rmid_ptrs;
 53
 54/*
 55 * Global boolean for rdt_monitor which is true if any
 56 * resource monitoring is enabled.
 57 */
 58bool rdt_mon_capable;
 59
 60/*
 61 * Global to indicate which monitoring events are enabled.
 62 */
 63unsigned int rdt_mon_features;
 64
 65/*
 66 * This is the threshold cache occupancy in bytes at which we will consider an
 67 * RMID available for re-allocation.
 68 */
 69unsigned int resctrl_rmid_realloc_threshold;
 70
 71/*
 72 * This is the maximum value for the reallocation threshold, in bytes.
 73 */
 74unsigned int resctrl_rmid_realloc_limit;
 75
 76#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
 77
 78/*
 79 * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
 80 * If rmid > rmid threshold, MBM total and local values should be multiplied
 81 * by the correction factor.
 82 *
 83 * The original table is modified for better code:
 84 *
 85 * 1. The threshold 0 is changed to rmid count - 1 so don't do correction
 86 *    for the case.
 87 * 2. MBM total and local correction table indexed by core counter which is
 88 *    equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27.
 89 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 90 *    to calculate corrected value by shifting:
 91 *    corrected_value = (original_value * correction_factor) >> 20
 92 */
 93static const struct mbm_correction_factor_table {
 94	u32 rmidthreshold;
 95	u64 cf;
 96} mbm_cf_table[] __initconst = {
 97	{7,	CF(1.000000)},
 98	{15,	CF(1.000000)},
 99	{15,	CF(0.969650)},
100	{31,	CF(1.000000)},
101	{31,	CF(1.066667)},
102	{31,	CF(0.969650)},
103	{47,	CF(1.142857)},
104	{63,	CF(1.000000)},
105	{63,	CF(1.185115)},
106	{63,	CF(1.066553)},
107	{79,	CF(1.454545)},
108	{95,	CF(1.000000)},
109	{95,	CF(1.230769)},
110	{95,	CF(1.142857)},
111	{95,	CF(1.066667)},
112	{127,	CF(1.000000)},
113	{127,	CF(1.254863)},
114	{127,	CF(1.185255)},
115	{151,	CF(1.000000)},
116	{127,	CF(1.066667)},
117	{167,	CF(1.000000)},
118	{159,	CF(1.454334)},
119	{183,	CF(1.000000)},
120	{127,	CF(0.969744)},
121	{191,	CF(1.280246)},
122	{191,	CF(1.230921)},
123	{215,	CF(1.000000)},
124	{191,	CF(1.143118)},
125};
126
127static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
128static u64 mbm_cf __read_mostly;
129
130static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
131{
132	/* Correct MBM value. */
133	if (rmid > mbm_cf_rmidthreshold)
134		val = (val * mbm_cf) >> 20;
135
136	return val;
137}
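The correction factors are pre-scaled by 2^20 (see the CF() macro above), so applying one is a single integer multiply and shift. For example:

	/*
	 * With mbm_cf = CF(1.185115), a raw count of 1000 chunks becomes
	 * (1000 * CF(1.185115)) >> 20, i.e. roughly 1000 * 1.185115 ~= 1185,
	 * using integer arithmetic only.
	 */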
138
139static inline struct rmid_entry *__rmid_entry(u32 rmid)
140{
141	struct rmid_entry *entry;
142
143	entry = &rmid_ptrs[rmid];
144	WARN_ON(entry->rmid != rmid);
145
146	return entry;
147}
148
149static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
150{
151	u64 msr_val;
152
153	/*
154	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
155	 * with a valid event code for supported resource type and the bits
156	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
157	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
158	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
159	 * are error bits.
160	 */
161	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
162	rdmsrl(MSR_IA32_QM_CTR, msr_val);
163
164	if (msr_val & RMID_VAL_ERROR)
165		return -EIO;
166	if (msr_val & RMID_VAL_UNAVAIL)
167		return -EINVAL;
168
169	*val = msr_val;
170	return 0;
171}
172
173static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
174						 u32 rmid,
175						 enum resctrl_event_id eventid)
176{
177	switch (eventid) {
178	case QOS_L3_OCCUP_EVENT_ID:
179		return NULL;
180	case QOS_L3_MBM_TOTAL_EVENT_ID:
181		return &hw_dom->arch_mbm_total[rmid];
182	case QOS_L3_MBM_LOCAL_EVENT_ID:
183		return &hw_dom->arch_mbm_local[rmid];
184	}
185
186	/* Never expect to get here */
187	WARN_ON_ONCE(1);
188
189	return NULL;
190}
191
192void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
193			     u32 rmid, enum resctrl_event_id eventid)
194{
195	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
196	struct arch_mbm_state *am;
197
198	am = get_arch_mbm_state(hw_dom, rmid, eventid);
199	if (am) {
200		memset(am, 0, sizeof(*am));
201
202		/* Record any initial, non-zero count value. */
203		__rmid_read(rmid, eventid, &am->prev_msr);
204	}
205}
206
207/*
208 * Assumes that hardware counters are also reset and thus that there is
209 * no need to record initial non-zero counts.
210 */
211void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
212{
213	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
214
215	if (is_mbm_total_enabled())
216		memset(hw_dom->arch_mbm_total, 0,
217		       sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);
218
219	if (is_mbm_local_enabled())
220		memset(hw_dom->arch_mbm_local, 0,
221		       sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
222}
223
224static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
225{
226	u64 shift = 64 - width, chunks;
227
228	chunks = (cur_msr << shift) - (prev_msr << shift);
229	return chunks >> shift;
230}
231
232int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
233			   u32 rmid, enum resctrl_event_id eventid, u64 *val)
234{
235	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
236	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
237	struct arch_mbm_state *am;
238	u64 msr_val, chunks;
239	int ret;
240
241	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
242		return -EINVAL;
243
244	ret = __rmid_read(rmid, eventid, &msr_val);
245	if (ret)
246		return ret;
247
248	am = get_arch_mbm_state(hw_dom, rmid, eventid);
249	if (am) {
250		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
251						 hw_res->mbm_width);
252		chunks = get_corrected_mbm_count(rmid, am->chunks);
253		am->prev_msr = msr_val;
254	} else {
255		chunks = msr_val;
256	}
257
258	*val = chunks * hw_res->mon_scale;
259
260	return 0;
261}
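Unlike the v5.4 code, this helper hands back counts already converted to bytes: the accumulated (and, for MBM events, correction-adjusted) chunk count is multiplied by mon_scale. A small example, with 65536 bytes per chunk as an illustrative mon_scale:

	/*
	 * am->chunks = 160  ->  *val = 160 * 65536 = 10485760 bytes (10 MiB)
	 */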
262
263/*
264 * Check the RMIDs that are marked as busy for this domain. If the
265 * reported LLC occupancy is below the threshold clear the busy bit and
266 * decrement the count. If the busy count gets to zero on an RMID, we
267 * free the RMID
268 */
269void __check_limbo(struct rdt_domain *d, bool force_free)
270{
271	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
272	struct rmid_entry *entry;
273	u32 crmid = 1, nrmid;
274	bool rmid_dirty;
275	u64 val = 0;
276
277	/*
278	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
279	 * are marked as busy for occupancy < threshold. If the occupancy
280	 * is less than the threshold decrement the busy counter of the
281	 * RMID and move it to the free list when the counter reaches 0.
282	 */
283	for (;;) {
284		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
285		if (nrmid >= r->num_rmid)
286			break;
287
288		entry = __rmid_entry(nrmid);
289
290		if (resctrl_arch_rmid_read(r, d, entry->rmid,
291					   QOS_L3_OCCUP_EVENT_ID, &val)) {
292			rmid_dirty = true;
293		} else {
294			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
295		}
296
297		if (force_free || !rmid_dirty) {
298			clear_bit(entry->rmid, d->rmid_busy_llc);
299			if (!--entry->busy) {
300				rmid_limbo_count--;
301				list_add_tail(&entry->list, &rmid_free_lru);
302			}
303		}
304		crmid = nrmid + 1;
305	}
306}
307
308bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
309{
310	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
311}
312
313/*
314 * As of now the RMIDs allocation is global.
315 * However we keep track of which packages the RMIDs
316 * are used on to optimize the limbo list management.
317 */
318int alloc_rmid(void)
319{
320	struct rmid_entry *entry;
321
322	lockdep_assert_held(&rdtgroup_mutex);
323
324	if (list_empty(&rmid_free_lru))
325		return rmid_limbo_count ? -EBUSY : -ENOSPC;
326
327	entry = list_first_entry(&rmid_free_lru,
328				 struct rmid_entry, list);
329	list_del(&entry->list);
330
331	return entry->rmid;
332}
333
334static void add_rmid_to_limbo(struct rmid_entry *entry)
335{
336	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
337	struct rdt_domain *d;
338	int cpu, err;
339	u64 val = 0;
340
341	entry->busy = 0;
342	cpu = get_cpu();
343	list_for_each_entry(d, &r->domains, list) {
344		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
345			err = resctrl_arch_rmid_read(r, d, entry->rmid,
346						     QOS_L3_OCCUP_EVENT_ID,
347						     &val);
348			if (err || val <= resctrl_rmid_realloc_threshold)
349				continue;
350		}
351
352		/*
353		 * For the first limbo RMID in the domain,
354		 * set up the limbo worker.
355		 */
356		if (!has_busy_rmid(r, d))
357			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
358		set_bit(entry->rmid, d->rmid_busy_llc);
359		entry->busy++;
360	}
361	put_cpu();
362
363	if (entry->busy)
364		rmid_limbo_count++;
365	else
366		list_add_tail(&entry->list, &rmid_free_lru);
367}
368
369void free_rmid(u32 rmid)
370{
371	struct rmid_entry *entry;
372
373	if (!rmid)
374		return;
375
376	lockdep_assert_held(&rdtgroup_mutex);
377
378	entry = __rmid_entry(rmid);
379
380	if (is_llc_occupancy_enabled())
381		add_rmid_to_limbo(entry);
382	else
383		list_add_tail(&entry->list, &rmid_free_lru);
384}
385
386static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid,
387				       enum resctrl_event_id evtid)
388{
389	switch (evtid) {
390	case QOS_L3_MBM_TOTAL_EVENT_ID:
391		return &d->mbm_total[rmid];
392	case QOS_L3_MBM_LOCAL_EVENT_ID:
393		return &d->mbm_local[rmid];
394	default:
395		return NULL;
396	}
397}
398
399static int __mon_event_count(u32 rmid, struct rmid_read *rr)
400{
401	struct mbm_state *m;
402	u64 tval = 0;
403
404	if (rr->first) {
405		resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);
406		m = get_mbm_state(rr->d, rmid, rr->evtid);
407		if (m)
408			memset(m, 0, sizeof(struct mbm_state));
409		return 0;
410	}
411
412	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval);
413	if (rr->err)
414		return rr->err;
415
416	rr->val += tval;
417
418	return 0;
419}
420
421/*
422 * mbm_bw_count() - Update bw count from values previously read by
423 *		    __mon_event_count().
424 * @rmid:	The rmid used to identify the cached mbm_state.
425 * @rr:		The struct rmid_read populated by __mon_event_count().
426 *
427 * Supporting function to calculate the memory bandwidth
428 * and delta bandwidth in MBps. The chunks value previously read by
429 * __mon_event_count() is compared with the chunks value from the previous
430 * invocation. This must be called once per second to maintain values in MBps.
431 */
432static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
433{
434	struct mbm_state *m = &rr->d->mbm_local[rmid];
435	u64 cur_bw, bytes, cur_bytes;
436
437	cur_bytes = rr->val;
438	bytes = cur_bytes - m->prev_bw_bytes;
439	m->prev_bw_bytes = cur_bytes;
440
441	cur_bw = bytes / SZ_1M;
442
443	if (m->delta_comp)
444		m->delta_bw = abs(cur_bw - m->prev_bw);
445	m->delta_comp = false;
446	m->prev_bw = cur_bw;
447}
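Here rr->val is already a byte count (see resctrl_arch_rmid_read() above), so bandwidth is simply the byte delta since the previous call divided by SZ_1M; with the once-per-second overflow worker driving it, the result is in MBps. A worked example:

	/*
	 * m->prev_bw_bytes = 3145728000, rr->val = 3670016000
	 *
	 *   bytes  = 3670016000 - 3145728000 = 524288000
	 *   cur_bw = 524288000 / SZ_1M = 500 MBps
	 */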
448
449/*
450 * This is called via IPI to read the CQM/MBM counters
451 * on a domain.
452 */
453void mon_event_count(void *info)
454{
455	struct rdtgroup *rdtgrp, *entry;
456	struct rmid_read *rr = info;
457	struct list_head *head;
458	int ret;
459
460	rdtgrp = rr->rgrp;
461
462	ret = __mon_event_count(rdtgrp->mon.rmid, rr);
463
464	/*
465	 * For Ctrl groups read data from child monitor groups and
466	 * add them together. Count events which are read successfully.
467	 * Discard the rmid_read's reporting errors.
468	 */
469	head = &rdtgrp->mon.crdtgrp_list;
470
471	if (rdtgrp->type == RDTCTRL_GROUP) {
472		list_for_each_entry(entry, head, mon.crdtgrp_list) {
473			if (__mon_event_count(entry->mon.rmid, rr) == 0)
474				ret = 0;
475		}
476	}
477
478	/*
479	 * __mon_event_count() calls for newly created monitor groups may
480	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
481	 * Discard error if any of the monitor event reads succeeded.
482	 */
483	if (ret == 0)
484		rr->err = 0;
485}
486
487/*
488 * Feedback loop for MBA software controller (mba_sc)
489 *
490 * mba_sc is a feedback loop where we periodically read MBM counters and
491 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
492 * that:
493 *
494 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
495 *
496 * This uses the MBM counters to measure the bandwidth and MBA throttle
497 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
498 * fact that resctrl rdtgroups have both monitoring and control.
499 *
500 * The frequency of the checks is 1s and we just tag along the MBM overflow
501 * timer. Having 1s interval makes the calculation of bandwidth simpler.
502 *
503 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
504 * be a need to increase the bandwidth to avoid unnecessarily restricting
505 * the L2 <-> L3 traffic.
506 *
507 * Since MBA controls the L2 external bandwidth whereas MBM measures the
508 * L3 external bandwidth the following sequence could lead to such a
509 * situation.
510 *
511 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
512 * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
513 * after some time rdtgroup has mostly L2 <-> L3 traffic.
514 *
515 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
516 * throttle MSRs already have low percentage values.  To avoid
517 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
518 */
519static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
520{
521	u32 closid, rmid, cur_msr_val, new_msr_val;
522	struct mbm_state *pmbm_data, *cmbm_data;
523	u32 cur_bw, delta_bw, user_bw;
524	struct rdt_resource *r_mba;
525	struct rdt_domain *dom_mba;
526	struct list_head *head;
527	struct rdtgroup *entry;
528
529	if (!is_mbm_local_enabled())
530		return;
531
532	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
533
534	closid = rgrp->closid;
535	rmid = rgrp->mon.rmid;
536	pmbm_data = &dom_mbm->mbm_local[rmid];
537
538	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
539	if (!dom_mba) {
540		pr_warn_once("Failure to get domain for MBA update\n");
541		return;
542	}
543
544	cur_bw = pmbm_data->prev_bw;
545	user_bw = dom_mba->mbps_val[closid];
546	delta_bw = pmbm_data->delta_bw;
547
548	/* MBA resource doesn't support CDP */
549	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
550
551	/*
552	 * For Ctrl groups read data from child monitor groups.
553	 */
554	head = &rgrp->mon.crdtgrp_list;
555	list_for_each_entry(entry, head, mon.crdtgrp_list) {
556		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
557		cur_bw += cmbm_data->prev_bw;
558		delta_bw += cmbm_data->delta_bw;
559	}
560
561	/*
562	 * Scale up/down the bandwidth linearly for the ctrl group.  The
563	 * bandwidth step is the bandwidth granularity specified by the
564	 * hardware.
565	 *
566	 * The delta_bw is used when increasing the bandwidth so that we
567	 * don't alternately increase and decrease the control values
568	 * continuously.
569	 *
570	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
571	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
572	 * switching between 90 and 110 continuously if we only check
573	 * cur_bw < user_bw.
574	 */
575	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
576		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
577	} else if (cur_msr_val < MAX_MBA_BW &&
578		   (user_bw > (cur_bw + delta_bw))) {
579		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
580	} else {
581		return;
582	}
583
584	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
585
586	/*
587	 * Delta values are updated dynamically package wise for each
588	 * rdtgrp every time the throttle MSR changes value.
589	 *
590	 * This is because (1) the increase in bandwidth is not perfectly
591	 * linear and only "approximately" linear even when the hardware
592	 * says it is linear. (2) Also, since MBA is a core specific
593	 * mechanism, the delta values vary based on number of cores used
594	 * by the rdtgrp.
595	 */
596	pmbm_data->delta_comp = true;
597	list_for_each_entry(entry, head, mon.crdtgrp_list) {
598		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
599		cmbm_data->delta_comp = true;
600	}
601}
602
603static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
604{
605	struct rmid_read rr;
606
607	rr.first = false;
608	rr.r = r;
609	rr.d = d;
610
611	/*
612	 * This is protected from concurrent reads from user
613	 * as both the user and we hold the global mutex.
614	 */
615	if (is_mbm_total_enabled()) {
616		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
617		rr.val = 0;
618		__mon_event_count(rmid, &rr);
619	}
620	if (is_mbm_local_enabled()) {
621		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
622		rr.val = 0;
623		__mon_event_count(rmid, &rr);
624
625		/*
626		 * Call the MBA software controller only for the
627		 * control groups and when user has enabled
628		 * the software controller explicitly.
629		 */
630		if (is_mba_sc(NULL))
631			mbm_bw_count(rmid, &rr);
632	}
633}
634
635/*
636 * Handler to scan the limbo list and move the RMIDs whose
637 * occupancy < threshold_occupancy to the free list.
638 */
639void cqm_handle_limbo(struct work_struct *work)
640{
641	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
642	int cpu = smp_processor_id();
643	struct rdt_resource *r;
644	struct rdt_domain *d;
645
646	mutex_lock(&rdtgroup_mutex);
647
648	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
649	d = container_of(work, struct rdt_domain, cqm_limbo.work);
650
651	__check_limbo(d, false);
652
653	if (has_busy_rmid(r, d))
654		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
655
656	mutex_unlock(&rdtgroup_mutex);
657}
658
659void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
660{
661	unsigned long delay = msecs_to_jiffies(delay_ms);
662	int cpu;
663
664	cpu = cpumask_any(&dom->cpu_mask);
665	dom->cqm_work_cpu = cpu;
666
667	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
668}
669
670void mbm_handle_overflow(struct work_struct *work)
671{
672	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
673	struct rdtgroup *prgrp, *crgrp;
674	int cpu = smp_processor_id();
675	struct list_head *head;
676	struct rdt_resource *r;
677	struct rdt_domain *d;
678
679	mutex_lock(&rdtgroup_mutex);
680
681	if (!static_branch_likely(&rdt_mon_enable_key))
682		goto out_unlock;
683
684	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
685	d = container_of(work, struct rdt_domain, mbm_over.work);
686
687	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
688		mbm_update(r, d, prgrp->mon.rmid);
689
690		head = &prgrp->mon.crdtgrp_list;
691		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
692			mbm_update(r, d, crgrp->mon.rmid);
693
694		if (is_mba_sc(NULL))
695			update_mba_bw(prgrp, d);
696	}
697
698	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
699
700out_unlock:
701	mutex_unlock(&rdtgroup_mutex);
702}
703
704void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
705{
706	unsigned long delay = msecs_to_jiffies(delay_ms);
707	int cpu;
708
709	if (!static_branch_likely(&rdt_mon_enable_key))
710		return;
711	cpu = cpumask_any(&dom->cpu_mask);
712	dom->mbm_work_cpu = cpu;
713	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
714}
715
716static int dom_data_init(struct rdt_resource *r)
717{
718	struct rmid_entry *entry = NULL;
719	int i, nr_rmids;
720
721	nr_rmids = r->num_rmid;
722	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
723	if (!rmid_ptrs)
724		return -ENOMEM;
725
726	for (i = 0; i < nr_rmids; i++) {
727		entry = &rmid_ptrs[i];
728		INIT_LIST_HEAD(&entry->list);
729
730		entry->rmid = i;
731		list_add_tail(&entry->list, &rmid_free_lru);
732	}
733
734	/*
735	 * RMID 0 is special and is always allocated. It's used for all
736	 * tasks that are not monitored.
737	 */
738	entry = __rmid_entry(0);
739	list_del(&entry->list);
740
741	return 0;
742}
743
744static struct mon_evt llc_occupancy_event = {
745	.name		= "llc_occupancy",
746	.evtid		= QOS_L3_OCCUP_EVENT_ID,
747};
748
749static struct mon_evt mbm_total_event = {
750	.name		= "mbm_total_bytes",
751	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
752};
753
754static struct mon_evt mbm_local_event = {
755	.name		= "mbm_local_bytes",
756	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
757};
758
759/*
760 * Initialize the event list for the resource.
761 *
762 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
763 * because as per the SDM the total and local memory bandwidth
764 * are enumerated as part of L3 monitoring.
765 */
766static void l3_mon_evt_init(struct rdt_resource *r)
767{
768	INIT_LIST_HEAD(&r->evt_list);
769
770	if (is_llc_occupancy_enabled())
771		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
772	if (is_mbm_total_enabled())
773		list_add_tail(&mbm_total_event.list, &r->evt_list);
774	if (is_mbm_local_enabled())
775		list_add_tail(&mbm_local_event.list, &r->evt_list);
776}
777
778int __init rdt_get_mon_l3_config(struct rdt_resource *r)
779{
780	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
781	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
782	unsigned int threshold;
783	int ret;
784
785	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
786	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
787	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
788	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
789
790	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
791		hw_res->mbm_width += mbm_offset;
792	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
793		pr_warn("Ignoring impossible MBM counter offset\n");
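	/*
	 * Example: assuming the architectural base width of 24 bits, a
	 * CPUID-reported offset of 8 makes mbm_width 32, and
	 * mbm_overflow_count() then treats the MBM counters as 32-bit values.
	 */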
794
795	/*
796	 * A reasonable upper limit on the max threshold is the number
797	 * of lines tagged per RMID if all RMIDs have the same number of
798	 * lines tagged in the LLC.
799	 *
800	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
801	 */
802	threshold = resctrl_rmid_realloc_limit / r->num_rmid;
803
804	/*
805	 * Because num_rmid may not be a power of two, round the value
806	 * to the nearest multiple of hw_res->mon_scale so it matches a
807	 * value the hardware will measure. mon_scale may not be a power of 2.
808	 */
809	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);
810
811	ret = dom_data_init(r);
812	if (ret)
813		return ret;
814
815	if (rdt_cpu_has(X86_FEATURE_BMEC)) {
816		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
817			mbm_total_event.configurable = true;
818			mbm_config_rftype_init("mbm_total_bytes_config");
819		}
820		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
821			mbm_local_event.configurable = true;
822			mbm_config_rftype_init("mbm_local_bytes_config");
823		}
824	}
825
826	l3_mon_evt_init(r);
827
828	r->mon_capable = true;
829
830	return 0;
831}
832
833void __init intel_rdt_mbm_apply_quirk(void)
834{
835	int cf_index;
836
837	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
838	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
839		pr_info("No MBM correction factor available\n");
840		return;
841	}
842
843	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
844	mbm_cf = mbm_cf_table[cf_index].cf;
845}
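A worked example of the index calculation, using the table above: a part reporting x86_cache_max_rmid = 191 gives cf_index = 192 / 8 - 1 = 23, which selects the {127, CF(0.969744)} row, so get_corrected_mbm_count() will scale the MBM counts of any RMID above 127 by roughly 0.969744.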