v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Monitoring code
  5 *
  6 * Copyright (C) 2017 Intel Corporation
  7 *
  8 * Author:
  9 *    Vikas Shivappa <vikas.shivappa@intel.com>
 10 *
 11 * This replaces the perf-based cqm.c, but we reuse a lot of
 12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 13 *
 14 * More information about RDT can be found in the Intel (R) x86 Architecture
 15 * Software Developer Manual June 2016, volume 3, section 17.17.
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <asm/cpu_device_id.h>
 21#include "internal.h"
 22
 23struct rmid_entry {
 24	u32				rmid;
 25	int				busy;
 26	struct list_head		list;
 27};
 28
 29/**
 30 * @rmid_free_lru    A least recently used list of free RMIDs
 31 *     These RMIDs are guaranteed to have an occupancy less than the
 32 *     threshold occupancy
 33 */
 34static LIST_HEAD(rmid_free_lru);
 35
 36/**
 37 * @rmid_limbo_count     count of currently unused but (potentially)
 38 *     dirty RMIDs.
 39 *     This counts RMIDs that no one is currently using but that
 40 *     may have an occupancy value > intel_cqm_threshold. User can change
 41 *     the threshold occupancy value.
 42 */
 43static unsigned int rmid_limbo_count;
 44
 45/**
 46 * @rmid_entry - The entry in the limbo and free lists.
 47 */
 48static struct rmid_entry	*rmid_ptrs;
 49
 50/*
 51 * Global boolean for rdt_monitor which is true if any
 52 * resource monitoring is enabled.
 53 */
 54bool rdt_mon_capable;
 55
 56/*
 57 * Global to indicate which monitoring events are enabled.
 58 */
 59unsigned int rdt_mon_features;
 60
 61/*
 62 * This is the threshold cache occupancy at which we will consider an
 63 * RMID available for re-allocation.
 64 */
 65unsigned int resctrl_cqm_threshold;
 66
 67static inline struct rmid_entry *__rmid_entry(u32 rmid)
 68{
 69	struct rmid_entry *entry;
 70
 71	entry = &rmid_ptrs[rmid];
 72	WARN_ON(entry->rmid != rmid);
 73
 74	return entry;
 75}
 76
 77static u64 __rmid_read(u32 rmid, u32 eventid)
 78{
 79	u64 val;
 80
 81	/*
 82	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
 83	 * with a valid event code for supported resource type and the bits
 84	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
 85	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
 86	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
 87	 * are error bits.
 88	 */
 89	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
 90	rdmsrl(MSR_IA32_QM_CTR, val);
 91
 92	return val;
 93}
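/*
 * Editorial sketch (not part of the original file): given the IA32_QM_CTR
 * layout described above, a caller could split the 62-bit data field from
 * the fault flags roughly like this. The helper name is made up; the
 * RMID_VAL_* masks are the ones used elsewhere in this file.
 */
static inline bool example_qm_ctr_valid(u64 val, u64 *data)
{
	if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		return false;			/* bit 63 or bit 62 set: no usable count */

	*data = val & GENMASK_ULL(61, 0);	/* bits 61:0 carry the monitored data */
	return true;
}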
 94
 95static bool rmid_dirty(struct rmid_entry *entry)
 96{
 97	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 98
 99	return val >= resctrl_cqm_threshold;
100}
101
102/*
103 * Check the RMIDs that are marked as busy for this domain. If the
104 * reported LLC occupancy is below the threshold clear the busy bit and
105 * decrement the count. If the busy count gets to zero on an RMID, we
106 * free the RMID
107 */
108void __check_limbo(struct rdt_domain *d, bool force_free)
109{
110	struct rmid_entry *entry;
111	struct rdt_resource *r;
112	u32 crmid = 1, nrmid;
113
114	r = &rdt_resources_all[RDT_RESOURCE_L3];
115
116	/*
117	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
118	 * are marked as busy for occupancy < threshold. If the occupancy
119	 * is less than the threshold decrement the busy counter of the
120	 * RMID and move it to the free list when the counter reaches 0.
121	 */
122	for (;;) {
123		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
124		if (nrmid >= r->num_rmid)
125			break;
126
127		entry = __rmid_entry(nrmid);
128		if (force_free || !rmid_dirty(entry)) {
129			clear_bit(entry->rmid, d->rmid_busy_llc);
130			if (!--entry->busy) {
131				rmid_limbo_count--;
132				list_add_tail(&entry->list, &rmid_free_lru);
133			}
134		}
135		crmid = nrmid + 1;
136	}
137}
138
139bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
140{
141	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
142}
143
144/*
145 * As of now the RMID allocation is global.
146 * However, we keep track of which packages the RMIDs
147 * are used on to optimize the limbo list management.
148 */
149int alloc_rmid(void)
150{
151	struct rmid_entry *entry;
152
153	lockdep_assert_held(&rdtgroup_mutex);
154
155	if (list_empty(&rmid_free_lru))
156		return rmid_limbo_count ? -EBUSY : -ENOSPC;
157
158	entry = list_first_entry(&rmid_free_lru,
159				 struct rmid_entry, list);
160	list_del(&entry->list);
161
162	return entry->rmid;
163}
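/*
 * Editorial caller sketch (assumed, not from the original file): allocation
 * must be done with rdtgroup_mutex held, and -EBUSY (limbo RMIDs may drain
 * and become free later) is worth treating differently from -ENOSPC
 * (nothing will become free without an explicit free_rmid()).
 */
static int example_assign_rmid(struct rdtgroup *rdtgrp)
{
	int rmid;

	lockdep_assert_held(&rdtgroup_mutex);

	rmid = alloc_rmid();
	if (rmid < 0)
		return rmid;	/* -EBUSY: worth retrying later, -ENOSPC: not */

	rdtgrp->mon.rmid = rmid;
	return 0;
}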
164
165static void add_rmid_to_limbo(struct rmid_entry *entry)
166{
167	struct rdt_resource *r;
168	struct rdt_domain *d;
169	int cpu;
170	u64 val;
171
172	r = &rdt_resources_all[RDT_RESOURCE_L3];
173
174	entry->busy = 0;
175	cpu = get_cpu();
176	list_for_each_entry(d, &r->domains, list) {
177		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
178			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
179			if (val <= resctrl_cqm_threshold)
180				continue;
181		}
182
183		/*
184		 * For the first limbo RMID in the domain,
185		 * set up the limbo worker.
186		 */
187		if (!has_busy_rmid(r, d))
188			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
189		set_bit(entry->rmid, d->rmid_busy_llc);
190		entry->busy++;
191	}
192	put_cpu();
193
194	if (entry->busy)
195		rmid_limbo_count++;
196	else
197		list_add_tail(&entry->list, &rmid_free_lru);
198}
199
200void free_rmid(u32 rmid)
201{
202	struct rmid_entry *entry;
203
204	if (!rmid)
205		return;
206
207	lockdep_assert_held(&rdtgroup_mutex);
208
209	entry = __rmid_entry(rmid);
210
211	if (is_llc_occupancy_enabled())
212		add_rmid_to_limbo(entry);
213	else
214		list_add_tail(&entry->list, &rmid_free_lru);
215}
216
217static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
218{
219	u64 shift = 64 - MBM_CNTR_WIDTH, chunks;
220
221	chunks = (cur_msr << shift) - (prev_msr << shift);
222	return chunks >>= shift;
223}
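/*
 * Worked example (editorial, assuming the 24-bit MBM_CNTR_WIDTH used by this
 * version): shift = 64 - 24 = 40. With prev_msr = 0xFFFFFE and
 * cur_msr = 0x000002, (cur_msr << 40) - (prev_msr << 40) wraps modulo 2^64
 * to 4 << 40, and shifting back down yields 4 chunks, the correct delta
 * across the hardware counter roll-over.
 */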
224
225static int __mon_event_count(u32 rmid, struct rmid_read *rr)
226{
227	struct mbm_state *m;
228	u64 chunks, tval;
229
230	tval = __rmid_read(rmid, rr->evtid);
231	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
232		rr->val = tval;
233		return -EINVAL;
234	}
235	switch (rr->evtid) {
236	case QOS_L3_OCCUP_EVENT_ID:
237		rr->val += tval;
238		return 0;
239	case QOS_L3_MBM_TOTAL_EVENT_ID:
240		m = &rr->d->mbm_total[rmid];
241		break;
242	case QOS_L3_MBM_LOCAL_EVENT_ID:
243		m = &rr->d->mbm_local[rmid];
244		break;
245	default:
246		/*
247		 * Code would never reach here because
248		 * an invalid event id would fail the __rmid_read.
249		 */
250		return -EINVAL;
251	}
252
253	if (rr->first) {
254		memset(m, 0, sizeof(struct mbm_state));
255		m->prev_bw_msr = m->prev_msr = tval;
256		return 0;
257	}
258
259	chunks = mbm_overflow_count(m->prev_msr, tval);
260	m->chunks += chunks;
261	m->prev_msr = tval;
262
263	rr->val += m->chunks;
264	return 0;
265}
266
267/*
268 * Supporting function to calculate the memory bandwidth
269 * and delta bandwidth in MBps.
270 */
271static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
272{
273	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
274	struct mbm_state *m = &rr->d->mbm_local[rmid];
275	u64 tval, cur_bw, chunks;
276
277	tval = __rmid_read(rmid, rr->evtid);
278	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
279		return;
280
281	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
282	m->chunks_bw += chunks;
283	m->chunks = m->chunks_bw;
284	cur_bw = (chunks * r->mon_scale) >> 20;
285
286	if (m->delta_comp)
287		m->delta_bw = abs(cur_bw - m->prev_bw);
288	m->delta_comp = false;
289	m->prev_bw = cur_bw;
290	m->prev_bw_msr = tval;
291}
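/*
 * Worked example (editorial, assuming a mon_scale of 64 bytes per chunk):
 * 163840 chunks accumulated over the 1s MBM overflow interval correspond to
 * 163840 * 64 = 10485760 bytes; the >> 20 converts bytes to MiB, so cur_bw
 * comes out as 10 MBps for that window.
 */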
292
293/*
294 * This is called via IPI to read the CQM/MBM counters
295 * on a domain.
296 */
297void mon_event_count(void *info)
298{
299	struct rdtgroup *rdtgrp, *entry;
300	struct rmid_read *rr = info;
301	struct list_head *head;
302
303	rdtgrp = rr->rgrp;
304
305	if (__mon_event_count(rdtgrp->mon.rmid, rr))
306		return;
307
308	/*
309	 * For Ctrl groups read data from child monitor groups.
310	 */
311	head = &rdtgrp->mon.crdtgrp_list;
312
313	if (rdtgrp->type == RDTCTRL_GROUP) {
314		list_for_each_entry(entry, head, mon.crdtgrp_list) {
315			if (__mon_event_count(entry->mon.rmid, rr))
316				return;
317		}
318	}
319}
320
321/*
322 * Feedback loop for MBA software controller (mba_sc)
323 *
324 * mba_sc is a feedback loop where we periodically read MBM counters and
325 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
326 * that:
327 *
328 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
329 *
330 * This uses the MBM counters to measure the bandwidth and MBA throttle
331 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
332 * fact that resctrl rdtgroups have both monitoring and control.
333 *
334 * The frequency of the checks is 1s and we just tag along the MBM overflow
335 * timer. Having 1s interval makes the calculation of bandwidth simpler.
336 *
337 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
338 * be a need to increase the bandwidth to avoid unnecessarily restricting
339 * the L2 <-> L3 traffic.
340 *
341 * Since MBA controls the L2 external bandwidth whereas MBM measures the
342 * L3 external bandwidth the following sequence could lead to such a
343 * situation.
344 *
345 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
346 * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
347 * after some time rdtgroup has mostly L2 <-> L3 traffic.
348 *
349 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
350 * throttle MSRs already have low percentage values.  To avoid
351 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
352 */
353static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
354{
355	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
356	struct mbm_state *pmbm_data, *cmbm_data;
357	u32 cur_bw, delta_bw, user_bw;
358	struct rdt_resource *r_mba;
359	struct rdt_domain *dom_mba;
360	struct list_head *head;
361	struct rdtgroup *entry;
362
363	if (!is_mbm_local_enabled())
364		return;
365
366	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
367	closid = rgrp->closid;
368	rmid = rgrp->mon.rmid;
369	pmbm_data = &dom_mbm->mbm_local[rmid];
370
371	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
372	if (!dom_mba) {
373		pr_warn_once("Failure to get domain for MBA update\n");
374		return;
375	}
376
377	cur_bw = pmbm_data->prev_bw;
378	user_bw = dom_mba->mbps_val[closid];
379	delta_bw = pmbm_data->delta_bw;
380	cur_msr_val = dom_mba->ctrl_val[closid];
381
382	/*
383	 * For Ctrl groups read data from child monitor groups.
384	 */
385	head = &rgrp->mon.crdtgrp_list;
386	list_for_each_entry(entry, head, mon.crdtgrp_list) {
387		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
388		cur_bw += cmbm_data->prev_bw;
389		delta_bw += cmbm_data->delta_bw;
390	}
391
392	/*
393	 * Scale up/down the bandwidth linearly for the ctrl group.  The
394	 * bandwidth step is the bandwidth granularity specified by the
395	 * hardware.
396	 *
397	 * The delta_bw is used when increasing the bandwidth so that we
398	 * don't alternately increase and decrease the control values
399	 * continuously.
400	 *
401	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
402	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
403	 * switching between 90 and 110 continuously if we only check
404	 * cur_bw < user_bw.
405	 */
406	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
407		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
408	} else if (cur_msr_val < MAX_MBA_BW &&
409		   (user_bw > (cur_bw + delta_bw))) {
410		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
411	} else {
412		return;
413	}
414
415	cur_msr = r_mba->msr_base + closid;
416	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
417	dom_mba->ctrl_val[closid] = new_msr_val;
418
419	/*
420	 * Delta values are updated dynamically, package-wise, for each
421	 * rdtgrp every time the throttle MSR changes value.
422	 *
423	 * This is because (1) the increase in bandwidth is not perfectly
424	 * linear and only "approximately" linear even when the hardware
425	 * says it is linear. (2) Also, since MBA is a core-specific
426	 * mechanism, the delta values vary based on the number of cores used
427	 * by the rdtgrp.
428	 */
429	pmbm_data->delta_comp = true;
430	list_for_each_entry(entry, head, mon.crdtgrp_list) {
431		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
432		cmbm_data->delta_comp = true;
433	}
434}
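/*
 * Worked example (editorial) of the decision above: with user_bw = 100 MBps,
 * cur_bw = 90 MBps, delta_bw = 25 MBps and bw_gran = 10, neither branch is
 * taken (90 < 100, but 100 < 90 + 25), so the throttle value is left alone.
 * That is exactly the oscillation guard the earlier comment describes.
 */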
435
436static void mbm_update(struct rdt_domain *d, int rmid)
437{
438	struct rmid_read rr;
439
440	rr.first = false;
441	rr.d = d;
442
443	/*
444	 * This is protected from concurrent reads from user
445	 * as both the user and we hold the global mutex.
446	 */
447	if (is_mbm_total_enabled()) {
448		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
449		__mon_event_count(rmid, &rr);
450	}
451	if (is_mbm_local_enabled()) {
452		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
453
454		/*
455		 * Call the MBA software controller only for the
456		 * control groups and when the user has enabled
457		 * the software controller explicitly.
458		 */
459		if (!is_mba_sc(NULL))
460			__mon_event_count(rmid, &rr);
461		else
462			mbm_bw_count(rmid, &rr);
463	}
464}
465
466/*
467 * Handler to scan the limbo list and move RMIDs whose
468 * occupancy < threshold_occupancy to the free list.
469 */
470void cqm_handle_limbo(struct work_struct *work)
471{
472	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
473	int cpu = smp_processor_id();
474	struct rdt_resource *r;
475	struct rdt_domain *d;
476
477	mutex_lock(&rdtgroup_mutex);
478
479	r = &rdt_resources_all[RDT_RESOURCE_L3];
480	d = get_domain_from_cpu(cpu, r);
481
482	if (!d) {
483		pr_warn_once("Failure to get domain for limbo worker\n");
484		goto out_unlock;
485	}
486
487	__check_limbo(d, false);
488
489	if (has_busy_rmid(r, d))
490		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
491
492out_unlock:
493	mutex_unlock(&rdtgroup_mutex);
494}
495
496void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
497{
498	unsigned long delay = msecs_to_jiffies(delay_ms);
499	int cpu;
500
501	cpu = cpumask_any(&dom->cpu_mask);
502	dom->cqm_work_cpu = cpu;
503
504	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
505}
506
507void mbm_handle_overflow(struct work_struct *work)
508{
509	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
510	struct rdtgroup *prgrp, *crgrp;
511	int cpu = smp_processor_id();
512	struct list_head *head;
513	struct rdt_domain *d;
514
515	mutex_lock(&rdtgroup_mutex);
516
517	if (!static_branch_likely(&rdt_enable_key))
518		goto out_unlock;
519
520	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
521	if (!d)
522		goto out_unlock;
523
524	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
525		mbm_update(d, prgrp->mon.rmid);
526
527		head = &prgrp->mon.crdtgrp_list;
528		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
529			mbm_update(d, crgrp->mon.rmid);
530
531		if (is_mba_sc(NULL))
532			update_mba_bw(prgrp, d);
533	}
534
535	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
536
537out_unlock:
538	mutex_unlock(&rdtgroup_mutex);
539}
540
541void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
542{
543	unsigned long delay = msecs_to_jiffies(delay_ms);
544	int cpu;
545
546	if (!static_branch_likely(&rdt_enable_key))
547		return;
548	cpu = cpumask_any(&dom->cpu_mask);
549	dom->mbm_work_cpu = cpu;
550	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
551}
552
553static int dom_data_init(struct rdt_resource *r)
554{
555	struct rmid_entry *entry = NULL;
556	int i, nr_rmids;
557
558	nr_rmids = r->num_rmid;
559	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
560	if (!rmid_ptrs)
561		return -ENOMEM;
562
563	for (i = 0; i < nr_rmids; i++) {
564		entry = &rmid_ptrs[i];
565		INIT_LIST_HEAD(&entry->list);
566
567		entry->rmid = i;
568		list_add_tail(&entry->list, &rmid_free_lru);
569	}
570
571	/*
572	 * RMID 0 is special and is always allocated. It's used for all
573	 * tasks that are not monitored.
574	 */
575	entry = __rmid_entry(0);
576	list_del(&entry->list);
577
578	return 0;
579}
580
581static struct mon_evt llc_occupancy_event = {
582	.name		= "llc_occupancy",
583	.evtid		= QOS_L3_OCCUP_EVENT_ID,
584};
585
586static struct mon_evt mbm_total_event = {
587	.name		= "mbm_total_bytes",
588	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
589};
590
591static struct mon_evt mbm_local_event = {
592	.name		= "mbm_local_bytes",
593	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
594};
595
596/*
597 * Initialize the event list for the resource.
598 *
599 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
600 * because as per the SDM the total and local memory bandwidth
601 * are enumerated as part of L3 monitoring.
602 */
603static void l3_mon_evt_init(struct rdt_resource *r)
604{
605	INIT_LIST_HEAD(&r->evt_list);
606
607	if (is_llc_occupancy_enabled())
608		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
609	if (is_mbm_total_enabled())
610		list_add_tail(&mbm_total_event.list, &r->evt_list);
611	if (is_mbm_local_enabled())
612		list_add_tail(&mbm_local_event.list, &r->evt_list);
613}
614
615int rdt_get_mon_l3_config(struct rdt_resource *r)
616{
617	unsigned int cl_size = boot_cpu_data.x86_cache_size;
618	int ret;
619
620	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
621	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
622
623	/*
624	 * A reasonable upper limit on the max threshold is the number
625	 * of lines tagged per RMID if all RMIDs have the same number of
626	 * lines tagged in the LLC.
627	 *
628	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
629	 */
630	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
631
632	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
633	resctrl_cqm_threshold /= r->mon_scale;
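	/*
	 * Worked example (editorial): with x86_cache_size = 35840 (a 35MB LLC,
	 * reported in KB) and 56 RMIDs, cl_size * 1024 / num_rmid is
	 * 36700160 / 56 = 655360 bytes, about 1.8% of the cache. Dividing by
	 * an assumed occupancy scale of 64 bytes then gives a threshold of
	 * 10240 hardware units.
	 */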
634
635	ret = dom_data_init(r);
636	if (ret)
637		return ret;
638
639	l3_mon_evt_init(r);
640
641	r->mon_capable = true;
642	r->mon_enabled = true;
643
644	return 0;
645}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Monitoring code
  5 *
  6 * Copyright (C) 2017 Intel Corporation
  7 *
  8 * Author:
  9 *    Vikas Shivappa <vikas.shivappa@intel.com>
 10 *
 11 * This replaces the perf-based cqm.c, but we reuse a lot of
 12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 13 *
 14 * More information about RDT can be found in the Intel (R) x86 Architecture
 15 * Software Developer Manual June 2016, volume 3, section 17.17.
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <asm/cpu_device_id.h>
 21#include "internal.h"
 22
 23struct rmid_entry {
 24	u32				rmid;
 25	int				busy;
 26	struct list_head		list;
 27};
 28
 29/**
 30 * @rmid_free_lru    A least recently used list of free RMIDs
 31 *     These RMIDs are guaranteed to have an occupancy less than the
 32 *     threshold occupancy
 33 */
 34static LIST_HEAD(rmid_free_lru);
 35
 36/**
 37 * @rmid_limbo_count     count of currently unused but (potentially)
 38 *     dirty RMIDs.
 39 *     This counts RMIDs that no one is currently using but that
 40 *     may have an occupancy value > intel_cqm_threshold. User can change
 41 *     the threshold occupancy value.
 42 */
 43static unsigned int rmid_limbo_count;
 44
 45/**
 46 * @rmid_entry - The entry in the limbo and free lists.
 47 */
 48static struct rmid_entry	*rmid_ptrs;
 49
 50/*
 51 * Global boolean for rdt_monitor which is true if any
 52 * resource monitoring is enabled.
 53 */
 54bool rdt_mon_capable;
 55
 56/*
 57 * Global to indicate which monitoring events are enabled.
 58 */
 59unsigned int rdt_mon_features;
 60
 61/*
 62 * This is the threshold cache occupancy at which we will consider an
 63 * RMID available for re-allocation.
 64 */
 65unsigned int resctrl_cqm_threshold;
 66
 67#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
 68
 69/*
 70 * The correction factor table is documented in Documentation/x86/resctrl.rst.
 71 * If rmid > rmid threshold, MBM total and local values should be multiplied
 72 * by the correction factor.
 73 *
 74 * The original table is modified to simplify the code:
 75 *
 76 * 1. The threshold 0 is changed to rmid count - 1 so that no correction
 77 *    is done for that case.
 78 * 2. The MBM total and local correction table is indexed by a core count
 79 *    equal to (x86_cache_max_rmid + 1) / 8 - 1, which ranges from 0 to 27.
 80 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 81 *    to calculate corrected value by shifting:
 82 *    corrected_value = (original_value * correction_factor) >> 20
 83 */
 84static const struct mbm_correction_factor_table {
 85	u32 rmidthreshold;
 86	u64 cf;
 87} mbm_cf_table[] __initconst = {
 88	{7,	CF(1.000000)},
 89	{15,	CF(1.000000)},
 90	{15,	CF(0.969650)},
 91	{31,	CF(1.000000)},
 92	{31,	CF(1.066667)},
 93	{31,	CF(0.969650)},
 94	{47,	CF(1.142857)},
 95	{63,	CF(1.000000)},
 96	{63,	CF(1.185115)},
 97	{63,	CF(1.066553)},
 98	{79,	CF(1.454545)},
 99	{95,	CF(1.000000)},
100	{95,	CF(1.230769)},
101	{95,	CF(1.142857)},
102	{95,	CF(1.066667)},
103	{127,	CF(1.000000)},
104	{127,	CF(1.254863)},
105	{127,	CF(1.185255)},
106	{151,	CF(1.000000)},
107	{127,	CF(1.066667)},
108	{167,	CF(1.000000)},
109	{159,	CF(1.454334)},
110	{183,	CF(1.000000)},
111	{127,	CF(0.969744)},
112	{191,	CF(1.280246)},
113	{191,	CF(1.230921)},
114	{215,	CF(1.000000)},
115	{191,	CF(1.143118)},
116};
117
118static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
119static u64 mbm_cf __read_mostly;
120
121static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
122{
123	/* Correct MBM value. */
124	if (rmid > mbm_cf_rmidthreshold)
125		val = (val * mbm_cf) >> 20;
126
127	return val;
128}
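/*
 * Worked example (editorial): CF(1.142857) stores 1198372, the factor in
 * 20-bit fixed point. For an RMID above the threshold, a raw count of
 * 1000000 becomes (1000000 * 1198372) >> 20 = 1142856, i.e. the original
 * value scaled by roughly 1.142857.
 */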
129
130static inline struct rmid_entry *__rmid_entry(u32 rmid)
131{
132	struct rmid_entry *entry;
133
134	entry = &rmid_ptrs[rmid];
135	WARN_ON(entry->rmid != rmid);
136
137	return entry;
138}
139
140static u64 __rmid_read(u32 rmid, u32 eventid)
141{
142	u64 val;
143
144	/*
145	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
146	 * with a valid event code for supported resource type and the bits
147	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
148	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
149	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
150	 * are error bits.
151	 */
152	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
153	rdmsrl(MSR_IA32_QM_CTR, val);
154
155	return val;
156}
157
158static bool rmid_dirty(struct rmid_entry *entry)
159{
160	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
161
162	return val >= resctrl_cqm_threshold;
163}
164
165/*
166 * Check the RMIDs that are marked as busy for this domain. If the
167 * reported LLC occupancy is below the threshold clear the busy bit and
168 * decrement the count. If the busy count gets to zero on an RMID, we
169 * free the RMID
170 */
171void __check_limbo(struct rdt_domain *d, bool force_free)
172{
173	struct rmid_entry *entry;
174	struct rdt_resource *r;
175	u32 crmid = 1, nrmid;
176
177	r = &rdt_resources_all[RDT_RESOURCE_L3];
178
179	/*
180	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
181	 * are marked as busy for occupancy < threshold. If the occupancy
182	 * is less than the threshold decrement the busy counter of the
183	 * RMID and move it to the free list when the counter reaches 0.
184	 */
185	for (;;) {
186		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
187		if (nrmid >= r->num_rmid)
188			break;
189
190		entry = __rmid_entry(nrmid);
191		if (force_free || !rmid_dirty(entry)) {
192			clear_bit(entry->rmid, d->rmid_busy_llc);
193			if (!--entry->busy) {
194				rmid_limbo_count--;
195				list_add_tail(&entry->list, &rmid_free_lru);
196			}
197		}
198		crmid = nrmid + 1;
199	}
200}
201
202bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
203{
204	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
205}
206
207/*
208 * As of now the RMID allocation is global.
209 * However, we keep track of which packages the RMIDs
210 * are used on to optimize the limbo list management.
211 */
212int alloc_rmid(void)
213{
214	struct rmid_entry *entry;
215
216	lockdep_assert_held(&rdtgroup_mutex);
217
218	if (list_empty(&rmid_free_lru))
219		return rmid_limbo_count ? -EBUSY : -ENOSPC;
220
221	entry = list_first_entry(&rmid_free_lru,
222				 struct rmid_entry, list);
223	list_del(&entry->list);
224
225	return entry->rmid;
226}
227
228static void add_rmid_to_limbo(struct rmid_entry *entry)
229{
230	struct rdt_resource *r;
231	struct rdt_domain *d;
232	int cpu;
233	u64 val;
234
235	r = &rdt_resources_all[RDT_RESOURCE_L3];
236
237	entry->busy = 0;
238	cpu = get_cpu();
239	list_for_each_entry(d, &r->domains, list) {
240		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
241			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
242			if (val <= resctrl_cqm_threshold)
243				continue;
244		}
245
246		/*
247		 * For the first limbo RMID in the domain,
248		 * set up the limbo worker.
249		 */
250		if (!has_busy_rmid(r, d))
251			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
252		set_bit(entry->rmid, d->rmid_busy_llc);
253		entry->busy++;
254	}
255	put_cpu();
256
257	if (entry->busy)
258		rmid_limbo_count++;
259	else
260		list_add_tail(&entry->list, &rmid_free_lru);
261}
262
263void free_rmid(u32 rmid)
264{
265	struct rmid_entry *entry;
266
267	if (!rmid)
268		return;
269
270	lockdep_assert_held(&rdtgroup_mutex);
271
272	entry = __rmid_entry(rmid);
273
274	if (is_llc_occupancy_enabled())
275		add_rmid_to_limbo(entry);
276	else
277		list_add_tail(&entry->list, &rmid_free_lru);
278}
279
280static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
281{
282	u64 shift = 64 - width, chunks;
283
284	chunks = (cur_msr << shift) - (prev_msr << shift);
285	return chunks >>= shift;
286}
287
288static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
289{
290	struct mbm_state *m;
291	u64 chunks, tval;
292
293	tval = __rmid_read(rmid, rr->evtid);
294	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
295		return tval;
296	}
297	switch (rr->evtid) {
298	case QOS_L3_OCCUP_EVENT_ID:
299		rr->val += tval;
300		return 0;
301	case QOS_L3_MBM_TOTAL_EVENT_ID:
302		m = &rr->d->mbm_total[rmid];
303		break;
304	case QOS_L3_MBM_LOCAL_EVENT_ID:
305		m = &rr->d->mbm_local[rmid];
306		break;
307	default:
308		/*
309		 * Code would never reach here because an invalid
310		 * event id would fail the __rmid_read.
311		 */
312		return RMID_VAL_ERROR;
313	}
314
315	if (rr->first) {
316		memset(m, 0, sizeof(struct mbm_state));
317		m->prev_bw_msr = m->prev_msr = tval;
318		return 0;
319	}
320
321	chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
322	m->chunks += chunks;
323	m->prev_msr = tval;
324
325	rr->val += get_corrected_mbm_count(rmid, m->chunks);
326
327	return 0;
328}
329
330/*
331 * Supporting function to calculate the memory bandwidth
332 * and delta bandwidth in MBps.
333 */
334static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
335{
336	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
337	struct mbm_state *m = &rr->d->mbm_local[rmid];
338	u64 tval, cur_bw, chunks;
339
340	tval = __rmid_read(rmid, rr->evtid);
341	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
342		return;
343
344	chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
345	cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
346
347	if (m->delta_comp)
348		m->delta_bw = abs(cur_bw - m->prev_bw);
349	m->delta_comp = false;
350	m->prev_bw = cur_bw;
351	m->prev_bw_msr = tval;
352}
353
354/*
355 * This is called via IPI to read the CQM/MBM counters
356 * on a domain.
357 */
358void mon_event_count(void *info)
359{
360	struct rdtgroup *rdtgrp, *entry;
361	struct rmid_read *rr = info;
362	struct list_head *head;
363	u64 ret_val;
364
365	rdtgrp = rr->rgrp;
366
367	ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
368
369	/*
370	 * For Ctrl groups read data from child monitor groups and
371	 * add them together. Count events which are read successfully.
372	 * Discard the rmid_read's reporting errors.
373	 */
374	head = &rdtgrp->mon.crdtgrp_list;
375
376	if (rdtgrp->type == RDTCTRL_GROUP) {
377		list_for_each_entry(entry, head, mon.crdtgrp_list) {
378			if (__mon_event_count(entry->mon.rmid, rr) == 0)
379				ret_val = 0;
380		}
381	}
382
383	/* Report error if none of rmid_reads are successful */
384	if (ret_val)
385		rr->val = ret_val;
386}
387
388/*
389 * Feedback loop for MBA software controller (mba_sc)
390 *
391 * mba_sc is a feedback loop where we periodically read MBM counters and
392 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
393 * that:
394 *
395 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
396 *
397 * This uses the MBM counters to measure the bandwidth and MBA throttle
398 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
399 * fact that resctrl rdtgroups have both monitoring and control.
400 *
401 * The frequency of the checks is 1s and we just tag along the MBM overflow
402 * timer. Having 1s interval makes the calculation of bandwidth simpler.
403 *
404 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
405 * be a need to increase the bandwidth to avoid unnecessarily restricting
406 * the L2 <-> L3 traffic.
407 *
408 * Since MBA controls the L2 external bandwidth whereas MBM measures the
409 * L3 external bandwidth the following sequence could lead to such a
410 * situation.
411 *
412 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
413 * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
414 * after some time rdtgroup has mostly L2 <-> L3 traffic.
415 *
416 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
417 * throttle MSRs already have low percentage values.  To avoid
418 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
419 */
420static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
421{
422	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
423	struct mbm_state *pmbm_data, *cmbm_data;
424	u32 cur_bw, delta_bw, user_bw;
425	struct rdt_resource *r_mba;
426	struct rdt_domain *dom_mba;
427	struct list_head *head;
428	struct rdtgroup *entry;
429
430	if (!is_mbm_local_enabled())
431		return;
432
433	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
434	closid = rgrp->closid;
435	rmid = rgrp->mon.rmid;
436	pmbm_data = &dom_mbm->mbm_local[rmid];
437
438	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
439	if (!dom_mba) {
440		pr_warn_once("Failure to get domain for MBA update\n");
441		return;
442	}
443
444	cur_bw = pmbm_data->prev_bw;
445	user_bw = dom_mba->mbps_val[closid];
446	delta_bw = pmbm_data->delta_bw;
447	cur_msr_val = dom_mba->ctrl_val[closid];
448
449	/*
450	 * For Ctrl groups read data from child monitor groups.
451	 */
452	head = &rgrp->mon.crdtgrp_list;
453	list_for_each_entry(entry, head, mon.crdtgrp_list) {
454		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
455		cur_bw += cmbm_data->prev_bw;
456		delta_bw += cmbm_data->delta_bw;
457	}
458
459	/*
460	 * Scale up/down the bandwidth linearly for the ctrl group.  The
461	 * bandwidth step is the bandwidth granularity specified by the
462	 * hardware.
463	 *
464	 * The delta_bw is used when increasing the bandwidth so that we
465	 * don't alternately increase and decrease the control values
466	 * continuously.
467	 *
468	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
469	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
470	 * switching between 90 and 110 continuously if we only check
471	 * cur_bw < user_bw.
472	 */
473	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
474		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
475	} else if (cur_msr_val < MAX_MBA_BW &&
476		   (user_bw > (cur_bw + delta_bw))) {
477		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
478	} else {
479		return;
480	}
481
482	cur_msr = r_mba->msr_base + closid;
483	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
484	dom_mba->ctrl_val[closid] = new_msr_val;
485
486	/*
487	 * Delta values are updated dynamically, package-wise, for each
488	 * rdtgrp every time the throttle MSR changes value.
489	 *
490	 * This is because (1) the increase in bandwidth is not perfectly
491	 * linear and only "approximately" linear even when the hardware
492	 * says it is linear. (2) Also, since MBA is a core-specific
493	 * mechanism, the delta values vary based on the number of cores used
494	 * by the rdtgrp.
495	 */
496	pmbm_data->delta_comp = true;
497	list_for_each_entry(entry, head, mon.crdtgrp_list) {
498		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
499		cmbm_data->delta_comp = true;
500	}
501}
502
503static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
504{
505	struct rmid_read rr;
506
507	rr.first = false;
508	rr.r = r;
509	rr.d = d;
510
511	/*
512	 * This is protected from concurrent reads from user
513	 * as both the user and we hold the global mutex.
514	 */
515	if (is_mbm_total_enabled()) {
516		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
517		__mon_event_count(rmid, &rr);
518	}
519	if (is_mbm_local_enabled()) {
520		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
521		__mon_event_count(rmid, &rr);
522
523		/*
524		 * Call the MBA software controller only for the
525		 * control groups and when the user has enabled
526		 * the software controller explicitly.
527		 */
528		if (is_mba_sc(NULL))
529			mbm_bw_count(rmid, &rr);
530	}
531}
532
533/*
534 * Handler to scan the limbo list and move RMIDs whose
535 * occupancy < threshold_occupancy to the free list.
536 */
537void cqm_handle_limbo(struct work_struct *work)
538{
539	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
540	int cpu = smp_processor_id();
541	struct rdt_resource *r;
542	struct rdt_domain *d;
543
544	mutex_lock(&rdtgroup_mutex);
545
546	r = &rdt_resources_all[RDT_RESOURCE_L3];
547	d = container_of(work, struct rdt_domain, cqm_limbo.work);
548
549	__check_limbo(d, false);
550
551	if (has_busy_rmid(r, d))
552		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
553
554	mutex_unlock(&rdtgroup_mutex);
555}
556
557void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
558{
559	unsigned long delay = msecs_to_jiffies(delay_ms);
560	int cpu;
561
562	cpu = cpumask_any(&dom->cpu_mask);
563	dom->cqm_work_cpu = cpu;
564
565	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
566}
567
568void mbm_handle_overflow(struct work_struct *work)
569{
570	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
571	struct rdtgroup *prgrp, *crgrp;
572	int cpu = smp_processor_id();
573	struct list_head *head;
574	struct rdt_resource *r;
575	struct rdt_domain *d;
576
577	mutex_lock(&rdtgroup_mutex);
578
579	if (!static_branch_likely(&rdt_mon_enable_key))
580		goto out_unlock;
581
582	r = &rdt_resources_all[RDT_RESOURCE_L3];
583	d = container_of(work, struct rdt_domain, mbm_over.work);
584
585	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
586		mbm_update(r, d, prgrp->mon.rmid);
587
588		head = &prgrp->mon.crdtgrp_list;
589		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
590			mbm_update(r, d, crgrp->mon.rmid);
591
592		if (is_mba_sc(NULL))
593			update_mba_bw(prgrp, d);
594	}
595
596	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
597
598out_unlock:
599	mutex_unlock(&rdtgroup_mutex);
600}
601
602void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
603{
604	unsigned long delay = msecs_to_jiffies(delay_ms);
605	int cpu;
606
607	if (!static_branch_likely(&rdt_mon_enable_key))
608		return;
609	cpu = cpumask_any(&dom->cpu_mask);
610	dom->mbm_work_cpu = cpu;
611	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
612}
613
614static int dom_data_init(struct rdt_resource *r)
615{
616	struct rmid_entry *entry = NULL;
617	int i, nr_rmids;
618
619	nr_rmids = r->num_rmid;
620	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
621	if (!rmid_ptrs)
622		return -ENOMEM;
623
624	for (i = 0; i < nr_rmids; i++) {
625		entry = &rmid_ptrs[i];
626		INIT_LIST_HEAD(&entry->list);
627
628		entry->rmid = i;
629		list_add_tail(&entry->list, &rmid_free_lru);
630	}
631
632	/*
633	 * RMID 0 is special and is always allocated. It's used for all
634	 * tasks that are not monitored.
635	 */
636	entry = __rmid_entry(0);
637	list_del(&entry->list);
638
639	return 0;
640}
641
642static struct mon_evt llc_occupancy_event = {
643	.name		= "llc_occupancy",
644	.evtid		= QOS_L3_OCCUP_EVENT_ID,
645};
646
647static struct mon_evt mbm_total_event = {
648	.name		= "mbm_total_bytes",
649	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
650};
651
652static struct mon_evt mbm_local_event = {
653	.name		= "mbm_local_bytes",
654	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
655};
656
657/*
658 * Initialize the event list for the resource.
659 *
660 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
661 * because as per the SDM the total and local memory bandwidth
662 * are enumerated as part of L3 monitoring.
663 */
664static void l3_mon_evt_init(struct rdt_resource *r)
665{
666	INIT_LIST_HEAD(&r->evt_list);
667
668	if (is_llc_occupancy_enabled())
669		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
670	if (is_mbm_total_enabled())
671		list_add_tail(&mbm_total_event.list, &r->evt_list);
672	if (is_mbm_local_enabled())
673		list_add_tail(&mbm_local_event.list, &r->evt_list);
674}
675
676int rdt_get_mon_l3_config(struct rdt_resource *r)
677{
678	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
679	unsigned int cl_size = boot_cpu_data.x86_cache_size;
680	int ret;
681
682	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
683	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
684	r->mbm_width = MBM_CNTR_WIDTH_BASE;
685
686	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
687		r->mbm_width += mbm_offset;
688	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
689		pr_warn("Ignoring impossible MBM counter offset\n");
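	/*
	 * Editorial example (values assumed, not taken from this file): if
	 * CPUID reports x86_cache_mbm_width_offset = 8 and MBM_CNTR_WIDTH_BASE
	 * is 24, the effective counter width becomes 32 bits, which is what
	 * mbm_overflow_count() then masks against.
	 */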
690
691	/*
692	 * A reasonable upper limit on the max threshold is the number
693	 * of lines tagged per RMID if all RMIDs have the same number of
694	 * lines tagged in the LLC.
695	 *
696	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
697	 */
698	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
699
700	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
701	resctrl_cqm_threshold /= r->mon_scale;
702
703	ret = dom_data_init(r);
704	if (ret)
705		return ret;
706
707	l3_mon_evt_init(r);
708
709	r->mon_capable = true;
710	r->mon_enabled = true;
711
712	return 0;
713}
714
715void __init intel_rdt_mbm_apply_quirk(void)
716{
717	int cf_index;
718
719	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
720	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
721		pr_info("No MBM correction factor available\n");
722		return;
723	}
724
725	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
726	mbm_cf = mbm_cf_table[cf_index].cf;
727}
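/*
 * Worked example (editorial): a part reporting x86_cache_max_rmid = 175
 * would give cf_index = 176 / 8 - 1 = 21, selecting {159, CF(1.454334)}
 * from the table above, so MBM counts for RMIDs above 159 on such a part
 * would be scaled up by roughly 1.454 in get_corrected_mbm_count().
 */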