v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Resource Director Technology(RDT)
  4 * - Monitoring code
  5 *
  6 * Copyright (C) 2017 Intel Corporation
  7 *
  8 * Author:
  9 *    Vikas Shivappa <vikas.shivappa@intel.com>
 10 *
 11 * This replaces the cqm.c based on perf but we reuse a lot of
 12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 13 *
 14 * More information about RDT can be found in the Intel (R) x86 Architecture
 15 * Software Developer Manual June 2016, volume 3, section 17.17.
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <asm/cpu_device_id.h>
 21#include "internal.h"
 22
 23struct rmid_entry {
 24	u32				rmid;
 25	int				busy;
 26	struct list_head		list;
 27};
 28
 29/**
 30 * @rmid_free_lru    A least recently used list of free RMIDs
 31 *     These RMIDs are guaranteed to have an occupancy less than the
 32 *     threshold occupancy
 33 */
 34static LIST_HEAD(rmid_free_lru);
 35
 36/**
 37 * @rmid_limbo_count     count of currently unused but (potentially)
 38 *     dirty RMIDs.
 39 *     This counts RMIDs that no one is currently using but that
 40 *     may have an occupancy value > resctrl_cqm_threshold. User can change
 41 *     the threshold occupancy value.
 42 */
 43static unsigned int rmid_limbo_count;
 44
 45/**
 46 * @rmid_entry - The entry in the limbo and free lists.
 47 */
 48static struct rmid_entry	*rmid_ptrs;
 49
 50/*
 51 * Global boolean for rdt_monitor which is true if any
 52 * resource monitoring is enabled.
 53 */
 54bool rdt_mon_capable;
 55
 56/*
 57 * Global to indicate which monitoring events are enabled.
 58 */
 59unsigned int rdt_mon_features;
 60
 61/*
 62 * This is the threshold cache occupancy at which we will consider an
 63 * RMID available for re-allocation.
 64 */
 65unsigned int resctrl_cqm_threshold;
 66
 67static inline struct rmid_entry *__rmid_entry(u32 rmid)
 68{
 69	struct rmid_entry *entry;
 70
 71	entry = &rmid_ptrs[rmid];
 72	WARN_ON(entry->rmid != rmid);
 73
 74	return entry;
 75}
 76
 77static u64 __rmid_read(u32 rmid, u32 eventid)
 78{
 79	u64 val;
 80
 81	/*
 82	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
 83	 * with a valid event code for supported resource type and the bits
 84	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
 85	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
 86	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
 87	 * are error bits.
 88	 */
 89	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
 90	rdmsrl(MSR_IA32_QM_CTR, val);
 91
 92	return val;
 93}
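/*
 * A worked example of __rmid_read() above, for RMID 5: wrmsr() takes
 * the low and high 32 bits separately, so passing "eventid, rmid"
 * places the event code in IA32_QM_EVTSEL bits 7:0 and the RMID in
 * bits 41:32, after which IA32_QM_CTR holds the monitored count:
 *
 *	wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, 5);
 *	rdmsrl(MSR_IA32_QM_CTR, val);
 *
 * Callers then test val against RMID_VAL_ERROR and RMID_VAL_UNAVAIL
 * before trusting it, as __mon_event_count() below does.
 */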
 94
 95static bool rmid_dirty(struct rmid_entry *entry)
 96{
 97	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 98
 99	return val >= resctrl_cqm_threshold;
100}
101
102/*
103 * Check the RMIDs that are marked as busy for this domain. If the
104 * reported LLC occupancy is below the threshold clear the busy bit and
105 * decrement the count. If the busy count gets to zero on an RMID, we
106 * free the RMID
107 */
108void __check_limbo(struct rdt_domain *d, bool force_free)
109{
110	struct rmid_entry *entry;
111	struct rdt_resource *r;
112	u32 crmid = 1, nrmid;
113
114	r = &rdt_resources_all[RDT_RESOURCE_L3];
115
116	/*
117	 * Skip RMID 0 and start from RMID 1: check all the RMIDs that
118	 * are marked as busy against the occupancy threshold. If the
119	 * occupancy is less than the threshold, decrement the busy counter
120	 * of the RMID and move it to the free list when the counter reaches 0.
121	 */
122	for (;;) {
123		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
124		if (nrmid >= r->num_rmid)
125			break;
126
127		entry = __rmid_entry(nrmid);
128		if (force_free || !rmid_dirty(entry)) {
129			clear_bit(entry->rmid, d->rmid_busy_llc);
130			if (!--entry->busy) {
131				rmid_limbo_count--;
132				list_add_tail(&entry->list, &rmid_free_lru);
133			}
134		}
135		crmid = nrmid + 1;
136	}
137}
138
139bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
140{
141	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
142}
143
144/*
145 * As of now the RMID allocation is global.
146 * However we keep track of which packages the RMIDs
147 * are used on to optimize the limbo list management.
148 */
149int alloc_rmid(void)
150{
151	struct rmid_entry *entry;
152
153	lockdep_assert_held(&rdtgroup_mutex);
154
155	if (list_empty(&rmid_free_lru))
156		return rmid_limbo_count ? -EBUSY : -ENOSPC;
157
158	entry = list_first_entry(&rmid_free_lru,
159				 struct rmid_entry, list);
160	list_del(&entry->list);
161
162	return entry->rmid;
163}
164
165static void add_rmid_to_limbo(struct rmid_entry *entry)
166{
167	struct rdt_resource *r;
168	struct rdt_domain *d;
169	int cpu;
170	u64 val;
171
172	r = &rdt_resources_all[RDT_RESOURCE_L3];
173
174	entry->busy = 0;
175	cpu = get_cpu();
176	list_for_each_entry(d, &r->domains, list) {
177		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
178			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
179			if (val <= resctrl_cqm_threshold)
180				continue;
181		}
182
183		/*
184		 * For the first limbo RMID in the domain,
185		 * set up the limbo worker.
186		 */
187		if (!has_busy_rmid(r, d))
188			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
189		set_bit(entry->rmid, d->rmid_busy_llc);
190		entry->busy++;
191	}
192	put_cpu();
193
194	if (entry->busy)
195		rmid_limbo_count++;
196	else
197		list_add_tail(&entry->list, &rmid_free_lru);
198}
199
200void free_rmid(u32 rmid)
201{
202	struct rmid_entry *entry;
203
204	if (!rmid)
205		return;
206
207	lockdep_assert_held(&rdtgroup_mutex);
208
209	entry = __rmid_entry(rmid);
210
211	if (is_llc_occupancy_enabled())
212		add_rmid_to_limbo(entry);
213	else
214		list_add_tail(&entry->list, &rmid_free_lru);
215}
216
217static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
218{
219	u64 shift = 64 - MBM_CNTR_WIDTH, chunks;
220
221	chunks = (cur_msr << shift) - (prev_msr << shift);
222	return chunks >> shift;
223}
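/*
 * A worked example of the wraparound arithmetic above, with the 24-bit
 * MBM_CNTR_WIDTH used in this kernel: if the hardware counter wrapped
 * between reads, say prev_msr = 0xfffffe and cur_msr = 0x000003, then
 * shift = 40 and
 *
 *	(cur_msr << 40) - (prev_msr << 40) = 5 << 40
 *
 * so the function returns 5. The shifts discard any bits above the
 * counter width and let the unsigned subtraction absorb the wrap.
 */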
224
225static int __mon_event_count(u32 rmid, struct rmid_read *rr)
226{
227	struct mbm_state *m;
228	u64 chunks, tval;
229
230	tval = __rmid_read(rmid, rr->evtid);
231	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
232		rr->val = tval;
233		return -EINVAL;
234	}
235	switch (rr->evtid) {
236	case QOS_L3_OCCUP_EVENT_ID:
237		rr->val += tval;
238		return 0;
239	case QOS_L3_MBM_TOTAL_EVENT_ID:
240		m = &rr->d->mbm_total[rmid];
241		break;
242	case QOS_L3_MBM_LOCAL_EVENT_ID:
243		m = &rr->d->mbm_local[rmid];
244		break;
245	default:
246		/*
247		 * Code would never reach here because
248		 * an invalid event id would fail the __rmid_read.
249		 */
250		return -EINVAL;
251	}
252
253	if (rr->first) {
254		memset(m, 0, sizeof(struct mbm_state));
255		m->prev_bw_msr = m->prev_msr = tval;
256		return 0;
257	}
258
259	chunks = mbm_overflow_count(m->prev_msr, tval);
260	m->chunks += chunks;
261	m->prev_msr = tval;
262
263	rr->val += m->chunks;
264	return 0;
265}
266
267/*
268 * Supporting function to calculate the memory bandwidth
269 * and delta bandwidth in MBps.
270 */
271static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
272{
273	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
274	struct mbm_state *m = &rr->d->mbm_local[rmid];
275	u64 tval, cur_bw, chunks;
276
277	tval = __rmid_read(rmid, rr->evtid);
278	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
279		return;
280
281	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
282	m->chunks_bw += chunks;
283	m->chunks = m->chunks_bw;
284	cur_bw = (chunks * r->mon_scale) >> 20;
285
286	if (m->delta_comp)
287		m->delta_bw = abs(cur_bw - m->prev_bw);
288	m->delta_comp = false;
289	m->prev_bw = cur_bw;
290	m->prev_bw_msr = tval;
291}
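/*
 * A worked example of the bandwidth computation above, assuming a
 * hypothetical mon_scale of 64 bytes per chunk: 1638400 chunks read in
 * the last one-second interval give
 *
 *	cur_bw = (1638400 * 64) >> 20 = 100 MBps
 *
 * since ">> 20" converts bytes to MB and the sampling interval is 1s.
 */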
292
293/*
294 * This is called via IPI to read the CQM/MBM counters
295 * on a domain.
296 */
297void mon_event_count(void *info)
298{
299	struct rdtgroup *rdtgrp, *entry;
300	struct rmid_read *rr = info;
301	struct list_head *head;
302
303	rdtgrp = rr->rgrp;
304
305	if (__mon_event_count(rdtgrp->mon.rmid, rr))
306		return;
307
308	/*
309	 * For Ctrl groups read data from child monitor groups.
310	 */
311	head = &rdtgrp->mon.crdtgrp_list;
312
313	if (rdtgrp->type == RDTCTRL_GROUP) {
314		list_for_each_entry(entry, head, mon.crdtgrp_list) {
315			if (__mon_event_count(entry->mon.rmid, rr))
316				return;
317		}
318	}
319}
320
321/*
322 * Feedback loop for MBA software controller (mba_sc)
323 *
324 * mba_sc is a feedback loop where we periodically read MBM counters and
325 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
326 * that:
327 *
328 *   current bandwidth (cur_bw) < user specified bandwidth (user_bw)
329 *
330 * This uses the MBM counters to measure the bandwidth and MBA throttle
331 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
332 * fact that resctrl rdtgroups have both monitoring and control.
333 *
334 * The frequency of the checks is 1s and we just tag along the MBM overflow
335 * timer. Having 1s interval makes the calculation of bandwidth simpler.
336 *
337 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
338 * be a need to increase the bandwidth to avoid unnecessarily restricting
339 * the L2 <-> L3 traffic.
340 *
341 * Since MBA controls the L2 external bandwidth whereas MBM measures the
342 * L3 external bandwidth the following sequence could lead to such a
343 * situation.
344 *
345 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
346 * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
347 * after some time rdtgroup has mostly L2 <-> L3 traffic.
348 *
349 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
350 * throttle MSRs already have low percentage values.  To avoid
351 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
352 */
353static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
354{
355	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
356	struct mbm_state *pmbm_data, *cmbm_data;
357	u32 cur_bw, delta_bw, user_bw;
358	struct rdt_resource *r_mba;
359	struct rdt_domain *dom_mba;
360	struct list_head *head;
361	struct rdtgroup *entry;
362
363	if (!is_mbm_local_enabled())
364		return;
365
366	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
367	closid = rgrp->closid;
368	rmid = rgrp->mon.rmid;
369	pmbm_data = &dom_mbm->mbm_local[rmid];
370
371	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
372	if (!dom_mba) {
373		pr_warn_once("Failure to get domain for MBA update\n");
374		return;
375	}
376
377	cur_bw = pmbm_data->prev_bw;
378	user_bw = dom_mba->mbps_val[closid];
379	delta_bw = pmbm_data->delta_bw;
380	cur_msr_val = dom_mba->ctrl_val[closid];
381
382	/*
383	 * For Ctrl groups read data from child monitor groups.
384	 */
385	head = &rgrp->mon.crdtgrp_list;
386	list_for_each_entry(entry, head, mon.crdtgrp_list) {
387		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
388		cur_bw += cmbm_data->prev_bw;
389		delta_bw += cmbm_data->delta_bw;
390	}
391
392	/*
393	 * Scale up/down the bandwidth linearly for the ctrl group.  The
394	 * bandwidth step is the bandwidth granularity specified by the
395	 * hardware.
396	 *
397	 * The delta_bw is used when increasing the bandwidth so that we
398	 * don't alternately increase and decrease the control values
399	 * continuously.
400	 *
401	 * For example: consider cur_bw = 90MBps, user_bw = 100MBps and if
402	 * the bandwidth step is 20MBps (> user_bw - cur_bw), we would keep
403	 * switching between 90 and 110 continuously if we only check
404	 * cur_bw < user_bw.
405	 */
406	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
407		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
408	} else if (cur_msr_val < MAX_MBA_BW &&
409		   (user_bw > (cur_bw + delta_bw))) {
410		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
411	} else {
412		return;
413	}
414
415	cur_msr = r_mba->msr_base + closid;
416	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
417	dom_mba->ctrl_val[closid] = new_msr_val;
418
419	/*
420	 * Delta values are updated dynamically package-wise for each
421	 * rdtgrp every time the throttle MSR changes value.
422	 *
423	 * This is because (1) the increase in bandwidth is not perfectly
424	 * linear and only "approximately" linear even when the hardware
425	 * says it is linear, and (2) since MBA is a core-specific
426	 * mechanism, the delta values vary based on the number of cores
427	 * used by the rdtgrp.
428	 */
429	pmbm_data->delta_comp = true;
430	list_for_each_entry(entry, head, mon.crdtgrp_list) {
431		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
432		cmbm_data->delta_comp = true;
433	}
434}
435
436static void mbm_update(struct rdt_domain *d, int rmid)
437{
438	struct rmid_read rr;
439
440	rr.first = false;
441	rr.d = d;
442
443	/*
444	 * This is protected from concurrent reads from userspace
445	 * as both userspace and this code hold the global mutex.
446	 */
447	if (is_mbm_total_enabled()) {
448		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
449		__mon_event_count(rmid, &rr);
450	}
451	if (is_mbm_local_enabled()) {
452		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
453
454		/*
455		 * Call the MBA software controller only for the
456		 * control groups and when the user has enabled
457		 * the software controller explicitly.
458		 */
459		if (!is_mba_sc(NULL))
460			__mon_event_count(rmid, &rr);
461		else
462			mbm_bw_count(rmid, &rr);
463	}
464}
465
466/*
467 * Handler to scan the limbo list and move RMIDs whose
468 * occupancy < threshold_occupancy to the free list.
469 */
470void cqm_handle_limbo(struct work_struct *work)
471{
472	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
473	int cpu = smp_processor_id();
474	struct rdt_resource *r;
475	struct rdt_domain *d;
476
477	mutex_lock(&rdtgroup_mutex);
478
479	r = &rdt_resources_all[RDT_RESOURCE_L3];
480	d = get_domain_from_cpu(cpu, r);
481
482	if (!d) {
483		pr_warn_once("Failure to get domain for limbo worker\n");
484		goto out_unlock;
485	}
486
487	__check_limbo(d, false);
488
489	if (has_busy_rmid(r, d))
490		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
491
492out_unlock:
493	mutex_unlock(&rdtgroup_mutex);
494}
495
496void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
497{
498	unsigned long delay = msecs_to_jiffies(delay_ms);
499	int cpu;
500
501	cpu = cpumask_any(&dom->cpu_mask);
502	dom->cqm_work_cpu = cpu;
503
504	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
505}
506
507void mbm_handle_overflow(struct work_struct *work)
508{
509	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
510	struct rdtgroup *prgrp, *crgrp;
511	int cpu = smp_processor_id();
512	struct list_head *head;
513	struct rdt_domain *d;
514
515	mutex_lock(&rdtgroup_mutex);
516
517	if (!static_branch_likely(&rdt_enable_key))
518		goto out_unlock;
519
520	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
521	if (!d)
522		goto out_unlock;
523
524	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
525		mbm_update(d, prgrp->mon.rmid);
526
527		head = &prgrp->mon.crdtgrp_list;
528		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
529			mbm_update(d, crgrp->mon.rmid);
530
531		if (is_mba_sc(NULL))
532			update_mba_bw(prgrp, d);
533	}
534
535	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
536
537out_unlock:
538	mutex_unlock(&rdtgroup_mutex);
539}
540
541void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
542{
543	unsigned long delay = msecs_to_jiffies(delay_ms);
544	int cpu;
545
546	if (!static_branch_likely(&rdt_enable_key))
547		return;
548	cpu = cpumask_any(&dom->cpu_mask);
549	dom->mbm_work_cpu = cpu;
550	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
551}
552
553static int dom_data_init(struct rdt_resource *r)
554{
555	struct rmid_entry *entry = NULL;
556	int i, nr_rmids;
557
558	nr_rmids = r->num_rmid;
559	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
560	if (!rmid_ptrs)
561		return -ENOMEM;
562
563	for (i = 0; i < nr_rmids; i++) {
564		entry = &rmid_ptrs[i];
565		INIT_LIST_HEAD(&entry->list);
566
567		entry->rmid = i;
568		list_add_tail(&entry->list, &rmid_free_lru);
569	}
570
571	/*
572	 * RMID 0 is special and is always allocated. It's used for all
573	 * tasks that are not monitored.
574	 */
575	entry = __rmid_entry(0);
576	list_del(&entry->list);
577
578	return 0;
579}
580
581static struct mon_evt llc_occupancy_event = {
582	.name		= "llc_occupancy",
583	.evtid		= QOS_L3_OCCUP_EVENT_ID,
584};
585
586static struct mon_evt mbm_total_event = {
587	.name		= "mbm_total_bytes",
588	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
589};
590
591static struct mon_evt mbm_local_event = {
592	.name		= "mbm_local_bytes",
593	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
594};
595
596/*
597 * Initialize the event list for the resource.
598 *
599 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
600 * because as per the SDM the total and local memory bandwidth
601 * are enumerated as part of L3 monitoring.
602 */
603static void l3_mon_evt_init(struct rdt_resource *r)
604{
605	INIT_LIST_HEAD(&r->evt_list);
606
607	if (is_llc_occupancy_enabled())
608		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
609	if (is_mbm_total_enabled())
610		list_add_tail(&mbm_total_event.list, &r->evt_list);
611	if (is_mbm_local_enabled())
612		list_add_tail(&mbm_local_event.list, &r->evt_list);
613}
614
615int rdt_get_mon_l3_config(struct rdt_resource *r)
616{
617	unsigned int cl_size = boot_cpu_data.x86_cache_size;
618	int ret;
619
620	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
621	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
622
623	/*
624	 * A reasonable upper limit on the max threshold is the number
625	 * of lines tagged per RMID if all RMIDs have the same number of
626	 * lines tagged in the LLC.
627	 *
628	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
629	 */
630	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
631
632	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
633	resctrl_cqm_threshold /= r->mon_scale;
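/*
 * A worked example with hypothetical numbers: for a 35MB LLC,
 * x86_cache_size = 35840 (KB), so with 56 RMIDs the byte threshold is
 * 35840 * 1024 / 56 = 655360 bytes (~1.8% of the LLC). If
 * x86_cache_occ_scale were 64, the stored value would then be
 * 655360 / 64 = 10240 hardware units.
 */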
634
635	ret = dom_data_init(r);
636	if (ret)
637		return ret;
638
639	l3_mon_evt_init(r);
640
641	r->mon_capable = true;
642	r->mon_enabled = true;
643
644	return 0;
645}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Resource Director Technology(RDT)
   4 * - Monitoring code
   5 *
   6 * Copyright (C) 2017 Intel Corporation
   7 *
   8 * Author:
   9 *    Vikas Shivappa <vikas.shivappa@intel.com>
  10 *
  11 * This replaces the cqm.c based on perf but we reuse a lot of
  12 * code and data structures originally from Peter Zijlstra and Matt Fleming.
  13 *
  14 * More information about RDT can be found in the Intel (R) x86 Architecture
  15 * Software Developer Manual June 2016, volume 3, section 17.17.
  16 */
  17
  18#include <linux/cpu.h>
  19#include <linux/module.h>
  20#include <linux/sizes.h>
  21#include <linux/slab.h>
  22
  23#include <asm/cpu_device_id.h>
  24#include <asm/resctrl.h>
  25
  26#include "internal.h"
  27
  28/**
  29 * struct rmid_entry - dirty tracking for all RMID.
  30 * @closid:	The CLOSID for this entry.
  31 * @rmid:	The RMID for this entry.
  32 * @busy:	The number of domains with cached data using this RMID.
  33 * @list:	Member of the rmid_free_lru list when busy == 0.
  34 *
  35 * Depending on the architecture the correct monitor is accessed using
  36 * both @closid and @rmid, or @rmid only.
  37 *
  38 * Take the rdtgroup_mutex when accessing.
  39 */
  40struct rmid_entry {
  41	u32				closid;
  42	u32				rmid;
  43	int				busy;
  44	struct list_head		list;
  45};
  46
  47/*
  48 * @rmid_free_lru - A least recently used list of free RMIDs
  49 *     These RMIDs are guaranteed to have an occupancy less than the
  50 *     threshold occupancy
  51 */
  52static LIST_HEAD(rmid_free_lru);
  53
  54/*
  55 * @closid_num_dirty_rmid    The number of dirty RMID each CLOSID has.
  56 *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
  57 *     Indexed by CLOSID. Protected by rdtgroup_mutex.
  58 */
  59static u32 *closid_num_dirty_rmid;
  60
  61/*
  62 * @rmid_limbo_count - count of currently unused but (potentially)
  63 *     dirty RMIDs.
  64 *     This counts RMIDs that no one is currently using but that
  65 *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
  66 *     change the threshold occupancy value.
  67 */
  68static unsigned int rmid_limbo_count;
  69
  70/*
  71 * @rmid_entry - The entry in the limbo and free lists.
  72 */
  73static struct rmid_entry	*rmid_ptrs;
  74
  75/*
  76 * Global boolean for rdt_monitor which is true if any
  77 * resource monitoring is enabled.
  78 */
  79bool rdt_mon_capable;
  80
  81/*
  82 * Global to indicate which monitoring events are enabled.
  83 */
  84unsigned int rdt_mon_features;
  85
  86/*
  87 * This is the threshold cache occupancy in bytes at which we will consider an
  88 * RMID available for re-allocation.
  89 */
  90unsigned int resctrl_rmid_realloc_threshold;
  91
  92/*
  93 * This is the maximum value for the reallocation threshold, in bytes.
  94 */
  95unsigned int resctrl_rmid_realloc_limit;
  96
  97#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
  98
  99/*
 100 * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
 101 * If rmid > rmid threshold, MBM total and local values should be multiplied
 102 * by the correction factor.
 103 *
 104 * The original table is modified for better code:
 105 *
 106 * 1. The threshold 0 is changed to rmid count - 1 so we don't do
 107 *    correction for that case.
 108 * 2. MBM total and local correction table indexed by core counter which is
 109 *    equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27.
 110 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 111 *    to calculate corrected value by shifting:
 112 *    corrected_value = (original_value * correction_factor) >> 20
 113 */
 114static const struct mbm_correction_factor_table {
 115	u32 rmidthreshold;
 116	u64 cf;
 117} mbm_cf_table[] __initconst = {
 118	{7,	CF(1.000000)},
 119	{15,	CF(1.000000)},
 120	{15,	CF(0.969650)},
 121	{31,	CF(1.000000)},
 122	{31,	CF(1.066667)},
 123	{31,	CF(0.969650)},
 124	{47,	CF(1.142857)},
 125	{63,	CF(1.000000)},
 126	{63,	CF(1.185115)},
 127	{63,	CF(1.066553)},
 128	{79,	CF(1.454545)},
 129	{95,	CF(1.000000)},
 130	{95,	CF(1.230769)},
 131	{95,	CF(1.142857)},
 132	{95,	CF(1.066667)},
 133	{127,	CF(1.000000)},
 134	{127,	CF(1.254863)},
 135	{127,	CF(1.185255)},
 136	{151,	CF(1.000000)},
 137	{127,	CF(1.066667)},
 138	{167,	CF(1.000000)},
 139	{159,	CF(1.454334)},
 140	{183,	CF(1.000000)},
 141	{127,	CF(0.969744)},
 142	{191,	CF(1.280246)},
 143	{191,	CF(1.230921)},
 144	{215,	CF(1.000000)},
 145	{191,	CF(1.143118)},
 146};
 147
 148static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
 149static u64 mbm_cf __read_mostly;
 150
 151static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
 152{
 153	/* Correct MBM value. */
 154	if (rmid > mbm_cf_rmidthreshold)
 155		val = (val * mbm_cf) >> 20;
 156
 157	return val;
 158}
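/*
 * A worked example of the correction: CF(1.142857) stores
 * 1048576 * 1.142857 + 0.5 = 1198372, so a raw count of 1000000 is
 * corrected to (1000000 * 1198372) >> 20 = 1142856, i.e. scaled by
 * ~1.142857 using integer arithmetic only.
 */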
 159
 160/*
 161 * x86 and arm64 differ in their handling of monitoring.
 162 * x86's RMID are independent numbers, there is only one source of traffic
 163 * with an RMID value of '1'.
 164 * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
 165 * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
 166 * value is no longer unique.
 167 * To account for this, resctrl uses an index. On x86 this is just the RMID,
 168 * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
 169 *
 170 * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
 171 * must accept an attempt to read every index.
 172 */
 173static inline struct rmid_entry *__rmid_entry(u32 idx)
 174{
 175	struct rmid_entry *entry;
 176	u32 closid, rmid;
 177
 178	entry = &rmid_ptrs[idx];
 179	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
 180
 181	WARN_ON_ONCE(entry->closid != closid);
 182	WARN_ON_ONCE(entry->rmid != rmid);
 183
 184	return entry;
 185}
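/*
 * An illustrative sketch of the index helpers described above: on x86
 * the index is simply the RMID and the CLOSID is ignored, so the
 * encode side reduces to something like
 *
 *	static inline u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
 *	{
 *		return rmid;
 *	}
 *
 * while an MPAM implementation would pack both values into one number.
 */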
 186
 187static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
 188{
 189	u64 msr_val;
 190
 191	/*
 192	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
 193	 * with a valid event code for supported resource type and the bits
 194	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
 195	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
 196	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
 197	 * are error bits.
 198	 */
 199	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
 200	rdmsrl(MSR_IA32_QM_CTR, msr_val);
 201
 202	if (msr_val & RMID_VAL_ERROR)
 203		return -EIO;
 204	if (msr_val & RMID_VAL_UNAVAIL)
 205		return -EINVAL;
 206
 207	*val = msr_val;
 208	return 0;
 209}
 210
 211static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
 212						 u32 rmid,
 213						 enum resctrl_event_id eventid)
 214{
 215	switch (eventid) {
 216	case QOS_L3_OCCUP_EVENT_ID:
 217		return NULL;
 218	case QOS_L3_MBM_TOTAL_EVENT_ID:
 219		return &hw_dom->arch_mbm_total[rmid];
 220	case QOS_L3_MBM_LOCAL_EVENT_ID:
 221		return &hw_dom->arch_mbm_local[rmid];
 222	}
 223
 224	/* Never expect to get here */
 225	WARN_ON_ONCE(1);
 226
 227	return NULL;
 228}
 229
 230void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
 231			     u32 unused, u32 rmid,
 232			     enum resctrl_event_id eventid)
 233{
 234	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 235	struct arch_mbm_state *am;
 236
 237	am = get_arch_mbm_state(hw_dom, rmid, eventid);
 238	if (am) {
 239		memset(am, 0, sizeof(*am));
 240
 241		/* Record any initial, non-zero count value. */
 242		__rmid_read(rmid, eventid, &am->prev_msr);
 243	}
 244}
 245
 246/*
 247 * Assumes that hardware counters are also reset and thus that there is
 248 * no need to record initial non-zero counts.
 249 */
 250void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
 251{
 252	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 253
 254	if (is_mbm_total_enabled())
 255		memset(hw_dom->arch_mbm_total, 0,
 256		       sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);
 257
 258	if (is_mbm_local_enabled())
 259		memset(hw_dom->arch_mbm_local, 0,
 260		       sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
 261}
 262
 263static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 264{
 265	u64 shift = 64 - width, chunks;
 266
 267	chunks = (cur_msr << shift) - (prev_msr << shift);
 268	return chunks >> shift;
 269}
 270
 271int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 272			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
 273			   u64 *val, void *ignored)
 274{
 275	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 276	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 277	struct arch_mbm_state *am;
 278	u64 msr_val, chunks;
 279	int ret;
 280
 281	resctrl_arch_rmid_read_context_check();
 282
 283	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
 284		return -EINVAL;
 285
 286	ret = __rmid_read(rmid, eventid, &msr_val);
 287	if (ret)
 288		return ret;
 289
 290	am = get_arch_mbm_state(hw_dom, rmid, eventid);
 291	if (am) {
 292		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
 293						 hw_res->mbm_width);
 294		chunks = get_corrected_mbm_count(rmid, am->chunks);
 295		am->prev_msr = msr_val;
 296	} else {
 297		chunks = msr_val;
 298	}
 299
 300	*val = chunks * hw_res->mon_scale;
 301
 302	return 0;
 303}
 304
 305static void limbo_release_entry(struct rmid_entry *entry)
 306{
 307	lockdep_assert_held(&rdtgroup_mutex);
 308
 309	rmid_limbo_count--;
 310	list_add_tail(&entry->list, &rmid_free_lru);
 311
 312	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
 313		closid_num_dirty_rmid[entry->closid]--;
 314}
 315
 316/*
 317 * Check the RMIDs that are marked as busy for this domain. If the
 318 * reported LLC occupancy is below the threshold clear the busy bit and
 319 * decrement the count. If the busy count gets to zero on an RMID, we
 320 * free the RMID
 321 */
 322void __check_limbo(struct rdt_domain *d, bool force_free)
 323{
 324	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 325	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
 326	struct rmid_entry *entry;
 327	u32 idx, cur_idx = 1;
 328	void *arch_mon_ctx;
 329	bool rmid_dirty;
 330	u64 val = 0;
 331
 332	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
 333	if (IS_ERR(arch_mon_ctx)) {
 334		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
 335				    PTR_ERR(arch_mon_ctx));
 336		return;
 337	}
 338
 339	/*
 340	 * Skip RMID 0 and start from RMID 1: check all the RMIDs that
 341	 * are marked as busy against the occupancy threshold. If the
 342	 * occupancy is less than the threshold, decrement the busy counter
 343	 * of the RMID and move it to the free list when the counter reaches 0.
 344	 */
 345	for (;;) {
 346		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
 347		if (idx >= idx_limit)
 348			break;
 349
 350		entry = __rmid_entry(idx);
 351		if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
 352					   QOS_L3_OCCUP_EVENT_ID, &val,
 353					   arch_mon_ctx)) {
 354			rmid_dirty = true;
 355		} else {
 356			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
 357		}
 358
 359		if (force_free || !rmid_dirty) {
 360			clear_bit(idx, d->rmid_busy_llc);
 361			if (!--entry->busy)
 362				limbo_release_entry(entry);
 363		}
 364		cur_idx = idx + 1;
 365	}
 366
 367	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
 368}
 369
 370bool has_busy_rmid(struct rdt_domain *d)
 371{
 372	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
 373
 374	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
 375}
 376
 377static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
 378{
 379	struct rmid_entry *itr;
 380	u32 itr_idx, cmp_idx;
 381
 382	if (list_empty(&rmid_free_lru))
 383		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
 384
 385	list_for_each_entry(itr, &rmid_free_lru, list) {
 386		/*
 387		 * Get the index of this free RMID, and the index it would need
 388		 * to be if it were used with this CLOSID.
 389		 * If the CLOSID is irrelevant on this architecture, the two
 390		 * index values are always the same on every entry and thus the
 391		 * very first entry will be returned.
 392		 */
 393		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
 394		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
 395
 396		if (itr_idx == cmp_idx)
 397			return itr;
 398	}
 399
 400	return ERR_PTR(-ENOSPC);
 401}
 402
 403/**
 404 * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
 405 *                                  RMID are clean, or the CLOSID that has
 406 *                                  the most clean RMID.
 407 *
 408 * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
 409 * may not be able to allocate clean RMID. To avoid this the allocator will
 410 * choose the CLOSID with the most clean RMID.
 411 *
 412 * When the CLOSID and RMID are independent numbers, the first free CLOSID will
 413 * be returned.
 414 */
 415int resctrl_find_cleanest_closid(void)
 416{
 417	u32 cleanest_closid = ~0;
 418	int i = 0;
 419
 420	lockdep_assert_held(&rdtgroup_mutex);
 421
 422	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
 423		return -EIO;
 424
 425	for (i = 0; i < closids_supported(); i++) {
 426		int num_dirty;
 427
 428		if (closid_allocated(i))
 429			continue;
 430
 431		num_dirty = closid_num_dirty_rmid[i];
 432		if (num_dirty == 0)
 433			return i;
 434
 435		if (cleanest_closid == ~0)
 436			cleanest_closid = i;
 437
 438		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
 439			cleanest_closid = i;
 440	}
 441
 442	if (cleanest_closid == ~0)
 443		return -ENOSPC;
 444
 445	return cleanest_closid;
 446}
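/*
 * A worked example with hypothetical counts: if three CLOSIDs are
 * unallocated and closid_num_dirty_rmid holds {3, 0, 2} for them, the
 * second is returned immediately as fully clean; if every candidate
 * had dirty RMIDs, the loop would settle on the CLOSID with the
 * smallest count instead.
 */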
 447
 448/*
 449 * For MPAM the RMID value is not unique, and has to be considered with
 450 * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
 451 * allows all domains to be managed by a single free list.
 452 * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
 453 */
 454int alloc_rmid(u32 closid)
 455{
 456	struct rmid_entry *entry;
 457
 458	lockdep_assert_held(&rdtgroup_mutex);
 459
 460	entry = resctrl_find_free_rmid(closid);
 461	if (IS_ERR(entry))
 462		return PTR_ERR(entry);
 463
 464	list_del(&entry->list);
 465	return entry->rmid;
 466}
 467
 468static void add_rmid_to_limbo(struct rmid_entry *entry)
 469{
 470	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 471	struct rdt_domain *d;
 472	u32 idx;
 473
 474	lockdep_assert_held(&rdtgroup_mutex);
 475
 476	/* Walking r->domains, ensure it can't race with cpuhp */
 477	lockdep_assert_cpus_held();
 478
 479	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
 480
 481	entry->busy = 0;
 482	list_for_each_entry(d, &r->domains, list) {
 483		/*
 484		 * For the first limbo RMID in the domain,
 485		 * set up the limbo worker.
 486		 */
 487		if (!has_busy_rmid(d))
 488			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
 489						RESCTRL_PICK_ANY_CPU);
 490		set_bit(idx, d->rmid_busy_llc);
 491		entry->busy++;
 492	}
 493
 494	rmid_limbo_count++;
 495	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
 496		closid_num_dirty_rmid[entry->closid]++;
 497}
 498
 499void free_rmid(u32 closid, u32 rmid)
 500{
 501	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
 502	struct rmid_entry *entry;
 503
 504	lockdep_assert_held(&rdtgroup_mutex);
 505
 506	/*
 507	 * Do not allow the default rmid to be free'd. Comparing by index
 508	 * allows architectures that ignore the closid parameter to avoid an
 509	 * unnecessary check.
 510	 */
 511	if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
 512						RESCTRL_RESERVED_RMID))
 513		return;
 514
 515	entry = __rmid_entry(idx);
 516
 517	if (is_llc_occupancy_enabled())
 518		add_rmid_to_limbo(entry);
 519	else
 520		list_add_tail(&entry->list, &rmid_free_lru);
 521}
 522
 523static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid,
 524				       u32 rmid, enum resctrl_event_id evtid)
 525{
 526	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
 527
 528	switch (evtid) {
 529	case QOS_L3_MBM_TOTAL_EVENT_ID:
 530		return &d->mbm_total[idx];
 531	case QOS_L3_MBM_LOCAL_EVENT_ID:
 532		return &d->mbm_local[idx];
 533	default:
 534		return NULL;
 535	}
 536}
 537
 538static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
 539{
 540	struct mbm_state *m;
 541	u64 tval = 0;
 542
 543	if (rr->first) {
 544		resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
 545		m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
 546		if (m)
 547			memset(m, 0, sizeof(struct mbm_state));
 548		return 0;
 549	}
 550
 551	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
 552					 &tval, rr->arch_mon_ctx);
 553	if (rr->err)
 554		return rr->err;
 555
 556	rr->val += tval;
 557
 558	return 0;
 559}
 560
 561/*
 562 * mbm_bw_count() - Update bw count from values previously read by
 563 *		    __mon_event_count().
 564 * @closid:	The closid used to identify the cached mbm_state.
 565 * @rmid:	The rmid used to identify the cached mbm_state.
 566 * @rr:		The struct rmid_read populated by __mon_event_count().
 567 *
 568 * Supporting function to calculate the memory bandwidth
 569 * and delta bandwidth in MBps. The chunks value previously read by
 570 * __mon_event_count() is compared with the chunks value from the previous
 571 * invocation. This must be called once per second to maintain values in MBps.
 572 */
 573static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
 574{
 575	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
 576	struct mbm_state *m = &rr->d->mbm_local[idx];
 577	u64 cur_bw, bytes, cur_bytes;
 578
 579	cur_bytes = rr->val;
 580	bytes = cur_bytes - m->prev_bw_bytes;
 581	m->prev_bw_bytes = cur_bytes;
 582
 583	cur_bw = bytes / SZ_1M;
 584
 585	m->prev_bw = cur_bw;
 586}
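/*
 * A worked example of the computation above, with hypothetical byte
 * counts: if rr->val was 3355443200 on the previous pass and is
 * 3460300800 now, then bytes = 104857600 and
 *
 *	cur_bw = 104857600 / SZ_1M = 100 MBps
 *
 * which is only meaningful because this runs once per second.
 */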
 587
 588/*
 589 * This is scheduled by mon_event_read() to read the CQM/MBM counters
 590 * on a domain.
 591 */
 592void mon_event_count(void *info)
 593{
 594	struct rdtgroup *rdtgrp, *entry;
 595	struct rmid_read *rr = info;
 596	struct list_head *head;
 597	int ret;
 598
 599	rdtgrp = rr->rgrp;
 600
 601	ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
 602
 603	/*
 604	 * For Ctrl groups read data from child monitor groups and
 605	 * add them together. Count events which are read successfully.
 606	 * Discard the rmid_reads that report errors.
 607	 */
 608	head = &rdtgrp->mon.crdtgrp_list;
 609
 610	if (rdtgrp->type == RDTCTRL_GROUP) {
 611		list_for_each_entry(entry, head, mon.crdtgrp_list) {
 612			if (__mon_event_count(entry->closid, entry->mon.rmid,
 613					      rr) == 0)
 614				ret = 0;
 615		}
 616	}
 617
 618	/*
 619	 * __mon_event_count() calls for newly created monitor groups may
 620	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
 621	 * Discard error if any of the monitor event reads succeeded.
 622	 */
 623	if (ret == 0)
 624		rr->err = 0;
 625}
 626
 627/*
 628 * Feedback loop for MBA software controller (mba_sc)
 629 *
 630 * mba_sc is a feedback loop where we periodically read MBM counters and
 631 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 632 * that:
 633 *
 634 *   current bandwidth (cur_bw) < user specified bandwidth (user_bw)
 635 *
 636 * This uses the MBM counters to measure the bandwidth and MBA throttle
 637 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 638 * fact that resctrl rdtgroups have both monitoring and control.
 639 *
 640 * The frequency of the checks is 1s and we just tag along the MBM overflow
 641 * timer. Having 1s interval makes the calculation of bandwidth simpler.
 642 *
 643 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 644 * be a need to increase the bandwidth to avoid unnecessarily restricting
 645 * the L2 <-> L3 traffic.
 646 *
 647 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 648 * L3 external bandwidth the following sequence could lead to such a
 649 * situation.
 650 *
 651 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 652 * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
 653 * after some time rdtgroup has mostly L2 <-> L3 traffic.
 654 *
 655 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 656 * throttle MSRs already have low percentage values.  To avoid
 657 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 658 */
 659static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 660{
 661	u32 closid, rmid, cur_msr_val, new_msr_val;
 662	struct mbm_state *pmbm_data, *cmbm_data;
 663	struct rdt_resource *r_mba;
 664	struct rdt_domain *dom_mba;
 665	u32 cur_bw, user_bw, idx;
 666	struct list_head *head;
 667	struct rdtgroup *entry;
 668
 669	if (!is_mbm_local_enabled())
 670		return;
 671
 672	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
 673
 674	closid = rgrp->closid;
 675	rmid = rgrp->mon.rmid;
 676	idx = resctrl_arch_rmid_idx_encode(closid, rmid);
 677	pmbm_data = &dom_mbm->mbm_local[idx];
 678
 679	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
 680	if (!dom_mba) {
 681		pr_warn_once("Failure to get domain for MBA update\n");
 682		return;
 683	}
 684
 685	cur_bw = pmbm_data->prev_bw;
 686	user_bw = dom_mba->mbps_val[closid];
 687
 688	/* MBA resource doesn't support CDP */
 689	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
 690
 691	/*
 692	 * For Ctrl groups read data from child monitor groups.
 693	 */
 694	head = &rgrp->mon.crdtgrp_list;
 695	list_for_each_entry(entry, head, mon.crdtgrp_list) {
 696		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
 697		cur_bw += cmbm_data->prev_bw;
 698	}
 699
 700	/*
 701	 * Scale up/down the bandwidth linearly for the ctrl group.  The
 702	 * bandwidth step is the bandwidth granularity specified by the
 703	 * hardware.
 704	 * Always increase throttling if current bandwidth is above the
 705	 * target set by user.
 706	 * But avoid thrashing up and down on every poll by checking
 707	 * whether a decrease in throttling is likely to push the group
 708	 * back over target. E.g. if currently throttling to 30% of bandwidth
 709	 * on a system with 10% granularity steps, check whether moving to
 710	 * 40% would go past the limit by multiplying current bandwidth by
 711	 * "(30 + 10) / 30".
 712	 */
 713	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
 714		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
 715	} else if (cur_msr_val < MAX_MBA_BW &&
 716		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
 717		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
 718	} else {
 719		return;
 720	}
 721
 722	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
 723}
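/*
 * A worked example of the step-up check above, with hypothetical
 * values min_bw = 10, bw_gran = 10 and cur_msr_val = 30: for a
 * measured cur_bw of 240MBps, throttling is only relaxed to 40 when
 * user_bw > 240 * (30 + 10) / 30 = 320MBps, i.e. when the group
 * should stay under target even after the larger allocation.
 */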
 724
 725static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
 726		       u32 closid, u32 rmid)
 727{
 728	struct rmid_read rr;
 729
 730	rr.first = false;
 731	rr.r = r;
 732	rr.d = d;
 733
 734	/*
 735	 * This is protected from concurrent reads from userspace
 736	 * as both userspace and this code hold the global mutex.
 737	 */
 738	if (is_mbm_total_enabled()) {
 739		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
 740		rr.val = 0;
 741		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
 742		if (IS_ERR(rr.arch_mon_ctx)) {
 743			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
 744					    PTR_ERR(rr.arch_mon_ctx));
 745			return;
 746		}
 747
 748		__mon_event_count(closid, rmid, &rr);
 749
 750		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
 751	}
 752	if (is_mbm_local_enabled()) {
 753		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
 754		rr.val = 0;
 755		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
 756		if (IS_ERR(rr.arch_mon_ctx)) {
 757			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
 758					    PTR_ERR(rr.arch_mon_ctx));
 759			return;
 760		}
 761
 762		__mon_event_count(closid, rmid, &rr);
 763
 764		/*
 765		 * Call the MBA software controller only for the
 766		 * control groups and when the user has enabled
 767		 * the software controller explicitly.
 768		 */
 769		if (is_mba_sc(NULL))
 770			mbm_bw_count(closid, rmid, &rr);
 771
 772		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
 773	}
 774}
 775
 776/*
 777 * Handler to scan the limbo list and move RMIDs whose
 778 * occupancy < threshold_occupancy to the free list.
 779 */
 780void cqm_handle_limbo(struct work_struct *work)
 781{
 782	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
 783	struct rdt_domain *d;
 784
 785	cpus_read_lock();
 786	mutex_lock(&rdtgroup_mutex);
 787
 788	d = container_of(work, struct rdt_domain, cqm_limbo.work);
 789
 790	__check_limbo(d, false);
 791
 792	if (has_busy_rmid(d)) {
 793		d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
 794							   RESCTRL_PICK_ANY_CPU);
 795		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
 796					 delay);
 797	}
 798
 799	mutex_unlock(&rdtgroup_mutex);
 800	cpus_read_unlock();
 801}
 802
 803/**
 804 * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
 805 *                             domain.
 806 * @dom:           The domain the limbo handler should run for.
 807 * @delay_ms:      How far in the future the handler should run.
 808 * @exclude_cpu:   Which CPU the handler should not run on,
 809 *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
 810 */
 811void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms,
 812			     int exclude_cpu)
 813{
 814	unsigned long delay = msecs_to_jiffies(delay_ms);
 815	int cpu;
 816
 817	cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
 818	dom->cqm_work_cpu = cpu;
 819
 820	if (cpu < nr_cpu_ids)
 821		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
 822}
 823
 824void mbm_handle_overflow(struct work_struct *work)
 825{
 826	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
 827	struct rdtgroup *prgrp, *crgrp;
 828	struct list_head *head;
 829	struct rdt_resource *r;
 830	struct rdt_domain *d;
 831
 832	cpus_read_lock();
 833	mutex_lock(&rdtgroup_mutex);
 834
 835	/*
 836	 * If the filesystem has been unmounted this work no longer needs to
 837	 * run.
 838	 */
 839	if (!resctrl_mounted || !resctrl_arch_mon_capable())
 840		goto out_unlock;
 841
 842	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 843	d = container_of(work, struct rdt_domain, mbm_over.work);
 844
 845	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
 846		mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
 847
 848		head = &prgrp->mon.crdtgrp_list;
 849		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
 850			mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
 851
 852		if (is_mba_sc(NULL))
 853			update_mba_bw(prgrp, d);
 854	}
 855
 856	/*
 857	 * Re-check for housekeeping CPUs. This allows the overflow handler to
 858	 * move off a nohz_full CPU quickly.
 859	 */
 860	d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
 861						   RESCTRL_PICK_ANY_CPU);
 862	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
 863
 864out_unlock:
 865	mutex_unlock(&rdtgroup_mutex);
 866	cpus_read_unlock();
 867}
 868
 869/**
 870 * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
 871 *                                domain.
 872 * @dom:           The domain the overflow handler should run for.
 873 * @delay_ms:      How far in the future the handler should run.
 874 * @exclude_cpu:   Which CPU the handler should not run on,
 875 *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
 876 */
 877void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms,
 878				int exclude_cpu)
 879{
 880	unsigned long delay = msecs_to_jiffies(delay_ms);
 881	int cpu;
 882
 883	/*
 884	 * When a domain comes online there is no guarantee the filesystem is
 885	 * mounted. If not, there is no need to catch counter overflow.
 886	 */
 887	if (!resctrl_mounted || !resctrl_arch_mon_capable())
 888		return;
 889	cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
 890	dom->mbm_work_cpu = cpu;
 891
 892	if (cpu < nr_cpu_ids)
 893		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
 894}
 895
 896static int dom_data_init(struct rdt_resource *r)
 897{
 898	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
 899	u32 num_closid = resctrl_arch_get_num_closid(r);
 900	struct rmid_entry *entry = NULL;
 901	int err = 0, i;
 902	u32 idx;
 903
 904	mutex_lock(&rdtgroup_mutex);
 905	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
 906		u32 *tmp;
 907
 908		/*
 909		 * If the architecture hasn't provided a sanitised value here,
 910		 * this may result in larger arrays than necessary. Resctrl will
 911		 * use a smaller system wide value based on the resources in
 912		 * use.
 913		 */
 914		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
 915		if (!tmp) {
 916			err = -ENOMEM;
 917			goto out_unlock;
 918		}
 919
 920		closid_num_dirty_rmid = tmp;
 921	}
 922
 923	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
 924	if (!rmid_ptrs) {
 925		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
 926			kfree(closid_num_dirty_rmid);
 927			closid_num_dirty_rmid = NULL;
 928		}
 929		err = -ENOMEM;
 930		goto out_unlock;
 931	}
 932
 933	for (i = 0; i < idx_limit; i++) {
 934		entry = &rmid_ptrs[i];
 935		INIT_LIST_HEAD(&entry->list);
 936
 937		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
 938		list_add_tail(&entry->list, &rmid_free_lru);
 939	}
 940
 941	/*
 942	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
 943	 * are always allocated. These are used for the rdtgroup_default
 944 * control group, which will be set up later in rdtgroup_init().
 945	 */
 946	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
 947					   RESCTRL_RESERVED_RMID);
 948	entry = __rmid_entry(idx);
 949	list_del(&entry->list);
 950
 951out_unlock:
 952	mutex_unlock(&rdtgroup_mutex);
 953
 954	return err;
 955}
 956
 957static void __exit dom_data_exit(void)
 958{
 959	mutex_lock(&rdtgroup_mutex);
 960
 961	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
 962		kfree(closid_num_dirty_rmid);
 963		closid_num_dirty_rmid = NULL;
 964	}
 965
 966	kfree(rmid_ptrs);
 967	rmid_ptrs = NULL;
 968
 969	mutex_unlock(&rdtgroup_mutex);
 970}
 971
 972static struct mon_evt llc_occupancy_event = {
 973	.name		= "llc_occupancy",
 974	.evtid		= QOS_L3_OCCUP_EVENT_ID,
 975};
 976
 977static struct mon_evt mbm_total_event = {
 978	.name		= "mbm_total_bytes",
 979	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
 980};
 981
 982static struct mon_evt mbm_local_event = {
 983	.name		= "mbm_local_bytes",
 984	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
 985};
 986
 987/*
 988 * Initialize the event list for the resource.
 989 *
 990 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 991 * because as per the SDM the total and local memory bandwidth
 992 * are enumerated as part of L3 monitoring.
 993 */
 994static void l3_mon_evt_init(struct rdt_resource *r)
 995{
 996	INIT_LIST_HEAD(&r->evt_list);
 997
 998	if (is_llc_occupancy_enabled())
 999		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
1000	if (is_mbm_total_enabled())
1001		list_add_tail(&mbm_total_event.list, &r->evt_list);
1002	if (is_mbm_local_enabled())
1003		list_add_tail(&mbm_local_event.list, &r->evt_list);
1004}
1005
1006int __init rdt_get_mon_l3_config(struct rdt_resource *r)
1007{
1008	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
1009	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
1010	unsigned int threshold;
1011	int ret;
1012
1013	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
1014	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
1015	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
1016	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
1017
1018	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
1019		hw_res->mbm_width += mbm_offset;
1020	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
1021		pr_warn("Ignoring impossible MBM counter offset\n");
1022
1023	/*
1024	 * A reasonable upper limit on the max threshold is the number
1025	 * of lines tagged per RMID if all RMIDs have the same number of
1026	 * lines tagged in the LLC.
1027	 *
1028	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
1029	 */
1030	threshold = resctrl_rmid_realloc_limit / r->num_rmid;
1031
1032	/*
1033	 * Because num_rmid may not be a power of two, round the value
1034	 * to the nearest multiple of hw_res->mon_scale so it matches a
1035	 * value the hardware will measure. mon_scale may not be a power of 2.
1036	 */
1037	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);
1038
1039	ret = dom_data_init(r);
1040	if (ret)
1041		return ret;
1042
1043	if (rdt_cpu_has(X86_FEATURE_BMEC)) {
1044		u32 eax, ebx, ecx, edx;
1045
1046		/* Detect list of bandwidth sources that can be tracked */
1047		cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx);
1048		hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
1049
1050		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
1051			mbm_total_event.configurable = true;
1052			mbm_config_rftype_init("mbm_total_bytes_config");
1053		}
1054		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
1055			mbm_local_event.configurable = true;
1056			mbm_config_rftype_init("mbm_local_bytes_config");
1057		}
1058	}
1059
1060	l3_mon_evt_init(r);
1061
1062	r->mon_capable = true;
1063
1064	return 0;
1065}
1066
1067void __exit rdt_put_mon_l3_config(void)
1068{
1069	dom_data_exit();
1070}
1071
1072void __init intel_rdt_mbm_apply_quirk(void)
1073{
1074	int cf_index;
1075
1076	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
1077	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
1078		pr_info("No MBM correction factor available\n");
1079		return;
1080	}
1081
1082	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
1083	mbm_cf = mbm_cf_table[cf_index].cf;
1084}