/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "intel_rdt.h"

/*
 * Check whether the MBA bandwidth percentage value is valid. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
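
/*
 * Illustration (hypothetical hardware parameters, not from this file): with
 * min_bw = 10, bw_gran = 10 and default_ctrl = 100, an input of "35" passes
 * the range check and is rounded up to 40 by the roundup() above, while "5"
 * is rejected as below the minimum.
 */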

int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
{
	unsigned long data;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(buf, &data, r))
		return -EINVAL;
	d->new_ctrl = data;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid. The SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 */
static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
		return false;
	}

	if (val == 0 || val > r->default_ctrl) {
		rdt_last_cmd_puts("mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
		rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
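
/*
 * Illustration (assuming cbm_len = 20): masks such as "3ff00" or "0000f"
 * pass cbm_validate() because their set bits are contiguous, "f0f0f" is
 * rejected for non-consecutive 1-bits, and on Haswell, where min_cbm_bits
 * is 2, a single-bit mask such as "1" is rejected as too short.
 */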

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
{
	unsigned long data;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!cbm_validate(buf, &data, r))
		return -EINVAL;
	d->new_ctrl = data;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct rdt_resource *r)
{
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			if (r->parse_ctrlval(dom, r, d))
				return -EINVAL;
			goto next;
		}
	}
	return -EINVAL;
}
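
/*
 * Illustration (hypothetical domain IDs and masks): for a cache resource
 * with two domains, the per-resource part of a schemata line might be
 *	0=7ff0;1=003f
 * parse_line() splits it at each ';', matches the decimal id against
 * r->domains and hands the value string to r->parse_ctrlval().
 */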

static int update_domains(struct rdt_resource *r, int closid)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.low = closid;
	msr_param.high = msr_param.low + 1;
	msr_param.res = r;

	list_for_each_entry(d, &r->domains, list) {
		if (d->have_new_ctrl && d->new_ctrl != d->ctrl_val[closid]) {
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
			d->ctrl_val[closid] = d->new_ctrl;
		}
	}
	if (cpumask_empty(cpu_mask))
		goto done;
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on other cpus. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}
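
/*
 * Note on the flow above: the control values live in per-domain MSRs, so
 * update_domains() picks one CPU from each domain whose value changed and
 * has it perform the MSR write, either directly when the current CPU is in
 * cpu_mask or via the IPIs sent by smp_call_function_many().
 */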

static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
{
	struct rdt_resource *r;

	for_each_alloc_enabled_rdt_resource(r) {
		if (!strcmp(resname, r->name) && closid < r->num_closid)
			return parse_line(tok, r);
	}
	rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int closid, ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	closid = rdtgrp->closid;

	for_each_alloc_enabled_rdt_resource(r) {
		list_for_each_entry(dom, &r->domains, list)
			dom->have_new_ctrl = false;
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, closid);
		if (ret)
			goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		ret = update_domains(r, closid);
		if (ret)
			goto out;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
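
/*
 * Illustrative usage (a sketch assuming the default resctrl mount point and
 * a group directory named "p1"): a write such as
 *	# echo "L3:0=ff;1=ff" > /sys/fs/resctrl/p1/schemata
 * or, for memory bandwidth,
 *	# echo "MB:0=50;1=100" > /sys/fs/resctrl/p1/schemata
 * arrives here, is split into per-resource lines at '\n' and then parsed by
 * rdtgroup_parse_resource(); only the domains named in the write have their
 * control values updated.
 */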

static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
{
	struct rdt_domain *dom;
	bool sep = false;

	seq_printf(s, "%*s:", max_name_width, r->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");
		seq_printf(s, r->format_str, dom->id, max_data_width,
			   dom->ctrl_val[closid]);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		closid = rdtgrp->closid;
		for_each_alloc_enabled_rdt_resource(r) {
			if (closid < r->num_closid)
				show_doms(s, r, closid);
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
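
/*
 * Example output (hypothetical domain IDs and values) from reading the
 * schemata file of a group on a system with L3 allocation and MBA enabled:
 *	L3:0=fffff;1=fffff
 *	MB:0=100;1=100
 * One line per alloc-enabled resource, each formatted by show_doms().
 */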

void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
		    struct rdtgroup *rdtgrp, int evtid, int first)
{
	/*
	 * Set up the parameters to send to the IPI to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid];
	d = rdt_find_domain(r, domid, NULL);
	if (!d) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, d, rdtgrp, evtid, false);

	if (rr.val & RMID_VAL_ERROR)
		seq_puts(m, "Error\n");
	else if (rr.val & RMID_VAL_UNAVAIL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val * r->mon_scale);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
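
/*
 * Illustration: reading one of the files under mon_data (e.g. an L3
 * occupancy counter) prints either a plain number scaled by r->mon_scale,
 * "Error" if the RMID read failed, or "Unavailable" if hardware reported
 * the value as not available, matching the three branches above.
 */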