   1/*
   2 * User interface for Resource Allocation in Resource Director Technology (RDT)
   3 *
   4 * Copyright (C) 2016 Intel Corporation
   5 *
   6 * Author: Fenghua Yu <fenghua.yu@intel.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify it
   9 * under the terms and conditions of the GNU General Public License,
  10 * version 2, as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope it will be useful, but WITHOUT
  13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  15 * more details.
  16 *
  17 * More information about RDT can be found in the Intel(R) x86 Architecture
  18 * Software Developer Manual.
  19 */
  20
  21#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
  22
  23#include <linux/cpu.h>
  24#include <linux/fs.h>
  25#include <linux/sysfs.h>
  26#include <linux/kernfs.h>
  27#include <linux/seq_buf.h>
  28#include <linux/seq_file.h>
  29#include <linux/sched/signal.h>
  30#include <linux/sched/task.h>
  31#include <linux/slab.h>
  32#include <linux/task_work.h>
  33
  34#include <uapi/linux/magic.h>
  35
  36#include <asm/intel_rdt_sched.h>
  37#include "intel_rdt.h"
  38
  39DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
  40DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  41DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
  42static struct kernfs_root *rdt_root;
  43struct rdtgroup rdtgroup_default;
  44LIST_HEAD(rdt_all_groups);
  45
  46/* Kernel fs node for "info" directory under root */
  47static struct kernfs_node *kn_info;
  48
  49/* Kernel fs node for "mon_groups" directory under root */
  50static struct kernfs_node *kn_mongrp;
  51
  52/* Kernel fs node for "mon_data" directory under root */
  53static struct kernfs_node *kn_mondata;
  54
  55static struct seq_buf last_cmd_status;
  56static char last_cmd_status_buf[512];
  57
  58void rdt_last_cmd_clear(void)
  59{
  60	lockdep_assert_held(&rdtgroup_mutex);
  61	seq_buf_clear(&last_cmd_status);
  62}
  63
  64void rdt_last_cmd_puts(const char *s)
  65{
  66	lockdep_assert_held(&rdtgroup_mutex);
  67	seq_buf_puts(&last_cmd_status, s);
  68}
  69
  70void rdt_last_cmd_printf(const char *fmt, ...)
  71{
  72	va_list ap;
  73
  74	va_start(ap, fmt);
  75	lockdep_assert_held(&rdtgroup_mutex);
  76	seq_buf_vprintf(&last_cmd_status, fmt, ap);
  77	va_end(ap);
  78}
  79
  80/*
  81 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
  82 * we can keep a bitmap of free CLOSIDs in a single integer.
  83 *
  84 * Using a global CLOSID across all resources has some advantages and
  85 * some drawbacks:
  86 * + We can simply set "current->closid" to assign a task to a resource
  87 *   group.
  88 * + Context switch code can avoid extra memory references deciding which
  89 *   CLOSID to load into the PQR_ASSOC MSR
  90 * - We give up some options in configuring resource groups across multi-socket
  91 *   systems.
  92 * - Our choices on how to configure each resource become progressively more
  93 *   limited as the number of resources grows.
  94 */
  95static int closid_free_map;
  96
  97static void closid_init(void)
  98{
  99	struct rdt_resource *r;
 100	int rdt_min_closid = 32;
 101
 102	/* Compute rdt_min_closid across all resources */
 103	for_each_alloc_enabled_rdt_resource(r)
 104		rdt_min_closid = min(rdt_min_closid, r->num_closid);
 105
 106	closid_free_map = BIT_MASK(rdt_min_closid) - 1;
 107
 108	/* CLOSID 0 is always reserved for the default group */
 109	closid_free_map &= ~1;
 110}
 111
 112static int closid_alloc(void)
 113{
 114	u32 closid = ffs(closid_free_map);
 115
 116	if (closid == 0)
 117		return -ENOSPC;
 118	closid--;
 119	closid_free_map &= ~(1 << closid);
 120
 121	return closid;
 122}
 123
 124static void closid_free(int closid)
 125{
 126	closid_free_map |= 1 << closid;
 127}
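
/*
 * Illustrative sketch, not part of this file: the same free-map idea as a
 * stand-alone user-space program. The demo_alloc()/demo_free() names are
 * hypothetical; closid_alloc() above works the same way, using ffs() to
 * find the lowest free bit and clearing it to mark the ID as in use.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int demo_free_map;

static int demo_alloc(void)
{
	int id = ffs(demo_free_map);	/* 1-based index of lowest set bit, 0 if none */

	if (!id)
		return -1;			/* map exhausted */
	demo_free_map &= ~(1u << (id - 1));	/* mark ID as allocated */
	return id - 1;
}

static void demo_free(int id)
{
	demo_free_map |= 1u << id;		/* mark ID as free again */
}

int main(void)
{
	demo_free_map = (1u << 4) - 1;	/* four IDs: 0..3 */
	demo_free_map &= ~1u;		/* ID 0 reserved, like CLOSID 0 above */
	printf("%d %d\n", demo_alloc(), demo_alloc());	/* prints "1 2" */
	demo_free(1);
	printf("%d\n", demo_alloc());			/* prints "1" again */
	return 0;
}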
 128
 129/* set uid and gid of rdtgroup dirs and files to that of the creator */
 130static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
 131{
 132	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
 133				.ia_uid = current_fsuid(),
 134				.ia_gid = current_fsgid(), };
 135
 136	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
 137	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
 138		return 0;
 139
 140	return kernfs_setattr(kn, &iattr);
 141}
 142
 143static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
 144{
 145	struct kernfs_node *kn;
 146	int ret;
 147
 148	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
 149				  0, rft->kf_ops, rft, NULL, NULL);
 150	if (IS_ERR(kn))
 151		return PTR_ERR(kn);
 152
 153	ret = rdtgroup_kn_set_ugid(kn);
 154	if (ret) {
 155		kernfs_remove(kn);
 156		return ret;
 157	}
 158
 159	return 0;
 160}
 161
 162static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
 163{
 164	struct kernfs_open_file *of = m->private;
 165	struct rftype *rft = of->kn->priv;
 166
 167	if (rft->seq_show)
 168		return rft->seq_show(of, m, arg);
 169	return 0;
 170}
 171
 172static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
 173				   size_t nbytes, loff_t off)
 174{
 175	struct rftype *rft = of->kn->priv;
 176
 177	if (rft->write)
 178		return rft->write(of, buf, nbytes, off);
 179
 180	return -EINVAL;
 181}
 182
 183static struct kernfs_ops rdtgroup_kf_single_ops = {
 184	.atomic_write_len	= PAGE_SIZE,
 185	.write			= rdtgroup_file_write,
 186	.seq_show		= rdtgroup_seqfile_show,
 187};
 188
 189static struct kernfs_ops kf_mondata_ops = {
 190	.atomic_write_len	= PAGE_SIZE,
 191	.seq_show		= rdtgroup_mondata_show,
 192};
 193
 194static bool is_cpu_list(struct kernfs_open_file *of)
 195{
 196	struct rftype *rft = of->kn->priv;
 197
 198	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
 199}
 200
 201static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 202			      struct seq_file *s, void *v)
 203{
 204	struct rdtgroup *rdtgrp;
 205	int ret = 0;
 206
 207	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 208
 209	if (rdtgrp) {
 210		seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 211			   cpumask_pr_args(&rdtgrp->cpu_mask));
 212	} else {
 213		ret = -ENOENT;
 214	}
 215	rdtgroup_kn_unlock(of->kn);
 216
 217	return ret;
 218}
 219
 220/*
 221 * This is safe against intel_rdt_sched_in() called from __switch_to()
 222 * because __switch_to() is executed with interrupts disabled. A local call
 223 * from update_closid_rmid() is protected against __switch_to() because
 224 * preemption is disabled.
 225 */
 226static void update_cpu_closid_rmid(void *info)
 227{
 228	struct rdtgroup *r = info;
 229
 230	if (r) {
 231		this_cpu_write(pqr_state.default_closid, r->closid);
 232		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
 233	}
 234
 235	/*
 236	 * We cannot unconditionally write the MSR because the currently
 237	 * executing task might have its own closid selected. Just reuse
 238	 * the context switch code.
 239	 */
 240	intel_rdt_sched_in();
 241}
 242
 243/*
 244 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 245 *
 246 * Per task closids/rmids must have been set up before calling this function.
 247 */
 248static void
 249update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
 250{
 251	int cpu = get_cpu();
 252
 253	if (cpumask_test_cpu(cpu, cpu_mask))
 254		update_cpu_closid_rmid(r);
 255	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
 256	put_cpu();
 257}
 258
 259static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 260			  cpumask_var_t tmpmask)
 261{
 262	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
 263	struct list_head *head;
 264
 265	/* Check whether cpus belong to parent ctrl group */
 266	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
 267	if (cpumask_weight(tmpmask)) {
 268		rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
 269		return -EINVAL;
 270	}
 271
 272	/* Check whether cpus are dropped from this group */
 273	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
 274	if (cpumask_weight(tmpmask)) {
 275		/* Give any dropped cpus to parent rdtgroup */
 276		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
 277		update_closid_rmid(tmpmask, prgrp);
 278	}
 279
 280	/*
 281	 * If we added cpus, remove them from previous group that owned them
 282	 * and update per-cpu rmid
 283	 */
 284	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
 285	if (cpumask_weight(tmpmask)) {
 286		head = &prgrp->mon.crdtgrp_list;
 287		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
 288			if (crgrp == rdtgrp)
 289				continue;
 290			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
 291				       tmpmask);
 292		}
 293		update_closid_rmid(tmpmask, rdtgrp);
 294	}
 295
 296	/* Done pushing/pulling - update this group with new mask */
 297	cpumask_copy(&rdtgrp->cpu_mask, newmask);
 298
 299	return 0;
 300}
 301
 302static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
 303{
 304	struct rdtgroup *crgrp;
 305
 306	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
 307	/* update the child mon group masks as well */
 308	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
 309		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
 310}
 311
 312static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 313			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
 314{
 315	struct rdtgroup *r, *crgrp;
 316	struct list_head *head;
 317
 318	/* Check whether cpus are dropped from this group */
 319	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
 320	if (cpumask_weight(tmpmask)) {
 321		/* Can't drop from default group */
 322		if (rdtgrp == &rdtgroup_default) {
 323			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
 324			return -EINVAL;
 325		}
 326
 327		/* Give any dropped cpus to rdtgroup_default */
 328		cpumask_or(&rdtgroup_default.cpu_mask,
 329			   &rdtgroup_default.cpu_mask, tmpmask);
 330		update_closid_rmid(tmpmask, &rdtgroup_default);
 331	}
 332
 333	/*
 334	 * If we added cpus, remove them from previous group and
 335	 * the prev group's child groups that owned them
 336	 * and update per-cpu closid/rmid.
 337	 */
 338	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
 339	if (cpumask_weight(tmpmask)) {
 340		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
 341			if (r == rdtgrp)
 342				continue;
 343			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
 344			if (cpumask_weight(tmpmask1))
 345				cpumask_rdtgrp_clear(r, tmpmask1);
 346		}
 347		update_closid_rmid(tmpmask, rdtgrp);
 348	}
 349
 350	/* Done pushing/pulling - update this group with new mask */
 351	cpumask_copy(&rdtgrp->cpu_mask, newmask);
 352
 353	/*
 354	 * Clear child mon group masks since there is a new parent mask
 355	 * now and update the rmid for the cpus the child lost.
 356	 */
 357	head = &rdtgrp->mon.crdtgrp_list;
 358	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
 359		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
 360		update_closid_rmid(tmpmask, rdtgrp);
 361		cpumask_clear(&crgrp->cpu_mask);
 362	}
 363
 364	return 0;
 365}
 366
 367static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 368				   char *buf, size_t nbytes, loff_t off)
 369{
 370	cpumask_var_t tmpmask, newmask, tmpmask1;
 371	struct rdtgroup *rdtgrp;
 372	int ret;
 373
 374	if (!buf)
 375		return -EINVAL;
 376
 377	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
 378		return -ENOMEM;
 379	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
 380		free_cpumask_var(tmpmask);
 381		return -ENOMEM;
 382	}
 383	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
 384		free_cpumask_var(tmpmask);
 385		free_cpumask_var(newmask);
 386		return -ENOMEM;
 387	}
 388
 389	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 390	rdt_last_cmd_clear();
 391	if (!rdtgrp) {
 392		ret = -ENOENT;
 393		rdt_last_cmd_puts("directory was removed\n");
 394		goto unlock;
 395	}
 396
 397	if (is_cpu_list(of))
 398		ret = cpulist_parse(buf, newmask);
 399	else
 400		ret = cpumask_parse(buf, newmask);
 401
 402	if (ret) {
 403		rdt_last_cmd_puts("bad cpu list/mask\n");
 404		goto unlock;
 405	}
 406
 407	/* check that user didn't specify any offline cpus */
 408	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
 409	if (cpumask_weight(tmpmask)) {
 410		ret = -EINVAL;
 411		rdt_last_cmd_puts("can only assign online cpus\n");
 412		goto unlock;
 413	}
 414
 415	if (rdtgrp->type == RDTCTRL_GROUP)
 416		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
 417	else if (rdtgrp->type == RDTMON_GROUP)
 418		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
 419	else
 420		ret = -EINVAL;
 421
 422unlock:
 423	rdtgroup_kn_unlock(of->kn);
 424	free_cpumask_var(tmpmask);
 425	free_cpumask_var(newmask);
 426	free_cpumask_var(tmpmask1);
 427
 428	return ret ?: nbytes;
 429}
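
/*
 * Illustrative sketch, not part of this file: driving the handler above
 * from user space by writing a CPU range to a group's "cpus_list" file.
 * The mount point and group name ("/sys/fs/resctrl", "grp1") are assumed
 * for the example; on failure the kernel leaves a reason in
 * info/last_cmd_status.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cpus = "0-3\n";	/* cpus_list takes a range list; "cpus" takes a hex mask */
	int fd = open("/sys/fs/resctrl/grp1/cpus_list", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cpus, strlen(cpus)) != (ssize_t)strlen(cpus))
		perror("write");
	close(fd);
	return 0;
}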
 430
 431struct task_move_callback {
 432	struct callback_head	work;
 433	struct rdtgroup		*rdtgrp;
 434};
 435
 436static void move_myself(struct callback_head *head)
 437{
 438	struct task_move_callback *callback;
 439	struct rdtgroup *rdtgrp;
 440
 441	callback = container_of(head, struct task_move_callback, work);
 442	rdtgrp = callback->rdtgrp;
 443
 444	/*
 445	 * If resource group was deleted before this task work callback
 446	 * was invoked, then assign the task to root group and free the
 447	 * resource group.
 448	 */
 449	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
 450	    (rdtgrp->flags & RDT_DELETED)) {
 451		current->closid = 0;
 452		current->rmid = 0;
 453		kfree(rdtgrp);
 454	}
 455
 456	preempt_disable();
 457	/* update PQR_ASSOC MSR to make resource group go into effect */
 458	intel_rdt_sched_in();
 459	preempt_enable();
 460
 461	kfree(callback);
 462}
 463
 464static int __rdtgroup_move_task(struct task_struct *tsk,
 465				struct rdtgroup *rdtgrp)
 466{
 467	struct task_move_callback *callback;
 468	int ret;
 469
 470	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
 471	if (!callback)
 472		return -ENOMEM;
 473	callback->work.func = move_myself;
 474	callback->rdtgrp = rdtgrp;
 475
 476	/*
 477	 * Take a refcount, so rdtgrp cannot be freed before the
 478	 * callback has been invoked.
 479	 */
 480	atomic_inc(&rdtgrp->waitcount);
 481	ret = task_work_add(tsk, &callback->work, true);
 482	if (ret) {
 483		/*
 484		 * Task is exiting. Drop the refcount and free the callback.
 485		 * No need to check the refcount as the group cannot be
 486		 * deleted before the write function unlocks rdtgroup_mutex.
 487		 */
 488		atomic_dec(&rdtgrp->waitcount);
 489		kfree(callback);
 490		rdt_last_cmd_puts("task exited\n");
 491	} else {
 492		/*
 493		 * For ctrl_mon groups move both closid and rmid.
 494		 * For monitor groups, can move the tasks only from
 495		 * their parent CTRL group.
 496		 */
 497		if (rdtgrp->type == RDTCTRL_GROUP) {
 498			tsk->closid = rdtgrp->closid;
 499			tsk->rmid = rdtgrp->mon.rmid;
 500		} else if (rdtgrp->type == RDTMON_GROUP) {
 501			if (rdtgrp->mon.parent->closid == tsk->closid) {
 502				tsk->rmid = rdtgrp->mon.rmid;
 503			} else {
 504				rdt_last_cmd_puts("Can't move task to different control group\n");
 505				ret = -EINVAL;
 506			}
 507		}
 508	}
 509	return ret;
 510}
 511
 512static int rdtgroup_task_write_permission(struct task_struct *task,
 513					  struct kernfs_open_file *of)
 514{
 515	const struct cred *tcred = get_task_cred(task);
 516	const struct cred *cred = current_cred();
 517	int ret = 0;
 518
 519	/*
 520	 * Even if we're attaching all tasks in the thread group, we only
 521	 * need to check permissions on one of them.
 522	 */
 523	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 524	    !uid_eq(cred->euid, tcred->uid) &&
 525	    !uid_eq(cred->euid, tcred->suid)) {
 526		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
 527		ret = -EPERM;
 528	}
 529
 530	put_cred(tcred);
 531	return ret;
 532}
 533
 534static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
 535			      struct kernfs_open_file *of)
 536{
 537	struct task_struct *tsk;
 538	int ret;
 539
 540	rcu_read_lock();
 541	if (pid) {
 542		tsk = find_task_by_vpid(pid);
 543		if (!tsk) {
 544			rcu_read_unlock();
 545			rdt_last_cmd_printf("No task %d\n", pid);
 546			return -ESRCH;
 547		}
 548	} else {
 549		tsk = current;
 550	}
 551
 552	get_task_struct(tsk);
 553	rcu_read_unlock();
 554
 555	ret = rdtgroup_task_write_permission(tsk, of);
 556	if (!ret)
 557		ret = __rdtgroup_move_task(tsk, rdtgrp);
 558
 559	put_task_struct(tsk);
 560	return ret;
 561}
 562
 563static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
 564				    char *buf, size_t nbytes, loff_t off)
 565{
 566	struct rdtgroup *rdtgrp;
 567	int ret = 0;
 568	pid_t pid;
 569
 570	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 571		return -EINVAL;
 572	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 573	rdt_last_cmd_clear();
 574
 575	if (rdtgrp)
 576		ret = rdtgroup_move_task(pid, rdtgrp, of);
 577	else
 578		ret = -ENOENT;
 579
 580	rdtgroup_kn_unlock(of->kn);
 581
 582	return ret ?: nbytes;
 583}
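
/*
 * Illustrative sketch, not part of this file: moving the calling process
 * into a group by writing its PID to the group's "tasks" file. The path
 * is an assumed example; per the handler above, writing "0" moves the
 * writer itself.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/resctrl/grp1/tasks", O_WRONLY);	/* "grp1" is a placeholder */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (dprintf(fd, "%d\n", getpid()) < 0)	/* one PID per write */
		perror("dprintf");
	close(fd);
	return 0;
}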
 584
 585static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
 586{
 587	struct task_struct *p, *t;
 588
 589	rcu_read_lock();
 590	for_each_process_thread(p, t) {
 591		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
 592		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
 593			seq_printf(s, "%d\n", t->pid);
 594	}
 595	rcu_read_unlock();
 596}
 597
 598static int rdtgroup_tasks_show(struct kernfs_open_file *of,
 599			       struct seq_file *s, void *v)
 600{
 601	struct rdtgroup *rdtgrp;
 602	int ret = 0;
 603
 604	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 605	if (rdtgrp)
 606		show_rdt_tasks(rdtgrp, s);
 607	else
 608		ret = -ENOENT;
 609	rdtgroup_kn_unlock(of->kn);
 610
 611	return ret;
 612}
 613
 614static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
 615				    struct seq_file *seq, void *v)
 616{
 617	int len;
 618
 619	mutex_lock(&rdtgroup_mutex);
 620	len = seq_buf_used(&last_cmd_status);
 621	if (len)
 622		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
 623	else
 624		seq_puts(seq, "ok\n");
 625	mutex_unlock(&rdtgroup_mutex);
 626	return 0;
 627}
 628
 629static int rdt_num_closids_show(struct kernfs_open_file *of,
 630				struct seq_file *seq, void *v)
 631{
 632	struct rdt_resource *r = of->kn->parent->priv;
 633
 634	seq_printf(seq, "%d\n", r->num_closid);
 635	return 0;
 636}
 637
 638static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 639			     struct seq_file *seq, void *v)
 640{
 641	struct rdt_resource *r = of->kn->parent->priv;
 642
 643	seq_printf(seq, "%x\n", r->default_ctrl);
 644	return 0;
 645}
 646
 647static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
 648			     struct seq_file *seq, void *v)
 649{
 650	struct rdt_resource *r = of->kn->parent->priv;
 651
 652	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
 653	return 0;
 654}
 655
 656static int rdt_shareable_bits_show(struct kernfs_open_file *of,
 657				   struct seq_file *seq, void *v)
 658{
 659	struct rdt_resource *r = of->kn->parent->priv;
 660
 661	seq_printf(seq, "%x\n", r->cache.shareable_bits);
 662	return 0;
 663}
 664
 665static int rdt_min_bw_show(struct kernfs_open_file *of,
 666			     struct seq_file *seq, void *v)
 667{
 668	struct rdt_resource *r = of->kn->parent->priv;
 669
 670	seq_printf(seq, "%u\n", r->membw.min_bw);
 671	return 0;
 672}
 673
 674static int rdt_num_rmids_show(struct kernfs_open_file *of,
 675			      struct seq_file *seq, void *v)
 676{
 677	struct rdt_resource *r = of->kn->parent->priv;
 678
 679	seq_printf(seq, "%d\n", r->num_rmid);
 680
 681	return 0;
 682}
 683
 684static int rdt_mon_features_show(struct kernfs_open_file *of,
 685				 struct seq_file *seq, void *v)
 686{
 687	struct rdt_resource *r = of->kn->parent->priv;
 688	struct mon_evt *mevt;
 689
 690	list_for_each_entry(mevt, &r->evt_list, list)
 691		seq_printf(seq, "%s\n", mevt->name);
 692
 693	return 0;
 694}
 695
 696static int rdt_bw_gran_show(struct kernfs_open_file *of,
 697			     struct seq_file *seq, void *v)
 698{
 699	struct rdt_resource *r = of->kn->parent->priv;
 700
 701	seq_printf(seq, "%u\n", r->membw.bw_gran);
 702	return 0;
 703}
 704
 705static int rdt_delay_linear_show(struct kernfs_open_file *of,
 706			     struct seq_file *seq, void *v)
 707{
 708	struct rdt_resource *r = of->kn->parent->priv;
 709
 710	seq_printf(seq, "%u\n", r->membw.delay_linear);
 711	return 0;
 712}
 713
 714static int max_threshold_occ_show(struct kernfs_open_file *of,
 715				  struct seq_file *seq, void *v)
 716{
 717	struct rdt_resource *r = of->kn->parent->priv;
 718
 719	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
 720
 721	return 0;
 722}
 723
 724static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
 725				       char *buf, size_t nbytes, loff_t off)
 726{
 727	struct rdt_resource *r = of->kn->parent->priv;
 728	unsigned int bytes;
 729	int ret;
 730
 731	ret = kstrtouint(buf, 0, &bytes);
 732	if (ret)
 733		return ret;
 734
 735	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
 736		return -EINVAL;
 737
 738	intel_cqm_threshold = bytes / r->mon_scale;
 739
 740	return nbytes;
 741}
 742
 743/* rdtgroup information files for one cache resource. */
 744static struct rftype res_common_files[] = {
 745	{
 746		.name		= "last_cmd_status",
 747		.mode		= 0444,
 748		.kf_ops		= &rdtgroup_kf_single_ops,
 749		.seq_show	= rdt_last_cmd_status_show,
 750		.fflags		= RF_TOP_INFO,
 751	},
 752	{
 753		.name		= "num_closids",
 754		.mode		= 0444,
 755		.kf_ops		= &rdtgroup_kf_single_ops,
 756		.seq_show	= rdt_num_closids_show,
 757		.fflags		= RF_CTRL_INFO,
 758	},
 759	{
 760		.name		= "mon_features",
 761		.mode		= 0444,
 762		.kf_ops		= &rdtgroup_kf_single_ops,
 763		.seq_show	= rdt_mon_features_show,
 764		.fflags		= RF_MON_INFO,
 765	},
 766	{
 767		.name		= "num_rmids",
 768		.mode		= 0444,
 769		.kf_ops		= &rdtgroup_kf_single_ops,
 770		.seq_show	= rdt_num_rmids_show,
 771		.fflags		= RF_MON_INFO,
 772	},
 773	{
 774		.name		= "cbm_mask",
 775		.mode		= 0444,
 776		.kf_ops		= &rdtgroup_kf_single_ops,
 777		.seq_show	= rdt_default_ctrl_show,
 778		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
 779	},
 780	{
 781		.name		= "min_cbm_bits",
 782		.mode		= 0444,
 783		.kf_ops		= &rdtgroup_kf_single_ops,
 784		.seq_show	= rdt_min_cbm_bits_show,
 785		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
 786	},
 787	{
 788		.name		= "shareable_bits",
 789		.mode		= 0444,
 790		.kf_ops		= &rdtgroup_kf_single_ops,
 791		.seq_show	= rdt_shareable_bits_show,
 792		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
 793	},
 794	{
 795		.name		= "min_bandwidth",
 796		.mode		= 0444,
 797		.kf_ops		= &rdtgroup_kf_single_ops,
 798		.seq_show	= rdt_min_bw_show,
 799		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
 800	},
 801	{
 802		.name		= "bandwidth_gran",
 803		.mode		= 0444,
 804		.kf_ops		= &rdtgroup_kf_single_ops,
 805		.seq_show	= rdt_bw_gran_show,
 806		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
 807	},
 808	{
 809		.name		= "delay_linear",
 810		.mode		= 0444,
 811		.kf_ops		= &rdtgroup_kf_single_ops,
 812		.seq_show	= rdt_delay_linear_show,
 813		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
 814	},
 815	{
 816		.name		= "max_threshold_occupancy",
 817		.mode		= 0644,
 818		.kf_ops		= &rdtgroup_kf_single_ops,
 819		.write		= max_threshold_occ_write,
 820		.seq_show	= max_threshold_occ_show,
 821		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
 822	},
 823	{
 824		.name		= "cpus",
 825		.mode		= 0644,
 826		.kf_ops		= &rdtgroup_kf_single_ops,
 827		.write		= rdtgroup_cpus_write,
 828		.seq_show	= rdtgroup_cpus_show,
 829		.fflags		= RFTYPE_BASE,
 830	},
 831	{
 832		.name		= "cpus_list",
 833		.mode		= 0644,
 834		.kf_ops		= &rdtgroup_kf_single_ops,
 835		.write		= rdtgroup_cpus_write,
 836		.seq_show	= rdtgroup_cpus_show,
 837		.flags		= RFTYPE_FLAGS_CPUS_LIST,
 838		.fflags		= RFTYPE_BASE,
 839	},
 840	{
 841		.name		= "tasks",
 842		.mode		= 0644,
 843		.kf_ops		= &rdtgroup_kf_single_ops,
 844		.write		= rdtgroup_tasks_write,
 845		.seq_show	= rdtgroup_tasks_show,
 846		.fflags		= RFTYPE_BASE,
 847	},
 848	{
 849		.name		= "schemata",
 850		.mode		= 0644,
 851		.kf_ops		= &rdtgroup_kf_single_ops,
 852		.write		= rdtgroup_schemata_write,
 853		.seq_show	= rdtgroup_schemata_show,
 854		.fflags		= RF_CTRL_BASE,
 855	},
 856};
 857
 858static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
 859{
 860	struct rftype *rfts, *rft;
 861	int ret, len;
 862
 863	rfts = res_common_files;
 864	len = ARRAY_SIZE(res_common_files);
 865
 866	lockdep_assert_held(&rdtgroup_mutex);
 867
 868	for (rft = rfts; rft < rfts + len; rft++) {
 869		if ((fflags & rft->fflags) == rft->fflags) {
 870			ret = rdtgroup_add_file(kn, rft);
 871			if (ret)
 872				goto error;
 873		}
 874	}
 875
 876	return 0;
 877error:
 878	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
 879	while (--rft >= rfts) {
 880		if ((fflags & rft->fflags) == rft->fflags)
 881			kernfs_remove_by_name(kn, rft->name);
 882	}
 883	return ret;
 884}
 885
 886static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
 887				      unsigned long fflags)
 888{
 889	struct kernfs_node *kn_subdir;
 890	int ret;
 891
 892	kn_subdir = kernfs_create_dir(kn_info, name,
 893				      kn_info->mode, r);
 894	if (IS_ERR(kn_subdir))
 895		return PTR_ERR(kn_subdir);
 896
 897	kernfs_get(kn_subdir);
 898	ret = rdtgroup_kn_set_ugid(kn_subdir);
 899	if (ret)
 900		return ret;
 901
 902	ret = rdtgroup_add_files(kn_subdir, fflags);
 903	if (!ret)
 904		kernfs_activate(kn_subdir);
 905
 906	return ret;
 907}
 908
 909static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
 910{
 911	struct rdt_resource *r;
 912	unsigned long fflags;
 913	char name[32];
 914	int ret;
 915
 916	/* create the directory */
 917	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
 918	if (IS_ERR(kn_info))
 919		return PTR_ERR(kn_info);
 920	kernfs_get(kn_info);
 921
 922	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
 923	if (ret)
 924		goto out_destroy;
 925
 926	for_each_alloc_enabled_rdt_resource(r) {
 927		fflags =  r->fflags | RF_CTRL_INFO;
 928		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
 929		if (ret)
 930			goto out_destroy;
 931	}
 932
 933	for_each_mon_enabled_rdt_resource(r) {
 934		fflags =  r->fflags | RF_MON_INFO;
 935		sprintf(name, "%s_MON", r->name);
 936		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
 937		if (ret)
 938			goto out_destroy;
 939	}
 940
 941	/*
 942	 * This extra ref will be put in kernfs_remove() and guarantees
 943	 * that kn_info is always accessible.
 944	 */
 945	kernfs_get(kn_info);
 946
 947	ret = rdtgroup_kn_set_ugid(kn_info);
 948	if (ret)
 949		goto out_destroy;
 950
 951	kernfs_activate(kn_info);
 952
 953	return 0;
 954
 955out_destroy:
 956	kernfs_remove(kn_info);
 957	return ret;
 958}
 959
 960static int
 961mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
 962		    char *name, struct kernfs_node **dest_kn)
 963{
 964	struct kernfs_node *kn;
 965	int ret;
 966
 967	/* create the directory */
 968	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
 969	if (IS_ERR(kn))
 970		return PTR_ERR(kn);
 971
 972	if (dest_kn)
 973		*dest_kn = kn;
 974
 975	/*
 976	 * This extra ref will be put in kernfs_remove() and guarantees
 977	 * that @rdtgrp->kn is always accessible.
 978	 */
 979	kernfs_get(kn);
 980
 981	ret = rdtgroup_kn_set_ugid(kn);
 982	if (ret)
 983		goto out_destroy;
 984
 985	kernfs_activate(kn);
 986
 987	return 0;
 988
 989out_destroy:
 990	kernfs_remove(kn);
 991	return ret;
 992}
 993
 994static void l3_qos_cfg_update(void *arg)
 995{
 996	bool *enable = arg;
 997
 998	wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
 999}
1000
1001static void l2_qos_cfg_update(void *arg)
1002{
1003	bool *enable = arg;
1004
1005	wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
1006}
1007
1008static int set_cache_qos_cfg(int level, bool enable)
1009{
1010	void (*update)(void *arg);
1011	struct rdt_resource *r_l;
1012	cpumask_var_t cpu_mask;
1013	struct rdt_domain *d;
1014	int cpu;
1015
1016	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1017		return -ENOMEM;
1018
1019	if (level == RDT_RESOURCE_L3)
1020		update = l3_qos_cfg_update;
1021	else if (level == RDT_RESOURCE_L2)
1022		update = l2_qos_cfg_update;
1023	else
1024		return -EINVAL;
1025
1026	r_l = &rdt_resources_all[level];
1027	list_for_each_entry(d, &r_l->domains, list) {
1028		/* Pick one CPU from each domain instance to update MSR */
1029		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1030	}
1031	cpu = get_cpu();
1032	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
1033	if (cpumask_test_cpu(cpu, cpu_mask))
1034		update(&enable);
1035	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
1036	smp_call_function_many(cpu_mask, update, &enable, 1);
1037	put_cpu();
1038
1039	free_cpumask_var(cpu_mask);
1040
1041	return 0;
1042}
1043
1044static int cdp_enable(int level, int data_type, int code_type)
1045{
1046	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
1047	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
1048	struct rdt_resource *r_l = &rdt_resources_all[level];
1049	int ret;
1050
1051	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
1052	    !r_lcode->alloc_capable)
1053		return -EINVAL;
1054
1055	ret = set_cache_qos_cfg(level, true);
1056	if (!ret) {
1057		r_l->alloc_enabled = false;
1058		r_ldata->alloc_enabled = true;
1059		r_lcode->alloc_enabled = true;
1060	}
1061	return ret;
1062}
1063
1064static int cdpl3_enable(void)
1065{
1066	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
1067			  RDT_RESOURCE_L3CODE);
1068}
1069
1070static int cdpl2_enable(void)
1071{
1072	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
1073			  RDT_RESOURCE_L2CODE);
1074}
1075
1076static void cdp_disable(int level, int data_type, int code_type)
1077{
1078	struct rdt_resource *r = &rdt_resources_all[level];
1079
1080	r->alloc_enabled = r->alloc_capable;
1081
1082	if (rdt_resources_all[data_type].alloc_enabled) {
1083		rdt_resources_all[data_type].alloc_enabled = false;
1084		rdt_resources_all[code_type].alloc_enabled = false;
1085		set_cache_qos_cfg(level, false);
1086	}
1087}
1088
1089static void cdpl3_disable(void)
1090{
1091	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
1092}
1093
1094static void cdpl2_disable(void)
1095{
1096	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
1097}
1098
1099static void cdp_disable_all(void)
1100{
1101	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
1102		cdpl3_disable();
1103	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
1104		cdpl2_disable();
1105}
1106
1107static int parse_rdtgroupfs_options(char *data)
1108{
1109	char *token, *o = data;
1110	int ret = 0;
1111
1112	while ((token = strsep(&o, ",")) != NULL) {
1113		if (!*token) {
1114			ret = -EINVAL;
1115			goto out;
1116		}
1117
1118		if (!strcmp(token, "cdp")) {
1119			ret = cdpl3_enable();
1120			if (ret)
1121				goto out;
1122		} else if (!strcmp(token, "cdpl2")) {
1123			ret = cdpl2_enable();
1124			if (ret)
1125				goto out;
1126		} else {
1127			ret = -EINVAL;
1128			goto out;
1129		}
1130	}
1131
1132	return 0;
1133
1134out:
1135	pr_err("Invalid mount option \"%s\"\n", token);
1136
1137	return ret;
1138}
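
/*
 * Illustrative sketch, not part of this file: the mount options parsed
 * above ("cdp", "cdpl2") arrive as the data argument of mount(2). The
 * target directory is the conventional /sys/fs/resctrl mount point set
 * up in rdtgroup_init() below; mounting requires CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Equivalent of: mount -t resctrl -o cdp resctrl /sys/fs/resctrl */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "cdp")) {
		perror("mount");
		return 1;
	}
	return 0;
}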
1139
1140/*
1141 * We don't allow rdtgroup directories to be created anywhere
1142 * except the root directory. Thus when looking for the rdtgroup
1143 * structure for a kernfs node we are either looking at a directory,
1144 * in which case the rdtgroup structure is pointed at by the "priv"
1145 * field, or at a file, in which case we need only look to the parent
1146 * to find the rdtgroup.
1147 */
1148static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
1149{
1150	if (kernfs_type(kn) == KERNFS_DIR) {
1151		/*
1152		 * All the resource directories use "kn->priv"
1153		 * to point to the "struct rdtgroup" for the
1154		 * resource. "info" and its subdirectories don't
1155		 * have rdtgroup structures, so return NULL here.
1156		 */
1157		if (kn == kn_info || kn->parent == kn_info)
1158			return NULL;
1159		else
1160			return kn->priv;
1161	} else {
1162		return kn->parent->priv;
1163	}
1164}
1165
1166struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
1167{
1168	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1169
1170	if (!rdtgrp)
1171		return NULL;
1172
1173	atomic_inc(&rdtgrp->waitcount);
1174	kernfs_break_active_protection(kn);
1175
1176	mutex_lock(&rdtgroup_mutex);
1177
1178	/* Was this group deleted while we waited? */
1179	if (rdtgrp->flags & RDT_DELETED)
1180		return NULL;
1181
1182	return rdtgrp;
1183}
1184
1185void rdtgroup_kn_unlock(struct kernfs_node *kn)
1186{
1187	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1188
1189	if (!rdtgrp)
1190		return;
1191
1192	mutex_unlock(&rdtgroup_mutex);
1193
1194	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
1195	    (rdtgrp->flags & RDT_DELETED)) {
1196		kernfs_unbreak_active_protection(kn);
1197		kernfs_put(rdtgrp->kn);
1198		kfree(rdtgrp);
1199	} else {
1200		kernfs_unbreak_active_protection(kn);
1201	}
1202}
1203
1204static int mkdir_mondata_all(struct kernfs_node *parent_kn,
1205			     struct rdtgroup *prgrp,
1206			     struct kernfs_node **mon_data_kn);
1207
1208static struct dentry *rdt_mount(struct file_system_type *fs_type,
1209				int flags, const char *unused_dev_name,
1210				void *data)
1211{
1212	struct rdt_domain *dom;
1213	struct rdt_resource *r;
1214	struct dentry *dentry;
1215	int ret;
1216
1217	cpus_read_lock();
1218	mutex_lock(&rdtgroup_mutex);
1219	/*
1220	 * resctrl file system can only be mounted once.
1221	 */
1222	if (static_branch_unlikely(&rdt_enable_key)) {
1223		dentry = ERR_PTR(-EBUSY);
1224		goto out;
1225	}
1226
1227	ret = parse_rdtgroupfs_options(data);
1228	if (ret) {
1229		dentry = ERR_PTR(ret);
1230		goto out_cdp;
1231	}
1232
1233	closid_init();
1234
1235	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
1236	if (ret) {
1237		dentry = ERR_PTR(ret);
1238		goto out_cdp;
1239	}
1240
1241	if (rdt_mon_capable) {
1242		ret = mongroup_create_dir(rdtgroup_default.kn,
1243					  NULL, "mon_groups",
1244					  &kn_mongrp);
1245		if (ret) {
1246			dentry = ERR_PTR(ret);
1247			goto out_info;
1248		}
1249		kernfs_get(kn_mongrp);
1250
1251		ret = mkdir_mondata_all(rdtgroup_default.kn,
1252					&rdtgroup_default, &kn_mondata);
1253		if (ret) {
1254			dentry = ERR_PTR(ret);
1255			goto out_mongrp;
1256		}
1257		kernfs_get(kn_mondata);
1258		rdtgroup_default.mon.mon_data_kn = kn_mondata;
1259	}
1260
1261	dentry = kernfs_mount(fs_type, flags, rdt_root,
1262			      RDTGROUP_SUPER_MAGIC, NULL);
1263	if (IS_ERR(dentry))
1264		goto out_mondata;
1265
1266	if (rdt_alloc_capable)
1267		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
1268	if (rdt_mon_capable)
1269		static_branch_enable_cpuslocked(&rdt_mon_enable_key);
1270
1271	if (rdt_alloc_capable || rdt_mon_capable)
1272		static_branch_enable_cpuslocked(&rdt_enable_key);
1273
1274	if (is_mbm_enabled()) {
1275		r = &rdt_resources_all[RDT_RESOURCE_L3];
1276		list_for_each_entry(dom, &r->domains, list)
1277			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
1278	}
1279
1280	goto out;
1281
1282out_mondata:
1283	if (rdt_mon_capable)
1284		kernfs_remove(kn_mondata);
1285out_mongrp:
1286	if (rdt_mon_capable)
1287		kernfs_remove(kn_mongrp);
1288out_info:
1289	kernfs_remove(kn_info);
1290out_cdp:
1291	cdp_disable_all();
1292out:
1293	rdt_last_cmd_clear();
1294	mutex_unlock(&rdtgroup_mutex);
1295	cpus_read_unlock();
1296
1297	return dentry;
1298}
1299
1300static int reset_all_ctrls(struct rdt_resource *r)
1301{
1302	struct msr_param msr_param;
1303	cpumask_var_t cpu_mask;
1304	struct rdt_domain *d;
1305	int i, cpu;
1306
1307	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1308		return -ENOMEM;
1309
1310	msr_param.res = r;
1311	msr_param.low = 0;
1312	msr_param.high = r->num_closid;
1313
1314	/*
1315	 * Disable resource control for this resource by setting all
1316	 * CBMs in all domains to the maximum mask value. Pick one CPU
1317	 * from each domain to update the MSRs below.
1318	 */
1319	list_for_each_entry(d, &r->domains, list) {
1320		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1321
1322		for (i = 0; i < r->num_closid; i++)
1323			d->ctrl_val[i] = r->default_ctrl;
1324	}
1325	cpu = get_cpu();
1326	/* Update CBM on this cpu if it's in cpu_mask. */
1327	if (cpumask_test_cpu(cpu, cpu_mask))
1328		rdt_ctrl_update(&msr_param);
1329	/* Update CBM on all other cpus in cpu_mask. */
1330	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
1331	put_cpu();
1332
1333	free_cpumask_var(cpu_mask);
1334
1335	return 0;
1336}
1337
1338static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
1339{
1340	return (rdt_alloc_capable &&
1341		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
1342}
1343
1344static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
1345{
1346	return (rdt_mon_capable &&
1347		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
1348}
1349
1350/*
1351 * Move tasks from one to the other group. If @from is NULL, then all tasks
1352 * in the system are moved unconditionally (used for teardown).
1353 *
1354 * If @mask is not NULL the cpus on which moved tasks are running are set
1355 * in that mask so the update smp function call is restricted to affected
1356 * cpus.
1357 */
1358static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
1359				 struct cpumask *mask)
1360{
1361	struct task_struct *p, *t;
1362
1363	read_lock(&tasklist_lock);
1364	for_each_process_thread(p, t) {
1365		if (!from || is_closid_match(t, from) ||
1366		    is_rmid_match(t, from)) {
1367			t->closid = to->closid;
1368			t->rmid = to->mon.rmid;
1369
1370#ifdef CONFIG_SMP
1371			/*
1372			 * This is safe on x86 w/o barriers as the ordering
1373			 * of writing to task_cpu() and t->on_cpu is
1374			 * reverse to the reading here. The detection is
1375			 * inaccurate as tasks might move or schedule
1376			 * before the smp function call takes place. In
1377			 * such a case the function call is pointless, but
1378			 * there is no other side effect.
1379			 */
1380			if (mask && t->on_cpu)
1381				cpumask_set_cpu(task_cpu(t), mask);
1382#endif
1383		}
1384	}
1385	read_unlock(&tasklist_lock);
1386}
1387
1388static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
1389{
1390	struct rdtgroup *sentry, *stmp;
1391	struct list_head *head;
1392
1393	head = &rdtgrp->mon.crdtgrp_list;
1394	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
1395		free_rmid(sentry->mon.rmid);
1396		list_del(&sentry->mon.crdtgrp_list);
1397		kfree(sentry);
1398	}
1399}
1400
1401/*
1402 * Forcibly remove all subdirectories under root.
1403 */
1404static void rmdir_all_sub(void)
1405{
1406	struct rdtgroup *rdtgrp, *tmp;
1407
1408	/* Move all tasks to the default resource group */
1409	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
1410
1411	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
1412		/* Free any child rmids */
1413		free_all_child_rdtgrp(rdtgrp);
1414
1415		/* Remove each rdtgroup other than root */
1416		if (rdtgrp == &rdtgroup_default)
1417			continue;
1418
1419		/*
1420		 * Give any CPUs back to the default group. We cannot copy
1421		 * cpu_online_mask because a CPU might have executed the
1422		 * offline callback already, but is still marked online.
1423		 */
1424		cpumask_or(&rdtgroup_default.cpu_mask,
1425			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
1426
1427		free_rmid(rdtgrp->mon.rmid);
1428
1429		kernfs_remove(rdtgrp->kn);
1430		list_del(&rdtgrp->rdtgroup_list);
1431		kfree(rdtgrp);
1432	}
1433	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
1434	update_closid_rmid(cpu_online_mask, &rdtgroup_default);
1435
1436	kernfs_remove(kn_info);
1437	kernfs_remove(kn_mongrp);
1438	kernfs_remove(kn_mondata);
1439}
1440
1441static void rdt_kill_sb(struct super_block *sb)
1442{
1443	struct rdt_resource *r;
1444
1445	cpus_read_lock();
1446	mutex_lock(&rdtgroup_mutex);
1447
1448	/* Put everything back to default values. */
1449	for_each_alloc_enabled_rdt_resource(r)
1450		reset_all_ctrls(r);
1451	cdp_disable_all();
1452	rmdir_all_sub();
1453	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
1454	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
1455	static_branch_disable_cpuslocked(&rdt_enable_key);
1456	kernfs_kill_sb(sb);
1457	mutex_unlock(&rdtgroup_mutex);
1458	cpus_read_unlock();
1459}
1460
1461static struct file_system_type rdt_fs_type = {
1462	.name    = "resctrl",
1463	.mount   = rdt_mount,
1464	.kill_sb = rdt_kill_sb,
1465};
1466
1467static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
1468		       void *priv)
1469{
1470	struct kernfs_node *kn;
1471	int ret = 0;
1472
1473	kn = __kernfs_create_file(parent_kn, name, 0444, 0,
1474				  &kf_mondata_ops, priv, NULL, NULL);
1475	if (IS_ERR(kn))
1476		return PTR_ERR(kn);
1477
1478	ret = rdtgroup_kn_set_ugid(kn);
1479	if (ret) {
1480		kernfs_remove(kn);
1481		return ret;
1482	}
1483
1484	return ret;
1485}
1486
1487/*
1488 * Remove all subdirectories of mon_data of ctrl_mon groups
1489 * and monitor groups with given domain id.
1490 */
1491void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
1492{
1493	struct rdtgroup *prgrp, *crgrp;
1494	char name[32];
1495
1496	if (!r->mon_enabled)
1497		return;
1498
1499	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1500		sprintf(name, "mon_%s_%02d", r->name, dom_id);
1501		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
1502
1503		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
1504			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
1505	}
1506}
1507
1508static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
1509				struct rdt_domain *d,
1510				struct rdt_resource *r, struct rdtgroup *prgrp)
1511{
1512	union mon_data_bits priv;
1513	struct kernfs_node *kn;
1514	struct mon_evt *mevt;
1515	struct rmid_read rr;
1516	char name[32];
1517	int ret;
1518
1519	sprintf(name, "mon_%s_%02d", r->name, d->id);
1520	/* create the directory */
1521	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
1522	if (IS_ERR(kn))
1523		return PTR_ERR(kn);
1524
1525	/*
1526	 * This extra ref will be put in kernfs_remove() and guarantees
1527	 * that kn is always accessible.
1528	 */
1529	kernfs_get(kn);
1530	ret = rdtgroup_kn_set_ugid(kn);
1531	if (ret)
1532		goto out_destroy;
1533
1534	if (WARN_ON(list_empty(&r->evt_list))) {
1535		ret = -EPERM;
1536		goto out_destroy;
1537	}
1538
1539	priv.u.rid = r->rid;
1540	priv.u.domid = d->id;
1541	list_for_each_entry(mevt, &r->evt_list, list) {
1542		priv.u.evtid = mevt->evtid;
1543		ret = mon_addfile(kn, mevt->name, priv.priv);
1544		if (ret)
1545			goto out_destroy;
1546
1547		if (is_mbm_event(mevt->evtid))
1548			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
1549	}
1550	kernfs_activate(kn);
1551	return 0;
1552
1553out_destroy:
1554	kernfs_remove(kn);
1555	return ret;
1556}
1557
1558/*
1559 * Add all subdirectories of mon_data for "ctrl_mon" groups
1560 * and "monitor" groups with given domain id.
1561 */
1562void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
1563				    struct rdt_domain *d)
1564{
1565	struct kernfs_node *parent_kn;
1566	struct rdtgroup *prgrp, *crgrp;
1567	struct list_head *head;
1568
1569	if (!r->mon_enabled)
1570		return;
1571
1572	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1573		parent_kn = prgrp->mon.mon_data_kn;
1574		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
1575
1576		head = &prgrp->mon.crdtgrp_list;
1577		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
1578			parent_kn = crgrp->mon.mon_data_kn;
1579			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
1580		}
1581	}
1582}
1583
1584static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
1585				       struct rdt_resource *r,
1586				       struct rdtgroup *prgrp)
1587{
1588	struct rdt_domain *dom;
1589	int ret;
1590
1591	list_for_each_entry(dom, &r->domains, list) {
1592		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
1593		if (ret)
1594			return ret;
1595	}
1596
1597	return 0;
1598}
1599
1600/*
1601 * This creates a directory mon_data which contains the monitored data.
1602 *
1603 * mon_data has one directory for each domain, named
1604 * in the format mon_<domain_name>_<domain_id>. For example, mon_data
1605 * with an L3 domain looks as below:
1606 * ./mon_data:
1607 * mon_L3_00
1608 * mon_L3_01
1609 * mon_L3_02
1610 * ...
1611 *
1612 * Each domain directory has one file per event:
1613 * ./mon_L3_00/:
1614 * llc_occupancy
1615 *
1616 */
1617static int mkdir_mondata_all(struct kernfs_node *parent_kn,
1618			     struct rdtgroup *prgrp,
1619			     struct kernfs_node **dest_kn)
1620{
1621	struct rdt_resource *r;
1622	struct kernfs_node *kn;
1623	int ret;
1624
1625	/*
1626	 * Create the mon_data directory first.
1627	 */
1628	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
1629	if (ret)
1630		return ret;
1631
1632	if (dest_kn)
1633		*dest_kn = kn;
1634
1635	/*
1636	 * Create the subdirectories for each domain. Note that all events
1637	 * in a domain like L3 are grouped into a resource whose domain is L3
1638	 */
1639	for_each_mon_enabled_rdt_resource(r) {
1640		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
1641		if (ret)
1642			goto out_destroy;
1643	}
1644
1645	return 0;
1646
1647out_destroy:
1648	kernfs_remove(kn);
1649	return ret;
1650}
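
/*
 * Illustrative sketch, not part of this file: reading one of the event
 * files created above. The domain directory name follows the
 * mon_<resource>_<id> pattern; "mon_L3_00" and "llc_occupancy" are
 * example names and depend on the hardware and enabled events.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("llc_occupancy: %s", buf);	/* occupancy in bytes for this group */
	}
	close(fd);
	return 0;
}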
1651
1652static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
1653			     struct kernfs_node *prgrp_kn,
1654			     const char *name, umode_t mode,
1655			     enum rdt_group_type rtype, struct rdtgroup **r)
1656{
1657	struct rdtgroup *prdtgrp, *rdtgrp;
1658	struct kernfs_node *kn;
1659	uint files = 0;
1660	int ret;
1661
1662	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
1663	rdt_last_cmd_clear();
1664	if (!prdtgrp) {
1665		ret = -ENODEV;
1666		rdt_last_cmd_puts("directory was removed\n");
1667		goto out_unlock;
1668	}
1669
1670	/* allocate the rdtgroup. */
1671	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
1672	if (!rdtgrp) {
1673		ret = -ENOSPC;
1674		rdt_last_cmd_puts("kernel out of memory\n");
1675		goto out_unlock;
1676	}
1677	*r = rdtgrp;
1678	rdtgrp->mon.parent = prdtgrp;
1679	rdtgrp->type = rtype;
1680	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
1681
1682	/* kernfs creates the directory for rdtgrp */
1683	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
1684	if (IS_ERR(kn)) {
1685		ret = PTR_ERR(kn);
1686		rdt_last_cmd_puts("kernfs create error\n");
1687		goto out_free_rgrp;
1688	}
1689	rdtgrp->kn = kn;
1690
1691	/*
1692	 * kernfs_remove() will drop the reference count on "kn" which
1693	 * will free it. But we still need it to stick around for the
1694	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
1695	 * here, which will be dropped inside rdtgroup_kn_unlock().
1696	 */
1697	kernfs_get(kn);
1698
1699	ret = rdtgroup_kn_set_ugid(kn);
1700	if (ret) {
1701		rdt_last_cmd_puts("kernfs perm error\n");
1702		goto out_destroy;
1703	}
1704
1705	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
1706	ret = rdtgroup_add_files(kn, files);
1707	if (ret) {
1708		rdt_last_cmd_puts("kernfs fill error\n");
1709		goto out_destroy;
1710	}
1711
1712	if (rdt_mon_capable) {
1713		ret = alloc_rmid();
1714		if (ret < 0) {
1715			rdt_last_cmd_puts("out of RMIDs\n");
1716			goto out_destroy;
1717		}
1718		rdtgrp->mon.rmid = ret;
1719
1720		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
1721		if (ret) {
1722			rdt_last_cmd_puts("kernfs subdir error\n");
1723			goto out_idfree;
1724		}
1725	}
1726	kernfs_activate(kn);
1727
1728	/*
1729	 * The caller unlocks the prgrp_kn upon success.
1730	 */
1731	return 0;
1732
1733out_idfree:
1734	free_rmid(rdtgrp->mon.rmid);
1735out_destroy:
1736	kernfs_remove(rdtgrp->kn);
1737out_free_rgrp:
1738	kfree(rdtgrp);
1739out_unlock:
1740	rdtgroup_kn_unlock(prgrp_kn);
1741	return ret;
1742}
1743
1744static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
1745{
1746	kernfs_remove(rgrp->kn);
1747	free_rmid(rgrp->mon.rmid);
1748	kfree(rgrp);
1749}
1750
1751/*
1752 * Create a monitor group under "mon_groups" directory of a control
1753 * and monitor group (ctrl_mon). This is a resource group
1754 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
1755 */
1756static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
1757			      struct kernfs_node *prgrp_kn,
1758			      const char *name,
1759			      umode_t mode)
1760{
1761	struct rdtgroup *rdtgrp, *prgrp;
1762	int ret;
1763
1764	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
1765				&rdtgrp);
1766	if (ret)
1767		return ret;
1768
1769	prgrp = rdtgrp->mon.parent;
1770	rdtgrp->closid = prgrp->closid;
1771
1772	/*
1773	 * Add the rdtgrp to the list of rdtgrps the parent
1774	 * ctrl_mon group has to track.
1775	 */
1776	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
1777
1778	rdtgroup_kn_unlock(prgrp_kn);
1779	return ret;
1780}
1781
1782/*
1783 * These are rdtgroups created under the root directory. Can be used
1784 * to allocate and monitor resources.
1785 */
1786static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
1787				   struct kernfs_node *prgrp_kn,
1788				   const char *name, umode_t mode)
1789{
1790	struct rdtgroup *rdtgrp;
1791	struct kernfs_node *kn;
1792	u32 closid;
1793	int ret;
1794
1795	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
1796				&rdtgrp);
1797	if (ret)
1798		return ret;
1799
1800	kn = rdtgrp->kn;
1801	ret = closid_alloc();
1802	if (ret < 0) {
1803		rdt_last_cmd_puts("out of CLOSIDs\n");
1804		goto out_common_fail;
1805	}
1806	closid = ret;
1807	ret = 0;
1808
1809	rdtgrp->closid = closid;
1810	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
1811
1812	if (rdt_mon_capable) {
1813		/*
1814		 * Create an empty mon_groups directory to hold the subset
1815		 * of tasks and cpus to monitor.
1816		 */
1817		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
1818		if (ret) {
1819			rdt_last_cmd_puts("kernfs subdir error\n");
1820			goto out_id_free;
1821		}
1822	}
1823
1824	goto out_unlock;
1825
1826out_id_free:
1827	closid_free(closid);
1828	list_del(&rdtgrp->rdtgroup_list);
1829out_common_fail:
1830	mkdir_rdt_prepare_clean(rdtgrp);
1831out_unlock:
1832	rdtgroup_kn_unlock(prgrp_kn);
1833	return ret;
1834}
1835
1836/*
1837 * We allow creating mon groups only within a directory called "mon_groups"
1838 * which is present in every ctrl_mon group. Check if this is a valid
1839 * "mon_groups" directory.
1840 *
1841 * 1. The directory should be named "mon_groups".
1842 * 2. The mon group itself should "not" be named "mon_groups".
1843 *   This makes sure "mon_groups" directory always has a ctrl_mon group
1844 *   as parent.
1845 */
1846static bool is_mon_groups(struct kernfs_node *kn, const char *name)
1847{
1848	return (!strcmp(kn->name, "mon_groups") &&
1849		strcmp(name, "mon_groups"));
1850}
1851
1852static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
1853			  umode_t mode)
1854{
1855	/* Do not accept '\n' to avoid unparsable situation. */
1856	if (strchr(name, '\n'))
1857		return -EINVAL;
1858
1859	/*
1860	 * If the parent directory is the root directory and RDT
1861	 * allocation is supported, add a control and monitoring
1862	 * subdirectory
1863	 */
1864	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
1865		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);
1866
1867	/*
1868	 * If RDT monitoring is supported and the parent directory is a valid
1869	 * "mon_groups" directory, add a monitoring subdirectory.
1870	 */
1871	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
1872		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);
1873
1874	return -EPERM;
1875}
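
/*
 * Illustrative sketch, not part of this file: both cases handled above
 * map to plain mkdir(2) calls from user space. The group names are
 * placeholders; the second mkdir succeeds only because its parent is a
 * "mon_groups" directory inside an existing ctrl_mon group.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	/* control/monitor group directly under the resctrl root */
	if (mkdir("/sys/fs/resctrl/grp1", 0755))
		perror("mkdir ctrl_mon");

	/* monitor-only group under the parent's mon_groups directory */
	if (mkdir("/sys/fs/resctrl/grp1/mon_groups/mon1", 0755))
		perror("mkdir mon");

	return 0;
}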
1876
1877static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
1878			      cpumask_var_t tmpmask)
1879{
1880	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
1881	int cpu;
1882
1883	/* Give any tasks back to the parent group */
1884	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
1885
1886	/* Update per cpu rmid of the moved CPUs first */
1887	for_each_cpu(cpu, &rdtgrp->cpu_mask)
1888		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
1889	/*
1890	 * Update the MSR on moved CPUs and CPUs which have a moved
1891	 * task running on them.
1892	 */
1893	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
1894	update_closid_rmid(tmpmask, NULL);
1895
1896	rdtgrp->flags = RDT_DELETED;
1897	free_rmid(rdtgrp->mon.rmid);
1898
1899	/*
1900	 * Remove the rdtgrp from the parent ctrl_mon group's list
1901	 */
1902	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
1903	list_del(&rdtgrp->mon.crdtgrp_list);
1904
1905	/*
1906	 * one extra hold on this, will drop when we kfree(rdtgrp)
1907	 * in rdtgroup_kn_unlock()
1908	 */
1909	kernfs_get(kn);
1910	kernfs_remove(rdtgrp->kn);
1911
1912	return 0;
1913}
1914
1915static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
1916			       cpumask_var_t tmpmask)
1917{
1918	int cpu;
1919
1920	/* Give any tasks back to the default group */
1921	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
1922
1923	/* Give any CPUs back to the default group */
1924	cpumask_or(&rdtgroup_default.cpu_mask,
1925		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
1926
1927	/* Update per cpu closid and rmid of the moved CPUs first */
1928	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
1929		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
1930		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
1931	}
1932
1933	/*
1934	 * Update the MSR on moved CPUs and CPUs which have a moved
1935	 * task running on them.
1936	 */
1937	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
1938	update_closid_rmid(tmpmask, NULL);
1939
1940	rdtgrp->flags = RDT_DELETED;
1941	closid_free(rdtgrp->closid);
1942	free_rmid(rdtgrp->mon.rmid);
1943
1944	/*
1945	 * Free all the child monitor group rmids.
1946	 */
1947	free_all_child_rdtgrp(rdtgrp);
1948
1949	list_del(&rdtgrp->rdtgroup_list);
1950
1951	/*
1952	 * one extra hold on this, will drop when we kfree(rdtgrp)
1953	 * in rdtgroup_kn_unlock()
1954	 */
1955	kernfs_get(kn);
1956	kernfs_remove(rdtgrp->kn);
1957
1958	return 0;
1959}
1960
1961static int rdtgroup_rmdir(struct kernfs_node *kn)
1962{
1963	struct kernfs_node *parent_kn = kn->parent;
1964	struct rdtgroup *rdtgrp;
1965	cpumask_var_t tmpmask;
1966	int ret = 0;
1967
1968	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
1969		return -ENOMEM;
1970
1971	rdtgrp = rdtgroup_kn_lock_live(kn);
1972	if (!rdtgrp) {
1973		ret = -EPERM;
1974		goto out;
1975	}
1976
1977	/*
1978	 * If the rdtgroup is a ctrl_mon group and parent directory
1979	 * is the root directory, remove the ctrl_mon group.
1980	 *
1981	 * If the rdtgroup is a mon group and parent directory
1982	 * is a valid "mon_groups" directory, remove the mon group.
1983	 */
1984	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
1985		ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
1986	else if (rdtgrp->type == RDTMON_GROUP &&
1987		 is_mon_groups(parent_kn, kn->name))
1988		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
1989	else
1990		ret = -EPERM;
1991
1992out:
1993	rdtgroup_kn_unlock(kn);
1994	free_cpumask_var(tmpmask);
1995	return ret;
1996}
1997
1998static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
1999{
2000	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
2001		seq_puts(seq, ",cdp");
2002	return 0;
2003}
2004
2005static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
2006	.mkdir		= rdtgroup_mkdir,
2007	.rmdir		= rdtgroup_rmdir,
2008	.show_options	= rdtgroup_show_options,
2009};
2010
2011static int __init rdtgroup_setup_root(void)
2012{
2013	int ret;
2014
2015	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
2016				      KERNFS_ROOT_CREATE_DEACTIVATED,
2017				      &rdtgroup_default);
2018	if (IS_ERR(rdt_root))
2019		return PTR_ERR(rdt_root);
2020
2021	mutex_lock(&rdtgroup_mutex);
2022
2023	rdtgroup_default.closid = 0;
2024	rdtgroup_default.mon.rmid = 0;
2025	rdtgroup_default.type = RDTCTRL_GROUP;
2026	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
2027
2028	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
2029
2030	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
2031	if (ret) {
2032		kernfs_destroy_root(rdt_root);
2033		goto out;
2034	}
2035
2036	rdtgroup_default.kn = rdt_root->kn;
2037	kernfs_activate(rdtgroup_default.kn);
2038
2039out:
2040	mutex_unlock(&rdtgroup_mutex);
2041
2042	return ret;
2043}
2044
2045/*
2046 * rdtgroup_init - rdtgroup initialization
2047 *
2048 * Setup resctrl file system including set up root, create mount point,
2049 * register rdtgroup filesystem, and initialize files under root directory.
2050 *
2051 * Return: 0 on success or -errno
2052 */
2053int __init rdtgroup_init(void)
2054{
2055	int ret = 0;
2056
2057	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
2058		     sizeof(last_cmd_status_buf));
2059
2060	ret = rdtgroup_setup_root();
2061	if (ret)
2062		return ret;
2063
2064	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
2065	if (ret)
2066		goto cleanup_root;
2067
2068	ret = register_filesystem(&rdt_fs_type);
2069	if (ret)
2070		goto cleanup_mountpoint;
2071
2072	return 0;
2073
2074cleanup_mountpoint:
2075	sysfs_remove_mount_point(fs_kobj, "resctrl");
2076cleanup_root:
2077	kernfs_destroy_root(rdt_root);
2078
2079	return ret;
2080}