/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

#include <uapi/linux/magic.h>

#include <asm/intel_rdt.h>
#include <asm/intel_rdt_common.h>

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;

static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
}

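/* Allocate the lowest numbered free CLOSID, or return -ENOSPC if none is free. */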
int closid_alloc(void)
{
	int closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

static void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

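/* Create one kernfs file for @rft under @parent_kn and set its uid/gid to the creator's. */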
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
			      int len)
{
	struct rftype *rft;
	int ret;

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		ret = rdtgroup_add_file(kn, rft);
		if (ret)
			goto error;
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts)
		kernfs_remove_by_name(kn, rft->name);
	return ret;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

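/* Show handler for the per-group "cpus" file: print the group's CPU mask as a bitmap. */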
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp)
		seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against intel_rdt_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from rdt_update_closid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void rdt_update_cpu_closid(void *closid)
{
	if (closid)
		this_cpu_write(cpu_closid, *(int *)closid);
	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	intel_rdt_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids must have been set up before calling this function.
 *
 * The per cpu closids are updated with the smp function call, when @closid
 * is not NULL. If @closid is NULL then all affected percpu closids must
 * have been set up before calling this function.
 */
static void
rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_update_cpu_closid(closid);
	smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
	put_cpu();
}

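/*
 * Write handler for the "cpus" file: parse the new CPU mask, hand any
 * dropped CPUs back to the default group and pull newly added CPUs out of
 * whichever group currently owns them.
 */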
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask;
	struct rdtgroup *rdtgrp, *r;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = cpumask_parse(buf, newmask);
	if (ret)
		goto unlock;

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			ret = -EINVAL;
			goto unlock;
		}
		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		rdt_update_closid(tmpmask, &rdtgroup_default.closid);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu closid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
		}
		rdt_update_closid(tmpmask, &rdtgrp->closid);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);

	return ret ?: nbytes;
}

struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

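/*
 * Task work callback: runs in the context of the moved task on its way
 * back to user space and updates the PQR_ASSOC MSR via the context
 * switch path.
 */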
static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		kfree(rdtgrp);
	}

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	intel_rdt_sched_in();
	preempt_enable();

	kfree(callback);
}

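/*
 * Assign @rdtgrp's closid to @tsk and queue task work so the PQR_ASSOC MSR
 * is updated when @tsk next returns to user space.
 */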
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
	} else {
		tsk->closid = rdtgrp->closid;
	}
	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EPERM;

	put_cred(tcred);
	return ret;
}

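/*
 * Resolve @pid to a task (pid 0 means the calling task), check permission
 * and move the task into @rdtgrp.
 */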
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

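/* Write handler for the "tasks" file: accepts a single PID per write. */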
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp)
		ret = rdtgroup_move_task(pid, rdtgrp, of);
	else
		ret = -ENOENT;

	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (t->closid == r->closid)
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/* Files in each rdtgroup */
static struct rftype rdtgroup_base_files[] = {
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
	},
};

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);

	return 0;
}

static int rdt_cbm_mask_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->max_cbm);

	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->min_cbm_bits);

	return 0;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_info_files[] = {
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_cbm_mask_show,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
	},
};

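/*
 * Create the "info" directory under the root and one subdirectory per
 * enabled resource, each populated with res_info_files.
 */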
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct kernfs_node *kn_subdir;
	struct rdt_resource *r;
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);
	kernfs_get(kn_info);

	for_each_enabled_rdt_resource(r) {
		kn_subdir = kernfs_create_dir(kn_info, r->name,
					      kn_info->mode, r);
		if (IS_ERR(kn_subdir)) {
			ret = PTR_ERR(kn_subdir);
			goto out_destroy;
		}
		kernfs_get(kn_subdir);
		ret = rdtgroup_kn_set_ugid(kn_subdir);
		if (ret)
			goto out_destroy;
		ret = rdtgroup_add_files(kn_subdir, res_info_files,
					 ARRAY_SIZE(res_info_files));
		if (ret)
			goto out_destroy;
		kernfs_activate(kn_subdir);
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

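/*
 * Enable or disable CDP by writing IA32_L3_QOS_CFG on one CPU in each
 * L3 cache domain.
 */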
static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
{
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	list_for_each_entry(d, &r->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		l3_qos_cfg_update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

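/*
 * Enable code/data prioritization: switch from the combined L3 resource
 * to the separate L3DATA and L3CODE resources.
 */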
static int cdp_enable(void)
{
	struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
	struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
	int ret;

	if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
		return -EINVAL;

	ret = set_l3_qos_cfg(r_l3, true);
	if (!ret) {
		r_l3->enabled = false;
		r_l3data->enabled = true;
		r_l3code->enabled = true;
	}
	return ret;
}

static void cdp_disable(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];

	r->enabled = r->capable;

	if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
		rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
		rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
		set_l3_qos_cfg(r, false);
	}
}

static int parse_rdtgroupfs_options(char *data)
{
	char *token, *o = data;
	int ret = 0;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;

		if (!strcmp(token, "cdp"))
			ret = cdp_enable();
	}

	return ret;
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, or at a file, in which case we need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

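/*
 * Mount callback for the resctrl filesystem. Only a single mount is
 * allowed; the "cdp" option enables code/data prioritization.
 */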
static struct dentry *rdt_mount(struct file_system_type *fs_type,
				int flags, const char *unused_dev_name,
				void *data)
{
	struct dentry *dentry;
	int ret;

	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		dentry = ERR_PTR(-EBUSY);
		goto out;
	}

	ret = parse_rdtgroupfs_options(data);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	dentry = kernfs_mount(fs_type, flags, rdt_root,
			      RDTGROUP_SUPER_MAGIC, NULL);
	if (IS_ERR(dentry))
		goto out_cdp;

	static_branch_enable(&rdt_enable_key);
	goto out;

out_cdp:
	cdp_disable();
out:
	mutex_unlock(&rdtgroup_mutex);

	return dentry;
}

static int reset_all_cbms(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->cbm[i] = r->max_cbm;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_cbm_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || t->closid == from->closid) {
			t->closid = to->closid;
#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);
		kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	get_online_cpus();
	rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
	put_online_cpus();

	kernfs_remove(kn_info);
}

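/*
 * Unmount: reset all CBMs to their defaults, disable CDP, remove all
 * groups and disable the RDT static key.
 */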
static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);

	/* Put everything back to default values. */
	for_each_enabled_rdt_resource(r)
		reset_all_cbms(r);
	cdp_disable();
	rmdir_all_sub();
	static_branch_disable(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
}

static struct file_system_type rdt_fs_type = {
	.name    = "resctrl",
	.mount   = rdt_mount,
	.kill_sb = rdt_kill_sb,
};
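
/*
 * Illustrative user-space usage (paths assume the standard sysfs mount
 * point created by rdtgroup_init(), not something defined in this file):
 *
 *   # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
 *   # mkdir /sys/fs/resctrl/p0
 *   # echo <pid> > /sys/fs/resctrl/p0/tasks
 *
 * Directories created under the root become resource groups handled by
 * rdtgroup_mkdir()/rdtgroup_rmdir() below.
 */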
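/* mkdir callback: create a new resource group directly under the root directory. */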
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	struct rdtgroup *parent, *rdtgrp;
	struct kernfs_node *kn;
	int ret, closid;

	/* Only allow mkdir in the root directory */
	if (parent_kn != rdtgroup_default.kn)
		return -EPERM;

	/* Do not accept '\n' to avoid unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = rdtgroup_kn_lock_live(parent_kn);
	if (!parent) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = closid_alloc();
	if (ret < 0)
		goto out_unlock;
	closid = ret;

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		goto out_closid_free;
	}
	rdtgrp->closid = closid;
	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_cancel_ref;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
	 * here, which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	ret = rdtgroup_add_files(kn, rdtgroup_base_files,
				 ARRAY_SIZE(rdtgroup_base_files));
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_destroy:
	kernfs_remove(rdtgrp->kn);
out_cancel_ref:
	list_del(&rdtgrp->rdtgroup_list);
	kfree(rdtgrp);
out_closid_free:
	closid_free(closid);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

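/*
 * rmdir callback: move the group's tasks and CPUs back to the default
 * group, free its CLOSID and mark the group deleted.
 */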
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	int ret, cpu, closid = rdtgroup_default.closid;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(cpu_closid, cpu) = closid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have a moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	rdt_update_closid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	closid_free(rdtgrp->closid);
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * One extra hold on this, which will be dropped when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);
	ret = 0;
out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
		seq_puts(seq, ",cdp");
	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

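/* Create the kernfs root for resctrl and add the base files to the default group. */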
static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
				 ARRAY_SIZE(rdtgroup_base_files));
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl file system: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize the files under the
 * root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}