   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  kernel/sched/syscalls.c
   4 *
   5 *  Core kernel scheduler syscalls related code
   6 *
   7 *  Copyright (C) 1991-2002  Linus Torvalds
   8 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
   9 */
  10#include <linux/sched.h>
  11#include <linux/cpuset.h>
  12#include <linux/sched/debug.h>
  13
  14#include <uapi/linux/sched/types.h>
  15
  16#include "sched.h"
  17#include "autogroup.h"
  18
  19static inline int __normal_prio(int policy, int rt_prio, int nice)
  20{
  21	int prio;
  22
  23	if (dl_policy(policy))
  24		prio = MAX_DL_PRIO - 1;
  25	else if (rt_policy(policy))
  26		prio = MAX_RT_PRIO - 1 - rt_prio;
  27	else
  28		prio = NICE_TO_PRIO(nice);
  29
  30	return prio;
  31}
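
/*
 * Worked examples for the mapping above (illustrative annotation, not part of
 * the upstream file). With MAX_RT_PRIO == 100, MAX_DL_PRIO == 0 and
 * NICE_TO_PRIO(nice) == 120 + nice:
 *
 *   SCHED_DEADLINE                        -> prio -1
 *   SCHED_FIFO/SCHED_RR, rt_prio 1..99    -> prio 98..0
 *   SCHED_NORMAL, nice -20..19            -> prio 100..139 (nice 0 -> 120)
 */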
  32
  33/*
  34 * Calculate the expected normal priority: i.e. priority
  35 * without taking RT-inheritance into account. Might be
  36 * boosted by interactivity modifiers. Changes upon fork,
  37 * setprio syscalls, and whenever the interactivity
  38 * estimator recalculates.
  39 */
  40static inline int normal_prio(struct task_struct *p)
  41{
  42	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
  43}
  44
  45/*
  46 * Calculate the current priority, i.e. the priority
  47 * taken into account by the scheduler. This value might
  48 * be boosted by RT tasks, or might be boosted by
  49 * interactivity modifiers. Will be RT if the task got
  50 * RT-boosted. If not then it returns p->normal_prio.
  51 */
  52static int effective_prio(struct task_struct *p)
  53{
  54	p->normal_prio = normal_prio(p);
  55	/*
  56	 * If we are RT tasks or we were boosted to RT priority,
  57	 * keep the priority unchanged. Otherwise, update priority
  58	 * to the normal priority:
  59	 */
  60	if (!rt_or_dl_prio(p->prio))
  61		return p->normal_prio;
  62	return p->prio;
  63}
  64
  65void set_user_nice(struct task_struct *p, long nice)
  66{
  67	bool queued, running;
  68	struct rq *rq;
  69	int old_prio;
  70
  71	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
  72		return;
  73	/*
  74	 * We have to be careful: if called from sys_setpriority(),
  75	 * the task might be in the middle of scheduling on another CPU.
  76	 */
  77	CLASS(task_rq_lock, rq_guard)(p);
  78	rq = rq_guard.rq;
  79
  80	update_rq_clock(rq);
  81
  82	/*
  83	 * The RT priorities are set via sched_setscheduler(), but we still
  84	 * allow the 'normal' nice value to be set - but as expected
  85	 * it won't have any effect on scheduling until the task is no
  86	 * longer SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
  87	 */
  88	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
  89		p->static_prio = NICE_TO_PRIO(nice);
  90		return;
  91	}
  92
  93	queued = task_on_rq_queued(p);
  94	running = task_current_donor(rq, p);
  95	if (queued)
  96		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
  97	if (running)
  98		put_prev_task(rq, p);
  99
 100	p->static_prio = NICE_TO_PRIO(nice);
 101	set_load_weight(p, true);
 102	old_prio = p->prio;
 103	p->prio = effective_prio(p);
 104
 105	if (queued)
 106		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 107	if (running)
 108		set_next_task(rq, p);
 109
 110	/*
 111	 * If the task increased its priority or is running and
 112	 * lowered its priority, then reschedule its CPU:
 113	 */
 114	p->sched_class->prio_changed(rq, p, old_prio);
 115}
 116EXPORT_SYMBOL(set_user_nice);
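
/*
 * Illustrative sketch of the exported helper above being used from a kernel
 * thread. "background_scrub_thread" is a hypothetical name and the snippet
 * assumes <linux/kthread.h>; it is not part of the upstream file.
 */
static int __maybe_unused background_scrub_thread(void *unused)
{
	/* Demote ourselves to nice +10 so we don't compete with user tasks. */
	set_user_nice(current, 10);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	return 0;
}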
 117
 118/*
 119 * is_nice_reduction - check if nice value is an actual reduction
 120 *
 121 * Similar to can_nice() but does not perform a capability check.
 122 *
 123 * @p: task
 124 * @nice: nice value
 125 */
 126static bool is_nice_reduction(const struct task_struct *p, const int nice)
 127{
 128	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
 129	int nice_rlim = nice_to_rlimit(nice);
 130
 131	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
 132}
 133
 134/*
 135 * can_nice - check if a task can reduce its nice value
 136 * @p: task
 137 * @nice: nice value
 138 */
 139int can_nice(const struct task_struct *p, const int nice)
 140{
 141	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
 142}
 143
 144#ifdef __ARCH_WANT_SYS_NICE
 145
 146/*
 147 * sys_nice - change the priority of the current process.
 148 * @increment: priority increment
 149 *
 150 * sys_setpriority is a more generic, but much slower function that
 151 * does similar things.
 152 */
 153SYSCALL_DEFINE1(nice, int, increment)
 154{
 155	long nice, retval;
 156
 157	/*
 158	 * Setpriority might change our priority at the same moment.
 159	 * We don't have to worry. Conceptually one call occurs first
 160	 * and we have a single winner.
 161	 */
 162	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
 163	nice = task_nice(current) + increment;
 164
 165	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
 166	if (increment < 0 && !can_nice(current, nice))
 167		return -EPERM;
 168
 169	retval = security_task_setnice(current, nice);
 170	if (retval)
 171		return retval;
 172
 173	set_user_nice(current, nice);
 174	return 0;
 175}
 176
 177#endif
 178
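/*
 * Userspace-side sketch of sys_nice() via the libc nice(3) wrapper; a minimal
 * example, not part of this file. Note nice() can legitimately return -1,
 * hence the errno check.
 */
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	errno = 0;
	int nv = nice(5);	/* ask to add +5 to our nice value */

	if (nv == -1 && errno)
		perror("nice");
	else
		printf("new nice value: %d\n", nv);
	return 0;
}
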
 179/**
 180 * task_prio - return the priority value of a given task.
 181 * @p: the task in question.
 182 *
 183 * Return: The priority value as seen by users in /proc.
 184 *
 185 * sched policy         return value   kernel prio    user prio/nice
 186 *
 187 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 188 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 189 * deadline                     -101             -1           0
 190 */
 191int task_prio(const struct task_struct *p)
 192{
 193	return p->prio - MAX_RT_PRIO;
 194}
 195
 196/**
 197 * idle_cpu - is a given CPU idle currently?
 198 * @cpu: the processor in question.
 199 *
 200 * Return: 1 if the CPU is currently idle. 0 otherwise.
 201 */
 202int idle_cpu(int cpu)
 203{
 204	struct rq *rq = cpu_rq(cpu);
 205
 206	if (rq->curr != rq->idle)
 207		return 0;
 208
 209	if (rq->nr_running)
 210		return 0;
 211
 212#ifdef CONFIG_SMP
 213	if (rq->ttwu_pending)
 214		return 0;
 215#endif
 216
 217	return 1;
 218}
 219
 220/**
 221 * available_idle_cpu - is a given CPU idle for enqueuing work.
 222 * @cpu: the CPU in question.
 223 *
 224 * Return: 1 if the CPU is currently idle. 0 otherwise.
 225 */
 226int available_idle_cpu(int cpu)
 227{
 228	if (!idle_cpu(cpu))
 229		return 0;
 230
 231	if (vcpu_is_preempted(cpu))
 232		return 0;
 233
 234	return 1;
 235}
 236
 237/**
 238 * idle_task - return the idle task for a given CPU.
 239 * @cpu: the processor in question.
 240 *
 241 * Return: The idle task for the CPU @cpu.
 242 */
 243struct task_struct *idle_task(int cpu)
 244{
 245	return cpu_rq(cpu)->idle;
 246}
 247
 248#ifdef CONFIG_SCHED_CORE
 249int sched_core_idle_cpu(int cpu)
 250{
 251	struct rq *rq = cpu_rq(cpu);
 252
 253	if (sched_core_enabled(rq) && rq->curr == rq->idle)
 254		return 1;
 255
 256	return idle_cpu(cpu);
 257}
 258
 259#endif
 260
 261/**
 262 * find_process_by_pid - find a process with a matching PID value.
 263 * @pid: the pid in question.
 264 *
 265 * The task of @pid, if found. %NULL otherwise.
 266 */
 267static struct task_struct *find_process_by_pid(pid_t pid)
 268{
 269	return pid ? find_task_by_vpid(pid) : current;
 270}
 271
 272static struct task_struct *find_get_task(pid_t pid)
 273{
 274	struct task_struct *p;
 275	guard(rcu)();
 276
 277	p = find_process_by_pid(pid);
 278	if (likely(p))
 279		get_task_struct(p);
 280
 281	return p;
 282}
 283
 284DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
 285	     find_get_task(pid), pid_t pid)
 286
 287/*
 288 * sched_setparam() passes in -1 for its policy, to let the functions
 289 * it calls know not to change it.
 290 */
 291#define SETPARAM_POLICY	-1
 292
 293static void __setscheduler_params(struct task_struct *p,
 294		const struct sched_attr *attr)
 295{
 296	int policy = attr->sched_policy;
 297
 298	if (policy == SETPARAM_POLICY)
 299		policy = p->policy;
 300
 301	p->policy = policy;
 302
 303	if (dl_policy(policy)) {
 304		__setparam_dl(p, attr);
 305	} else if (fair_policy(policy)) {
 306		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
 307		if (attr->sched_runtime) {
 308			p->se.custom_slice = 1;
 309			p->se.slice = clamp_t(u64, attr->sched_runtime,
 310					      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
 311					      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
 312		} else {
 313			p->se.custom_slice = 0;
 314			p->se.slice = sysctl_sched_base_slice;
 315		}
 316	}
 317
 318	/* rt-policy tasks do not have a timerslack */
 319	if (rt_or_dl_task_policy(p)) {
 320		p->timer_slack_ns = 0;
 321	} else if (p->timer_slack_ns == 0) {
 322		/* when switching back to non-rt policy, restore timerslack */
 323		p->timer_slack_ns = p->default_timer_slack_ns;
 324	}
 325
 326	/*
 327	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
 328	 * !rt_policy. Always setting this ensures that things like
 329	 * getparam()/getattr() don't report silly values for !rt tasks.
 330	 */
 331	p->rt_priority = attr->sched_priority;
 332	p->normal_prio = normal_prio(p);
 333	set_load_weight(p, true);
 334}
 335
 336/*
 337 * Check that the target process has a UID that matches the current process's:
 338 */
 339static bool check_same_owner(struct task_struct *p)
 340{
 341	const struct cred *cred = current_cred(), *pcred;
 342	guard(rcu)();
 343
 344	pcred = __task_cred(p);
 345	return (uid_eq(cred->euid, pcred->euid) ||
 346		uid_eq(cred->euid, pcred->uid));
 347}
 348
 349#ifdef CONFIG_UCLAMP_TASK
 350
 351static int uclamp_validate(struct task_struct *p,
 352			   const struct sched_attr *attr)
 353{
 354	int util_min = p->uclamp_req[UCLAMP_MIN].value;
 355	int util_max = p->uclamp_req[UCLAMP_MAX].value;
 356
 357	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
 358		util_min = attr->sched_util_min;
 359
 360		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
 361			return -EINVAL;
 362	}
 363
 364	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
 365		util_max = attr->sched_util_max;
 366
 367		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
 368			return -EINVAL;
 369	}
 370
 371	if (util_min != -1 && util_max != -1 && util_min > util_max)
 372		return -EINVAL;
 373
 374	/*
 375	 * We have valid uclamp attributes; make sure uclamp is enabled.
 376	 *
 377	 * We need to do that here, because enabling static branches is a
 378	 * blocking operation which obviously cannot be done while holding
 379	 * scheduler locks.
 380	 */
 381	static_branch_enable(&sched_uclamp_used);
 382
 383	return 0;
 384}
 385
 386static bool uclamp_reset(const struct sched_attr *attr,
 387			 enum uclamp_id clamp_id,
 388			 struct uclamp_se *uc_se)
 389{
 390	/* Reset on sched class change for a non user-defined clamp value. */
 391	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
 392	    !uc_se->user_defined)
 393		return true;
 394
 395	/* Reset on sched_util_{min,max} == -1. */
 396	if (clamp_id == UCLAMP_MIN &&
 397	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
 398	    attr->sched_util_min == -1) {
 399		return true;
 400	}
 401
 402	if (clamp_id == UCLAMP_MAX &&
 403	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
 404	    attr->sched_util_max == -1) {
 405		return true;
 406	}
 407
 408	return false;
 409}
 410
 411static void __setscheduler_uclamp(struct task_struct *p,
 412				  const struct sched_attr *attr)
 413{
 414	enum uclamp_id clamp_id;
 415
 416	for_each_clamp_id(clamp_id) {
 417		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
 418		unsigned int value;
 419
 420		if (!uclamp_reset(attr, clamp_id, uc_se))
 421			continue;
 422
 423		/*
 424		 * RT tasks by default have a 100% boost value that can be modified
 425		 * at runtime.
 426		 */
 427		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
 428			value = sysctl_sched_uclamp_util_min_rt_default;
 429		else
 430			value = uclamp_none(clamp_id);
 431
 432		uclamp_se_set(uc_se, value, false);
 433
 434	}
 435
 436	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
 437		return;
 438
 439	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
 440	    attr->sched_util_min != -1) {
 441		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
 442			      attr->sched_util_min, true);
 443	}
 444
 445	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
 446	    attr->sched_util_max != -1) {
 447		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
 448			      attr->sched_util_max, true);
 449	}
 450}
 451
 452#else /* !CONFIG_UCLAMP_TASK: */
 453
 454static inline int uclamp_validate(struct task_struct *p,
 455				  const struct sched_attr *attr)
 456{
 457	return -EOPNOTSUPP;
 458}
 459static void __setscheduler_uclamp(struct task_struct *p,
 460				  const struct sched_attr *attr) { }
 461#endif
 462
 463/*
 464 * Allow unprivileged RT tasks to decrease priority.
 465 * Only issue a capable test if needed and only once to avoid an audit
 466 * event on permitted non-privileged operations:
 467 */
 468static int user_check_sched_setscheduler(struct task_struct *p,
 469					 const struct sched_attr *attr,
 470					 int policy, int reset_on_fork)
 471{
 472	if (fair_policy(policy)) {
 473		if (attr->sched_nice < task_nice(p) &&
 474		    !is_nice_reduction(p, attr->sched_nice))
 475			goto req_priv;
 476	}
 477
 478	if (rt_policy(policy)) {
 479		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
 480
 481		/* Can't set/change the rt policy: */
 482		if (policy != p->policy && !rlim_rtprio)
 483			goto req_priv;
 484
 485		/* Can't increase priority: */
 486		if (attr->sched_priority > p->rt_priority &&
 487		    attr->sched_priority > rlim_rtprio)
 488			goto req_priv;
 489	}
 490
 491	/*
 492	 * Can't set/change SCHED_DEADLINE policy at all for now
 493	 * (safest behavior); in the future we would like to allow
 494	 * unprivileged DL tasks to increase their relative deadline
 495	 * or reduce their runtime (both ways reducing utilization)
 496	 */
 497	if (dl_policy(policy))
 498		goto req_priv;
 499
 500	/*
 501	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 502	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
 503	 */
 504	if (task_has_idle_policy(p) && !idle_policy(policy)) {
 505		if (!is_nice_reduction(p, task_nice(p)))
 506			goto req_priv;
 507	}
 508
 509	/* Can't change other user's priorities: */
 510	if (!check_same_owner(p))
 511		goto req_priv;
 512
 513	/* Normal users shall not reset the sched_reset_on_fork flag: */
 514	if (p->sched_reset_on_fork && !reset_on_fork)
 515		goto req_priv;
 516
 517	return 0;
 518
 519req_priv:
 520	if (!capable(CAP_SYS_NICE))
 521		return -EPERM;
 522
 523	return 0;
 524}
 525
 526int __sched_setscheduler(struct task_struct *p,
 527			 const struct sched_attr *attr,
 528			 bool user, bool pi)
 529{
 530	int oldpolicy = -1, policy = attr->sched_policy;
 531	int retval, oldprio, newprio, queued, running;
 532	const struct sched_class *prev_class, *next_class;
 533	struct balance_callback *head;
 534	struct rq_flags rf;
 535	int reset_on_fork;
 536	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 537	struct rq *rq;
 538	bool cpuset_locked = false;
 539
 540	/* The pi code expects interrupts enabled */
 541	BUG_ON(pi && in_interrupt());
 542recheck:
 543	/* Double check policy once rq lock held: */
 544	if (policy < 0) {
 545		reset_on_fork = p->sched_reset_on_fork;
 546		policy = oldpolicy = p->policy;
 547	} else {
 548		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
 549
 550		if (!valid_policy(policy))
 551			return -EINVAL;
 552	}
 553
 554	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
 555		return -EINVAL;
 556
 557	/*
 558	 * Valid priorities for SCHED_FIFO and SCHED_RR are
 559	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
 560	 * SCHED_BATCH and SCHED_IDLE is 0.
 561	 */
 562	if (attr->sched_priority > MAX_RT_PRIO-1)
 563		return -EINVAL;
 564	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
 565	    (rt_policy(policy) != (attr->sched_priority != 0)))
 566		return -EINVAL;
 567
 568	if (user) {
 569		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
 570		if (retval)
 571			return retval;
 572
 573		if (attr->sched_flags & SCHED_FLAG_SUGOV)
 574			return -EINVAL;
 575
 576		retval = security_task_setscheduler(p);
 577		if (retval)
 578			return retval;
 579	}
 580
 581	/* Update task specific "requested" clamps */
 582	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
 583		retval = uclamp_validate(p, attr);
 584		if (retval)
 585			return retval;
 586	}
 587
 588	/*
 589	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
 590	 * information.
 591	 */
 592	if (dl_policy(policy) || dl_policy(p->policy)) {
 593		cpuset_locked = true;
 594		cpuset_lock();
 595	}
 596
 597	/*
 598	 * Make sure no PI-waiters arrive (or leave) while we are
 599	 * changing the priority of the task:
 600	 *
 601	 * To be able to change p->policy safely, the appropriate
 602	 * runqueue lock must be held.
 603	 */
 604	rq = task_rq_lock(p, &rf);
 605	update_rq_clock(rq);
 606
 607	/*
 608	 * Changing the policy of the stop threads is a very bad idea:
 609	 */
 610	if (p == rq->stop) {
 611		retval = -EINVAL;
 612		goto unlock;
 613	}
 614
 615	retval = scx_check_setscheduler(p, policy);
 616	if (retval)
 617		goto unlock;
 618
 619	/*
 620	 * If not changing anything there's no need to proceed further,
 621	 * but store a possible modification of reset_on_fork.
 622	 */
 623	if (unlikely(policy == p->policy)) {
 624		if (fair_policy(policy) &&
 625		    (attr->sched_nice != task_nice(p) ||
 626		     (attr->sched_runtime != p->se.slice)))
 627			goto change;
 628		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
 629			goto change;
 630		if (dl_policy(policy) && dl_param_changed(p, attr))
 631			goto change;
 632		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
 633			goto change;
 634
 635		p->sched_reset_on_fork = reset_on_fork;
 636		retval = 0;
 637		goto unlock;
 638	}
 639change:
 640
 641	if (user) {
 642#ifdef CONFIG_RT_GROUP_SCHED
 643		/*
 644		 * Do not allow real-time tasks into groups that have no runtime
 645		 * assigned.
 646		 */
 647		if (rt_bandwidth_enabled() && rt_policy(policy) &&
 648				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
 649				!task_group_is_autogroup(task_group(p))) {
 650			retval = -EPERM;
 651			goto unlock;
 652		}
 653#endif
 654#ifdef CONFIG_SMP
 655		if (dl_bandwidth_enabled() && dl_policy(policy) &&
 656				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
 657			cpumask_t *span = rq->rd->span;
 658
 659			/*
 660			 * Don't allow tasks with an affinity mask smaller than
 661			 * the entire root_domain to become SCHED_DEADLINE. We
 662			 * will also fail if there's no bandwidth available.
 663			 */
 664			if (!cpumask_subset(span, p->cpus_ptr) ||
 665			    rq->rd->dl_bw.bw == 0) {
 666				retval = -EPERM;
 667				goto unlock;
 668			}
 669		}
 670#endif
 671	}
 672
 673	/* Re-check policy now with rq lock held: */
 674	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 675		policy = oldpolicy = -1;
 676		task_rq_unlock(rq, p, &rf);
 677		if (cpuset_locked)
 678			cpuset_unlock();
 679		goto recheck;
 680	}
 681
 682	/*
 683	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
 684	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
 685	 * is available.
 686	 */
 687	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
 688		retval = -EBUSY;
 689		goto unlock;
 690	}
 691
 692	p->sched_reset_on_fork = reset_on_fork;
 693	oldprio = p->prio;
 694
 695	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
 696	if (pi) {
 697		/*
 698		 * Take priority boosted tasks into account. If the new
 699		 * effective priority is unchanged, we just store the new
 700		 * normal parameters and do not touch the scheduler class and
 701		 * the runqueue. This will be done when the task deboost
 702		 * the runqueue. This will be done when the task deboosts
 703		 */
 704		newprio = rt_effective_prio(p, newprio);
 705		if (newprio == oldprio)
 706			queue_flags &= ~DEQUEUE_MOVE;
 707	}
 708
 709	prev_class = p->sched_class;
 710	next_class = __setscheduler_class(policy, newprio);
 711
 712	if (prev_class != next_class && p->se.sched_delayed)
 713		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
 714
 715	queued = task_on_rq_queued(p);
 716	running = task_current_donor(rq, p);
 717	if (queued)
 718		dequeue_task(rq, p, queue_flags);
 719	if (running)
 720		put_prev_task(rq, p);
 721
 722	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
 723		__setscheduler_params(p, attr);
 724		p->sched_class = next_class;
 725		p->prio = newprio;
 726	}
 727	__setscheduler_uclamp(p, attr);
 728	check_class_changing(rq, p, prev_class);
 729
 730	if (queued) {
 731		/*
 732		 * We enqueue to tail when the priority of a task is
 733		 * increased (user space view).
 734		 */
 735		if (oldprio < p->prio)
 736			queue_flags |= ENQUEUE_HEAD;
 737
 738		enqueue_task(rq, p, queue_flags);
 739	}
 740	if (running)
 741		set_next_task(rq, p);
 742
 743	check_class_changed(rq, p, prev_class, oldprio);
 744
 745	/* Prevent the rq from going away on us: */
 746	preempt_disable();
 747	head = splice_balance_callbacks(rq);
 748	task_rq_unlock(rq, p, &rf);
 749
 750	if (pi) {
 751		if (cpuset_locked)
 752			cpuset_unlock();
 753		rt_mutex_adjust_pi(p);
 754	}
 755
 756	/* Run balance callbacks after we've adjusted the PI chain: */
 757	balance_callbacks(rq, head);
 758	preempt_enable();
 759
 760	return 0;
 761
 762unlock:
 763	task_rq_unlock(rq, p, &rf);
 764	if (cpuset_locked)
 765		cpuset_unlock();
 766	return retval;
 767}
 768
 769static int _sched_setscheduler(struct task_struct *p, int policy,
 770			       const struct sched_param *param, bool check)
 771{
 772	struct sched_attr attr = {
 773		.sched_policy   = policy,
 774		.sched_priority = param->sched_priority,
 775		.sched_nice	= PRIO_TO_NICE(p->static_prio),
 776	};
 777
 778	if (p->se.custom_slice)
 779		attr.sched_runtime = p->se.slice;
 780
 781	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
 782	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
 783		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
 784		policy &= ~SCHED_RESET_ON_FORK;
 785		attr.sched_policy = policy;
 786	}
 787
 788	return __sched_setscheduler(p, &attr, check, true);
 789}
 790/**
 791 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 792 * @p: the task in question.
 793 * @policy: new policy.
 794 * @param: structure containing the new RT priority.
 795 *
 796 * Use sched_set_fifo(), read its comment.
 797 *
 798 * Return: 0 on success. An error code otherwise.
 799 *
 800 * NOTE that the task may already be dead.
 801 */
 802int sched_setscheduler(struct task_struct *p, int policy,
 803		       const struct sched_param *param)
 804{
 805	return _sched_setscheduler(p, policy, param, true);
 806}
 807
 808int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 809{
 810	return __sched_setscheduler(p, attr, true, true);
 811}
 812
 813int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 814{
 815	return __sched_setscheduler(p, attr, false, true);
 816}
 817EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 818
 819/**
 820 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 821 * @p: the task in question.
 822 * @policy: new policy.
 823 * @param: structure containing the new RT priority.
 824 *
 825 * Just like sched_setscheduler, only don't bother checking if the
 826 * current context has permission.  For example, this is needed in
 827 * stop_machine(): we create temporary high priority worker threads,
 828 * but our caller might not have that capability.
 829 *
 830 * Return: 0 on success. An error code otherwise.
 831 */
 832int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 833			       const struct sched_param *param)
 834{
 835	return _sched_setscheduler(p, policy, param, false);
 836}
 837
 838/*
 839 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 840 * incapable of resource management, which is the one thing an OS really should
 841 * be doing.
 842 *
 843 * This is of course the reason it is limited to privileged users only.
 844 *
 845 * Worse still, it is fundamentally impossible to compose static priority
 846 * workloads. You cannot take two correctly working static prio workloads
 847 * and smash them together and still expect them to work.
 848 *
 849 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 850 *
 851 *   MAX_RT_PRIO / 2
 852 *
 853 * The administrator _MUST_ configure the system; the kernel simply doesn't
 854 * have enough information to make a sensible choice.
 855 */
 856void sched_set_fifo(struct task_struct *p)
 857{
 858	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
 859	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
 860}
 861EXPORT_SYMBOL_GPL(sched_set_fifo);
 862
 863/*
 864 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 865 */
 866void sched_set_fifo_low(struct task_struct *p)
 867{
 868	struct sched_param sp = { .sched_priority = 1 };
 869	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
 870}
 871EXPORT_SYMBOL_GPL(sched_set_fifo_low);
 872
 873void sched_set_normal(struct task_struct *p, int nice)
 874{
 875	struct sched_attr attr = {
 876		.sched_policy = SCHED_NORMAL,
 877		.sched_nice = nice,
 878	};
 879	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
 880}
 881EXPORT_SYMBOL_GPL(sched_set_normal);
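
/*
 * Sketch of how a driver kthread would typically use the helpers above,
 * letting the kernel pick the default FIFO priority instead of hard-coding
 * one. "latency_worker" is a hypothetical name and the snippet assumes
 * <linux/kthread.h>; it is illustrative only, not part of the upstream file.
 */
static int __maybe_unused latency_worker(void *data)
{
	sched_set_fifo(current);	/* MAX_RT_PRIO / 2, see the comment above */

	while (!kthread_should_stop()) {
		/* latency-sensitive work would go here */
		schedule();
	}

	/* Drop back to a normal task before exiting, if desired. */
	sched_set_normal(current, 0);
	return 0;
}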
 882
 883static int
 884do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 885{
 886	struct sched_param lparam;
 887
 888	if (!param || pid < 0)
 889		return -EINVAL;
 890	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
 891		return -EFAULT;
 892
 893	CLASS(find_get_task, p)(pid);
 894	if (!p)
 895		return -ESRCH;
 896
 897	return sched_setscheduler(p, policy, &lparam);
 898}
 899
 900/*
 901 * Mimics kernel/events/core.c perf_copy_attr().
 902 */
 903static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
 904{
 905	u32 size;
 906	int ret;
 907
 908	/* Zero the full structure, so that a short copy will be nice: */
 909	memset(attr, 0, sizeof(*attr));
 910
 911	ret = get_user(size, &uattr->size);
 912	if (ret)
 913		return ret;
 914
 915	/* ABI compatibility quirk: */
 916	if (!size)
 917		size = SCHED_ATTR_SIZE_VER0;
 918	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
 919		goto err_size;
 920
 921	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
 922	if (ret) {
 923		if (ret == -E2BIG)
 924			goto err_size;
 925		return ret;
 926	}
 927
 928	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
 929	    size < SCHED_ATTR_SIZE_VER1)
 930		return -EINVAL;
 931
 932	/*
 933	 * XXX: Do we want to be lenient like existing syscalls; or do we want
 934	 * to be strict and return an error on out-of-bounds values?
 935	 */
 936	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
 937
 938	return 0;
 939
 940err_size:
 941	put_user(sizeof(*attr), &uattr->size);
 942	return -E2BIG;
 943}
 944
 945static void get_params(struct task_struct *p, struct sched_attr *attr)
 946{
 947	if (task_has_dl_policy(p)) {
 948		__getparam_dl(p, attr);
 949	} else if (task_has_rt_policy(p)) {
 950		attr->sched_priority = p->rt_priority;
 951	} else {
 952		attr->sched_nice = task_nice(p);
 953		attr->sched_runtime = p->se.slice;
 954	}
 955}
 956
 957/**
 958 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 959 * @pid: the pid in question.
 960 * @policy: new policy.
 961 * @param: structure containing the new RT priority.
 962 *
 963 * Return: 0 on success. An error code otherwise.
 964 */
 965SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
 966{
 967	if (policy < 0)
 968		return -EINVAL;
 969
 970	return do_sched_setscheduler(pid, policy, param);
 971}
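
/*
 * Userspace-side sketch of the syscall above, using the POSIX wrapper from
 * <sched.h>. Switching to SCHED_FIFO normally requires CAP_SYS_NICE or an
 * RLIMIT_RTPRIO allowance; a minimal example, not part of this file.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp))	/* pid 0 == calling thread */
		perror("sched_setscheduler");
	return 0;
}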
 972
 973/**
 974 * sys_sched_setparam - set/change the RT priority of a thread
 975 * @pid: the pid in question.
 976 * @param: structure containing the new RT priority.
 977 *
 978 * Return: 0 on success. An error code otherwise.
 979 */
 980SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 981{
 982	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
 983}
 984
 985/**
 986 * sys_sched_setattr - same as above, but with extended sched_attr
 987 * @pid: the pid in question.
 988 * @uattr: structure containing the extended parameters.
 989 * @flags: for future extension.
 990 */
 991SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 992			       unsigned int, flags)
 993{
 994	struct sched_attr attr;
 995	int retval;
 996
 997	if (!uattr || pid < 0 || flags)
 998		return -EINVAL;
 999
1000	retval = sched_copy_attr(uattr, &attr);
1001	if (retval)
1002		return retval;
1003
1004	if ((int)attr.sched_policy < 0)
1005		return -EINVAL;
1006	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
1007		attr.sched_policy = SETPARAM_POLICY;
1008
1009	CLASS(find_get_task, p)(pid);
1010	if (!p)
1011		return -ESRCH;
1012
1013	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
1014		get_params(p, &attr);
1015
1016	return sched_setattr(p, &attr);
1017}
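
/*
 * Userspace-side sketch of sched_setattr(), which has no glibc wrapper and is
 * usually invoked through syscall(2). Assumes <linux/sched/types.h> provides
 * struct sched_attr (on older systems the struct may need to be declared
 * locally); the SCHED_DEADLINE parameters are arbitrary example values.
 */
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <linux/sched.h>
#include <linux/sched/types.h>

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/*  10 ms */
	attr.sched_deadline = 100 * 1000 * 1000;	/* 100 ms */
	attr.sched_period   = 100 * 1000 * 1000;	/* 100 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}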
1018
1019/**
1020 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
1021 * @pid: the pid in question.
1022 *
1023 * Return: On success, the policy of the thread. Otherwise, a negative error
1024 * code.
1025 */
1026SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1027{
1028	struct task_struct *p;
1029	int retval;
1030
1031	if (pid < 0)
1032		return -EINVAL;
1033
1034	guard(rcu)();
1035	p = find_process_by_pid(pid);
1036	if (!p)
1037		return -ESRCH;
1038
1039	retval = security_task_getscheduler(p);
1040	if (!retval) {
1041		retval = p->policy;
1042		if (p->sched_reset_on_fork)
1043			retval |= SCHED_RESET_ON_FORK;
1044	}
1045	return retval;
1046}
1047
1048/**
1049 * sys_sched_getparam - get the RT priority of a thread
1050 * @pid: the pid in question.
1051 * @param: structure containing the RT priority.
1052 *
1053 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
1054 * code.
1055 */
1056SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1057{
1058	struct sched_param lp = { .sched_priority = 0 };
1059	struct task_struct *p;
1060	int retval;
1061
1062	if (!param || pid < 0)
1063		return -EINVAL;
1064
1065	scoped_guard (rcu) {
1066		p = find_process_by_pid(pid);
1067		if (!p)
1068			return -ESRCH;
1069
1070		retval = security_task_getscheduler(p);
1071		if (retval)
1072			return retval;
1073
1074		if (task_has_rt_policy(p))
1075			lp.sched_priority = p->rt_priority;
1076	}
1077
1078	/*
1079	 * This one might sleep; we cannot do it with a spinlock held ...
1080	 */
1081	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1082}
1083
1084/**
1085 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
1086 * @pid: the pid in question.
1087 * @uattr: structure containing the extended parameters.
1088 * @usize: sizeof(attr) for fwd/bwd comp.
1089 * @flags: for future extension.
1090 */
1091SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1092		unsigned int, usize, unsigned int, flags)
1093{
1094	struct sched_attr kattr = { };
1095	struct task_struct *p;
1096	int retval;
1097
1098	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
1099	    usize < SCHED_ATTR_SIZE_VER0 || flags)
1100		return -EINVAL;
1101
1102	scoped_guard (rcu) {
1103		p = find_process_by_pid(pid);
1104		if (!p)
1105			return -ESRCH;
1106
1107		retval = security_task_getscheduler(p);
1108		if (retval)
1109			return retval;
1110
1111		kattr.sched_policy = p->policy;
1112		if (p->sched_reset_on_fork)
1113			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
1114		get_params(p, &kattr);
1115		kattr.sched_flags &= SCHED_FLAG_ALL;
1116
1117#ifdef CONFIG_UCLAMP_TASK
1118		/*
1119		 * This could race with another potential updater, but this is fine
1120		 * because it'll correctly read the old or the new value. We don't need
1121		 * to guarantee who wins the race as long as it doesn't return garbage.
1122		 */
1123		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
1124		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
1125#endif
1126	}
1127
1128	kattr.size = min(usize, sizeof(kattr));
1129	return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL);
1130}
1131
1132#ifdef CONFIG_SMP
1133int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1134{
1135	/*
1136	 * If the task isn't a deadline task or admission control is
1137	 * disabled then we don't care about affinity changes.
1138	 */
1139	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
1140		return 0;
1141
1142	/*
1143	 * Since bandwidth control happens on root_domain basis,
1144	 * if admission test is enabled, we only admit -deadline
1145	 * tasks allowed to run on all the CPUs in the task's
1146	 * root_domain.
1147	 */
1148	guard(rcu)();
1149	if (!cpumask_subset(task_rq(p)->rd->span, mask))
1150		return -EBUSY;
1151
1152	return 0;
1153}
1154#endif /* CONFIG_SMP */
1155
1156int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
1157{
1158	int retval;
1159	cpumask_var_t cpus_allowed, new_mask;
1160
1161	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
1162		return -ENOMEM;
1163
1164	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
1165		retval = -ENOMEM;
1166		goto out_free_cpus_allowed;
1167	}
1168
1169	cpuset_cpus_allowed(p, cpus_allowed);
1170	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
1171
1172	ctx->new_mask = new_mask;
1173	ctx->flags |= SCA_CHECK;
1174
1175	retval = dl_task_check_affinity(p, new_mask);
1176	if (retval)
1177		goto out_free_new_mask;
1178
1179	retval = __set_cpus_allowed_ptr(p, ctx);
1180	if (retval)
1181		goto out_free_new_mask;
1182
1183	cpuset_cpus_allowed(p, cpus_allowed);
1184	if (!cpumask_subset(new_mask, cpus_allowed)) {
1185		/*
1186		 * We must have raced with a concurrent cpuset update.
1187		 * Just reset the cpumask to the cpuset's cpus_allowed.
1188		 */
1189		cpumask_copy(new_mask, cpus_allowed);
1190
1191		/*
1192		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
1193		 * will restore the previous user_cpus_ptr value.
1194		 *
1195		 * In the unlikely event a previous user_cpus_ptr exists,
1196		 * we need to further restrict the mask to what is allowed
1197		 * by that old user_cpus_ptr.
1198		 */
1199		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
1200			bool empty = !cpumask_and(new_mask, new_mask,
1201						  ctx->user_mask);
1202
1203			if (empty)
1204				cpumask_copy(new_mask, cpus_allowed);
1205		}
1206		__set_cpus_allowed_ptr(p, ctx);
1207		retval = -EINVAL;
1208	}
1209
1210out_free_new_mask:
1211	free_cpumask_var(new_mask);
1212out_free_cpus_allowed:
1213	free_cpumask_var(cpus_allowed);
1214	return retval;
1215}
1216
1217long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1218{
1219	struct affinity_context ac;
1220	struct cpumask *user_mask;
1221	int retval;
1222
1223	CLASS(find_get_task, p)(pid);
1224	if (!p)
1225		return -ESRCH;
1226
1227	if (p->flags & PF_NO_SETAFFINITY)
1228		return -EINVAL;
1229
1230	if (!check_same_owner(p)) {
1231		guard(rcu)();
1232		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
1233			return -EPERM;
1234	}
1235
1236	retval = security_task_setscheduler(p);
1237	if (retval)
1238		return retval;
1239
1240	/*
1241	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
1242	 * alloc_user_cpus_ptr() returns NULL.
1243	 */
1244	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
1245	if (user_mask) {
1246		cpumask_copy(user_mask, in_mask);
1247	} else if (IS_ENABLED(CONFIG_SMP)) {
1248		return -ENOMEM;
1249	}
1250
1251	ac = (struct affinity_context){
1252		.new_mask  = in_mask,
1253		.user_mask = user_mask,
1254		.flags     = SCA_USER,
1255	};
1256
1257	retval = __sched_setaffinity(p, &ac);
1258	kfree(ac.user_mask);
1259
1260	return retval;
1261}
1262
1263static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
1264			     struct cpumask *new_mask)
1265{
1266	if (len < cpumask_size())
1267		cpumask_clear(new_mask);
1268	else if (len > cpumask_size())
1269		len = cpumask_size();
1270
1271	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
1272}
1273
1274/**
1275 * sys_sched_setaffinity - set the CPU affinity of a process
1276 * @pid: pid of the process
1277 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1278 * @user_mask_ptr: user-space pointer to the new CPU mask
1279 *
1280 * Return: 0 on success. An error code otherwise.
1281 */
1282SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
1283		unsigned long __user *, user_mask_ptr)
1284{
1285	cpumask_var_t new_mask;
1286	int retval;
1287
1288	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
1289		return -ENOMEM;
1290
1291	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
1292	if (retval == 0)
1293		retval = sched_setaffinity(pid, new_mask);
1294	free_cpumask_var(new_mask);
1295	return retval;
1296}
1297
1298long sched_getaffinity(pid_t pid, struct cpumask *mask)
1299{
1300	struct task_struct *p;
1301	int retval;
1302
1303	guard(rcu)();
1304	p = find_process_by_pid(pid);
1305	if (!p)
1306		return -ESRCH;
1307
1308	retval = security_task_getscheduler(p);
1309	if (retval)
1310		return retval;
1311
1312	guard(raw_spinlock_irqsave)(&p->pi_lock);
1313	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
1314
1315	return 0;
1316}
1317
1318/**
1319 * sys_sched_getaffinity - get the CPU affinity of a process
1320 * @pid: pid of the process
1321 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1322 * @user_mask_ptr: user-space pointer to hold the current CPU mask
1323 *
1324 * Return: size of CPU mask copied to user_mask_ptr on success. An
1325 * error code otherwise.
1326 */
1327SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
1328		unsigned long __user *, user_mask_ptr)
1329{
1330	int ret;
1331	cpumask_var_t mask;
1332
1333	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
1334		return -EINVAL;
1335	if (len & (sizeof(unsigned long)-1))
1336		return -EINVAL;
1337
1338	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1339		return -ENOMEM;
1340
1341	ret = sched_getaffinity(pid, mask);
1342	if (ret == 0) {
1343		unsigned int retlen = min(len, cpumask_size());
1344
1345		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
1346			ret = -EFAULT;
1347		else
1348			ret = retlen;
1349	}
1350	free_cpumask_var(mask);
1351
1352	return ret;
1353}
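
/*
 * Userspace-side sketch exercising both affinity syscalls through the glibc
 * wrappers; a minimal example, not part of this file.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);			/* pin ourselves to CPU 0 */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("allowed CPUs: %d\n", CPU_COUNT(&set));
	return 0;
}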
1354
1355static void do_sched_yield(void)
1356{
1357	struct rq_flags rf;
1358	struct rq *rq;
1359
1360	rq = this_rq_lock_irq(&rf);
1361
1362	schedstat_inc(rq->yld_count);
1363	current->sched_class->yield_task(rq);
1364
1365	preempt_disable();
1366	rq_unlock_irq(rq, &rf);
1367	sched_preempt_enable_no_resched();
1368
1369	schedule();
1370}
1371
1372/**
1373 * sys_sched_yield - yield the current processor to other threads.
1374 *
1375 * This function yields the current CPU to other tasks. If there are no
1376 * other threads running on this CPU then this function will return.
1377 *
1378 * Return: 0.
1379 */
1380SYSCALL_DEFINE0(sched_yield)
1381{
1382	do_sched_yield();
1383	return 0;
1384}
1385
1386/**
1387 * yield - yield the current processor to other threads.
1388 *
1389 * Do not ever use this function, there's a 99% chance you're doing it wrong.
1390 *
1391 * The scheduler is at all times free to pick the calling task as the most
1392 * eligible task to run, if removing the yield() call from your code breaks
1393 * it, it's already broken.
1394 *
1395 * Typical broken usage is:
1396 *
1397 * while (!event)
1398 *	yield();
1399 *
1400 * where one assumes that yield() will let 'the other' process run that will
1401 * make event true. If the current task is a SCHED_FIFO task that will never
1402 * happen. Never use yield() as a progress guarantee!!
1403 *
1404 * If you want to use yield() to wait for something, use wait_event().
1405 * If you want to use yield() to be 'nice' for others, use cond_resched().
1406 * If you still want to use yield(), do not!
1407 */
1408void __sched yield(void)
1409{
1410	set_current_state(TASK_RUNNING);
1411	do_sched_yield();
1412}
1413EXPORT_SYMBOL(yield);
1414
1415/**
1416 * yield_to - yield the current processor to another thread in
1417 * your thread group, or accelerate that thread toward the
1418 * processor it's on.
1419 * @p: target task
1420 * @preempt: whether task preemption is allowed or not
1421 *
1422 * It's the caller's job to ensure that the target task struct
1423 * can't go away on us before we can do any checks.
1424 *
1425 * Return:
1426 *	true (>0) if we indeed boosted the target task.
1427 *	false (0) if we failed to boost the target.
1428 *	-ESRCH if there's no task to yield to.
1429 */
1430int __sched yield_to(struct task_struct *p, bool preempt)
1431{
1432	struct task_struct *curr = current;
1433	struct rq *rq, *p_rq;
1434	int yielded = 0;
1435
1436	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
1437		rq = this_rq();
1438
1439again:
1440		p_rq = task_rq(p);
1441		/*
1442		 * If we're the only runnable task on the rq and target rq also
1443		 * has only one task, there's absolutely no point in yielding.
1444		 */
1445		if (rq->nr_running == 1 && p_rq->nr_running == 1)
1446			return -ESRCH;
1447
1448		guard(double_rq_lock)(rq, p_rq);
1449		if (task_rq(p) != p_rq)
1450			goto again;
1451
1452		if (!curr->sched_class->yield_to_task)
1453			return 0;
1454
1455		if (curr->sched_class != p->sched_class)
1456			return 0;
1457
1458		if (task_on_cpu(p_rq, p) || !task_is_running(p))
1459			return 0;
1460
1461		yielded = curr->sched_class->yield_to_task(rq, p);
1462		if (yielded) {
1463			schedstat_inc(rq->yld_count);
1464			/*
1465			 * Make p's CPU reschedule; pick_next_entity
1466			 * takes care of fairness.
1467			 */
1468			if (preempt && rq != p_rq)
1469				resched_curr(p_rq);
1470		}
1471	}
1472
1473	if (yielded)
1474		schedule();
1475
1476	return yielded;
1477}
1478EXPORT_SYMBOL_GPL(yield_to);
1479
1480/**
1481 * sys_sched_get_priority_max - return maximum RT priority.
1482 * @policy: scheduling class.
1483 *
1484 * Return: On success, this syscall returns the maximum
1485 * rt_priority that can be used by a given scheduling class.
1486 * On failure, a negative error code is returned.
1487 */
1488SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1489{
1490	int ret = -EINVAL;
1491
1492	switch (policy) {
1493	case SCHED_FIFO:
1494	case SCHED_RR:
1495		ret = MAX_RT_PRIO-1;
1496		break;
1497	case SCHED_DEADLINE:
1498	case SCHED_NORMAL:
1499	case SCHED_BATCH:
1500	case SCHED_IDLE:
1501	case SCHED_EXT:
1502		ret = 0;
1503		break;
1504	}
1505	return ret;
1506}
1507
1508/**
1509 * sys_sched_get_priority_min - return minimum RT priority.
1510 * @policy: scheduling class.
1511 *
1512 * Return: On success, this syscall returns the minimum
1513 * rt_priority that can be used by a given scheduling class.
1514 * On failure, a negative error code is returned.
1515 */
1516SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1517{
1518	int ret = -EINVAL;
1519
1520	switch (policy) {
1521	case SCHED_FIFO:
1522	case SCHED_RR:
1523		ret = 1;
1524		break;
1525	case SCHED_DEADLINE:
1526	case SCHED_NORMAL:
1527	case SCHED_BATCH:
1528	case SCHED_IDLE:
1529	case SCHED_EXT:
1530		ret = 0;
1531	}
1532	return ret;
1533}
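
/*
 * Userspace-side sketch: querying the valid SCHED_FIFO priority range with
 * the two syscalls above (glibc wrappers); not part of this file.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int lo = sched_get_priority_min(SCHED_FIFO);
	int hi = sched_get_priority_max(SCHED_FIFO);

	printf("SCHED_FIFO priority range: %d..%d\n", lo, hi);
	return 0;
}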
1534
1535static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1536{
1537	unsigned int time_slice = 0;
1538	int retval;
1539
1540	if (pid < 0)
1541		return -EINVAL;
1542
1543	scoped_guard (rcu) {
1544		struct task_struct *p = find_process_by_pid(pid);
1545		if (!p)
1546			return -ESRCH;
1547
1548		retval = security_task_getscheduler(p);
1549		if (retval)
1550			return retval;
1551
1552		scoped_guard (task_rq_lock, p) {
1553			struct rq *rq = scope.rq;
1554			if (p->sched_class->get_rr_interval)
1555				time_slice = p->sched_class->get_rr_interval(rq, p);
1556		}
1557	}
1558
1559	jiffies_to_timespec64(time_slice, t);
1560	return 0;
1561}
1562
1563/**
1564 * sys_sched_rr_get_interval - return the default time-slice of a process.
1565 * @pid: pid of the process.
1566 * @interval: userspace pointer to the time-slice value.
1567 *
1568 * This syscall writes the default time-slice value of a given process
1569 * into the user-space timespec buffer. A value of '0' means infinity.
1570 *
1571 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
1572 * an error code.
1573 */
1574SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
1575		struct __kernel_timespec __user *, interval)
1576{
1577	struct timespec64 t;
1578	int retval = sched_rr_get_interval(pid, &t);
1579
1580	if (retval == 0)
1581		retval = put_timespec64(&t, interval);
1582
1583	return retval;
1584}
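
/*
 * Userspace-side sketch for the syscall above, using the glibc wrapper from
 * <sched.h>; pid 0 queries the calling thread. Not part of this file.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR timeslice: %lld.%09ld s\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}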
1585
1586#ifdef CONFIG_COMPAT_32BIT_TIME
1587SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
1588		struct old_timespec32 __user *, interval)
1589{
1590	struct timespec64 t;
1591	int retval = sched_rr_get_interval(pid, &t);
1592
1593	if (retval == 0)
1594		retval = put_old_timespec32(&t, interval);
1595	return retval;
1596}
1597#endif