kernel/sched/psi.c, v5.4
   1/*
   2 * Pressure stall information for CPU, memory and IO
   3 *
   4 * Copyright (c) 2018 Facebook, Inc.
   5 * Author: Johannes Weiner <hannes@cmpxchg.org>
   6 *
   7 * Polling support by Suren Baghdasaryan <surenb@google.com>
   8 * Copyright (c) 2018 Google, Inc.
   9 *
  10 * When CPU, memory and IO are contended, tasks experience delays that
  11 * reduce throughput and introduce latencies into the workload. Memory
  12 * and IO contention, in addition, can cause a full loss of forward
  13 * progress in which the CPU goes idle.
  14 *
  15 * This code aggregates individual task delays into resource pressure
  16 * metrics that indicate problems with both workload health and
  17 * resource utilization.
  18 *
  19 *			Model
  20 *
  21 * The time in which a task can execute on a CPU is our baseline for
  22 * productivity. Pressure expresses the amount of time in which this
  23 * potential cannot be realized due to resource contention.
  24 *
  25 * This concept of productivity has two components: the workload and
  26 * the CPU. To measure the impact of pressure on both, we define two
  27 * contention states for a resource: SOME and FULL.
  28 *
  29 * In the SOME state of a given resource, one or more tasks are
  30 * delayed on that resource. This affects the workload's ability to
  31 * perform work, but the CPU may still be executing other tasks.
  32 *
  33 * In the FULL state of a given resource, all non-idle tasks are
  34 * delayed on that resource such that nobody is advancing and the CPU
  35 * goes idle. This leaves both workload and CPU unproductive.
  36 *
  37 * (Naturally, the FULL state doesn't exist for the CPU resource.)
  38 *
  39 *	SOME = nr_delayed_tasks != 0
  40 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
  41 *
  42 * The percentage of wallclock time spent in those compound stall
  43 * states gives pressure numbers between 0 and 100 for each resource,
  44 * where the SOME percentage indicates workload slowdowns and the FULL
  45 * percentage indicates reduced CPU utilization:
  46 *
  47 *	%SOME = time(SOME) / period
  48 *	%FULL = time(FULL) / period
  49 *
  50 *			Multiple CPUs
  51 *
  52 * The more tasks and available CPUs there are, the more work can be
  53 * performed concurrently. This means that the potential that can go
  54 * unrealized due to resource contention *also* scales with non-idle
  55 * tasks and CPUs.
  56 *
  57 * Consider a scenario where 257 number crunching tasks are trying to
  58 * run concurrently on 256 CPUs. If we simply aggregated the task
  59 * states, we would have to conclude a CPU SOME pressure number of
  60 * 100%, since *somebody* is waiting on a runqueue at all
  61 * times. However, that is clearly not the amount of contention the
  62 * workload is experiencing: only one out of 256 possible execution
  63 * threads will be contended at any given time, or about 0.4%.
  64 *
  65 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
  66 * given time *one* of the tasks is delayed due to a lack of memory.
  67 * Again, looking purely at the task state would yield a memory FULL
  68 * pressure number of 0%, since *somebody* is always making forward
  69 * progress. But again this wouldn't capture the amount of execution
  70 * potential lost, which is 1 out of 4 CPUs, or 25%.
  71 *
  72 * To calculate wasted potential (pressure) with multiple processors,
  73 * we have to base our calculation on the number of non-idle tasks in
  74 * conjunction with the number of available CPUs, which is the number
  75 * of potential execution threads. SOME then becomes the proportion of
  76 * delayed tasks to possible threads, and FULL is the share of possible
  77 * threads that are unproductive due to delays:
  78 *
  79 *	threads = min(nr_nonidle_tasks, nr_cpus)
  80 *	   SOME = min(nr_delayed_tasks / threads, 1)
  81 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
  82 *
  83 * For the 257 number crunchers on 256 CPUs, this yields:
  84 *
  85 *	threads = min(257, 256)
  86 *	   SOME = min(1 / 256, 1)             = 0.4%
  87 *	   FULL = (256 - min(257, 256)) / 256 = 0%
  88 *
  89 * For the 1 out of 4 memory-delayed tasks, this yields:
  90 *
  91 *	threads = min(4, 4)
  92 *	   SOME = min(1 / 4, 1)               = 25%
  93 *	   FULL = (4 - min(3, 4)) / 4         = 25%
  94 *
  95 * [ Substitute nr_cpus with 1, and you can see that it's a natural
  96 *   extension of the single-CPU model. ]
  97 *
  98 *			Implementation
  99 *
 100 * To assess the precise time spent in each such state, we would have
 101 * to freeze the system on task changes and start/stop the state
 102 * clocks accordingly. Obviously that doesn't scale in practice.
 103 *
 104 * Because the scheduler aims to distribute the compute load evenly
 105 * among the available CPUs, we can track task state locally to each
 106 * CPU and, at much lower frequency, extrapolate the global state for
 107 * the cumulative stall times and the running averages.
 108 *
 109 * For each runqueue, we track:
 110 *
 111 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 112 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 113 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 114 *
 115 * and then periodically aggregate:
 116 *
 117 *	tNONIDLE = sum(tNONIDLE[i])
 118 *
 119 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 120 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 121 *
 122 *	   %SOME = tSOME / period
 123 *	   %FULL = tFULL / period
 124 *
 125 * This gives us an approximation of pressure that is practical
 126 * cost-wise, yet way more sensitive and accurate than periodic
 127 * sampling of the aggregate task states would be.
 128 */
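/*
 * Worked example of the aggregation above (illustrative sketch, not
 * part of the original comment): over a 2s period, assume CPU0 was
 * non-idle for the full 2s with 1s of SOME memory stall, while CPU1
 * was non-idle for only 0.5s, all of it stalled:
 *
 *	tNONIDLE = 2.0s + 0.5s                   = 2.5s
 *	   tSOME = (1.0*2.0 + 0.5*0.5) / 2.5     = 0.9s
 *	   %SOME = 0.9s / 2.0s                   = 45%
 *
 * Weighting each CPU by its non-idle time keeps a stall on a mostly
 * idle CPU from dominating the global pressure number.
 */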
 129
 130#include "../workqueue_internal.h"
 131#include <linux/sched/loadavg.h>
 132#include <linux/seq_file.h>
 133#include <linux/proc_fs.h>
 134#include <linux/seqlock.h>
 135#include <linux/uaccess.h>
 136#include <linux/cgroup.h>
 137#include <linux/module.h>
 138#include <linux/sched.h>
 139#include <linux/ctype.h>
 140#include <linux/file.h>
 141#include <linux/poll.h>
 142#include <linux/psi.h>
 143#include "sched.h"
 144
 145static int psi_bug __read_mostly;
 146
 147DEFINE_STATIC_KEY_FALSE(psi_disabled);
 148
 149#ifdef CONFIG_PSI_DEFAULT_DISABLED
 150static bool psi_enable;
 151#else
 152static bool psi_enable = true;
 153#endif
 154static int __init setup_psi(char *str)
 155{
 156	return kstrtobool(str, &psi_enable) == 0;
 157}
 158__setup("psi=", setup_psi);
 159
 160/* Running averages - we need to be higher-res than loadavg */
 161#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
 162#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
 163#define EXP_60s		1981		/* 1/exp(2s/60s) */
 164#define EXP_300s	2034		/* 1/exp(2s/300s) */
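/*
 * Derivation sketch for the constants above (illustrative, assuming the
 * loadavg fixed-point base FIXED_1 = 2048): each value is
 * e^(-period/horizon) scaled to fixed point, e.g. for the 10s average
 *
 *	EXP_10s = round(2048 * exp(-2s/10s)) = round(2048 * 0.8187) = 1677
 *
 * and likewise round(2048 * exp(-2/60)) = 1981 and
 * round(2048 * exp(-2/300)) = 2034.
 */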
 165
 166/* PSI trigger definitions */
 167#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
 168#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
 169#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
 170
 171/* Sampling frequency in nanoseconds */
 172static u64 psi_period __read_mostly;
 173
 174/* System-level pressure and stall tracking */
 175static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
 176struct psi_group psi_system = {
 177	.pcpu = &system_group_pcpu,
 178};
 179
 180static void psi_avgs_work(struct work_struct *work);
 181
 182static void group_init(struct psi_group *group)
 183{
 184	int cpu;
 185
 186	for_each_possible_cpu(cpu)
 187		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
 188	group->avg_next_update = sched_clock() + psi_period;
 189	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
 190	mutex_init(&group->avgs_lock);
 191	/* Init trigger-related members */
 192	atomic_set(&group->poll_scheduled, 0);
 193	mutex_init(&group->trigger_lock);
 194	INIT_LIST_HEAD(&group->triggers);
 195	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
 196	group->poll_states = 0;
 197	group->poll_min_period = U32_MAX;
 198	memset(group->polling_total, 0, sizeof(group->polling_total));
 199	group->polling_next_update = ULLONG_MAX;
 200	group->polling_until = 0;
 201	rcu_assign_pointer(group->poll_kworker, NULL);
 202}
 203
 204void __init psi_init(void)
 205{
 206	if (!psi_enable) {
 207		static_branch_enable(&psi_disabled);
 208		return;
 209	}
 210
 211	psi_period = jiffies_to_nsecs(PSI_FREQ);
 212	group_init(&psi_system);
 213}
 214
 215static bool test_state(unsigned int *tasks, enum psi_states state)
 216{
 217	switch (state) {
 218	case PSI_IO_SOME:
 219		return tasks[NR_IOWAIT];
 220	case PSI_IO_FULL:
 221		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
 222	case PSI_MEM_SOME:
 223		return tasks[NR_MEMSTALL];
 224	case PSI_MEM_FULL:
 225		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
 226	case PSI_CPU_SOME:
 227		return tasks[NR_RUNNING] > 1;
 228	case PSI_NONIDLE:
 229		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
 230			tasks[NR_RUNNING];
 231	default:
 232		return false;
 233	}
 234}
 235
 236static void get_recent_times(struct psi_group *group, int cpu,
 237			     enum psi_aggregators aggregator, u32 *times,
 238			     u32 *pchanged_states)
 239{
 240	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
 241	u64 now, state_start;
 242	enum psi_states s;
 243	unsigned int seq;
 244	u32 state_mask;
 245
 246	*pchanged_states = 0;
 247
 248	/* Snapshot a coherent view of the CPU state */
 249	do {
 250		seq = read_seqcount_begin(&groupc->seq);
 251		now = cpu_clock(cpu);
 252		memcpy(times, groupc->times, sizeof(groupc->times));
 253		state_mask = groupc->state_mask;
 254		state_start = groupc->state_start;
 255	} while (read_seqcount_retry(&groupc->seq, seq));
 256
 257	/* Calculate state time deltas against the previous snapshot */
 258	for (s = 0; s < NR_PSI_STATES; s++) {
 259		u32 delta;
 260		/*
 261		 * In addition to already concluded states, we also
 262		 * incorporate currently active states on the CPU,
 263		 * since states may last for many sampling periods.
 264		 *
 265		 * This way we keep our delta sampling buckets small
 266		 * (u32) and our reported pressure close to what's
 267		 * actually happening.
 268		 */
 269		if (state_mask & (1 << s))
 270			times[s] += now - state_start;
 271
 272		delta = times[s] - groupc->times_prev[aggregator][s];
 273		groupc->times_prev[aggregator][s] = times[s];
 274
 275		times[s] = delta;
 276		if (delta)
 277			*pchanged_states |= (1 << s);
 278	}
 279}
 280
 281static void calc_avgs(unsigned long avg[3], int missed_periods,
 282		      u64 time, u64 period)
 283{
 284	unsigned long pct;
 285
 286	/* Fill in zeroes for periods of no activity */
 287	if (missed_periods) {
 288		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
 289		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
 290		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
 291	}
 292
 293	/* Sample the most recent active period */
 294	pct = div_u64(time * 100, period);
 295	pct *= FIXED_1;
 296	avg[0] = calc_load(avg[0], EXP_10s, pct);
 297	avg[1] = calc_load(avg[1], EXP_60s, pct);
 298	avg[2] = calc_load(avg[2], EXP_300s, pct);
 299}
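/*
 * The decay step performed by calc_load() above amounts to (sketch;
 * see <linux/sched/loadavg.h> for the exact fixed-point arithmetic):
 *
 *	avg' = avg * e + pct * (1 - e),	where e = EXP_xxx / FIXED_1
 *
 * so a sustained stall percentage converges toward that percentage,
 * and the zero samples fed in for missed_periods decay it toward 0.
 */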
 300
 301static void collect_percpu_times(struct psi_group *group,
 302				 enum psi_aggregators aggregator,
 303				 u32 *pchanged_states)
 304{
 305	u64 deltas[NR_PSI_STATES - 1] = { 0, };
 306	unsigned long nonidle_total = 0;
 307	u32 changed_states = 0;
 308	int cpu;
 309	int s;
 310
 311	/*
 312	 * Collect the per-cpu time buckets and average them into a
 313	 * single time sample that is normalized to wallclock time.
 314	 *
 315	 * For averaging, each CPU is weighted by its non-idle time in
 316	 * the sampling period. This eliminates artifacts from uneven
 317	 * loading, or even entirely idle CPUs.
 318	 */
 319	for_each_possible_cpu(cpu) {
 320		u32 times[NR_PSI_STATES];
 321		u32 nonidle;
 322		u32 cpu_changed_states;
 323
 324		get_recent_times(group, cpu, aggregator, times,
 325				&cpu_changed_states);
 326		changed_states |= cpu_changed_states;
 327
 328		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
 329		nonidle_total += nonidle;
 330
 331		for (s = 0; s < PSI_NONIDLE; s++)
 332			deltas[s] += (u64)times[s] * nonidle;
 333	}
 334
 335	/*
 336	 * Integrate the sample into the running statistics that are
 337	 * reported to userspace: the cumulative stall times and the
 338	 * decaying averages.
 339	 *
 340	 * Pressure percentages are sampled at PSI_FREQ. We might be
 341	 * called more often when the user polls more frequently than
 342	 * that; we might be called less often when there is no task
 343	 * activity, thus no data, and clock ticks are sporadic. The
 344	 * below handles both.
 345	 */
 346
 347	/* total= */
 348	for (s = 0; s < NR_PSI_STATES - 1; s++)
 349		group->total[aggregator][s] +=
 350				div_u64(deltas[s], max(nonidle_total, 1UL));
 351
 352	if (pchanged_states)
 353		*pchanged_states = changed_states;
 354}
 355
 356static u64 update_averages(struct psi_group *group, u64 now)
 357{
 358	unsigned long missed_periods = 0;
 359	u64 expires, period;
 360	u64 avg_next_update;
 361	int s;
 362
 363	/* avgX= */
 364	expires = group->avg_next_update;
 365	if (now - expires >= psi_period)
 366		missed_periods = div_u64(now - expires, psi_period);
 367
 368	/*
 369	 * The periodic clock tick can get delayed for various
 370	 * reasons, especially on loaded systems. To avoid clock
 371	 * drift, we schedule the clock in fixed psi_period intervals.
 372	 * But the deltas we sample out of the per-cpu buckets above
 373	 * are based on the actual time elapsing between clock ticks.
 374	 */
 375	avg_next_update = expires + ((1 + missed_periods) * psi_period);
 376	period = now - (group->avg_last_update + (missed_periods * psi_period));
 377	group->avg_last_update = now;
 378
 379	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 380		u32 sample;
 381
 382		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
 383		/*
 384		 * Due to the lockless sampling of the time buckets,
 385		 * recorded time deltas can slip into the next period,
 386		 * which under full pressure can result in samples in
 387		 * excess of the period length.
 388		 *
 389		 * We don't want to report non-sensical pressures in
 390		 * excess of 100%, nor do we want to drop such events
 391		 * on the floor. Instead we punt any overage into the
 392		 * future until pressure subsides. By doing this we
 393		 * don't underreport the occurring pressure curve, we
 394		 * just report it delayed by one period length.
 395		 *
 396		 * The error isn't cumulative. As soon as another
 397		 * delta slips from a period P to P+1, by definition
 398		 * it frees up its time T in P.
 399		 */
 400		if (sample > period)
 401			sample = period;
 402		group->avg_total[s] += sample;
 403		calc_avgs(group->avg[s], missed_periods, sample, period);
 404	}
 405
 406	return avg_next_update;
 407}
 408
 409static void psi_avgs_work(struct work_struct *work)
 410{
 411	struct delayed_work *dwork;
 412	struct psi_group *group;
 413	u32 changed_states;
 414	bool nonidle;
 415	u64 now;
 416
 417	dwork = to_delayed_work(work);
 418	group = container_of(dwork, struct psi_group, avgs_work);
 419
 420	mutex_lock(&group->avgs_lock);
 421
 422	now = sched_clock();
 423
 424	collect_percpu_times(group, PSI_AVGS, &changed_states);
 425	nonidle = changed_states & (1 << PSI_NONIDLE);
 426	/*
 427	 * If there is task activity, periodically fold the per-cpu
 428	 * times and feed samples into the running averages. If things
 429	 * are idle and there is no data to process, stop the clock.
 430	 * Once restarted, we'll catch up the running averages in one
 431	 * go - see calc_avgs() and missed_periods.
 432	 */
 433	if (now >= group->avg_next_update)
 434		group->avg_next_update = update_averages(group, now);
 435
 436	if (nonidle) {
 437		schedule_delayed_work(dwork, nsecs_to_jiffies(
 438				group->avg_next_update - now) + 1);
 439	}
 440
 441	mutex_unlock(&group->avgs_lock);
 442}
 443
 444/* Trigger tracking window manipulations */
 445static void window_reset(struct psi_window *win, u64 now, u64 value,
 446			 u64 prev_growth)
 447{
 448	win->start_time = now;
 449	win->start_value = value;
 450	win->prev_growth = prev_growth;
 451}
 452
 453/*
 454 * PSI growth tracking window update and growth calculation routine.
 455 *
 456 * This approximates a sliding tracking window by interpolating
 457 * partially elapsed windows using historical growth data from the
 458 * previous intervals. This minimizes memory requirements (by not storing
 459 * all the intermediate values in the previous window) and simplifies
 460 * the calculations. It works well because the PSI signal changes only
 461 * in the positive direction and, over relatively small window sizes,
 462 * the growth is close to linear.
 463 */
 464static u64 window_update(struct psi_window *win, u64 now, u64 value)
 465{
 466	u64 elapsed;
 467	u64 growth;
 468
 469	elapsed = now - win->start_time;
 470	growth = value - win->start_value;
 471	/*
 472	 * After each tracking window passes win->start_value and
 473	 * win->start_time get reset and win->prev_growth stores
 474	 * the average per-window growth of the previous window.
 475	 * win->prev_growth is then used to interpolate additional
 476	 * growth from the previous window assuming it was linear.
 477	 */
 478	if (elapsed > win->size)
 479		window_reset(win, now, value, growth);
 480	else {
 481		u32 remaining;
 482
 483		remaining = win->size - elapsed;
 484		growth += div_u64(win->prev_growth * remaining, win->size);
 485	}
 486
 487	return growth;
 488}
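/*
 * Worked example of the interpolation above (illustrative numbers):
 * with a 1s window, 400ms into the current window, 50us of growth so
 * far and 100us of growth in the previous window, the reported growth is
 *
 *	50us + 100us * (1s - 400ms) / 1s = 110us
 *
 * i.e. the 600ms not yet covered by the current window are filled in
 * from the previous window's growth, assumed to have been linear.
 */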
 489
 490static void init_triggers(struct psi_group *group, u64 now)
 491{
 492	struct psi_trigger *t;
 493
 494	list_for_each_entry(t, &group->triggers, node)
 495		window_reset(&t->win, now,
 496				group->total[PSI_POLL][t->state], 0);
 497	memcpy(group->polling_total, group->total[PSI_POLL],
 498		   sizeof(group->polling_total));
 499	group->polling_next_update = now + group->poll_min_period;
 500}
 501
 502static u64 update_triggers(struct psi_group *group, u64 now)
 503{
 504	struct psi_trigger *t;
 505	bool new_stall = false;
 506	u64 *total = group->total[PSI_POLL];
 507
 508	/*
 509	 * On subsequent updates, calculate growth deltas and let
 510	 * watchers know when their specified thresholds are exceeded.
 511	 */
 512	list_for_each_entry(t, &group->triggers, node) {
 513		u64 growth;
 514
 515		/* Check for stall activity */
 516		if (group->polling_total[t->state] == total[t->state])
 517			continue;
 518
 519		/*
 520		 * Multiple triggers might be looking at the same state,
 521		 * remember to update group->polling_total[] once we've
 522		 * been through all of them. Also remember to extend the
 523		 * polling time if we see new stall activity.
 524		 */
 525		new_stall = true;
 526
 527		/* Calculate growth since last update */
 528		growth = window_update(&t->win, now, total[t->state]);
 529		if (growth < t->threshold)
 530			continue;
 531
 532		/* Limit event signaling to once per window */
 533		if (now < t->last_event_time + t->win.size)
 534			continue;
 535
 536		/* Generate an event */
 537		if (cmpxchg(&t->event, 0, 1) == 0)
 538			wake_up_interruptible(&t->event_wait);
 539		t->last_event_time = now;
 540	}
 541
 542	if (new_stall)
 543		memcpy(group->polling_total, total,
 544				sizeof(group->polling_total));
 545
 546	return now + group->poll_min_period;
 547}
 548
 549/*
 550 * Schedule polling if it's not already scheduled. This is safe to call even
 551 * from hotpaths: although kthread_queue_delayed_work takes the worker->lock
 552 * spinlock, that lock is never contended because the poll_scheduled atomic
 553 * prevents such competition.
 554 */
 555static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
 556{
 557	struct kthread_worker *kworker;
 558
 559	/* Do not reschedule if already scheduled */
 560	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
 561		return;
 562
 563	rcu_read_lock();
 564
 565	kworker = rcu_dereference(group->poll_kworker);
 566	/*
 567	 * kworker might be NULL in case psi_trigger_destroy races with
 568	 * psi_task_change (hotpath) which can't use locks
 569	 */
 570	if (likely(kworker))
 571		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
 572	else
 573		atomic_set(&group->poll_scheduled, 0);
 574
 575	rcu_read_unlock();
 576}
 577
 578static void psi_poll_work(struct kthread_work *work)
 579{
 580	struct kthread_delayed_work *dwork;
 581	struct psi_group *group;
 582	u32 changed_states;
 583	u64 now;
 584
 585	dwork = container_of(work, struct kthread_delayed_work, work);
 586	group = container_of(dwork, struct psi_group, poll_work);
 587
 588	atomic_set(&group->poll_scheduled, 0);
 589
 590	mutex_lock(&group->trigger_lock);
 591
 592	now = sched_clock();
 593
 594	collect_percpu_times(group, PSI_POLL, &changed_states);
 595
 596	if (changed_states & group->poll_states) {
 597		/* Initialize trigger windows when entering polling mode */
 598		if (now > group->polling_until)
 599			init_triggers(group, now);
 600
 601		/*
 602		 * Keep the monitor active for at least the duration of the
 603		 * minimum tracking window as long as monitor states are
 604		 * changing.
 605		 */
 606		group->polling_until = now +
 607			group->poll_min_period * UPDATES_PER_WINDOW;
 608	}
 609
 610	if (now > group->polling_until) {
 611		group->polling_next_update = ULLONG_MAX;
 612		goto out;
 613	}
 614
 615	if (now >= group->polling_next_update)
 616		group->polling_next_update = update_triggers(group, now);
 617
 618	psi_schedule_poll_work(group,
 619		nsecs_to_jiffies(group->polling_next_update - now) + 1);
 620
 621out:
 622	mutex_unlock(&group->trigger_lock);
 623}
 624
 625static void record_times(struct psi_group_cpu *groupc, int cpu,
 626			 bool memstall_tick)
 627{
 628	u32 delta;
 629	u64 now;
 630
 631	now = cpu_clock(cpu);
 632	delta = now - groupc->state_start;
 633	groupc->state_start = now;
 634
 635	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
 636		groupc->times[PSI_IO_SOME] += delta;
 637		if (groupc->state_mask & (1 << PSI_IO_FULL))
 638			groupc->times[PSI_IO_FULL] += delta;
 639	}
 640
 641	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
 642		groupc->times[PSI_MEM_SOME] += delta;
 643		if (groupc->state_mask & (1 << PSI_MEM_FULL))
 644			groupc->times[PSI_MEM_FULL] += delta;
 645		else if (memstall_tick) {
 646			u32 sample;
 647			/*
 648			 * Since we care about lost potential, a
 649			 * memstall is FULL when there are no other
 650			 * working tasks, but also when the CPU is
 651			 * actively reclaiming and nothing productive
 652			 * could run even if it were runnable.
 653			 *
 654			 * When the timer tick sees a reclaiming CPU,
 655			 * regardless of runnable tasks, sample a FULL
 656			 * tick (or less if it hasn't been a full tick
 657			 * since the last state change).
 658			 */
 659			sample = min(delta, (u32)jiffies_to_nsecs(1));
 660			groupc->times[PSI_MEM_FULL] += sample;
 661		}
 662	}
 663
 664	if (groupc->state_mask & (1 << PSI_CPU_SOME))
 665		groupc->times[PSI_CPU_SOME] += delta;
 666
 667	if (groupc->state_mask & (1 << PSI_NONIDLE))
 668		groupc->times[PSI_NONIDLE] += delta;
 669}
 670
 671static u32 psi_group_change(struct psi_group *group, int cpu,
 672			    unsigned int clear, unsigned int set)
 673{
 674	struct psi_group_cpu *groupc;
 675	unsigned int t, m;
 676	enum psi_states s;
 677	u32 state_mask = 0;
 678
 679	groupc = per_cpu_ptr(group->pcpu, cpu);
 680
 681	/*
 682	 * First we assess the aggregate resource states this CPU's
 683	 * tasks have been in since the last change, and account any
 684	 * SOME and FULL time these may have resulted in.
 685	 *
 686	 * Then we update the task counts according to the state
 687	 * change requested through the @clear and @set bits.
 688	 */
 689	write_seqcount_begin(&groupc->seq);
 690
 691	record_times(groupc, cpu, false);
 692
 693	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
 694		if (!(m & (1 << t)))
 695			continue;
 696		if (groupc->tasks[t] == 0 && !psi_bug) {
 697			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
 698					cpu, t, groupc->tasks[0],
 699					groupc->tasks[1], groupc->tasks[2],
 700					clear, set);
 701			psi_bug = 1;
 702		}
 703		groupc->tasks[t]--;
 704	}
 705
 706	for (t = 0; set; set &= ~(1 << t), t++)
 707		if (set & (1 << t))
 708			groupc->tasks[t]++;
 709
 710	/* Calculate state mask representing active states */
 711	for (s = 0; s < NR_PSI_STATES; s++) {
 712		if (test_state(groupc->tasks, s))
 713			state_mask |= (1 << s);
 714	}
 715	groupc->state_mask = state_mask;
 716
 717	write_seqcount_end(&groupc->seq);
 718
 719	return state_mask;
 720}
 721
 722static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
 723{
 724#ifdef CONFIG_CGROUPS
 725	struct cgroup *cgroup = NULL;
 726
 727	if (!*iter)
 728		cgroup = task->cgroups->dfl_cgrp;
 729	else if (*iter == &psi_system)
 730		return NULL;
 731	else
 732		cgroup = cgroup_parent(*iter);
 733
 734	if (cgroup && cgroup_parent(cgroup)) {
 735		*iter = cgroup;
 736		return cgroup_psi(cgroup);
 737	}
 738#else
 739	if (*iter)
 740		return NULL;
 741#endif
 742	*iter = &psi_system;
 743	return &psi_system;
 744}
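/*
 * Illustrative walk (sketch, not part of the original source): for a
 * task in cgroup /a/b, successive calls return the psi_group of b,
 * then of a, and finally &psi_system. The root cgroup itself is
 * skipped; system-wide pressure is what psi_system already tracks.
 */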
 745
 746void psi_task_change(struct task_struct *task, int clear, int set)
 747{
 748	int cpu = task_cpu(task);
 749	struct psi_group *group;
 750	bool wake_clock = true;
 751	void *iter = NULL;
 752
 753	if (!task->pid)
 754		return;
 755
 756	if (((task->psi_flags & set) ||
 757	     (task->psi_flags & clear) != clear) &&
 758	    !psi_bug) {
 759		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
 760				task->pid, task->comm, cpu,
 761				task->psi_flags, clear, set);
 762		psi_bug = 1;
 763	}
 764
 765	task->psi_flags &= ~clear;
 766	task->psi_flags |= set;
 767
 768	/*
 769	 * Periodic aggregation shuts off if there is a period of no
 770	 * task changes, so we wake it back up if necessary. However,
 771	 * don't do this if the task change is the aggregation worker
 772	 * itself going to sleep, or we'll ping-pong forever.
 773	 */
 774	if (unlikely((clear & TSK_RUNNING) &&
 775		     (task->flags & PF_WQ_WORKER) &&
 776		     wq_worker_last_func(task) == psi_avgs_work))
 777		wake_clock = false;
 778
 779	while ((group = iterate_groups(task, &iter))) {
 780		u32 state_mask = psi_group_change(group, cpu, clear, set);
 781
 782		if (state_mask & group->poll_states)
 783			psi_schedule_poll_work(group, 1);
 784
 785		if (wake_clock && !delayed_work_pending(&group->avgs_work))
 786			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 787	}
 788}
 789
 790void psi_memstall_tick(struct task_struct *task, int cpu)
 791{
 792	struct psi_group *group;
 793	void *iter = NULL;
 794
 795	while ((group = iterate_groups(task, &iter))) {
 796		struct psi_group_cpu *groupc;
 797
 798		groupc = per_cpu_ptr(group->pcpu, cpu);
 799		write_seqcount_begin(&groupc->seq);
 800		record_times(groupc, cpu, true);
 801		write_seqcount_end(&groupc->seq);
 802	}
 803}
 804
 805/**
 806 * psi_memstall_enter - mark the beginning of a memory stall section
 807 * @flags: flags to handle nested sections
 808 *
 809 * Marks the calling task as being stalled due to a lack of memory,
 810 * such as waiting for a refault or performing reclaim.
 811 */
 812void psi_memstall_enter(unsigned long *flags)
 813{
 814	struct rq_flags rf;
 815	struct rq *rq;
 816
 817	if (static_branch_likely(&psi_disabled))
 818		return;
 819
 820	*flags = current->flags & PF_MEMSTALL;
 821	if (*flags)
 822		return;
 823	/*
 824	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
 825	 * changes to the task's scheduling state, otherwise we can
 826	 * race with CPU migration.
 827	 */
 828	rq = this_rq_lock_irq(&rf);
 829
 830	current->flags |= PF_MEMSTALL;
 831	psi_task_change(current, 0, TSK_MEMSTALL);
 832
 833	rq_unlock_irq(rq, &rf);
 834}
 835
 836/**
 837 * psi_memstall_leave - mark the end of a memory stall section
 838 * @flags: flags to handle nested memdelay sections
 839 *
 840 * Marks the calling task as no longer stalled due to lack of memory.
 841 */
 842void psi_memstall_leave(unsigned long *flags)
 843{
 844	struct rq_flags rf;
 845	struct rq *rq;
 846
 847	if (static_branch_likely(&psi_disabled))
 848		return;
 849
 850	if (*flags)
 851		return;
 852	/*
 853	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
 854	 * changes to the task's scheduling state, otherwise we could
 855	 * race with CPU migration.
 856	 */
 857	rq = this_rq_lock_irq(&rf);
 858
 859	current->flags &= ~PF_MEMSTALL;
 860	psi_task_change(current, TSK_MEMSTALL, 0);
 861
 862	rq_unlock_irq(rq, &rf);
 863}
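/*
 * Typical annotation pattern for callers (illustrative sketch; memory
 * stall sites in mm/ wrap themselves like this):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... block on memory: direct reclaim, refault wait, etc ...
 *	psi_memstall_leave(&pflags);
 *
 * The flags cookie makes nesting safe: only the outermost section
 * sets and later clears PF_MEMSTALL.
 */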
 864
 865#ifdef CONFIG_CGROUPS
 866int psi_cgroup_alloc(struct cgroup *cgroup)
 867{
 868	if (static_branch_likely(&psi_disabled))
 869		return 0;
 870
 871	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
 872	if (!cgroup->psi.pcpu)
 873		return -ENOMEM;
 874	group_init(&cgroup->psi);
 875	return 0;
 876}
 877
 878void psi_cgroup_free(struct cgroup *cgroup)
 879{
 880	if (static_branch_likely(&psi_disabled))
 881		return;
 882
 883	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 884	free_percpu(cgroup->psi.pcpu);
 885	/* All triggers must be removed by now */
 886	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
 887}
 888
 889/**
 890 * cgroup_move_task - move task to a different cgroup
 891 * @task: the task
 892 * @to: the target css_set
 893 *
 894 * Move task to a new cgroup and safely migrate its associated stall
 895 * state between the different groups.
 896 *
 897 * This function acquires the task's rq lock to lock out concurrent
 898 * changes to the task's scheduling state and - in case the task is
 899 * running - concurrent changes to its stall state.
 900 */
 901void cgroup_move_task(struct task_struct *task, struct css_set *to)
 902{
 903	unsigned int task_flags = 0;
 904	struct rq_flags rf;
 905	struct rq *rq;
 906
 907	if (static_branch_likely(&psi_disabled)) {
 908		/*
 909		 * Lame to do this here, but the scheduler cannot be locked
 910		 * from the outside, so we move cgroups from inside sched/.
 911		 */
 912		rcu_assign_pointer(task->cgroups, to);
 913		return;
 914	}
 915
 916	rq = task_rq_lock(task, &rf);
 917
 918	if (task_on_rq_queued(task))
 919		task_flags = TSK_RUNNING;
 920	else if (task->in_iowait)
 921		task_flags = TSK_IOWAIT;
 922
 923	if (task->flags & PF_MEMSTALL)
 924		task_flags |= TSK_MEMSTALL;
 925
 926	if (task_flags)
 927		psi_task_change(task, task_flags, 0);
 928
 929	/* See comment above */
 930	rcu_assign_pointer(task->cgroups, to);
 931
 932	if (task_flags)
 933		psi_task_change(task, 0, task_flags);
 934
 935	task_rq_unlock(rq, task, &rf);
 936}
 937#endif /* CONFIG_CGROUPS */
 938
 939int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 940{
 941	int full;
 942	u64 now;
 943
 944	if (static_branch_likely(&psi_disabled))
 945		return -EOPNOTSUPP;
 946
 947	/* Update averages before reporting them */
 948	mutex_lock(&group->avgs_lock);
 949	now = sched_clock();
 950	collect_percpu_times(group, PSI_AVGS, NULL);
 951	if (now >= group->avg_next_update)
 952		group->avg_next_update = update_averages(group, now);
 953	mutex_unlock(&group->avgs_lock);
 954
 955	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
 956		unsigned long avg[3];
 957		u64 total;
 958		int w;
 959
 960		for (w = 0; w < 3; w++)
 961			avg[w] = group->avg[res * 2 + full][w];
 962		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
 963				NSEC_PER_USEC);
 964
 965		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
 966			   full ? "full" : "some",
 967			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
 968			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
 969			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
 970			   total);
 971	}
 972
 973	return 0;
 974}
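/*
 * Example of the resulting output, e.g. for /proc/pressure/memory
 * (values are illustrative):
 *
 *	some avg10=0.32 avg60=1.08 avg300=0.70 total=2358403
 *	full avg10=0.00 avg60=0.27 avg300=0.21 total=699176
 *
 * totals are reported in microseconds; the CPU file prints only the
 * "some" line.
 */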
 975
 976static int psi_io_show(struct seq_file *m, void *v)
 977{
 978	return psi_show(m, &psi_system, PSI_IO);
 979}
 980
 981static int psi_memory_show(struct seq_file *m, void *v)
 982{
 983	return psi_show(m, &psi_system, PSI_MEM);
 984}
 985
 986static int psi_cpu_show(struct seq_file *m, void *v)
 987{
 988	return psi_show(m, &psi_system, PSI_CPU);
 989}
 990
 991static int psi_io_open(struct inode *inode, struct file *file)
 992{
 993	return single_open(file, psi_io_show, NULL);
 994}
 995
 996static int psi_memory_open(struct inode *inode, struct file *file)
 997{
 998	return single_open(file, psi_memory_show, NULL);
 999}
1000
1001static int psi_cpu_open(struct inode *inode, struct file *file)
1002{
1003	return single_open(file, psi_cpu_show, NULL);
1004}
1005
1006struct psi_trigger *psi_trigger_create(struct psi_group *group,
1007			char *buf, size_t nbytes, enum psi_res res)
1008{
1009	struct psi_trigger *t;
1010	enum psi_states state;
1011	u32 threshold_us;
1012	u32 window_us;
1013
1014	if (static_branch_likely(&psi_disabled))
1015		return ERR_PTR(-EOPNOTSUPP);
1016
1017	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1018		state = PSI_IO_SOME + res * 2;
1019	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1020		state = PSI_IO_FULL + res * 2;
1021	else
1022		return ERR_PTR(-EINVAL);
1023
1024	if (state >= PSI_NONIDLE)
1025		return ERR_PTR(-EINVAL);
1026
1027	if (window_us < WINDOW_MIN_US ||
1028		window_us > WINDOW_MAX_US)
1029		return ERR_PTR(-EINVAL);
1030
1031	/* Check threshold */
1032	if (threshold_us == 0 || threshold_us > window_us)
1033		return ERR_PTR(-EINVAL);
1034
1035	t = kmalloc(sizeof(*t), GFP_KERNEL);
1036	if (!t)
1037		return ERR_PTR(-ENOMEM);
1038
1039	t->group = group;
1040	t->state = state;
1041	t->threshold = threshold_us * NSEC_PER_USEC;
1042	t->win.size = window_us * NSEC_PER_USEC;
1043	window_reset(&t->win, 0, 0, 0);
1044
1045	t->event = 0;
1046	t->last_event_time = 0;
1047	init_waitqueue_head(&t->event_wait);
1048	kref_init(&t->refcount);
1049
1050	mutex_lock(&group->trigger_lock);
1051
1052	if (!rcu_access_pointer(group->poll_kworker)) {
1053		struct sched_param param = {
1054			.sched_priority = 1,
1055		};
1056		struct kthread_worker *kworker;
1057
1058		kworker = kthread_create_worker(0, "psimon");
1059		if (IS_ERR(kworker)) {
1060			kfree(t);
1061			mutex_unlock(&group->trigger_lock);
1062			return ERR_CAST(kworker);
1063		}
1064		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
1065		kthread_init_delayed_work(&group->poll_work,
1066				psi_poll_work);
1067		rcu_assign_pointer(group->poll_kworker, kworker);
1068	}
1069
1070	list_add(&t->node, &group->triggers);
1071	group->poll_min_period = min(group->poll_min_period,
1072		div_u64(t->win.size, UPDATES_PER_WINDOW));
1073	group->nr_triggers[t->state]++;
1074	group->poll_states |= (1 << t->state);
1075
1076	mutex_unlock(&group->trigger_lock);
1077
1078	return t;
1079}
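/*
 * Example trigger (illustrative): writing "some 150000 1000000" to a
 * pressure file requests an event whenever more than 150ms of "some"
 * stall accumulates within any 1s tracking window. Threshold and
 * window are given in microseconds; the window must lie between
 * WINDOW_MIN_US and WINDOW_MAX_US.
 */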
1080
1081static void psi_trigger_destroy(struct kref *ref)
1082{
1083	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
1084	struct psi_group *group = t->group;
1085	struct kthread_worker *kworker_to_destroy = NULL;
1086
1087	if (static_branch_likely(&psi_disabled))
1088		return;
1089
1090	/*
1091 * Wake up waiters so they stop polling. This can happen if a cgroup is
1092 * deleted from under a polling process.
1093	 */
1094	wake_up_interruptible(&t->event_wait);
1095
1096	mutex_lock(&group->trigger_lock);
1097
1098	if (!list_empty(&t->node)) {
1099		struct psi_trigger *tmp;
1100		u64 period = ULLONG_MAX;
1101
1102		list_del(&t->node);
1103		group->nr_triggers[t->state]--;
1104		if (!group->nr_triggers[t->state])
1105			group->poll_states &= ~(1 << t->state);
1106		/* reset min update period for the remaining triggers */
1107		list_for_each_entry(tmp, &group->triggers, node)
1108			period = min(period, div_u64(tmp->win.size,
1109					UPDATES_PER_WINDOW));
1110		group->poll_min_period = period;
1111		/* Destroy poll_kworker when the last trigger is destroyed */
1112		if (group->poll_states == 0) {
1113			group->polling_until = 0;
1114			kworker_to_destroy = rcu_dereference_protected(
1115					group->poll_kworker,
1116					lockdep_is_held(&group->trigger_lock));
1117			rcu_assign_pointer(group->poll_kworker, NULL);
1118		}
1119	}
1120
1121	mutex_unlock(&group->trigger_lock);
1122
1123	/*
1124	 * Wait for both *trigger_ptr from psi_trigger_replace and
1125	 * poll_kworker RCUs to complete their read-side critical sections
1126	 * before destroying the trigger and optionally the poll_kworker
1127	 */
1128	synchronize_rcu();
1129	/*
1130	 * Destroy the kworker after releasing trigger_lock to prevent a
1131	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1132	 */
1133	if (kworker_to_destroy) {
1134		/*
1135		 * After the RCU grace period has expired, the worker
1136		 * can no longer be found through group->poll_kworker.
1137		 * But it might have been already scheduled before
1138		 * that - deschedule it cleanly before destroying it.
1139		 */
1140		kthread_cancel_delayed_work_sync(&group->poll_work);
1141		atomic_set(&group->poll_scheduled, 0);
1142
1143		kthread_destroy_worker(kworker_to_destroy);
1144	}
1145	kfree(t);
1146}
1147
1148void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
1149{
1150	struct psi_trigger *old = *trigger_ptr;
1151
1152	if (static_branch_likely(&psi_disabled))
1153		return;
1154
1155	rcu_assign_pointer(*trigger_ptr, new);
1156	if (old)
1157		kref_put(&old->refcount, psi_trigger_destroy);
1158}
1159
1160__poll_t psi_trigger_poll(void **trigger_ptr,
1161				struct file *file, poll_table *wait)
1162{
1163	__poll_t ret = DEFAULT_POLLMASK;
1164	struct psi_trigger *t;
1165
1166	if (static_branch_likely(&psi_disabled))
1167		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1168
1169	rcu_read_lock();
1170
1171	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
1172	if (!t) {
1173		rcu_read_unlock();
1174		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1175	}
1176	kref_get(&t->refcount);
1177
1178	rcu_read_unlock();
1179
1180	poll_wait(file, &t->event_wait, wait);
1181
1182	if (cmpxchg(&t->event, 1, 0) == 1)
1183		ret |= EPOLLPRI;
1184
1185	kref_put(&t->refcount, psi_trigger_destroy);
1186
1187	return ret;
1188}
1189
1190static ssize_t psi_write(struct file *file, const char __user *user_buf,
1191			 size_t nbytes, enum psi_res res)
1192{
1193	char buf[32];
1194	size_t buf_size;
1195	struct seq_file *seq;
1196	struct psi_trigger *new;
1197
1198	if (static_branch_likely(&psi_disabled))
1199		return -EOPNOTSUPP;
1200
1201	buf_size = min(nbytes, sizeof(buf));
1202	if (copy_from_user(buf, user_buf, buf_size))
1203		return -EFAULT;
1204
1205	buf[buf_size - 1] = '\0';
1206
1207	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1208	if (IS_ERR(new))
1209		return PTR_ERR(new);
1210
1211	seq = file->private_data;
1212	/* Take seq->lock to protect seq->private from concurrent writes */
1213	mutex_lock(&seq->lock);
1214	psi_trigger_replace(&seq->private, new);
1215	mutex_unlock(&seq->lock);
1216
1217	return nbytes;
1218}
1219
1220static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1221			    size_t nbytes, loff_t *ppos)
1222{
1223	return psi_write(file, user_buf, nbytes, PSI_IO);
1224}
1225
1226static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1227				size_t nbytes, loff_t *ppos)
1228{
1229	return psi_write(file, user_buf, nbytes, PSI_MEM);
1230}
1231
1232static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1233			     size_t nbytes, loff_t *ppos)
1234{
1235	return psi_write(file, user_buf, nbytes, PSI_CPU);
1236}
1237
1238static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1239{
1240	struct seq_file *seq = file->private_data;
1241
1242	return psi_trigger_poll(&seq->private, file, wait);
1243}
1244
1245static int psi_fop_release(struct inode *inode, struct file *file)
1246{
1247	struct seq_file *seq = file->private_data;
1248
1249	psi_trigger_replace(&seq->private, NULL);
1250	return single_release(inode, file);
1251}
1252
1253static const struct file_operations psi_io_fops = {
1254	.open           = psi_io_open,
1255	.read           = seq_read,
1256	.llseek         = seq_lseek,
1257	.write          = psi_io_write,
1258	.poll           = psi_fop_poll,
1259	.release        = psi_fop_release,
1260};
1261
1262static const struct file_operations psi_memory_fops = {
1263	.open           = psi_memory_open,
1264	.read           = seq_read,
1265	.llseek         = seq_lseek,
1266	.write          = psi_memory_write,
1267	.poll           = psi_fop_poll,
1268	.release        = psi_fop_release,
1269};
1270
1271static const struct file_operations psi_cpu_fops = {
1272	.open           = psi_cpu_open,
1273	.read           = seq_read,
1274	.llseek         = seq_lseek,
1275	.write          = psi_cpu_write,
1276	.poll           = psi_fop_poll,
1277	.release        = psi_fop_release,
1278};
1279
1280static int __init psi_proc_init(void)
1281{
1282	proc_mkdir("pressure", NULL);
1283	proc_create("pressure/io", 0, NULL, &psi_io_fops);
1284	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
1285	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
1286	return 0;
1287}
1288module_init(psi_proc_init);
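/*
 * Userspace usage sketch (illustrative; error handling omitted and the
 * trigger values are examples only):
 *
 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	write(fd, "some 150000 1000000", 19);
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLPRI)
 *			... more than 150ms of memory stall in the last 1s ...
 *	}
 *
 * Closing the file descriptor tears the trigger down via psi_fop_release().
 */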
kernel/sched/psi.c, v5.9
   1/*
   2 * Pressure stall information for CPU, memory and IO
   3 *
   4 * Copyright (c) 2018 Facebook, Inc.
   5 * Author: Johannes Weiner <hannes@cmpxchg.org>
   6 *
   7 * Polling support by Suren Baghdasaryan <surenb@google.com>
   8 * Copyright (c) 2018 Google, Inc.
   9 *
  10 * When CPU, memory and IO are contended, tasks experience delays that
  11 * reduce throughput and introduce latencies into the workload. Memory
  12 * and IO contention, in addition, can cause a full loss of forward
  13 * progress in which the CPU goes idle.
  14 *
  15 * This code aggregates individual task delays into resource pressure
  16 * metrics that indicate problems with both workload health and
  17 * resource utilization.
  18 *
  19 *			Model
  20 *
  21 * The time in which a task can execute on a CPU is our baseline for
  22 * productivity. Pressure expresses the amount of time in which this
  23 * potential cannot be realized due to resource contention.
  24 *
  25 * This concept of productivity has two components: the workload and
  26 * the CPU. To measure the impact of pressure on both, we define two
  27 * contention states for a resource: SOME and FULL.
  28 *
  29 * In the SOME state of a given resource, one or more tasks are
  30 * delayed on that resource. This affects the workload's ability to
  31 * perform work, but the CPU may still be executing other tasks.
  32 *
  33 * In the FULL state of a given resource, all non-idle tasks are
  34 * delayed on that resource such that nobody is advancing and the CPU
  35 * goes idle. This leaves both workload and CPU unproductive.
  36 *
  37 * (Naturally, the FULL state doesn't exist for the CPU resource.)
  38 *
  39 *	SOME = nr_delayed_tasks != 0
  40 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
  41 *
  42 * The percentage of wallclock time spent in those compound stall
  43 * states gives pressure numbers between 0 and 100 for each resource,
  44 * where the SOME percentage indicates workload slowdowns and the FULL
  45 * percentage indicates reduced CPU utilization:
  46 *
  47 *	%SOME = time(SOME) / period
  48 *	%FULL = time(FULL) / period
  49 *
  50 *			Multiple CPUs
  51 *
  52 * The more tasks and available CPUs there are, the more work can be
  53 * performed concurrently. This means that the potential that can go
  54 * unrealized due to resource contention *also* scales with non-idle
  55 * tasks and CPUs.
  56 *
  57 * Consider a scenario where 257 number crunching tasks are trying to
  58 * run concurrently on 256 CPUs. If we simply aggregated the task
  59 * states, we would have to conclude a CPU SOME pressure number of
  60 * 100%, since *somebody* is waiting on a runqueue at all
  61 * times. However, that is clearly not the amount of contention the
  62 * workload is experiencing: only one out of 256 possible execution
  63 * threads will be contended at any given time, or about 0.4%.
  64 *
  65 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
  66 * given time *one* of the tasks is delayed due to a lack of memory.
  67 * Again, looking purely at the task state would yield a memory FULL
  68 * pressure number of 0%, since *somebody* is always making forward
  69 * progress. But again this wouldn't capture the amount of execution
  70 * potential lost, which is 1 out of 4 CPUs, or 25%.
  71 *
  72 * To calculate wasted potential (pressure) with multiple processors,
  73 * we have to base our calculation on the number of non-idle tasks in
  74 * conjunction with the number of available CPUs, which is the number
  75 * of potential execution threads. SOME then becomes the proportion of
  76 * delayed tasks to possible threads, and FULL is the share of possible
  77 * threads that are unproductive due to delays:
  78 *
  79 *	threads = min(nr_nonidle_tasks, nr_cpus)
  80 *	   SOME = min(nr_delayed_tasks / threads, 1)
  81 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
  82 *
  83 * For the 257 number crunchers on 256 CPUs, this yields:
  84 *
  85 *	threads = min(257, 256)
  86 *	   SOME = min(1 / 256, 1)             = 0.4%
  87 *	   FULL = (256 - min(257, 256)) / 256 = 0%
  88 *
  89 * For the 1 out of 4 memory-delayed tasks, this yields:
  90 *
  91 *	threads = min(4, 4)
  92 *	   SOME = min(1 / 4, 1)               = 25%
  93 *	   FULL = (4 - min(3, 4)) / 4         = 25%
  94 *
  95 * [ Substitute nr_cpus with 1, and you can see that it's a natural
  96 *   extension of the single-CPU model. ]
  97 *
  98 *			Implementation
  99 *
 100 * To assess the precise time spent in each such state, we would have
 101 * to freeze the system on task changes and start/stop the state
 102 * clocks accordingly. Obviously that doesn't scale in practice.
 103 *
 104 * Because the scheduler aims to distribute the compute load evenly
 105 * among the available CPUs, we can track task state locally to each
 106 * CPU and, at much lower frequency, extrapolate the global state for
 107 * the cumulative stall times and the running averages.
 108 *
 109 * For each runqueue, we track:
 110 *
 111 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 112 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 113 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 114 *
 115 * and then periodically aggregate:
 116 *
 117 *	tNONIDLE = sum(tNONIDLE[i])
 118 *
 119 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 120 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 121 *
 122 *	   %SOME = tSOME / period
 123 *	   %FULL = tFULL / period
 124 *
 125 * This gives us an approximation of pressure that is practical
 126 * cost-wise, yet way more sensitive and accurate than periodic
 127 * sampling of the aggregate task states would be.
 128 */
 129
 130#include "../workqueue_internal.h"
 131#include <linux/sched/loadavg.h>
 132#include <linux/seq_file.h>
 133#include <linux/proc_fs.h>
 134#include <linux/seqlock.h>
 135#include <linux/uaccess.h>
 136#include <linux/cgroup.h>
 137#include <linux/module.h>
 138#include <linux/sched.h>
 139#include <linux/ctype.h>
 140#include <linux/file.h>
 141#include <linux/poll.h>
 142#include <linux/psi.h>
 143#include "sched.h"
 144
 145static int psi_bug __read_mostly;
 146
 147DEFINE_STATIC_KEY_FALSE(psi_disabled);
 148
 149#ifdef CONFIG_PSI_DEFAULT_DISABLED
 150static bool psi_enable;
 151#else
 152static bool psi_enable = true;
 153#endif
 154static int __init setup_psi(char *str)
 155{
 156	return kstrtobool(str, &psi_enable) == 0;
 157}
 158__setup("psi=", setup_psi);
 159
 160/* Running averages - we need to be higher-res than loadavg */
 161#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
 162#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
 163#define EXP_60s		1981		/* 1/exp(2s/60s) */
 164#define EXP_300s	2034		/* 1/exp(2s/300s) */
 165
 166/* PSI trigger definitions */
 167#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
 168#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
 169#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
 170
 171/* Sampling frequency in nanoseconds */
 172static u64 psi_period __read_mostly;
 173
 174/* System-level pressure and stall tracking */
 175static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
 176struct psi_group psi_system = {
 177	.pcpu = &system_group_pcpu,
 178};
 179
 180static void psi_avgs_work(struct work_struct *work);
 181
 182static void group_init(struct psi_group *group)
 183{
 184	int cpu;
 185
 186	for_each_possible_cpu(cpu)
 187		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
 188	group->avg_last_update = sched_clock();
 189	group->avg_next_update = group->avg_last_update + psi_period;
 190	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
 191	mutex_init(&group->avgs_lock);
 192	/* Init trigger-related members */
 193	mutex_init(&group->trigger_lock);
 194	INIT_LIST_HEAD(&group->triggers);
 195	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
 196	group->poll_states = 0;
 197	group->poll_min_period = U32_MAX;
 198	memset(group->polling_total, 0, sizeof(group->polling_total));
 199	group->polling_next_update = ULLONG_MAX;
 200	group->polling_until = 0;
 201	rcu_assign_pointer(group->poll_task, NULL);
 202}
 203
 204void __init psi_init(void)
 205{
 206	if (!psi_enable) {
 207		static_branch_enable(&psi_disabled);
 208		return;
 209	}
 210
 211	psi_period = jiffies_to_nsecs(PSI_FREQ);
 212	group_init(&psi_system);
 213}
 214
 215static bool test_state(unsigned int *tasks, enum psi_states state)
 216{
 217	switch (state) {
 218	case PSI_IO_SOME:
 219		return tasks[NR_IOWAIT];
 220	case PSI_IO_FULL:
 221		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
 222	case PSI_MEM_SOME:
 223		return tasks[NR_MEMSTALL];
 224	case PSI_MEM_FULL:
 225		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
 226	case PSI_CPU_SOME:
 227		return tasks[NR_RUNNING] > tasks[NR_ONCPU];
 228	case PSI_NONIDLE:
 229		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
 230			tasks[NR_RUNNING];
 231	default:
 232		return false;
 233	}
 234}
 235
 236static void get_recent_times(struct psi_group *group, int cpu,
 237			     enum psi_aggregators aggregator, u32 *times,
 238			     u32 *pchanged_states)
 239{
 240	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
 241	u64 now, state_start;
 242	enum psi_states s;
 243	unsigned int seq;
 244	u32 state_mask;
 245
 246	*pchanged_states = 0;
 247
 248	/* Snapshot a coherent view of the CPU state */
 249	do {
 250		seq = read_seqcount_begin(&groupc->seq);
 251		now = cpu_clock(cpu);
 252		memcpy(times, groupc->times, sizeof(groupc->times));
 253		state_mask = groupc->state_mask;
 254		state_start = groupc->state_start;
 255	} while (read_seqcount_retry(&groupc->seq, seq));
 256
 257	/* Calculate state time deltas against the previous snapshot */
 258	for (s = 0; s < NR_PSI_STATES; s++) {
 259		u32 delta;
 260		/*
 261		 * In addition to already concluded states, we also
 262		 * incorporate currently active states on the CPU,
 263		 * since states may last for many sampling periods.
 264		 *
 265		 * This way we keep our delta sampling buckets small
 266		 * (u32) and our reported pressure close to what's
 267		 * actually happening.
 268		 */
 269		if (state_mask & (1 << s))
 270			times[s] += now - state_start;
 271
 272		delta = times[s] - groupc->times_prev[aggregator][s];
 273		groupc->times_prev[aggregator][s] = times[s];
 274
 275		times[s] = delta;
 276		if (delta)
 277			*pchanged_states |= (1 << s);
 278	}
 279}
 280
 281static void calc_avgs(unsigned long avg[3], int missed_periods,
 282		      u64 time, u64 period)
 283{
 284	unsigned long pct;
 285
 286	/* Fill in zeroes for periods of no activity */
 287	if (missed_periods) {
 288		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
 289		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
 290		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
 291	}
 292
 293	/* Sample the most recent active period */
 294	pct = div_u64(time * 100, period);
 295	pct *= FIXED_1;
 296	avg[0] = calc_load(avg[0], EXP_10s, pct);
 297	avg[1] = calc_load(avg[1], EXP_60s, pct);
 298	avg[2] = calc_load(avg[2], EXP_300s, pct);
 299}
 300
 301static void collect_percpu_times(struct psi_group *group,
 302				 enum psi_aggregators aggregator,
 303				 u32 *pchanged_states)
 304{
 305	u64 deltas[NR_PSI_STATES - 1] = { 0, };
 306	unsigned long nonidle_total = 0;
 307	u32 changed_states = 0;
 308	int cpu;
 309	int s;
 310
 311	/*
 312	 * Collect the per-cpu time buckets and average them into a
 313	 * single time sample that is normalized to wallclock time.
 314	 *
 315	 * For averaging, each CPU is weighted by its non-idle time in
 316	 * the sampling period. This eliminates artifacts from uneven
 317	 * loading, or even entirely idle CPUs.
 318	 */
 319	for_each_possible_cpu(cpu) {
 320		u32 times[NR_PSI_STATES];
 321		u32 nonidle;
 322		u32 cpu_changed_states;
 323
 324		get_recent_times(group, cpu, aggregator, times,
 325				&cpu_changed_states);
 326		changed_states |= cpu_changed_states;
 327
 328		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
 329		nonidle_total += nonidle;
 330
 331		for (s = 0; s < PSI_NONIDLE; s++)
 332			deltas[s] += (u64)times[s] * nonidle;
 333	}
 334
 335	/*
 336	 * Integrate the sample into the running statistics that are
 337	 * reported to userspace: the cumulative stall times and the
 338	 * decaying averages.
 339	 *
 340	 * Pressure percentages are sampled at PSI_FREQ. We might be
 341	 * called more often when the user polls more frequently than
 342	 * that; we might be called less often when there is no task
 343	 * activity, thus no data, and clock ticks are sporadic. The
 344	 * below handles both.
 345	 */
 346
 347	/* total= */
 348	for (s = 0; s < NR_PSI_STATES - 1; s++)
 349		group->total[aggregator][s] +=
 350				div_u64(deltas[s], max(nonidle_total, 1UL));
 351
 352	if (pchanged_states)
 353		*pchanged_states = changed_states;
 354}
 355
 356static u64 update_averages(struct psi_group *group, u64 now)
 357{
 358	unsigned long missed_periods = 0;
 359	u64 expires, period;
 360	u64 avg_next_update;
 361	int s;
 362
 363	/* avgX= */
 364	expires = group->avg_next_update;
 365	if (now - expires >= psi_period)
 366		missed_periods = div_u64(now - expires, psi_period);
 367
 368	/*
 369	 * The periodic clock tick can get delayed for various
 370	 * reasons, especially on loaded systems. To avoid clock
 371	 * drift, we schedule the clock in fixed psi_period intervals.
 372	 * But the deltas we sample out of the per-cpu buckets above
 373	 * are based on the actual time elapsing between clock ticks.
 374	 */
 375	avg_next_update = expires + ((1 + missed_periods) * psi_period);
 376	period = now - (group->avg_last_update + (missed_periods * psi_period));
 377	group->avg_last_update = now;
 378
 379	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 380		u32 sample;
 381
 382		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
 383		/*
 384		 * Due to the lockless sampling of the time buckets,
 385		 * recorded time deltas can slip into the next period,
 386		 * which under full pressure can result in samples in
 387		 * excess of the period length.
 388		 *
 389		 * We don't want to report non-sensical pressures in
 390		 * excess of 100%, nor do we want to drop such events
 391		 * on the floor. Instead we punt any overage into the
 392		 * future until pressure subsides. By doing this we
 393		 * don't underreport the occurring pressure curve, we
 394		 * just report it delayed by one period length.
 395		 *
 396		 * The error isn't cumulative. As soon as another
 397		 * delta slips from a period P to P+1, by definition
 398		 * it frees up its time T in P.
 399		 */
 400		if (sample > period)
 401			sample = period;
 402		group->avg_total[s] += sample;
 403		calc_avgs(group->avg[s], missed_periods, sample, period);
 404	}
 405
 406	return avg_next_update;
 407}
 408
 409static void psi_avgs_work(struct work_struct *work)
 410{
 411	struct delayed_work *dwork;
 412	struct psi_group *group;
 413	u32 changed_states;
 414	bool nonidle;
 415	u64 now;
 416
 417	dwork = to_delayed_work(work);
 418	group = container_of(dwork, struct psi_group, avgs_work);
 419
 420	mutex_lock(&group->avgs_lock);
 421
 422	now = sched_clock();
 423
 424	collect_percpu_times(group, PSI_AVGS, &changed_states);
 425	nonidle = changed_states & (1 << PSI_NONIDLE);
 426	/*
 427	 * If there is task activity, periodically fold the per-cpu
 428	 * times and feed samples into the running averages. If things
 429	 * are idle and there is no data to process, stop the clock.
 430	 * Once restarted, we'll catch up the running averages in one
 431	 * go - see calc_avgs() and missed_periods.
 432	 */
 433	if (now >= group->avg_next_update)
 434		group->avg_next_update = update_averages(group, now);
 435
 436	if (nonidle) {
 437		schedule_delayed_work(dwork, nsecs_to_jiffies(
 438				group->avg_next_update - now) + 1);
 439	}
 440
 441	mutex_unlock(&group->avgs_lock);
 442}
 443
 444/* Trigger tracking window manipulations */
 445static void window_reset(struct psi_window *win, u64 now, u64 value,
 446			 u64 prev_growth)
 447{
 448	win->start_time = now;
 449	win->start_value = value;
 450	win->prev_growth = prev_growth;
 451}
 452
 453/*
 454 * PSI growth tracking window update and growth calculation routine.
 455 *
 456 * This approximates a sliding tracking window by interpolating
 457 * partially elapsed windows using historical growth data from the
 458 * previous intervals. This minimizes memory requirements (by not storing
 459 * all the intermediate values in the previous window) and simplifies
 460 * the calculations. It works well because the PSI signal changes only
 461 * in the positive direction and, over relatively small window sizes,
 462 * the growth is close to linear.
 463 */
 464static u64 window_update(struct psi_window *win, u64 now, u64 value)
 465{
 466	u64 elapsed;
 467	u64 growth;
 468
 469	elapsed = now - win->start_time;
 470	growth = value - win->start_value;
 471	/*
 472	 * After each tracking window passes win->start_value and
 473	 * win->start_time get reset and win->prev_growth stores
 474	 * the average per-window growth of the previous window.
 475	 * win->prev_growth is then used to interpolate additional
 476	 * growth from the previous window assuming it was linear.
 477	 */
 478	if (elapsed > win->size)
 479		window_reset(win, now, value, growth);
 480	else {
 481		u32 remaining;
 482
 483		remaining = win->size - elapsed;
 484		growth += div64_u64(win->prev_growth * remaining, win->size);
 485	}
 486
 487	return growth;
 488}
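/*
 * The interpolation above amounts to (illustrative numbers):
 *
 *	growth = (value - win->start_value)
 *	       + win->prev_growth * (win->size - elapsed) / win->size
 *
 * e.g. with a 1s window, 400ms into the current window, 30ms of new
 * stall so far and 100ms of growth in the previous window, the
 * reported growth is 30ms + 100ms * 600/1000 = 90ms: the 600ms of the
 * sliding window that reaches back into the previous window is filled
 * in at that window's average rate.
 */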
 489
 490static void init_triggers(struct psi_group *group, u64 now)
 491{
 492	struct psi_trigger *t;
 493
 494	list_for_each_entry(t, &group->triggers, node)
 495		window_reset(&t->win, now,
 496				group->total[PSI_POLL][t->state], 0);
 497	memcpy(group->polling_total, group->total[PSI_POLL],
 498		   sizeof(group->polling_total));
 499	group->polling_next_update = now + group->poll_min_period;
 500}
 501
 502static u64 update_triggers(struct psi_group *group, u64 now)
 503{
 504	struct psi_trigger *t;
 505	bool new_stall = false;
 506	u64 *total = group->total[PSI_POLL];
 507
 508	/*
 509	 * On subsequent updates, calculate growth deltas and let
 510	 * watchers know when their specified thresholds are exceeded.
 511	 */
 512	list_for_each_entry(t, &group->triggers, node) {
 513		u64 growth;
 514
 515		/* Check for stall activity */
 516		if (group->polling_total[t->state] == total[t->state])
 517			continue;
 518
 519		/*
 520		 * Multiple triggers might be looking at the same state, so
 521		 * update group->polling_total[] only once we've been
 522		 * through all of them. Also remember to extend the
 523		 * polling time if we see new stall activity.
 524		 */
 525		new_stall = true;
 526
 527		/* Calculate growth since last update */
 528		growth = window_update(&t->win, now, total[t->state]);
 529		if (growth < t->threshold)
 530			continue;
 531
 532		/* Limit event signaling to once per window */
 533		if (now < t->last_event_time + t->win.size)
 534			continue;
 535
 536		/* Generate an event */
 537		if (cmpxchg(&t->event, 0, 1) == 0)
 538			wake_up_interruptible(&t->event_wait);
 539		t->last_event_time = now;
 540	}
 541
 542	if (new_stall)
 543		memcpy(group->polling_total, total,
 544				sizeof(group->polling_total));
 545
 546	return now + group->poll_min_period;
 547}
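/*
 * In short, a trigger fires at most once per window, and only when
 * the growth seen since its window started (plus the interpolated
 * share of the previous window) reaches the threshold. For example,
 * a hypothetical "150ms of stall in any 1s window" trigger wakes its
 * waiters once window_update() reports at least 150ms of growth, and
 * then stays quiet for at least one more second of wall clock.
 */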
 548
 549/* Schedule polling if it's not already scheduled. */
 550static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
 551{
 552	struct task_struct *task;
 553
 554	/*
 555	 * Do not reschedule if already scheduled.
 556	 * Possible race with a timer scheduled after this check but before
 557	 * mod_timer below can be tolerated because group->polling_next_update
 558	 * will keep updates on schedule.
 559	 */
 560	if (timer_pending(&group->poll_timer))
 561		return;
 562
 563	rcu_read_lock();
 564
 565	task = rcu_dereference(group->poll_task);
 566	/*
 567	 * The poll task might be NULL if psi_trigger_destroy() races with
 568	 * psi_task_change() (the hotpath), which cannot take locks.
 569	 */
 570	if (likely(task))
 571		mod_timer(&group->poll_timer, jiffies + delay);
 572
 573	rcu_read_unlock();
 574}
 575
 576static void psi_poll_work(struct psi_group *group)
 577{
 578	u32 changed_states;
 579	u64 now;
 580
 581	mutex_lock(&group->trigger_lock);
 582
 583	now = sched_clock();
 584
 585	collect_percpu_times(group, PSI_POLL, &changed_states);
 586
 587	if (changed_states & group->poll_states) {
 588		/* Initialize trigger windows when entering polling mode */
 589		if (now > group->polling_until)
 590			init_triggers(group, now);
 591
 592		/*
 593		 * Keep the monitor active for at least the duration of the
 594		 * minimum tracking window as long as monitor states are
 595		 * changing.
 596		 */
 597		group->polling_until = now +
 598			group->poll_min_period * UPDATES_PER_WINDOW;
 599	}
 600
 601	if (now > group->polling_until) {
 602		group->polling_next_update = ULLONG_MAX;
 603		goto out;
 604	}
 605
 606	if (now >= group->polling_next_update)
 607		group->polling_next_update = update_triggers(group, now);
 608
 609	psi_schedule_poll_work(group,
 610		nsecs_to_jiffies(group->polling_next_update - now) + 1);
 611
 612out:
 613	mutex_unlock(&group->trigger_lock);
 614}
 615
 616static int psi_poll_worker(void *data)
 617{
 618	struct psi_group *group = (struct psi_group *)data;
 619
 620	sched_set_fifo_low(current);
 621
 622	while (true) {
 623		wait_event_interruptible(group->poll_wait,
 624				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
 625				kthread_should_stop());
 626		if (kthread_should_stop())
 627			break;
 628
 629		psi_poll_work(group);
 630	}
 631	return 0;
 632}
 633
 634static void poll_timer_fn(struct timer_list *t)
 635{
 636	struct psi_group *group = from_timer(group, t, poll_timer);
 637
 638	atomic_set(&group->poll_wakeup, 1);
 639	wake_up_interruptible(&group->poll_wait);
 640}
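/*
 * The timer and the worker form a simple handshake: poll_timer_fn()
 * runs from timer (softirq) context, so it only sets poll_wakeup and
 * wakes poll_wait; psi_poll_worker() consumes the flag by cmpxchg-ing
 * it back to 0 before calling psi_poll_work(). A wakeup that arrives
 * while the worker is still aggregating is therefore not lost - the
 * flag stays set and the next wait_event_interruptible() returns
 * immediately.
 */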
 641
 642static void record_times(struct psi_group_cpu *groupc, int cpu,
 643			 bool memstall_tick)
 644{
 645	u32 delta;
 646	u64 now;
 647
 648	now = cpu_clock(cpu);
 649	delta = now - groupc->state_start;
 650	groupc->state_start = now;
 651
 652	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
 653		groupc->times[PSI_IO_SOME] += delta;
 654		if (groupc->state_mask & (1 << PSI_IO_FULL))
 655			groupc->times[PSI_IO_FULL] += delta;
 656	}
 657
 658	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
 659		groupc->times[PSI_MEM_SOME] += delta;
 660		if (groupc->state_mask & (1 << PSI_MEM_FULL))
 661			groupc->times[PSI_MEM_FULL] += delta;
 662		else if (memstall_tick) {
 663			u32 sample;
 664			/*
 665			 * Since we care about lost potential, a
 666			 * memstall is FULL when there are no other
 667			 * working tasks, but also when the CPU is
 668			 * actively reclaiming and nothing productive
 669			 * could run even if it were runnable.
 670			 *
 671			 * When the timer tick sees a reclaiming CPU,
 672			 * regardless of runnable tasks, sample a FULL
 673			 * tick (or less if it hasn't been a full tick
 674			 * since the last state change).
 675			 */
 676			sample = min(delta, (u32)jiffies_to_nsecs(1));
 677			groupc->times[PSI_MEM_FULL] += sample;
 678		}
 679	}
 680
 681	if (groupc->state_mask & (1 << PSI_CPU_SOME))
 682		groupc->times[PSI_CPU_SOME] += delta;
 683
 684	if (groupc->state_mask & (1 << PSI_NONIDLE))
 685		groupc->times[PSI_NONIDLE] += delta;
 686}
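/*
 * Concretely, with HZ=1000 (just an example) one jiffy is 1ms, so a
 * tick that finds the CPU reclaiming adds min(delta, 1,000,000ns) of
 * FULL time - at most one tick's worth, or less if the group state
 * changed more recently than a full tick ago.
 */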
 687
 688static void psi_group_change(struct psi_group *group, int cpu,
 689			     unsigned int clear, unsigned int set,
 690			     bool wake_clock)
 691{
 692	struct psi_group_cpu *groupc;
 693	u32 state_mask = 0;
 694	unsigned int t, m;
 695	enum psi_states s;
 696
 697	groupc = per_cpu_ptr(group->pcpu, cpu);
 698
 699	/*
 700	 * First we assess the aggregate resource states this CPU's
 701	 * tasks have been in since the last change, and account any
 702	 * SOME and FULL time these may have resulted in.
 703	 *
 704	 * Then we update the task counts according to the state
 705	 * change requested through the @clear and @set bits.
 706	 */
 707	write_seqcount_begin(&groupc->seq);
 708
 709	record_times(groupc, cpu, false);
 710
 711	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
 712		if (!(m & (1 << t)))
 713			continue;
 714		if (groupc->tasks[t] == 0 && !psi_bug) {
 715			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
 716					cpu, t, groupc->tasks[0],
 717					groupc->tasks[1], groupc->tasks[2],
 718					groupc->tasks[3], clear, set);
 719			psi_bug = 1;
 720		}
 721		groupc->tasks[t]--;
 722	}
 723
 724	for (t = 0; set; set &= ~(1 << t), t++)
 725		if (set & (1 << t))
 726			groupc->tasks[t]++;
 727
 728	/* Calculate state mask representing active states */
 729	for (s = 0; s < NR_PSI_STATES; s++) {
 730		if (test_state(groupc->tasks, s))
 731			state_mask |= (1 << s);
 732	}
 733	groupc->state_mask = state_mask;
 734
 735	write_seqcount_end(&groupc->seq);
 736
 737	if (state_mask & group->poll_states)
 738		psi_schedule_poll_work(group, 1);
 739
 740	if (wake_clock && !delayed_work_pending(&group->avgs_work))
 741		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 742}
 743
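/*
 * Walk all psi groups that @task contributes to: starting from the
 * task's cgroup on the default hierarchy, each call returns the next
 * ancestor (the root cgroup is skipped) and finally the system-wide
 * psi_system group, after which NULL is returned. @iter holds the
 * position between calls and must start out as NULL. Without
 * CONFIG_CGROUPS, only psi_system is returned.
 */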
 744static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
 745{
 746#ifdef CONFIG_CGROUPS
 747	struct cgroup *cgroup = NULL;
 748
 749	if (!*iter)
 750		cgroup = task->cgroups->dfl_cgrp;
 751	else if (*iter == &psi_system)
 752		return NULL;
 753	else
 754		cgroup = cgroup_parent(*iter);
 755
 756	if (cgroup && cgroup_parent(cgroup)) {
 757		*iter = cgroup;
 758		return cgroup_psi(cgroup);
 759	}
 760#else
 761	if (*iter)
 762		return NULL;
 763#endif
 764	*iter = &psi_system;
 765	return &psi_system;
 766}
 767
 768static void psi_flags_change(struct task_struct *task, int clear, int set)
 769{
 770	if (((task->psi_flags & set) ||
 771	     (task->psi_flags & clear) != clear) &&
 772	    !psi_bug) {
 773		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
 774				task->pid, task->comm, task_cpu(task),
 775				task->psi_flags, clear, set);
 776		psi_bug = 1;
 777	}
 778
 779	task->psi_flags &= ~clear;
 780	task->psi_flags |= set;
 781}
 782
 783void psi_task_change(struct task_struct *task, int clear, int set)
 784{
 785	int cpu = task_cpu(task);
 786	struct psi_group *group;
 787	bool wake_clock = true;
 788	void *iter = NULL;
 789
 790	if (!task->pid)
 791		return;
 792
 793	psi_flags_change(task, clear, set);
 794
 795	/*
 796	 * Periodic aggregation shuts off if there is a period of no
 797	 * task changes, so we wake it back up if necessary. However,
 798	 * don't do this if the task change is the aggregation worker
 799	 * itself going to sleep, or we'll ping-pong forever.
 800	 */
 801	if (unlikely((clear & TSK_RUNNING) &&
 802		     (task->flags & PF_WQ_WORKER) &&
 803		     wq_worker_last_func(task) == psi_avgs_work))
 804		wake_clock = false;
 805
 806	while ((group = iterate_groups(task, &iter)))
 807		psi_group_change(group, cpu, clear, set, wake_clock);
 808}
 809
 810void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 811		     bool sleep)
 812{
 813	struct psi_group *group, *common = NULL;
 814	int cpu = task_cpu(prev);
 815	void *iter;
 816
 817	if (next->pid) {
 818		psi_flags_change(next, 0, TSK_ONCPU);
 819		/*
 820		 * When moving state between tasks, the group that
 821		 * contains them both does not change: we can stop
 822		 * updating the tree once we reach the first common
 823		 * ancestor. Iterate @next's ancestors until we
 824		 * encounter @prev's state.
 825		 */
 826		iter = NULL;
 827		while ((group = iterate_groups(next, &iter))) {
 828			if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
 829				common = group;
 830				break;
 831			}
 832
 833			psi_group_change(group, cpu, 0, TSK_ONCPU, true);
 834		}
 835	}
 836
 837	/*
 838	 * If this is a voluntary sleep, dequeue will have taken care
 839	 * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
 840	 * only need to deal with it during preemption.
 841	 */
 842	if (sleep)
 843		return;
 844
 845	if (prev->pid) {
 846		psi_flags_change(prev, TSK_ONCPU, 0);
 847
 848		iter = NULL;
 849		while ((group = iterate_groups(prev, &iter)) && group != common)
 850			psi_group_change(group, cpu, TSK_ONCPU, 0, true);
 851	}
 852}
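/*
 * Example of the common-ancestor cutoff above: if prev lives in
 * cgroup /A/B and next in /A/C, TSK_ONCPU is set in C (and, on
 * preemption, cleared in B), while A, its ancestors and the system
 * group already account one on-CPU task and are left untouched.
 */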
 853
 854void psi_memstall_tick(struct task_struct *task, int cpu)
 855{
 856	struct psi_group *group;
 857	void *iter = NULL;
 858
 859	while ((group = iterate_groups(task, &iter))) {
 860		struct psi_group_cpu *groupc;
 861
 862		groupc = per_cpu_ptr(group->pcpu, cpu);
 863		write_seqcount_begin(&groupc->seq);
 864		record_times(groupc, cpu, true);
 865		write_seqcount_end(&groupc->seq);
 866	}
 867}
 868
 869/**
 870 * psi_memstall_enter - mark the beginning of a memory stall section
 871 * @flags: flags to handle nested sections
 872 *
 873 * Marks the calling task as being stalled due to a lack of memory,
 874 * such as waiting for a refault or performing reclaim.
 875 */
 876void psi_memstall_enter(unsigned long *flags)
 877{
 878	struct rq_flags rf;
 879	struct rq *rq;
 880
 881	if (static_branch_likely(&psi_disabled))
 882		return;
 883
 884	*flags = current->in_memstall;
 885	if (*flags)
 886		return;
 887	/*
 888	 * in_memstall setting & accounting needs to be atomic wrt
 889	 * changes to the task's scheduling state, otherwise we can
 890	 * race with CPU migration.
 891	 */
 892	rq = this_rq_lock_irq(&rf);
 893
 894	current->in_memstall = 1;
 895	psi_task_change(current, 0, TSK_MEMSTALL);
 896
 897	rq_unlock_irq(rq, &rf);
 898}
 899
 900/**
 901 * psi_memstall_leave - mark the end of a memory stall section
 902 * @flags: flags to handle nested memory stall sections
 903 *
 904 * Marks the calling task as no longer stalled due to lack of memory.
 905 */
 906void psi_memstall_leave(unsigned long *flags)
 907{
 908	struct rq_flags rf;
 909	struct rq *rq;
 910
 911	if (static_branch_likely(&psi_disabled))
 912		return;
 913
 914	if (*flags)
 915		return;
 916	/*
 917	 * in_memstall clearing & accounting needs to be atomic wrt
 918	 * changes to the task's scheduling state, otherwise we could
 919	 * race with CPU migration.
 920	 */
 921	rq = this_rq_lock_irq(&rf);
 922
 923	current->in_memstall = 0;
 924	psi_task_change(current, TSK_MEMSTALL, 0);
 925
 926	rq_unlock_irq(rq, &rf);
 927}
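/*
 * Typical usage in reclaim or refault paths (sketch):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... block on, or perform, memory reclaim ...
 *	psi_memstall_leave(&pflags);
 *
 * The saved flags make nesting safe: only the outermost pair actually
 * toggles current->in_memstall and the TSK_MEMSTALL accounting.
 */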
 928
 929#ifdef CONFIG_CGROUPS
 930int psi_cgroup_alloc(struct cgroup *cgroup)
 931{
 932	if (static_branch_likely(&psi_disabled))
 933		return 0;
 934
 935	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
 936	if (!cgroup->psi.pcpu)
 937		return -ENOMEM;
 938	group_init(&cgroup->psi);
 939	return 0;
 940}
 941
 942void psi_cgroup_free(struct cgroup *cgroup)
 943{
 944	if (static_branch_likely(&psi_disabled))
 945		return;
 946
 947	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 948	free_percpu(cgroup->psi.pcpu);
 949	/* All triggers must be removed by now */
 950	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
 951}
 952
 953/**
 954 * cgroup_move_task - move task to a different cgroup
 955 * @task: the task
 956 * @to: the target css_set
 957 *
 958 * Move task to a new cgroup and safely migrate its associated stall
 959 * state between the different groups.
 960 *
 961 * This function acquires the task's rq lock to lock out concurrent
 962 * changes to the task's scheduling state and - in case the task is
 963 * running - concurrent changes to its stall state.
 964 */
 965void cgroup_move_task(struct task_struct *task, struct css_set *to)
 966{
 967	unsigned int task_flags = 0;
 968	struct rq_flags rf;
 969	struct rq *rq;
 970
 971	if (static_branch_likely(&psi_disabled)) {
 972		/*
 973		 * Lame to do this here, but the scheduler cannot be locked
 974		 * from the outside, so we move cgroups from inside sched/.
 975		 */
 976		rcu_assign_pointer(task->cgroups, to);
 977		return;
 978	}
 979
 980	rq = task_rq_lock(task, &rf);
 981
 982	if (task_on_rq_queued(task)) {
 983		task_flags = TSK_RUNNING;
 984		if (task_current(rq, task))
 985			task_flags |= TSK_ONCPU;
 986	} else if (task->in_iowait)
 987		task_flags = TSK_IOWAIT;
 988
 989	if (task->in_memstall)
 990		task_flags |= TSK_MEMSTALL;
 991
 992	if (task_flags)
 993		psi_task_change(task, task_flags, 0);
 994
 995	/* See comment above */
 996	rcu_assign_pointer(task->cgroups, to);
 997
 998	if (task_flags)
 999		psi_task_change(task, 0, task_flags);
1000
1001	task_rq_unlock(rq, task, &rf);
1002}
1003#endif /* CONFIG_CGROUPS */
1004
1005int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1006{
1007	int full;
1008	u64 now;
1009
1010	if (static_branch_likely(&psi_disabled))
1011		return -EOPNOTSUPP;
1012
1013	/* Update averages before reporting them */
1014	mutex_lock(&group->avgs_lock);
1015	now = sched_clock();
1016	collect_percpu_times(group, PSI_AVGS, NULL);
1017	if (now >= group->avg_next_update)
1018		group->avg_next_update = update_averages(group, now);
1019	mutex_unlock(&group->avgs_lock);
1020
1021	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
1022		unsigned long avg[3];
1023		u64 total;
1024		int w;
1025
1026		for (w = 0; w < 3; w++)
1027			avg[w] = group->avg[res * 2 + full][w];
1028		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1029				NSEC_PER_USEC);
1030
1031		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1032			   full ? "full" : "some",
1033			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1034			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1035			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1036			   total);
1037	}
1038
1039	return 0;
1040}
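/*
 * The resulting output of /proc/pressure/<resource> (or of a cgroup's
 * pressure file) looks like this, with made-up numbers:
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157622151
 *	full avg10=0.00 avg60=0.13 avg300=0.06 total=22491632
 *
 * The avgX fields are decaying-average pressure percentages over the
 * last 10, 60 and 300 seconds, total is the cumulative stall time in
 * microseconds, and the CPU resource omits the full line.
 */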
1041
1042static int psi_io_show(struct seq_file *m, void *v)
1043{
1044	return psi_show(m, &psi_system, PSI_IO);
1045}
1046
1047static int psi_memory_show(struct seq_file *m, void *v)
1048{
1049	return psi_show(m, &psi_system, PSI_MEM);
1050}
1051
1052static int psi_cpu_show(struct seq_file *m, void *v)
1053{
1054	return psi_show(m, &psi_system, PSI_CPU);
1055}
1056
1057static int psi_io_open(struct inode *inode, struct file *file)
1058{
1059	return single_open(file, psi_io_show, NULL);
1060}
1061
1062static int psi_memory_open(struct inode *inode, struct file *file)
1063{
1064	return single_open(file, psi_memory_show, NULL);
1065}
1066
1067static int psi_cpu_open(struct inode *inode, struct file *file)
1068{
1069	return single_open(file, psi_cpu_show, NULL);
1070}
1071
1072struct psi_trigger *psi_trigger_create(struct psi_group *group,
1073			char *buf, size_t nbytes, enum psi_res res)
1074{
1075	struct psi_trigger *t;
1076	enum psi_states state;
1077	u32 threshold_us;
1078	u32 window_us;
1079
1080	if (static_branch_likely(&psi_disabled))
1081		return ERR_PTR(-EOPNOTSUPP);
1082
1083	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1084		state = PSI_IO_SOME + res * 2;
1085	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1086		state = PSI_IO_FULL + res * 2;
1087	else
1088		return ERR_PTR(-EINVAL);
1089
1090	if (state >= PSI_NONIDLE)
1091		return ERR_PTR(-EINVAL);
1092
1093	if (window_us < WINDOW_MIN_US ||
1094		window_us > WINDOW_MAX_US)
1095		return ERR_PTR(-EINVAL);
1096
1097	/* Check threshold */
1098	if (threshold_us == 0 || threshold_us > window_us)
1099		return ERR_PTR(-EINVAL);
1100
1101	t = kmalloc(sizeof(*t), GFP_KERNEL);
1102	if (!t)
1103		return ERR_PTR(-ENOMEM);
1104
1105	t->group = group;
1106	t->state = state;
1107	t->threshold = threshold_us * NSEC_PER_USEC;
1108	t->win.size = window_us * NSEC_PER_USEC;
1109	window_reset(&t->win, 0, 0, 0);
1110
1111	t->event = 0;
1112	t->last_event_time = 0;
1113	init_waitqueue_head(&t->event_wait);
1114	kref_init(&t->refcount);
1115
1116	mutex_lock(&group->trigger_lock);
1117
1118	if (!rcu_access_pointer(group->poll_task)) {
1119		struct task_struct *task;
1120
1121		task = kthread_create(psi_poll_worker, group, "psimon");
1122		if (IS_ERR(task)) {
1123			kfree(t);
1124			mutex_unlock(&group->trigger_lock);
1125			return ERR_CAST(task);
1126		}
1127		atomic_set(&group->poll_wakeup, 0);
1128		init_waitqueue_head(&group->poll_wait);
1129		wake_up_process(task);
1130		timer_setup(&group->poll_timer, poll_timer_fn, 0);
1131		rcu_assign_pointer(group->poll_task, task);
1132	}
1133
1134	list_add(&t->node, &group->triggers);
1135	group->poll_min_period = min(group->poll_min_period,
1136		div_u64(t->win.size, UPDATES_PER_WINDOW));
1137	group->nr_triggers[t->state]++;
1138	group->poll_states |= (1 << t->state);
1139
1140	mutex_unlock(&group->trigger_lock);
1141
1142	return t;
1143}
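/*
 * Note on the poll cadence chosen above: each trigger asks for its
 * window to be sampled UPDATES_PER_WINDOW times (a constant defined
 * earlier), and the group polls at the fastest rate any of its
 * triggers requires. Assuming UPDATES_PER_WINDOW is 10, a trigger
 * with a 1s window yields a 100ms poll_min_period; adding a second
 * trigger with a 500ms window tightens that to 50ms for the group.
 */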
1144
1145static void psi_trigger_destroy(struct kref *ref)
1146{
1147	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
1148	struct psi_group *group = t->group;
1149	struct task_struct *task_to_destroy = NULL;
1150
1151	if (static_branch_likely(&psi_disabled))
1152		return;
1153
1154	/*
1155	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
1156	 * from under a polling process.
1157	 */
1158	wake_up_interruptible(&t->event_wait);
1159
1160	mutex_lock(&group->trigger_lock);
1161
1162	if (!list_empty(&t->node)) {
1163		struct psi_trigger *tmp;
1164		u64 period = ULLONG_MAX;
1165
1166		list_del(&t->node);
1167		group->nr_triggers[t->state]--;
1168		if (!group->nr_triggers[t->state])
1169			group->poll_states &= ~(1 << t->state);
1170		/* reset min update period for the remaining triggers */
1171		list_for_each_entry(tmp, &group->triggers, node)
1172			period = min(period, div_u64(tmp->win.size,
1173					UPDATES_PER_WINDOW));
1174		group->poll_min_period = period;
1175		/* Destroy poll_task when the last trigger is destroyed */
1176		if (group->poll_states == 0) {
1177			group->polling_until = 0;
1178			task_to_destroy = rcu_dereference_protected(
1179					group->poll_task,
1180					lockdep_is_held(&group->trigger_lock));
1181			rcu_assign_pointer(group->poll_task, NULL);
1182		}
1183	}
1184
1185	mutex_unlock(&group->trigger_lock);
1186
1187	/*
1188	 * Wait for the RCU read-side critical sections on both *trigger_ptr
1189	 * (from psi_trigger_replace()) and poll_task to complete before
1190	 * destroying the trigger and, if necessary, the poll task.
1191	 */
1192	synchronize_rcu();
1193	/*
1194	 * Destroy the poll kthread after releasing trigger_lock to prevent
1195	 * a deadlock while psi_poll_work() waits to acquire trigger_lock.
1196	 */
1197	if (task_to_destroy) {
1198		/*
1199		 * After the RCU grace period has expired, the worker
1200		 * can no longer be found through group->poll_task.
1201		 * But it might have been already scheduled before
1202		 * that - deschedule it cleanly before destroying it.
1203		 */
1204		del_timer_sync(&group->poll_timer);
1205		kthread_stop(task_to_destroy);
1206	}
1207	kfree(t);
1208}
1209
1210void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
1211{
1212	struct psi_trigger *old = *trigger_ptr;
1213
1214	if (static_branch_likely(&psi_disabled))
1215		return;
1216
1217	rcu_assign_pointer(*trigger_ptr, new);
1218	if (old)
1219		kref_put(&old->refcount, psi_trigger_destroy);
1220}
1221
1222__poll_t psi_trigger_poll(void **trigger_ptr,
1223				struct file *file, poll_table *wait)
1224{
1225	__poll_t ret = DEFAULT_POLLMASK;
1226	struct psi_trigger *t;
1227
1228	if (static_branch_likely(&psi_disabled))
1229		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1230
1231	rcu_read_lock();
1232
1233	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
1234	if (!t) {
1235		rcu_read_unlock();
1236		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1237	}
1238	kref_get(&t->refcount);
1239
1240	rcu_read_unlock();
1241
1242	poll_wait(file, &t->event_wait, wait);
1243
1244	if (cmpxchg(&t->event, 1, 0) == 1)
1245		ret |= EPOLLPRI;
1246
1247	kref_put(&t->refcount, psi_trigger_destroy);
1248
1249	return ret;
1250}
1251
1252static ssize_t psi_write(struct file *file, const char __user *user_buf,
1253			 size_t nbytes, enum psi_res res)
1254{
1255	char buf[32];
1256	size_t buf_size;
1257	struct seq_file *seq;
1258	struct psi_trigger *new;
1259
1260	if (static_branch_likely(&psi_disabled))
1261		return -EOPNOTSUPP;
1262
1263	if (!nbytes)
1264		return -EINVAL;
1265
1266	buf_size = min(nbytes, sizeof(buf));
1267	if (copy_from_user(buf, user_buf, buf_size))
1268		return -EFAULT;
1269
1270	buf[buf_size - 1] = '\0';
1271
1272	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1273	if (IS_ERR(new))
1274		return PTR_ERR(new);
1275
1276	seq = file->private_data;
1277	/* Take seq->lock to protect seq->private from concurrent writes */
1278	mutex_lock(&seq->lock);
1279	psi_trigger_replace(&seq->private, new);
1280	mutex_unlock(&seq->lock);
1281
1282	return nbytes;
1283}
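/*
 * From userspace (illustrative values): writing e.g. "some 150000
 * 1000000" to /proc/pressure/memory installs a trigger for 150ms of
 * SOME memory stall within any 1s window - both numbers are in
 * microseconds, as parsed by psi_trigger_create(). A subsequent
 * poll() or epoll_wait() on the same open file descriptor then
 * returns EPOLLPRI whenever the trigger fires.
 */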
1284
1285static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1286			    size_t nbytes, loff_t *ppos)
1287{
1288	return psi_write(file, user_buf, nbytes, PSI_IO);
1289}
1290
1291static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1292				size_t nbytes, loff_t *ppos)
1293{
1294	return psi_write(file, user_buf, nbytes, PSI_MEM);
1295}
1296
1297static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1298			     size_t nbytes, loff_t *ppos)
1299{
1300	return psi_write(file, user_buf, nbytes, PSI_CPU);
1301}
1302
1303static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1304{
1305	struct seq_file *seq = file->private_data;
1306
1307	return psi_trigger_poll(&seq->private, file, wait);
1308}
1309
1310static int psi_fop_release(struct inode *inode, struct file *file)
1311{
1312	struct seq_file *seq = file->private_data;
1313
1314	psi_trigger_replace(&seq->private, NULL);
1315	return single_release(inode, file);
1316}
1317
1318static const struct proc_ops psi_io_proc_ops = {
1319	.proc_open	= psi_io_open,
1320	.proc_read	= seq_read,
1321	.proc_lseek	= seq_lseek,
1322	.proc_write	= psi_io_write,
1323	.proc_poll	= psi_fop_poll,
1324	.proc_release	= psi_fop_release,
1325};
1326
1327static const struct proc_ops psi_memory_proc_ops = {
1328	.proc_open	= psi_memory_open,
1329	.proc_read	= seq_read,
1330	.proc_lseek	= seq_lseek,
1331	.proc_write	= psi_memory_write,
1332	.proc_poll	= psi_fop_poll,
1333	.proc_release	= psi_fop_release,
1334};
1335
1336static const struct proc_ops psi_cpu_proc_ops = {
1337	.proc_open	= psi_cpu_open,
1338	.proc_read	= seq_read,
1339	.proc_lseek	= seq_lseek,
1340	.proc_write	= psi_cpu_write,
1341	.proc_poll	= psi_fop_poll,
1342	.proc_release	= psi_fop_release,
1343};
1344
1345static int __init psi_proc_init(void)
1346{
1347	if (psi_enable) {
1348		proc_mkdir("pressure", NULL);
1349		proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
1350		proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
1351		proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
1352	}
1353	return 0;
1354}
1355module_init(psi_proc_init);