v3.15
 
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for cpus
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include "cpupri.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
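
/*
 * Illustration (editor's note, not part of the original source): with
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140, and the v3.15 cpupri constants
 * (CPUPRI_INVALID == -1, CPUPRI_IDLE == 0, CPUPRI_NORMAL == 1), the
 * conversion above works out to:
 *
 *	p->prio		cpupri
 *	 -1		 -1	(CPUPRI_INVALID)
 *	140		  0	(CPUPRI_IDLE)
 *	100 ... 139	  1	(CPUPRI_NORMAL)
 *	 99		  2	(lowest RT priority)
 *	...
 *	  0		101	(highest RT priority)
 */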

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racey, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
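
/*
 * Editor's note (illustrative, not part of the original source): a typical
 * caller such as the RT push/pull logic passes a scratch cpumask and then
 * picks one CPU out of lowest_mask, roughly:
 *
 *	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 *		return -1;	// no CPU runs below the task's priority
 *	if (cpumask_test_cpu(cpu, lowest_mask))
 *		return cpu;	// prefer the task's current CPU
 */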

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the cpu was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic_inc();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic_inc();

		/*
		 * When removing from the vector, we decrement the counter first
		 * do a memory barrier and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic_inc();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
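
/*
 * Editor's note (worked example, not part of the original source): suppose a
 * CPU moves from RT50 to RT80 while another CPU is scanning pri_to_cpu[] in
 * cpupri_find().  Because the RT80 vector is populated (mask, then count)
 * before the RT50 vector is torn down (count, then mask), a concurrent scan
 * observes that CPU in at least one of the two vectors and cannot miss it
 * entirely; at worst it gets a stale answer that the rebalancer corrects.
 */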

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
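
To make the header comment's two-level lookup concrete, here is a minimal,
self-contained userspace sketch (an editor's addition, not kernel code) of the
same idea: one occupancy count plus one CPU bitmask per priority level, with a
CPU added to its new level before being removed from the old one.  The names
toy_cpupri, toy_set and toy_find are invented for the illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NR_PRIO	4		/* e.g. IDLE, NORMAL, RT1, RT2 */
#define TOY_NR_CPUS	64

struct toy_cpupri {
	int      count[TOY_NR_PRIO];	/* how many CPUs sit at each level */
	uint64_t mask[TOY_NR_PRIO];	/* which CPUs sit at each level    */
	int      cpu_to_pri[TOY_NR_CPUS];
};

/* Move @cpu to level @newpri: add it to the new vector, then remove it from
 * the old one, mirroring the add-before-remove order used by cpupri_set(). */
static void toy_set(struct toy_cpupri *cp, int cpu, int newpri)
{
	int oldpri = cp->cpu_to_pri[cpu];

	cp->mask[newpri] |= 1ULL << cpu;
	cp->count[newpri]++;
	if (oldpri >= 0) {
		cp->count[oldpri]--;
		cp->mask[oldpri] &= ~(1ULL << cpu);
	}
	cp->cpu_to_pri[cpu] = newpri;
}

/* Report CPUs running below @task_pri that intersect @allowed: one count
 * check plus one mask AND per level, as the header comment describes. */
static bool toy_find(struct toy_cpupri *cp, int task_pri,
		     uint64_t allowed, uint64_t *lowest)
{
	for (int idx = 0; idx < task_pri; idx++) {
		if (!cp->count[idx])
			continue;
		if (!(cp->mask[idx] & allowed))
			continue;
		*lowest = cp->mask[idx] & allowed;
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_cpupri cp = { .cpu_to_pri = { [0 ... TOY_NR_CPUS - 1] = -1 } };
	uint64_t lowest;

	toy_set(&cp, 0, 1);	/* CPU0 picks up a NORMAL task        */
	toy_set(&cp, 1, 3);	/* CPU1 picks up a high-prio RT task  */

	if (toy_find(&cp, 2, ~0ULL, &lowest))	/* waking task at level 2 (RT1) */
		printf("push candidates: 0x%llx\n", (unsigned long long)lowest);
	return 0;
}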
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), NORMAL, RT1, ... RT99, HIGHER
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(101, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 */

/*
 * p->rt_priority   p->prio   newpri   cpupri
 *
 *				  -1       -1 (CPUPRI_INVALID)
 *
 *				  99        0 (CPUPRI_NORMAL)
 *
 *		1        98       98        1
 *	      ...
 *	       49        50       50       49
 *	       50        49       49       50
 *	      ...
 *	       99         0        0       99
 *
 *				 100	  100 (CPUPRI_HIGHER)
 */
static int convert_prio(int prio)
{
	int cpupri;

	switch (prio) {
	case CPUPRI_INVALID:
		cpupri = CPUPRI_INVALID;	/* -1 */
		break;

	case 0 ... 98:
		cpupri = MAX_RT_PRIO-1 - prio;	/* 1 ... 99 */
		break;

	case MAX_RT_PRIO-1:
		cpupri = CPUPRI_NORMAL;		/*  0 */
		break;

	case MAX_RT_PRIO:
		cpupri = CPUPRI_HIGHER;		/* 100 */
		break;
	}

	return cpupri;
}

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
{
	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 *  Ideally, we only want to look at masks that are set.
	 *
	 *  If a mask is not set, then the only thing wrong is that we
	 *  did a little more work than necessary.
	 *
	 *  If we read a zero count but the mask is set, because of the
	 *  memory barriers, that can only happen when the highest prio
	 *  task for a run queue has left the run queue, in which case,
	 *  it will be followed by a pull. If the task we are processing
	 *  fails to find a proper place to go, that pull request will
	 *  pull this task if the run queue is running at a lower
	 *  priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask.  If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits a specific criteria so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;

	WARN_ON_ONCE(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fit the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task
		 * continue looking
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task in the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were over-committed, we try
	 * to spread which is what the scheduler traditionally did. Sys admins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter first
		 * do a memory barrier and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
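
Relative to the v3.15 code above, the v6.2 cpupri_find_fitness() adds one
twist: CPUs that pass the priority test can still be vetoed by a
caller-supplied fitness_fn, and if that filtering empties every level the
search is retried without the filter, so priority still wins over fitness.
Below is a minimal standalone sketch of that control flow (an editor's
addition; toy_find_fitness and the two masks are invented test data, not
kernel APIs).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend CPUs 0-3 run below the task's priority, but only CPUs 2 and 3
 * "fit" it (e.g. have enough capacity). */
static const uint64_t lower_prio_mask = 0x0f;
static const uint64_t fitting_mask    = 0x0c;

static bool toy_find_fitness(bool use_fitness, uint64_t *lowest)
{
	uint64_t mask = lower_prio_mask;

	if (use_fitness)
		mask &= fitting_mask;		/* drop unfitting CPUs */

	if (mask) {
		*lowest = mask;
		return true;
	}

	/* Nothing fits: retry without the fitness criterion, mirroring the
	 * cpupri_find() fallback at the end of cpupri_find_fitness(). */
	return use_fitness ? toy_find_fitness(false, lowest) : false;
}

int main(void)
{
	uint64_t lowest;

	if (toy_find_fitness(true, &lowest))
		printf("candidates: 0x%llx\n", (unsigned long long)lowest);
	return 0;
}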