v4.17
 
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */
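
/*
 * For reference (an editorial aside, not part of this file): the companion
 * header kernel/sched/cpupri.h declares roughly the following shape, which
 * is the "2 dimensional bitmap" described above: one cpumask per priority
 * level plus a per-CPU back-pointer into that array.
 *
 *	#define CPUPRI_NR_PRIORITIES	(MAX_RT_PRIO + 2)
 *
 *	#define CPUPRI_INVALID		-1
 *	#define CPUPRI_IDLE		 0
 *	#define CPUPRI_NORMAL		 1
 *	// values 2-101 are RT priorities 0-99
 *
 *	struct cpupri_vec {
 *		atomic_t		count;
 *		cpumask_var_t		mask;
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec	pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int			*cpu_to_pri;
 *	};
 */
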
#include "sched.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
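
/*
 * Illustrative sketch (not part of cpupri.c): a hypothetical self-check of
 * the mapping above, assuming the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140.  Lower task->prio means higher priority, so RT prio 0
 * lands in the top cpupri slot.
 */
static void __init cpupri_convert_prio_selftest(void)
{
	WARN_ON(convert_prio(MAX_PRIO) != CPUPRI_IDLE);		/* idle       */
	WARN_ON(convert_prio(DEFAULT_PRIO) != CPUPRI_NORMAL);	/* CFS task   */
	WARN_ON(convert_prio(MAX_RT_PRIO - 1) != 2);		/* RT1        */
	WARN_ON(convert_prio(0) != MAX_RT_PRIO + 1);		/* RT99 = 101 */
}
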
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racey, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
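
/*
 * Illustrative sketch (not part of cpupri.c): the typical caller pattern,
 * loosely modelled on how the RT scheduler's push logic consumes the result.
 * example_lowest_mask is a made-up per-CPU scratch mask assumed to have been
 * allocated with alloc_cpumask_var() at boot; error handling is trimmed.
 */
static DEFINE_PER_CPU(cpumask_var_t, example_lowest_mask);

static int example_pick_lowest_cpu(struct task_struct *p)
{
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(example_lowest_mask);

	/* Ask cpupri for all CPUs running at a lower priority than p. */
	if (!cpupri_find(&task_rq(p)->rd->cpupri, p, lowest_mask))
		return -1;	/* no CPU is running below p's priority */

	/* Prefer the CPU the task last ran on if it qualifies... */
	if (cpumask_test_cpu(task_cpu(p), lowest_mask))
		return task_cpu(p);

	/* ...otherwise any CPU from the mask will do for this sketch. */
	return cpumask_any(lowest_mask);
}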

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter first
		 * do a memory barrier and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
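
/*
 * Illustrative sketch (not part of cpupri.c): how a caller keeps the map in
 * sync, loosely modelled on the RT scheduler's online/offline and
 * priority-change hooks.  Assumes rq->rt.highest_prio.curr tracks the
 * highest-priority RT task queued on the CPU and that rq->lock is held.
 */
static void example_update_cpupri(struct rq *rq, bool online)
{
	if (online)
		/* Advertise the priority this CPU is effectively running at. */
		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
	else
		/* CPU leaves the root domain: make it ineligible for routing. */
		cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}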

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}

v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 */
#include "sched.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
{
	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racey, but we can deal with it.
	 *  Ideally, we only want to look at masks that are set.
	 *
	 *  If a mask is not set, then the only thing wrong is that we
	 *  did a little more work than necessary.
	 *
	 *  If we read a zero count but the mask is set, because of the
	 *  memory barriers, that can only happen when the highest prio
	 *  task for a run queue has left the run queue, in which case,
	 *  it will be followed by a pull. If the task we are processing
	 *  fails to find a proper place to go, that pull request will
	 *  pull this task if the run queue is running at a lower
	 *  priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask.  If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits a specific criteria so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fit the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task
		 * continue looking
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task in the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were overcommitted, we try
	 * to spread which is what the scheduler traditionally did. Sys admins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}
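
/*
 * Illustrative sketch (not part of cpupri.c): a hypothetical fitness
 * callback in the spirit of the capacity-awareness user mentioned above.
 * example_task_min_capacity() is a made-up helper standing in for whatever
 * estimate of the task's required capacity a real caller would use, and the
 * threshold chosen here is an assumption made only for the example.
 */
static unsigned long example_task_min_capacity(struct task_struct *p)
{
	/* Placeholder: a real caller would derive this from utilization/uclamp. */
	return SCHED_CAPACITY_SCALE / 2;
}

static bool example_fits_capacity(struct task_struct *p, int cpu)
{
	/* Keep only CPUs whose capacity covers what the task needs. */
	return arch_scale_cpu_capacity(cpu) >= example_task_min_capacity(p);
}

/* A caller would then combine priority and fitness in a single search: */
static int example_find_fitting_cpu(struct cpupri *cp, struct task_struct *p,
				    struct cpumask *mask)
{
	return cpupri_find_fitness(cp, p, mask, example_fits_capacity);
}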

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter first
		 * do a memory barrier and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
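
/*
 * Illustrative sketch (not part of cpupri.c): the expected init/use/cleanup
 * pairing, loosely modelled on how a root domain embeds a cpupri context.
 * Error handling is reduced to the minimum needed for the example.
 */
static int example_cpupri_lifetime(struct cpupri *cp)
{
	if (cpupri_init(cp))
		return -ENOMEM;	/* a per-priority mask or the cpu_to_pri map failed */

	/* ... cpupri_set()/cpupri_find() may be used while the domain lives ... */

	cpupri_cleanup(cp);	/* frees the per-priority masks and cpu_to_pri */
	return 0;
}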