v4.10.11
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in one of the
 *  following states:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a two-dimensional bitmap (the first dimension for priority class, the
 *  second for CPUs in that class).  Therefore a typical application without
 *  affinity restrictions can find a suitable CPU with O(1) complexity (e.g.
 *  two bit searches).  For tasks with affinity restrictions, the algorithm
 *  has a worst-case complexity of O(min(102, nr_domcpus)), though the
 *  scenario that yields the worst-case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */
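For context while reading the comment above: the 102 levels map to array indices via a handful of constants in the companion kernel/sched/cpupri.h. The excerpt below is reproduced from memory as a reader aid, so treat the exact text as approximate:

/* From kernel/sched/cpupri.h (same era; quoted from memory): */
#define CPUPRI_NR_PRIORITIES	(MAX_RT_PRIO + 2)	/* 102 levels */

#define CPUPRI_INVALID	-1	/* CPU not eligible for routing */
#define CPUPRI_IDLE	 0	/* CPU is idle                  */
#define CPUPRI_NORMAL	 1	/* CPU runs only non-RT tasks   */
/* values 2-101 correspond to RT priorities 0-99 */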

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include "cpupri.h"

/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
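The mapping is easiest to see with a few concrete values. The standalone userspace sketch below re-implements the same arithmetic with the kernel constants hard-coded (MAX_RT_PRIO = 100 and MAX_PRIO = 140, their long-standing values); it is an illustration for experimentation, not kernel code:

#include <stdio.h>

/* Hard-coded kernel constants (an assumption for this sketch). */
#define MAX_RT_PRIO	100
#define MAX_PRIO	140

#define CPUPRI_INVALID	-1
#define CPUPRI_IDLE	0
#define CPUPRI_NORMAL	1

/* Userspace copy of convert_prio() above. */
static int convert_prio(int prio)
{
	if (prio == CPUPRI_INVALID)
		return CPUPRI_INVALID;
	if (prio == MAX_PRIO)
		return CPUPRI_IDLE;		/* 140     -> 0      */
	if (prio >= MAX_RT_PRIO)
		return CPUPRI_NORMAL;		/* 100-139 -> 1      */
	return MAX_RT_PRIO - prio + 1;		/* 0-99    -> 101-2  */
}

int main(void)
{
	int samples[] = { -1, 140, 120, 100, 99, 50, 0 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("task prio %4d -> cpupri %4d\n",
		       samples[i], convert_prio(samples[i]));
	return 0;
}

Note the inversion: a numerically low task->prio (high RT priority) maps to a high cpupri index, which is what lets cpupri_find() below scan upward from index 0 and stop at the first populated level strictly below the task's own priority.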

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
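To make the search structure concrete, here is a toy userspace model of the per-priority vectors, using one 64-bit word per level instead of the kernel's cpumask_var_t and omitting the atomics and barriers. The shape of the loop mirrors cpupri_find(); everything here (toy_vec, toy_find) is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define NR_LEVELS 102	/* matches CPUPRI_NR_PRIORITIES */

struct toy_vec {
	int      count;	/* nr of CPUs at this level */
	uint64_t mask;	/* which CPUs those are     */
};

static struct toy_vec pri_to_cpu[NR_LEVELS];

/* Mirror of the cpupri_find() loop: return a mask of CPUs running at
 * a strictly lower priority than task_pri that the task may use. */
static uint64_t toy_find(int task_pri, uint64_t affinity)
{
	for (int idx = 0; idx < task_pri; idx++) {
		if (!pri_to_cpu[idx].count)
			continue;	/* empty level, skip it */
		uint64_t hit = pri_to_cpu[idx].mask & affinity;
		if (hit)
			return hit;	/* lowest-pri eligible CPUs */
	}
	return 0;
}

int main(void)
{
	pri_to_cpu[0] = (struct toy_vec){ 1, 1ULL << 3 };	/* CPU3 idle      */
	pri_to_cpu[1] = (struct toy_vec){ 2, 0x5 };		/* CPU0,2 normal  */

	/* An RT50 task (cpupri 51) allowed only on CPUs 0-2. */
	printf("mask = %#llx\n", (unsigned long long)toy_find(51, 0x7));
	return 0;
}

With the vectors populated this way, the RT50 task's affinity excludes the idle CPU3, so the search falls through to the NORMAL level and returns CPUs 0 and 2, i.e. the lowest-priority CPUs the task may actually use.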
129
130/**
131 * cpupri_set - update the cpu priority setting
132 * @cp: The cpupri context
133 * @cpu: The target cpu
134 * @newpri: The priority (INVALID-RT99) to assign to this CPU
135 *
136 * Note: Assumes cpu_rq(cpu)->lock is locked
137 *
138 * Returns: (void)
139 */
140void cpupri_set(struct cpupri *cp, int cpu, int newpri)
141{
142	int *currpri = &cp->cpu_to_pri[cpu];
143	int oldpri = *currpri;
144	int do_mb = 0;
145
146	newpri = convert_prio(newpri);
147
148	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);
149
150	if (newpri == oldpri)
151		return;
152
153	/*
154	 * If the cpu was currently mapped to a different value, we
155	 * need to map it to the new value then remove the old value.
156	 * Note, we must add the new value first, otherwise we risk the
157	 * cpu being missed by the priority loop in cpupri_find.
158	 */
159	if (likely(newpri != CPUPRI_INVALID)) {
160		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
161
162		cpumask_set_cpu(cpu, vec->mask);
163		/*
164		 * When adding a new vector, we update the mask first,
165		 * do a write memory barrier, and then update the count, to
166		 * make sure the vector is visible when count is set.
167		 */
168		smp_mb__before_atomic();
169		atomic_inc(&(vec)->count);
170		do_mb = 1;
171	}
172	if (likely(oldpri != CPUPRI_INVALID)) {
173		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
174
175		/*
176		 * Because the order of modification of the vec->count
177		 * is important, we must make sure that the update
178		 * of the new prio is seen before we decrement the
179		 * old prio. This makes sure that the loop sees
180		 * one or the other when we raise the priority of
181		 * the run queue. We don't care about when we lower the
182		 * priority, as that will trigger an rt pull anyway.
183		 *
184		 * We only need to do a memory barrier if we updated
185		 * the new priority vec.
186		 */
187		if (do_mb)
188			smp_mb__after_atomic();
189
190		/*
191		 * When removing from the vector, we decrement the counter first
192		 * do a memory barrier and then clear the mask.
193		 */
194		atomic_dec(&(vec)->count);
195		smp_mb__after_atomic();
196		cpumask_clear_cpu(cpu, vec->mask);
197	}
198
199	*currpri = newpri;
200}
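The barrier pairing above is the classic publish pattern: the writer makes the mask visible before the count, and the reader checks the count before trusting the mask. Below is a minimal C11 sketch of the same contract, using acquire/release atomics in place of the kernel's full smp_mb__*_atomic() barriers (sufficient for the one ordering illustrated, though the kernel uses stronger full barriers); struct level, publish_cpu(), snapshot_cpus() and retract_cpu() are invented names:

#include <stdatomic.h>
#include <stdint.h>

struct level {
	atomic_int       count;
	_Atomic uint64_t mask;	/* toy one-word cpumask */
};

/* Writer side, as in cpupri_set(): publish the mask bit first, then
 * the count, so a reader that observes count != 0 is guaranteed to
 * also observe the bit. */
void publish_cpu(struct level *l, int cpu)
{
	atomic_fetch_or_explicit(&l->mask, (uint64_t)1 << cpu,
				 memory_order_relaxed);
	atomic_fetch_add_explicit(&l->count, 1, memory_order_release);
}

/* Reader side, as in cpupri_find(): check the count, then read the
 * mask; the acquire load pairs with the release increment above. */
uint64_t snapshot_cpus(struct level *l)
{
	if (!atomic_load_explicit(&l->count, memory_order_acquire))
		return 0;	/* level looks empty: skip it */
	return atomic_load_explicit(&l->mask, memory_order_relaxed);
}

/* Removal inverts the order: drop the count, then clear the bit.
 * The kernel puts a full barrier between the two; the release on the
 * clear gives the analogous store-store ordering in this sketch. */
void retract_cpu(struct level *l, int cpu)
{
	atomic_fetch_sub_explicit(&l->count, 1, memory_order_relaxed);
	atomic_fetch_and_explicit(&l->mask, ~((uint64_t)1 << cpu),
				  memory_order_release);
}

On the removal path a racing reader may see a zero count while the bit is still set, which is harmless (it just skips the level); that is exactly the benign race the cpupri_find() comment walks through.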

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
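For completeness, this is roughly how a caller wires the pieces together. In the kernel the real callers live elsewhere in kernel/sched/ (the root-domain setup owns the struct cpupri, and the RT scheduler drives cpupri_set()/cpupri_find()); the function below is a hypothetical, self-contained lifecycle sketch, not code from the tree:

/* Hypothetical lifecycle sketch; cpupri_example() is an invented name. */
static int cpupri_example(struct cpupri *cp, struct task_struct *p)
{
	cpumask_var_t lowest_mask;
	int found;

	if (cpupri_init(cp))			/* -ENOMEM on failure */
		return -ENOMEM;

	if (!zalloc_cpumask_var(&lowest_mask, GFP_KERNEL)) {
		cpupri_cleanup(cp);
		return -ENOMEM;
	}

	/* Priorities are given on the task->prio scale: */
	cpupri_set(cp, 0, MAX_RT_PRIO);		/* CPU0 runs only normal tasks */
	cpupri_set(cp, 1, MAX_PRIO);		/* CPU1 is idle */

	/* Where could the RT task @p be pushed?  On success,
	 * lowest_mask holds the lowest-priority eligible CPUs. */
	found = cpupri_find(cp, p, lowest_mask);

	free_cpumask_var(lowest_mask);
	cpupri_cleanup(cp);
	return found ? 0 : -ENODEV;
}

Note that cpupri_set() takes priorities on the 140-based task->prio scale (or CPUPRI_INVALID), not cpupri indices; hence MAX_PRIO to mark a CPU idle and MAX_RT_PRIO to mark it running only normal tasks.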
v3.5.6
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in one of the
 *  following states:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a two-dimensional bitmap (the first dimension for priority class, the
 *  second for CPUs in that class).  Therefore a typical application without
 *  affinity restrictions can find a suitable CPU with O(1) complexity (e.g.
 *  two bit searches).  For tasks with affinity restrictions, the algorithm
 *  has a worst-case complexity of O(min(102, nr_domcpus)), though the
 *  scenario that yields the worst-case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */

#include <linux/gfp.h>
#include "cpupri.h"

/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int                 *currpri = &cp->cpu_to_pri[cpu];
	int                  oldpri  = *currpri;
	int                  do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the cpu is currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic_inc();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic_inc();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic_inc();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Returns: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;
	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
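For readers comparing the two listings, the differences are mechanical rather than algorithmic: (1) the barrier helpers smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc() were later renamed to the generic smp_mb__before_atomic()/smp_mb__after_atomic(); (2) direct &p->cpus_allowed accesses were wrapped in the tsk_cpus_allowed() helper; (3) the early "if (task_pri >= MAX_RT_PRIO) return 0;" guard became a BUG_ON() against CPUPRI_NR_PRIORITIES; and (4) in v3.5.6 cpu_to_pri is evidently embedded in struct cpupri, whereas the newer cpupri_init()/cpupri_cleanup() kcalloc() and kfree() it. Out-of-tree code straddling both eras sometimes papers over the barrier rename with a shim like the hypothetical one below (the 3.16 cutoff is from memory and approximate):

#include <linux/version.h>

/* Hypothetical compatibility shim, not from either kernel tree. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
#define smp_mb__before_atomic()	smp_mb__before_atomic_inc()
#define smp_mb__after_atomic()	smp_mb__after_atomic_inc()
#endif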