v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/mwait.h>
#include <xen/xen.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80

#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS	0
#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION	1

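/*
 * Firmware requests that a number of logical CPUs be idled by sending
 * Notify(0x80) to the ACPI000C device and publishing the requested count
 * via the _PUR object.  The driver satisfies the request by spawning one
 * SCHED_FIFO "acpi_pad/N" kthread per idled CPU; each thread occupies a
 * CPU and puts it into a deep MWAIT C-state.  The outcome is reported
 * back to firmware through _OST.
 */
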
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;

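/*
 * Probe CPUID leaf 5 (MONITOR/MWAIT) and cache the MWAIT hint for the
 * deepest supported C-state in power_saving_mwait_eax.  Also note whether
 * the TSC may stop in that state on this CPU vendor/family.
 */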
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
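/*
 * Migrate power-saving task tsk_index to the online CPU that has hosted
 * the fewest pad threads so far (lowest cpu_weight), preferring CPUs whose
 * SMT siblings are not already occupied by another pad thread.
 */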
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	if (tsk_in_cpu[tsk_index] != -1) {
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
		tsk_in_cpu[tsk_index] = -1;
	}
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
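/*
 * Main loop of each "acpi_pad/N" kthread: run as a low-priority SCHED_FIFO
 * task, hold the current CPU in MWAIT for (100 - idle_pct)% of each period,
 * then nap for idle_pct% so the RT throttling heuristics still hold, and
 * hop to another CPU every round_robin_time seconds.
 */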
static int power_saving_thread(void *data)
{
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();

			perf_lopwr_cb(true);

			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();

			perf_lopwr_cb(false);

			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * current sched_rt has threshold for rt task running time.
		 * When a rt task uses 95% CPU time, the rt thread will be
		 * scheduled out for 5% CPU time to not starve other tasks. But
		 * the mechanism only works when all CPUs have RT task running,
		 * as if one CPU hasn't RT task, RT task from other CPUs will
		 * borrow CPU time from this CPU and cause RT task use > 95%
		 * CPU time. To make 'avoid starvation' work, takes a nap here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/* If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}

static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	cpus_read_lock();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	cpus_read_unlock();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}

static ssize_t rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", round_robin_time);
}
static DEVICE_ATTR_RW(rrtime);

static ssize_t idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", idle_pct);
}
static DEVICE_ATTR_RW(idlepct);

static ssize_t idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
				       to_cpumask(pad_busy_cpus_bits));
}

static DEVICE_ATTR_RW(idlecpus);

static struct attribute *acpi_pad_attrs[] = {
	&dev_attr_idlecpus.attr,
	&dev_attr_idlepct.attr,
	&dev_attr_rrtime.attr,
	NULL
};

ATTRIBUTE_GROUPS(acpi_pad);
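
/*
 * Example usage of the knobs above (the sysfs path is illustrative; the
 * exact device directory depends on how the platform device is enumerated):
 *
 *   # ask acpi_pad to occupy two CPUs with power-saving threads
 *   echo 2 > /sys/devices/platform/ACPI000C:00/idlecpus
 *
 *   # read back the mask of CPUs currently held by pad threads
 *   cat /sys/devices/platform/ACPI000C:00/idlecpus
 */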

/*
 * Query the firmware how many CPUs should be idle.
 * Returns -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
		package->package.count == 2 &&
		package->package.elements[0].integer.value == 1) /* rev 1 */

		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}

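/*
 * Handle the Notify(0x80) event: ask the firmware how many CPUs to idle
 * via _PUR, apply the request, and report the outcome back through _OST
 * together with the number of CPUs actually idled.
 */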
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};
	u32 status;

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		/* The ACPI specification says that if no action was performed when
		 * processing the _PUR object, _OST should still be evaluated, albeit
		 * with a different status code.
		 */
		status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION;
	} else {
		status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS;
		acpi_pad_idle_cpus(num_cpus);
	}

	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, &param);
	mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *adev = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(adev->pnp.device_class,
			dev_name(&adev->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}

static int acpi_pad_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	acpi_status status;

	strscpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strscpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	status = acpi_install_notify_handler(adev->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev);

	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}

static void acpi_pad_remove(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(adev->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
}

static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct platform_driver acpi_pad_driver = {
	.probe = acpi_pad_probe,
	.remove = acpi_pad_remove,
	.driver = {
		.dev_groups = acpi_pad_groups,
		.name = "processor_aggregator",
		.acpi_match_table = pad_device_ids,
	},
};

static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return platform_driver_register(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	platform_driver_unregister(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
#include <xen/xen.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80

static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;

static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
static int power_saving_thread(void *data)
{
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();
			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();
			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * current sched_rt has threshold for rt task running time.
		 * When a rt task uses 95% CPU time, the rt thread will be
		 * scheduled out for 5% CPU time to not starve other tasks. But
		 * the mechanism only works when all CPUs have RT task running,
		 * as if one CPU hasn't RT task, RT task from other CPUs will
		 * borrow CPU time from this CPU and cause RT task use > 95%
		 * CPU time. To make 'avoid starvation' work, takes a nap here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/* If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}

static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	get_online_cpus();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	put_online_cpus();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}

static ssize_t rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
static DEVICE_ATTR_RW(rrtime);

static ssize_t idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
static DEVICE_ATTR_RW(idlepct);

static ssize_t idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
				       to_cpumask(pad_busy_cpus_bits));
}

static DEVICE_ATTR_RW(idlecpus);

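/*
 * In this version the sysfs files are created and removed by hand, with
 * explicit unwinding of the already-created files on error.
 */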
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	int result;

	result = device_create_file(&device->dev, &dev_attr_idlecpus);
	if (result)
		return -ENODEV;
	result = device_create_file(&device->dev, &dev_attr_idlepct);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		return -ENODEV;
	}
	result = device_create_file(&device->dev, &dev_attr_rrtime);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		device_remove_file(&device->dev, &dev_attr_idlepct);
		return -ENODEV;
	}
	return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}

/*
 * Query the firmware how many CPUs should be idle.
 * Returns -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
		package->package.count == 2 &&
		package->package.elements[0].integer.value == 1) /* rev 1 */

		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}

static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
	mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
			dev_name(&device->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}

static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}

static int acpi_pad_remove(struct acpi_device *device)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
	return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};

static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");