Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Idle functions for s390.
4 *
5 * Copyright IBM Corp. 2014
6 *
7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/kernel_stat.h>
12#include <linux/notifier.h>
13#include <linux/init.h>
14#include <linux/cpu.h>
15#include <trace/events/power.h>
16#include <asm/cpu_mf.h>
17#include <asm/cputime.h>
18#include <asm/nmi.h>
19#include <asm/smp.h>
20#include "entry.h"
21
22static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
23
/*
 * Called on interrupt exit after a period of enabled wait: charge the
 * elapsed wall-clock time as idle, update the lowcore steal/system CPU
 * timers, and publish the per-cpu idle statistics read by sysfs.
 */
void account_idle_time_irq(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	struct lowcore *lc = get_lowcore();
	unsigned long idle_time;
	u64 cycles_new[8];
	int i;

	/*
	 * Fold the multi-thread diagnostic cycle counters accumulated while
	 * the CPU was waiting into the per-cpu mt_cycles totals, relative to
	 * the snapshot taken in arch_cpu_idle().
	 * NOTE(review): assumes smp_cpu_mtid < 8 so cycles_new[] is large
	 * enough — confirm against the stcctm()/MT_DIAG contract.
	 */
	if (smp_cpu_mtid) {
		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
		for (i = 0; i < smp_cpu_mtid; i++)
			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}

	/* TOD clock delta between idle entry and the waking interrupt. */
	idle_time = lc->int_clock - idle->clock_idle_enter;

	/* Time before idle entry not yet accounted is treated as steal. */
	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
	lc->last_update_clock = lc->int_clock;

	/* CPU timer consumed since idle entry is charged as system time. */
	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
	lc->last_update_timer = lc->sys_enter_timer;

	/* Account time spent with enabled wait psw loaded as idle time. */
	WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
	WRITE_ONCE(idle->idle_count, READ_ONCE(idle->idle_count) + 1);
	account_idle_time(cputime_to_nsecs(idle_time));
}
51
/*
 * Enter enabled wait: build a wait PSW that keeps I/O, external and
 * machine-check interrupts enabled, snapshot the entry timestamps used
 * later by account_idle_time_irq(), and load the PSW. The function does
 * not return here; execution resumes via the interrupt that ends the
 * wait. noinstr: runs on the idle path where instrumentation is not
 * allowed.
 */
void noinstr arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT |
		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);
	set_cpu_flag(CIF_ENABLED_WAIT);
	/* Snapshot MT diagnostic counters so the wakeup path can account deltas. */
	if (smp_cpu_mtid)
		stcctm(MT_DIAG, smp_cpu_mtid, (u64 *)&idle->mt_cycles_enter);
	idle->clock_idle_enter = get_tod_clock_fast();
	idle->timer_idle_enter = get_cpu_timer();
	/* NOTE(review): bpon() presumably re-enables branch prediction — confirm. */
	bpon();
	__load_psw_mask(psw_mask);
}
69
70static ssize_t show_idle_count(struct device *dev,
71 struct device_attribute *attr, char *buf)
72{
73 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
74
75 return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_count));
76}
77DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
78
79static ssize_t show_idle_time(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
83
84 return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_time) >> 12);
85}
86DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
87
/* No arch-specific work is needed before entering the idle loop. */
void arch_cpu_idle_enter(void)
{
}
91
/* No arch-specific work is needed after leaving the idle loop. */
void arch_cpu_idle_exit(void)
{
}
95
/* Take this CPU offline permanently; cpu_die() does not return. */
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Idle functions for s390.
4 *
5 * Copyright IBM Corp. 2014
6 *
7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/kernel_stat.h>
12#include <linux/kprobes.h>
13#include <linux/notifier.h>
14#include <linux/init.h>
15#include <linux/cpu.h>
16#include <linux/sched/cputime.h>
17#include <trace/events/power.h>
18#include <asm/nmi.h>
19#include <asm/smp.h>
20#include "entry.h"
21
22static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
23
/*
 * Wait for an interrupt with an enabled wait PSW, then account the time
 * spent waiting as idle. The entry/exit TOD timestamps are filled in by
 * the psw_idle() assembler routine; the seqcount write section lets the
 * sysfs readers observe a consistent (idle_time, idle_count,
 * clock_idle_enter/exit) snapshot.
 */
void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask, flags;


	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* Interrupts stay hard-disabled; the wait PSW itself enables them. */
	local_irq_save(flags);
	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);
	local_irq_restore(flags);

	/* Account time spent with enabled wait psw loaded as idle time. */
	raw_write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	/* Zero the markers so readers know no idle period is in flight. */
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	raw_write_seqcount_end(&idle->seqcount);
}
/* Excluded from kprobes: runs on the idle path with interrupts disabled. */
NOKPROBE_SYMBOL(enabled_wait);
51
52static ssize_t show_idle_count(struct device *dev,
53 struct device_attribute *attr, char *buf)
54{
55 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
56 unsigned long long idle_count;
57 unsigned int seq;
58
59 do {
60 seq = read_seqcount_begin(&idle->seqcount);
61 idle_count = READ_ONCE(idle->idle_count);
62 if (READ_ONCE(idle->clock_idle_enter))
63 idle_count++;
64 } while (read_seqcount_retry(&idle->seqcount, seq));
65 return sprintf(buf, "%llu\n", idle_count);
66}
67DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
68
/*
 * sysfs: report the accumulated idle time of this CPU in microseconds
 * (TOD value shifted by 12). Time of an idle period still in flight is
 * added on top of the accumulated total.
 */
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	/* Snapshot the idle state consistently against enabled_wait(). */
	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			/* Period complete but not yet folded into idle_time. */
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			/* Period still in progress: count time up to "now". */
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
95
/*
 * Return the time (in nanoseconds) the given CPU has spent in its
 * current or just-finished idle period. Returns 0 when the CPU is not
 * idle (clock_idle_enter is zero outside enabled_wait()).
 */
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	/* Snapshot entry/exit timestamps consistently against enabled_wait(). */
	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			/* Idle period already ended: use its exact length. */
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			/* Still idle: measure up to the current TOD clock. */
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}
118
/* No arch-specific work is needed before entering the idle loop. */
void arch_cpu_idle_enter(void)
{
}
122
/*
 * Generic idle entry point: wait for an interrupt in enabled wait, then
 * re-enable interrupts as the generic idle loop expects on return.
 */
void arch_cpu_idle(void)
{
	enabled_wait();
	local_irq_enable();
}
128
/* No arch-specific work is needed after leaving the idle loop. */
void arch_cpu_idle_exit(void)
{
}
132
/* Take this CPU offline permanently via cpu_die(). */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}