// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

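/*
 * Called on interrupt entry when the interrupt ended an enabled wait
 * (CIF_ENABLED_WAIT was set): charge the waited time to the steal and
 * system CPU timers and, with SMT enabled, fold the per-thread cycle
 * counters sampled by stcctm() into mt_cycles[].
 */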
void account_idle_time_irq(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	u64 cycles_new[8];
	int i;

	clear_cpu_flag(CIF_ENABLED_WAIT);
	if (smp_cpu_mtid) {
		/* Close the MT cycle interval opened at idle entry. */
		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
		for (i = 0; i < smp_cpu_mtid; i++)
			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}

	/* The clock and timer values saved at interrupt entry end the idle period. */
	idle->clock_idle_exit = S390_lowcore.int_clock;
	idle->timer_idle_exit = S390_lowcore.sys_enter_timer;

	S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = idle->clock_idle_exit;

	S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
	S390_lowcore.last_update_timer = idle->timer_idle_exit;
}

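/*
 * Idle entry point invoked from the generic idle loop. The write side of
 * idle->seqcount pairs with the lockless readers in the sysfs show
 * functions and in arch_cpu_idle_time() below.
 */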
void arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long idle_time;
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* psw_idle() returns with interrupts disabled. */
	psw_idle(idle, psw_mask);

	/* Account time spent with enabled wait psw loaded as idle time. */
	raw_write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	raw_write_seqcount_end(&idle->seqcount);
	raw_local_irq_enable();
}

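/*
 * Back the per-cpu sysfs attributes idle_count and idle_time_us; the
 * attributes are registered from the cpu hotplug code elsewhere in
 * arch/s390. An idle period still in progress, i.e. a non-zero
 * clock_idle_enter with no matching exit yet, is counted as well.
 */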
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long idle_count;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

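/*
 * The values compared here are TOD clock ticks. By architecture, bit 51
 * of the TOD clock increments once per microsecond, so shifting a TOD
 * delta right by 12 yields microseconds: a delta of 0x1000 (4096) is
 * exactly 1us. That is why the attribute is named idle_time_us even
 * though the raw bookkeeping is done in TOD format.
 */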
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

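/*
 * Return the nanoseconds of an idle period still in flight on @cpu.
 * Completed periods have already been passed to account_idle_time() from
 * arch_cpu_idle(), so only the current enabled wait, if any, needs to be
 * reported to the generic idle-time accounting here.
 */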
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

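/*
 * No extra work is needed around idle entry/exit on s390; the generic
 * hooks are kept as explicit no-ops.
 */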
void arch_cpu_idle_enter(void)
{
}

void arch_cpu_idle_exit(void)
{
}

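/* Called from the idle loop on an offline CPU; cpu_die() does not return. */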
void arch_cpu_idle_dead(void)
{
	cpu_die();
}