// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

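/*
 * Per-CPU idle statistics: number of completed idle periods, accumulated
 * idle time and the TOD clock values at idle entry/exit. Updates in
 * enabled_wait() and the readers below are synchronized with the seqcount
 * embedded in struct s390_idle_data.
 */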
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

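/*
 * Stop the CPU by loading an enabled wait PSW and, once an interrupt has
 * woken the CPU up again, account the time spent waiting as idle time.
 */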
void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	trace_hardirqs_off();

	/* Account time spent with enabled wait psw loaded as idle time. */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);

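/*
 * Sysfs attribute: number of idle periods of this CPU. If the CPU is
 * currently idle (clock_idle_enter is non-zero), the in-progress period
 * is counted as well. The seqcount retry loop guards against a concurrent
 * update in enabled_wait().
 */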
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

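/*
 * Sysfs attribute: accumulated idle time of this CPU in microseconds,
 * including the currently running idle period, if any. The TOD clock
 * delta is shifted right by 12 bits to convert it to microseconds.
 */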
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

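/*
 * Return the time in nanoseconds this CPU has spent in its current idle
 * period, or 0 if the CPU is not idle.
 */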
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

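/*
 * Idle loop hooks: machine check interrupts are disabled while the idle
 * loop runs; the wait PSW loaded in enabled_wait() re-enables them. A
 * machine check that became pending is handled when the CPU leaves the
 * idle loop.
 */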
void arch_cpu_idle_enter(void)
{
	local_mcck_disable();
}

void arch_cpu_idle(void)
{
	if (!test_cpu_flag(CIF_MCCK_PENDING))
		/* Halt the cpu and keep track of cpu time accounting. */
		enabled_wait();
	local_irq_enable();
}

void arch_cpu_idle_exit(void)
{
	local_mcck_enable();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}