Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Virtual cpu timer based timer functions.
4 *
5 * Copyright IBM Corp. 2004, 2012
6 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
7 */
8
9#include <linux/kernel_stat.h>
10#include <linux/sched/cputime.h>
11#include <linux/export.h>
12#include <linux/kernel.h>
13#include <linux/timex.h>
14#include <linux/types.h>
15#include <linux/time.h>
16#include <asm/alternative.h>
17#include <asm/vtimer.h>
18#include <asm/vtime.h>
19#include <asm/cpu_mf.h>
20#include <asm/smp.h>
21
22#include "entry.h"
23
24static void virt_timer_expire(void);
25
26static LIST_HEAD(virt_timer_list);
27static DEFINE_SPINLOCK(virt_timer_lock);
28static atomic64_t virt_timer_current;
29static atomic64_t virt_timer_elapsed;
30
31DEFINE_PER_CPU(u64, mt_cycles[8]);
32static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
33static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
34static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
35
36static inline u64 get_vtimer(void)
37{
38 u64 timer;
39
40 asm volatile("stpt %0" : "=Q" (timer));
41 return timer;
42}
43
44static inline void set_vtimer(u64 expires)
45{
46 u64 timer;
47
48 asm volatile(
49 " stpt %0\n" /* Store current cpu timer value */
50 " spt %1" /* Set new value imm. afterwards */
51 : "=Q" (timer) : "Q" (expires));
52 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
53 S390_lowcore.last_update_timer = expires;
54}
55
56static inline int virt_timer_forward(u64 elapsed)
57{
58 BUG_ON(!irqs_disabled());
59
60 if (list_empty(&virt_timer_list))
61 return 0;
62 elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
63 return elapsed >= atomic64_read(&virt_timer_current);
64}
65
66static void update_mt_scaling(void)
67{
68 u64 cycles_new[8], *cycles_old;
69 u64 delta, fac, mult, div;
70 int i;
71
72 stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
73 cycles_old = this_cpu_ptr(mt_cycles);
74 fac = 1;
75 mult = div = 0;
76 for (i = 0; i <= smp_cpu_mtid; i++) {
77 delta = cycles_new[i] - cycles_old[i];
78 div += delta;
79 mult *= i + 1;
80 mult += delta * fac;
81 fac *= i + 1;
82 }
83 div *= fac;
84 if (div > 0) {
85 /* Update scaling factor */
86 __this_cpu_write(mt_scaling_mult, mult);
87 __this_cpu_write(mt_scaling_div, div);
88 memcpy(cycles_old, cycles_new,
89 sizeof(u64) * (smp_cpu_mtid + 1));
90 }
91 __this_cpu_write(mt_scaling_jiffies, jiffies_64);
92}
93
94static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
95{
96 u64 delta;
97
98 delta = new - *tsk_vtime;
99 *tsk_vtime = new;
100 return delta;
101}
102
103
104static inline u64 scale_vtime(u64 vtime)
105{
106 u64 mult = __this_cpu_read(mt_scaling_mult);
107 u64 div = __this_cpu_read(mt_scaling_div);
108
109 if (smp_cpu_mtid)
110 return vtime * mult / div;
111 return vtime;
112}
113
114static void account_system_index_scaled(struct task_struct *p, u64 cputime,
115 enum cpu_usage_stat index)
116{
117 p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
118 account_system_index_time(p, cputime_to_nsecs(cputime), index);
119}
120
121/*
122 * Update process times based on virtual cpu times stored by entry.S
123 * to the lowcore fields user_timer, system_timer & steal_clock.
124 */
125static int do_account_vtime(struct task_struct *tsk)
126{
127 u64 timer, clock, user, guest, system, hardirq, softirq;
128
129 timer = S390_lowcore.last_update_timer;
130 clock = S390_lowcore.last_update_clock;
131 asm volatile(
132 " stpt %0\n" /* Store current cpu timer value */
133 " stckf %1" /* Store current tod clock value */
134 : "=Q" (S390_lowcore.last_update_timer),
135 "=Q" (S390_lowcore.last_update_clock)
136 : : "cc");
137 clock = S390_lowcore.last_update_clock - clock;
138 timer -= S390_lowcore.last_update_timer;
139
140 if (hardirq_count())
141 S390_lowcore.hardirq_timer += timer;
142 else
143 S390_lowcore.system_timer += timer;
144
145 /* Update MT utilization calculation */
146 if (smp_cpu_mtid &&
147 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
148 update_mt_scaling();
149
150 /* Calculate cputime delta */
151 user = update_tsk_timer(&tsk->thread.user_timer,
152 READ_ONCE(S390_lowcore.user_timer));
153 guest = update_tsk_timer(&tsk->thread.guest_timer,
154 READ_ONCE(S390_lowcore.guest_timer));
155 system = update_tsk_timer(&tsk->thread.system_timer,
156 READ_ONCE(S390_lowcore.system_timer));
157 hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
158 READ_ONCE(S390_lowcore.hardirq_timer));
159 softirq = update_tsk_timer(&tsk->thread.softirq_timer,
160 READ_ONCE(S390_lowcore.softirq_timer));
161 S390_lowcore.steal_timer +=
162 clock - user - guest - system - hardirq - softirq;
163
164 /* Push account value */
165 if (user) {
166 account_user_time(tsk, cputime_to_nsecs(user));
167 tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
168 }
169
170 if (guest) {
171 account_guest_time(tsk, cputime_to_nsecs(guest));
172 tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
173 }
174
175 if (system)
176 account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
177 if (hardirq)
178 account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
179 if (softirq)
180 account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
181
182 return virt_timer_forward(user + guest + system + hardirq + softirq);
183}
184
185void vtime_task_switch(struct task_struct *prev)
186{
187 do_account_vtime(prev);
188 prev->thread.user_timer = S390_lowcore.user_timer;
189 prev->thread.guest_timer = S390_lowcore.guest_timer;
190 prev->thread.system_timer = S390_lowcore.system_timer;
191 prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
192 prev->thread.softirq_timer = S390_lowcore.softirq_timer;
193 S390_lowcore.user_timer = current->thread.user_timer;
194 S390_lowcore.guest_timer = current->thread.guest_timer;
195 S390_lowcore.system_timer = current->thread.system_timer;
196 S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
197 S390_lowcore.softirq_timer = current->thread.softirq_timer;
198}
199
200/*
201 * In s390, accounting pending user time also implies
202 * accounting system time in order to correctly compute
203 * the stolen time accounting.
204 */
205void vtime_flush(struct task_struct *tsk)
206{
207 u64 steal, avg_steal;
208
209 if (do_account_vtime(tsk))
210 virt_timer_expire();
211
212 steal = S390_lowcore.steal_timer;
213 avg_steal = S390_lowcore.avg_steal_timer / 2;
214 if ((s64) steal > 0) {
215 S390_lowcore.steal_timer = 0;
216 account_steal_time(cputime_to_nsecs(steal));
217 avg_steal += steal;
218 }
219 S390_lowcore.avg_steal_timer = avg_steal;
220}
221
222static u64 vtime_delta(void)
223{
224 u64 timer = S390_lowcore.last_update_timer;
225
226 S390_lowcore.last_update_timer = get_vtimer();
227
228 return timer - S390_lowcore.last_update_timer;
229}
230
231/*
232 * Update process times based on virtual cpu times stored by entry.S
233 * to the lowcore fields user_timer, system_timer & steal_clock.
234 */
235void vtime_account_kernel(struct task_struct *tsk)
236{
237 u64 delta = vtime_delta();
238
239 if (tsk->flags & PF_VCPU)
240 S390_lowcore.guest_timer += delta;
241 else
242 S390_lowcore.system_timer += delta;
243
244 virt_timer_forward(delta);
245}
246EXPORT_SYMBOL_GPL(vtime_account_kernel);
247
248void vtime_account_softirq(struct task_struct *tsk)
249{
250 u64 delta = vtime_delta();
251
252 S390_lowcore.softirq_timer += delta;
253
254 virt_timer_forward(delta);
255}
256
257void vtime_account_hardirq(struct task_struct *tsk)
258{
259 u64 delta = vtime_delta();
260
261 S390_lowcore.hardirq_timer += delta;
262
263 virt_timer_forward(delta);
264}
265
266/*
267 * Sorted add to a list. List is linear searched until first bigger
268 * element is found.
269 */
270static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
271{
272 struct vtimer_list *tmp;
273
274 list_for_each_entry(tmp, head, entry) {
275 if (tmp->expires > timer->expires) {
276 list_add_tail(&timer->entry, &tmp->entry);
277 return;
278 }
279 }
280 list_add_tail(&timer->entry, head);
281}
282
283/*
284 * Handler for expired virtual CPU timer.
285 */
286static void virt_timer_expire(void)
287{
288 struct vtimer_list *timer, *tmp;
289 unsigned long elapsed;
290 LIST_HEAD(cb_list);
291
292 /* walk timer list, fire all expired timers */
293 spin_lock(&virt_timer_lock);
294 elapsed = atomic64_read(&virt_timer_elapsed);
295 list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
296 if (timer->expires < elapsed)
297 /* move expired timer to the callback queue */
298 list_move_tail(&timer->entry, &cb_list);
299 else
300 timer->expires -= elapsed;
301 }
302 if (!list_empty(&virt_timer_list)) {
303 timer = list_first_entry(&virt_timer_list,
304 struct vtimer_list, entry);
305 atomic64_set(&virt_timer_current, timer->expires);
306 }
307 atomic64_sub(elapsed, &virt_timer_elapsed);
308 spin_unlock(&virt_timer_lock);
309
310 /* Do callbacks and recharge periodic timers */
311 list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
312 list_del_init(&timer->entry);
313 timer->function(timer->data);
314 if (timer->interval) {
315 /* Recharge interval timer */
316 timer->expires = timer->interval +
317 atomic64_read(&virt_timer_elapsed);
318 spin_lock(&virt_timer_lock);
319 list_add_sorted(timer, &virt_timer_list);
320 spin_unlock(&virt_timer_lock);
321 }
322 }
323}
324
325void init_virt_timer(struct vtimer_list *timer)
326{
327 timer->function = NULL;
328 INIT_LIST_HEAD(&timer->entry);
329}
330EXPORT_SYMBOL(init_virt_timer);
331
332static inline int vtimer_pending(struct vtimer_list *timer)
333{
334 return !list_empty(&timer->entry);
335}
336
337static void internal_add_vtimer(struct vtimer_list *timer)
338{
339 if (list_empty(&virt_timer_list)) {
340 /* First timer, just program it. */
341 atomic64_set(&virt_timer_current, timer->expires);
342 atomic64_set(&virt_timer_elapsed, 0);
343 list_add(&timer->entry, &virt_timer_list);
344 } else {
345 /* Update timer against current base. */
346 timer->expires += atomic64_read(&virt_timer_elapsed);
347 if (likely((s64) timer->expires <
348 (s64) atomic64_read(&virt_timer_current)))
349 /* The new timer expires before the current timer. */
350 atomic64_set(&virt_timer_current, timer->expires);
351 /* Insert new timer into the list. */
352 list_add_sorted(timer, &virt_timer_list);
353 }
354}
355
356static void __add_vtimer(struct vtimer_list *timer, int periodic)
357{
358 unsigned long flags;
359
360 timer->interval = periodic ? timer->expires : 0;
361 spin_lock_irqsave(&virt_timer_lock, flags);
362 internal_add_vtimer(timer);
363 spin_unlock_irqrestore(&virt_timer_lock, flags);
364}
365
366/*
367 * add_virt_timer - add a oneshot virtual CPU timer
368 */
369void add_virt_timer(struct vtimer_list *timer)
370{
371 __add_vtimer(timer, 0);
372}
373EXPORT_SYMBOL(add_virt_timer);
374
375/*
376 * add_virt_timer_int - add an interval virtual CPU timer
377 */
378void add_virt_timer_periodic(struct vtimer_list *timer)
379{
380 __add_vtimer(timer, 1);
381}
382EXPORT_SYMBOL(add_virt_timer_periodic);
383
384static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
385{
386 unsigned long flags;
387 int rc;
388
389 BUG_ON(!timer->function);
390
391 if (timer->expires == expires && vtimer_pending(timer))
392 return 1;
393 spin_lock_irqsave(&virt_timer_lock, flags);
394 rc = vtimer_pending(timer);
395 if (rc)
396 list_del_init(&timer->entry);
397 timer->interval = periodic ? expires : 0;
398 timer->expires = expires;
399 internal_add_vtimer(timer);
400 spin_unlock_irqrestore(&virt_timer_lock, flags);
401 return rc;
402}
403
404/*
405 * returns whether it has modified a pending timer (1) or not (0)
406 */
407int mod_virt_timer(struct vtimer_list *timer, u64 expires)
408{
409 return __mod_vtimer(timer, expires, 0);
410}
411EXPORT_SYMBOL(mod_virt_timer);
412
413/*
414 * returns whether it has modified a pending timer (1) or not (0)
415 */
416int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
417{
418 return __mod_vtimer(timer, expires, 1);
419}
420EXPORT_SYMBOL(mod_virt_timer_periodic);
421
422/*
423 * Delete a virtual timer.
424 *
425 * returns whether the deleted timer was pending (1) or not (0)
426 */
427int del_virt_timer(struct vtimer_list *timer)
428{
429 unsigned long flags;
430
431 if (!vtimer_pending(timer))
432 return 0;
433 spin_lock_irqsave(&virt_timer_lock, flags);
434 list_del_init(&timer->entry);
435 spin_unlock_irqrestore(&virt_timer_lock, flags);
436 return 1;
437}
438EXPORT_SYMBOL(del_virt_timer);
439
440/*
441 * Start the virtual CPU timer on the current CPU.
442 */
443void vtime_init(void)
444{
445 /* set initial cpu timer */
446 set_vtimer(VTIMER_MAX_SLICE);
447 /* Setup initial MT scaling values */
448 if (smp_cpu_mtid) {
449 __this_cpu_write(mt_scaling_jiffies, jiffies);
450 __this_cpu_write(mt_scaling_mult, 1);
451 __this_cpu_write(mt_scaling_div, 1);
452 stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
453 }
454}
1/*
2 * arch/s390/kernel/vtime.c
3 * Virtual cpu timer based timer functions.
4 *
5 * S390 version
6 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/time.h>
13#include <linux/delay.h>
14#include <linux/init.h>
15#include <linux/smp.h>
16#include <linux/types.h>
17#include <linux/timex.h>
18#include <linux/notifier.h>
19#include <linux/kernel_stat.h>
20#include <linux/rcupdate.h>
21#include <linux/posix-timers.h>
22#include <linux/cpu.h>
23#include <linux/kprobes.h>
24
25#include <asm/timer.h>
26#include <asm/irq_regs.h>
27#include <asm/cputime.h>
28#include <asm/irq.h>
29#include "entry.h"
30
31static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
32
33DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
34
35static inline __u64 get_vtimer(void)
36{
37 __u64 timer;
38
39 asm volatile("STPT %0" : "=m" (timer));
40 return timer;
41}
42
43static inline void set_vtimer(__u64 expires)
44{
45 __u64 timer;
46
47 asm volatile (" STPT %0\n" /* Store current cpu timer value */
48 " SPT %1" /* Set new value immediately afterwards */
49 : "=m" (timer) : "m" (expires) );
50 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
51 S390_lowcore.last_update_timer = expires;
52}
53
54/*
55 * Update process times based on virtual cpu times stored by entry.S
56 * to the lowcore fields user_timer, system_timer & steal_clock.
57 */
58static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
59{
60 struct thread_info *ti = task_thread_info(tsk);
61 __u64 timer, clock, user, system, steal;
62
63 timer = S390_lowcore.last_update_timer;
64 clock = S390_lowcore.last_update_clock;
65 asm volatile (" STPT %0\n" /* Store current cpu timer value */
66 " STCK %1" /* Store current tod clock value */
67 : "=m" (S390_lowcore.last_update_timer),
68 "=m" (S390_lowcore.last_update_clock) );
69 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
70 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
71
72 user = S390_lowcore.user_timer - ti->user_timer;
73 S390_lowcore.steal_timer -= user;
74 ti->user_timer = S390_lowcore.user_timer;
75 account_user_time(tsk, user, user);
76
77 system = S390_lowcore.system_timer - ti->system_timer;
78 S390_lowcore.steal_timer -= system;
79 ti->system_timer = S390_lowcore.system_timer;
80 account_system_time(tsk, hardirq_offset, system, system);
81
82 steal = S390_lowcore.steal_timer;
83 if ((s64) steal > 0) {
84 S390_lowcore.steal_timer = 0;
85 account_steal_time(steal);
86 }
87}
88
89void account_vtime(struct task_struct *prev, struct task_struct *next)
90{
91 struct thread_info *ti;
92
93 do_account_vtime(prev, 0);
94 ti = task_thread_info(prev);
95 ti->user_timer = S390_lowcore.user_timer;
96 ti->system_timer = S390_lowcore.system_timer;
97 ti = task_thread_info(next);
98 S390_lowcore.user_timer = ti->user_timer;
99 S390_lowcore.system_timer = ti->system_timer;
100}
101
102void account_process_tick(struct task_struct *tsk, int user_tick)
103{
104 do_account_vtime(tsk, HARDIRQ_OFFSET);
105}
106
107/*
108 * Update process times based on virtual cpu times stored by entry.S
109 * to the lowcore fields user_timer, system_timer & steal_clock.
110 */
111void account_system_vtime(struct task_struct *tsk)
112{
113 struct thread_info *ti = task_thread_info(tsk);
114 __u64 timer, system;
115
116 timer = S390_lowcore.last_update_timer;
117 S390_lowcore.last_update_timer = get_vtimer();
118 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
119
120 system = S390_lowcore.system_timer - ti->system_timer;
121 S390_lowcore.steal_timer -= system;
122 ti->system_timer = S390_lowcore.system_timer;
123 account_system_time(tsk, 0, system, system);
124}
125EXPORT_SYMBOL_GPL(account_system_vtime);
126
127void __kprobes vtime_stop_cpu(void)
128{
129 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
130 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
131 unsigned long long idle_time;
132 unsigned long psw_mask;
133
134 trace_hardirqs_on();
135 /* Don't trace preempt off for idle. */
136 stop_critical_timings();
137
138 /* Wait for external, I/O or machine check interrupt. */
139 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
140 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
141 idle->nohz_delay = 0;
142
143 /* Call the assembler magic in entry.S */
144 psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
145
146 /* Reenable preemption tracer. */
147 start_critical_timings();
148
149 /* Account time spent with enabled wait psw loaded as idle time. */
150 idle->sequence++;
151 smp_wmb();
152 idle_time = idle->idle_exit - idle->idle_enter;
153 idle->idle_time += idle_time;
154 idle->idle_enter = idle->idle_exit = 0ULL;
155 idle->idle_count++;
156 account_idle_time(idle_time);
157 smp_wmb();
158 idle->sequence++;
159}
160
161cputime64_t s390_get_idle_time(int cpu)
162{
163 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
164 unsigned long long now, idle_enter, idle_exit;
165 unsigned int sequence;
166
167 do {
168 now = get_clock();
169 sequence = ACCESS_ONCE(idle->sequence);
170 idle_enter = ACCESS_ONCE(idle->idle_enter);
171 idle_exit = ACCESS_ONCE(idle->idle_exit);
172 } while ((sequence & 1) || (idle->sequence != sequence));
173 return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
174}
175
176/*
177 * Sorted add to a list. List is linear searched until first bigger
178 * element is found.
179 */
180static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
181{
182 struct vtimer_list *event;
183
184 list_for_each_entry(event, head, entry) {
185 if (event->expires > timer->expires) {
186 list_add_tail(&timer->entry, &event->entry);
187 return;
188 }
189 }
190 list_add_tail(&timer->entry, head);
191}
192
193/*
194 * Do the callback functions of expired vtimer events.
195 * Called from within the interrupt handler.
196 */
197static void do_callbacks(struct list_head *cb_list)
198{
199 struct vtimer_queue *vq;
200 struct vtimer_list *event, *tmp;
201
202 if (list_empty(cb_list))
203 return;
204
205 vq = &__get_cpu_var(virt_cpu_timer);
206
207 list_for_each_entry_safe(event, tmp, cb_list, entry) {
208 list_del_init(&event->entry);
209 (event->function)(event->data);
210 if (event->interval) {
211 /* Recharge interval timer */
212 event->expires = event->interval + vq->elapsed;
213 spin_lock(&vq->lock);
214 list_add_sorted(event, &vq->list);
215 spin_unlock(&vq->lock);
216 }
217 }
218}
219
220/*
221 * Handler for the virtual CPU timer.
222 */
223static void do_cpu_timer_interrupt(struct ext_code ext_code,
224 unsigned int param32, unsigned long param64)
225{
226 struct vtimer_queue *vq;
227 struct vtimer_list *event, *tmp;
228 struct list_head cb_list; /* the callback queue */
229 __u64 elapsed, next;
230
231 kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
232 INIT_LIST_HEAD(&cb_list);
233 vq = &__get_cpu_var(virt_cpu_timer);
234
235 /* walk timer list, fire all expired events */
236 spin_lock(&vq->lock);
237
238 elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
239 BUG_ON((s64) elapsed < 0);
240 vq->elapsed = 0;
241 list_for_each_entry_safe(event, tmp, &vq->list, entry) {
242 if (event->expires < elapsed)
243 /* move expired timer to the callback queue */
244 list_move_tail(&event->entry, &cb_list);
245 else
246 event->expires -= elapsed;
247 }
248 spin_unlock(&vq->lock);
249
250 do_callbacks(&cb_list);
251
252 /* next event is first in list */
253 next = VTIMER_MAX_SLICE;
254 spin_lock(&vq->lock);
255 if (!list_empty(&vq->list)) {
256 event = list_first_entry(&vq->list, struct vtimer_list, entry);
257 next = event->expires;
258 }
259 spin_unlock(&vq->lock);
260 /*
261 * To improve precision add the time spent by the
262 * interrupt handler to the elapsed time.
263 * Note: CPU timer counts down and we got an interrupt,
264 * the current content is negative
265 */
266 elapsed = S390_lowcore.async_enter_timer - get_vtimer();
267 set_vtimer(next - elapsed);
268 vq->timer = next - elapsed;
269 vq->elapsed = elapsed;
270}
271
272void init_virt_timer(struct vtimer_list *timer)
273{
274 timer->function = NULL;
275 INIT_LIST_HEAD(&timer->entry);
276}
277EXPORT_SYMBOL(init_virt_timer);
278
279static inline int vtimer_pending(struct vtimer_list *timer)
280{
281 return (!list_empty(&timer->entry));
282}
283
284/*
285 * this function should only run on the specified CPU
286 */
287static void internal_add_vtimer(struct vtimer_list *timer)
288{
289 struct vtimer_queue *vq;
290 unsigned long flags;
291 __u64 left, expires;
292
293 vq = &per_cpu(virt_cpu_timer, timer->cpu);
294 spin_lock_irqsave(&vq->lock, flags);
295
296 BUG_ON(timer->cpu != smp_processor_id());
297
298 if (list_empty(&vq->list)) {
299 /* First timer on this cpu, just program it. */
300 list_add(&timer->entry, &vq->list);
301 set_vtimer(timer->expires);
302 vq->timer = timer->expires;
303 vq->elapsed = 0;
304 } else {
305 /* Check progress of old timers. */
306 expires = timer->expires;
307 left = get_vtimer();
308 if (likely((s64) expires < (s64) left)) {
309 /* The new timer expires before the current timer. */
310 set_vtimer(expires);
311 vq->elapsed += vq->timer - left;
312 vq->timer = expires;
313 } else {
314 vq->elapsed += vq->timer - left;
315 vq->timer = left;
316 }
317 /* Insert new timer into per cpu list. */
318 timer->expires += vq->elapsed;
319 list_add_sorted(timer, &vq->list);
320 }
321
322 spin_unlock_irqrestore(&vq->lock, flags);
323 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
324 put_cpu();
325}
326
327static inline void prepare_vtimer(struct vtimer_list *timer)
328{
329 BUG_ON(!timer->function);
330 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
331 BUG_ON(vtimer_pending(timer));
332 timer->cpu = get_cpu();
333}
334
335/*
336 * add_virt_timer - add an oneshot virtual CPU timer
337 */
338void add_virt_timer(void *new)
339{
340 struct vtimer_list *timer;
341
342 timer = (struct vtimer_list *)new;
343 prepare_vtimer(timer);
344 timer->interval = 0;
345 internal_add_vtimer(timer);
346}
347EXPORT_SYMBOL(add_virt_timer);
348
349/*
350 * add_virt_timer_int - add an interval virtual CPU timer
351 */
352void add_virt_timer_periodic(void *new)
353{
354 struct vtimer_list *timer;
355
356 timer = (struct vtimer_list *)new;
357 prepare_vtimer(timer);
358 timer->interval = timer->expires;
359 internal_add_vtimer(timer);
360}
361EXPORT_SYMBOL(add_virt_timer_periodic);
362
363static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
364{
365 struct vtimer_queue *vq;
366 unsigned long flags;
367 int cpu;
368
369 BUG_ON(!timer->function);
370 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
371
372 if (timer->expires == expires && vtimer_pending(timer))
373 return 1;
374
375 cpu = get_cpu();
376 vq = &per_cpu(virt_cpu_timer, cpu);
377
378 /* disable interrupts before test if timer is pending */
379 spin_lock_irqsave(&vq->lock, flags);
380
381 /* if timer isn't pending add it on the current CPU */
382 if (!vtimer_pending(timer)) {
383 spin_unlock_irqrestore(&vq->lock, flags);
384
385 if (periodic)
386 timer->interval = expires;
387 else
388 timer->interval = 0;
389 timer->expires = expires;
390 timer->cpu = cpu;
391 internal_add_vtimer(timer);
392 return 0;
393 }
394
395 /* check if we run on the right CPU */
396 BUG_ON(timer->cpu != cpu);
397
398 list_del_init(&timer->entry);
399 timer->expires = expires;
400 if (periodic)
401 timer->interval = expires;
402
403 /* the timer can't expire anymore so we can release the lock */
404 spin_unlock_irqrestore(&vq->lock, flags);
405 internal_add_vtimer(timer);
406 return 1;
407}
408
409/*
410 * If we change a pending timer the function must be called on the CPU
411 * where the timer is running on.
412 *
413 * returns whether it has modified a pending timer (1) or not (0)
414 */
415int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
416{
417 return __mod_vtimer(timer, expires, 0);
418}
419EXPORT_SYMBOL(mod_virt_timer);
420
421/*
422 * If we change a pending timer the function must be called on the CPU
423 * where the timer is running on.
424 *
425 * returns whether it has modified a pending timer (1) or not (0)
426 */
427int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
428{
429 return __mod_vtimer(timer, expires, 1);
430}
431EXPORT_SYMBOL(mod_virt_timer_periodic);
432
433/*
434 * delete a virtual timer
435 *
436 * returns whether the deleted timer was pending (1) or not (0)
437 */
438int del_virt_timer(struct vtimer_list *timer)
439{
440 unsigned long flags;
441 struct vtimer_queue *vq;
442
443 /* check if timer is pending */
444 if (!vtimer_pending(timer))
445 return 0;
446
447 vq = &per_cpu(virt_cpu_timer, timer->cpu);
448 spin_lock_irqsave(&vq->lock, flags);
449
450 /* we don't interrupt a running timer, just let it expire! */
451 list_del_init(&timer->entry);
452
453 spin_unlock_irqrestore(&vq->lock, flags);
454 return 1;
455}
456EXPORT_SYMBOL(del_virt_timer);
457
458/*
459 * Start the virtual CPU timer on the current CPU.
460 */
461void init_cpu_vtimer(void)
462{
463 struct vtimer_queue *vq;
464
465 /* initialize per cpu vtimer structure */
466 vq = &__get_cpu_var(virt_cpu_timer);
467 INIT_LIST_HEAD(&vq->list);
468 spin_lock_init(&vq->lock);
469
470 /* enable cpu timer interrupts */
471 __ctl_set_bit(0,10);
472
473 /* set initial cpu timer */
474 set_vtimer(0x7fffffffffffffffULL);
475}
476
477static int __cpuinit s390_nohz_notify(struct notifier_block *self,
478 unsigned long action, void *hcpu)
479{
480 struct s390_idle_data *idle;
481 long cpu = (long) hcpu;
482
483 idle = &per_cpu(s390_idle, cpu);
484 switch (action) {
485 case CPU_DYING:
486 case CPU_DYING_FROZEN:
487 idle->nohz_delay = 0;
488 default:
489 break;
490 }
491 return NOTIFY_OK;
492}
493
494void __init vtime_init(void)
495{
496 /* request the cpu timer external interrupt */
497 if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
498 panic("Couldn't request external interrupt 0x1005");
499
500 /* Enable cpu timer interrupts on the boot cpu. */
501 init_cpu_vtimer();
502 cpu_notifier(s390_nohz_notify, 0);
503}
504