v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 *    Virtual cpu timer based timer functions.
 *
 *    Copyright IBM Corp. 2004, 2012
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <asm/alternative.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

#include "entry.h"

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

static inline void set_vtimer(u64 expires)
{
	struct lowcore *lc = get_lowcore();
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=Q" (timer) : "Q" (expires));
	lc->system_timer += lc->last_update_timer - timer;
	lc->last_update_timer = expires;
}

static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}
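/*
 * virt_timer_forward() accumulates consumed CPU time in
 * virt_timer_elapsed and reports whether the earliest queued virtual
 * timer is now due; vtime_flush() reacts to that by calling
 * virt_timer_expire().
 */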

static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
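/*
 * A worked note on the computation above: with delta[i] the per-counter
 * deltas, the loop yields
 *
 *	mult = n! * sum(delta[i] / (i + 1)),  div = n! * sum(delta[i])
 *
 * for n = smp_cpu_mtid + 1, i.e. mult/div = sum(delta[i] / (i + 1)) /
 * sum(delta[i]): the delta of counter set i is weighted by 1/(i + 1).
 * scale_vtime() below applies this ratio to convert raw thread time
 * into scaled, approximately full-core-equivalent time.
 */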

static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
{
	u64 delta;

	delta = new - *tsk_vtime;
	*tsk_vtime = new;
	return delta;
}


static inline u64 scale_vtime(u64 vtime)
{
	u64 mult = __this_cpu_read(mt_scaling_mult);
	u64 div = __this_cpu_read(mt_scaling_div);

	if (smp_cpu_mtid)
		return vtime * mult / div;
	return vtime;
}

static void account_system_index_scaled(struct task_struct *p, u64 cputime,
					enum cpu_usage_stat index)
{
	p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
	account_system_index_time(p, cputime_to_nsecs(cputime), index);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk)
{
	u64 timer, clock, user, guest, system, hardirq, softirq;
	struct lowcore *lc = get_lowcore();

	timer = lc->last_update_timer;
	clock = lc->last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	stckf	%1"	/* Store current tod clock value */
		: "=Q" (lc->last_update_timer),
		  "=Q" (lc->last_update_clock)
		: : "cc");
	clock = lc->last_update_clock - clock;
	timer -= lc->last_update_timer;

	if (hardirq_count())
		lc->hardirq_timer += timer;
	else
		lc->system_timer += timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	/* Calculate cputime delta */
	user = update_tsk_timer(&tsk->thread.user_timer,
				READ_ONCE(lc->user_timer));
	guest = update_tsk_timer(&tsk->thread.guest_timer,
				 READ_ONCE(lc->guest_timer));
	system = update_tsk_timer(&tsk->thread.system_timer,
				  READ_ONCE(lc->system_timer));
	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
				   READ_ONCE(lc->hardirq_timer));
	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
				   READ_ONCE(lc->softirq_timer));
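	/*
	 * Any wall-clock delta not matched by one of the cputime
	 * buckets above was spent outside this virtual CPU, i.e. it
	 * counts as steal time.
	 */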
	lc->steal_timer +=
		clock - user - guest - system - hardirq - softirq;

	/* Push account value */
	if (user) {
		account_user_time(tsk, cputime_to_nsecs(user));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
	}

	if (guest) {
		account_guest_time(tsk, cputime_to_nsecs(guest));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
	}

	if (system)
		account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
	if (hardirq)
		account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
	if (softirq)
		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);

	return virt_timer_forward(user + guest + system + hardirq + softirq);
}

void vtime_task_switch(struct task_struct *prev)
{
	struct lowcore *lc = get_lowcore();

	do_account_vtime(prev);
	prev->thread.user_timer = lc->user_timer;
	prev->thread.guest_timer = lc->guest_timer;
	prev->thread.system_timer = lc->system_timer;
	prev->thread.hardirq_timer = lc->hardirq_timer;
	prev->thread.softirq_timer = lc->softirq_timer;
	lc->user_timer = current->thread.user_timer;
	lc->guest_timer = current->thread.guest_timer;
	lc->system_timer = current->thread.system_timer;
	lc->hardirq_timer = current->thread.hardirq_timer;
	lc->softirq_timer = current->thread.softirq_timer;
}

/*
 * In s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct lowcore *lc = get_lowcore();
	u64 steal, avg_steal;

	if (do_account_vtime(tsk))
		virt_timer_expire();

	steal = lc->steal_timer;
	avg_steal = lc->avg_steal_timer;
	if ((s64) steal > 0) {
		lc->steal_timer = 0;
		account_steal_time(cputime_to_nsecs(steal));
		avg_steal += steal;
	}
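	/*
	 * avg_steal_timer keeps a simple decaying average of steal
	 * time: each flush adds any new sample and halves the sum.
	 */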
	lc->avg_steal_timer = avg_steal / 2;
}

static u64 vtime_delta(void)
{
	struct lowcore *lc = get_lowcore();
	u64 timer = lc->last_update_timer;

	lc->last_update_timer = get_cpu_timer();
	return timer - lc->last_update_timer;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
void vtime_account_kernel(struct task_struct *tsk)
{
	struct lowcore *lc = get_lowcore();
	u64 delta = vtime_delta();

	if (tsk->flags & PF_VCPU)
		lc->guest_timer += delta;
	else
		lc->system_timer += delta;

	virt_timer_forward(delta);
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_softirq(struct task_struct *tsk)
{
	u64 delta = vtime_delta();

	get_lowcore()->softirq_timer += delta;

	virt_timer_forward(delta);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	u64 delta = vtime_delta();

	get_lowcore()->hardirq_timer += delta;

	virt_timer_forward(delta);
}

/*
 * Sorted insert into a list. The list is searched linearly until the
 * first element with a later expiry is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timers.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
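/*
 * Illustrative usage sketch (not part of this file; the names below are
 * made up): a caller embeds a struct vtimer_list, initializes it, fills
 * in function, data and expires (in CPU timer units), and queues it:
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		// called from virt_timer_expire() once enough virtual
 *		// CPU time has been consumed
 *	}
 *
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = 1UL << 32;	// hypothetical expiry value
 *	add_virt_timer(&my_timer);
 */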

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
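/*
 * For the periodic variant, expires doubles as the recharge interval:
 * after each callback, virt_timer_expire() re-queues the timer with
 * expires = interval + virt_timer_elapsed.
 */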

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}
v3.5.6
 
/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/kprobes.h>

#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/irq.h>
#include "entry.h"

static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static inline __u64 get_vtimer(void)
{
	__u64 timer;

	asm volatile("STPT %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
		      "  SPT %1"     /* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
		      "  STCK %1"      /* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}
}

void account_vtime(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(next);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
	do_account_vtime(tsk, HARDIRQ_OFFSET);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, system;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

void __kprobes vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	idle->nohz_delay = 0;

	/* Call the assembler magic in entry.S */
	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));

	/* Reenable preemption tracer. */
	start_critical_timings();

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle->sequence++;
	smp_wmb();
	idle_time = idle->idle_exit - idle->idle_enter;
	idle->idle_time += idle_time;
	idle->idle_enter = idle->idle_exit = 0ULL;
	idle->idle_count++;
	account_idle_time(idle_time);
	smp_wmb();
	idle->sequence++;
}

cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
}
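/*
 * The sequence counter implements a lockless read protocol:
 * vtime_stop_cpu() increments ->sequence before and after updating the
 * idle fields, so the reader retries while the count is odd (an update
 * is in progress) or has changed during the read.
 */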

/*
 * Sorted insert into a list. The list is searched linearly until the
 * first element with a later expiry is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;

	if (list_empty(cb_list))
		return;

	vq = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		list_del_init(&event->entry);
		(event->function)(event->data);
		if (event->interval) {
			/* Recharge interval timer */
			event->expires = event->interval + vq->elapsed;
			spin_lock(&vq->lock);
			list_add_sorted(event, &vq->list);
			spin_unlock(&vq->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;
	struct list_head cb_list;	/* the callback queue */
	__u64 elapsed, next;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
	INIT_LIST_HEAD(&cb_list);
	vq = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vq->lock);

	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
	BUG_ON((s64) elapsed < 0);
	vq->elapsed = 0;
	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
		if (event->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&event->entry, &cb_list);
		else
			event->expires -= elapsed;
	}
	spin_unlock(&vq->lock);

	do_callbacks(&cb_list);

	/* next event is first in list */
	next = VTIMER_MAX_SLICE;
	spin_lock(&vq->lock);
	if (!list_empty(&vq->list)) {
		event = list_first_entry(&vq->list, struct vtimer_list, entry);
		next = event->expires;
	}
	spin_unlock(&vq->lock);
	/*
	 * To improve precision add the time spent by the
	 * interrupt handler to the elapsed time.
	 * Note: CPU timer counts down and we got an interrupt,
	 *	 the current content is negative
	 */
	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
	set_vtimer(next - elapsed);
	vq->timer = next - elapsed;
	vq->elapsed = elapsed;
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	__u64 left, expires;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	BUG_ON(timer->cpu != smp_processor_id());

	if (list_empty(&vq->list)) {
		/* First timer on this cpu, just program it. */
		list_add(&timer->entry, &vq->list);
		set_vtimer(timer->expires);
		vq->timer = timer->expires;
		vq->elapsed = 0;
	} else {
		/* Check progress of old timers. */
		expires = timer->expires;
		left = get_vtimer();
		if (likely((s64) expires < (s64) left)) {
			/* The new timer expires before the current timer. */
			set_vtimer(expires);
			vq->elapsed += vq->timer - left;
			vq->timer = expires;
		} else {
			vq->elapsed += vq->timer - left;
			vq->timer = left;
		}
		/* Insert new timer into per cpu list. */
		timer->expires += vq->elapsed;
		list_add_sorted(timer, &vq->list);
	}

	spin_unlock_irqrestore(&vq->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
	BUG_ON(!timer->function);
	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
	BUG_ON(vtimer_pending(timer));
	timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	int cpu;

	BUG_ON(!timer->function);
	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vq = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vq->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vq->lock, flags);

		if (periodic)
			timer->interval = expires;
		else
			timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	BUG_ON(timer->cpu != cpu);

	list_del_init(&timer->entry);
	timer->expires = expires;
	if (periodic)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vq->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}

/*
 * If we change a pending timer, the function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * If we change a pending timer, the function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vq;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	spin_unlock_irqrestore(&vq->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vq;

	/* initialize per cpu vtimer structure */
	vq = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vq->list);
	spin_lock_init(&vq->lock);

	/* enable cpu timer interrupts */
	__ctl_set_bit(0,10);

	/* set initial cpu timer */
	set_vtimer(0x7fffffffffffffffULL);
}

static int __cpuinit s390_nohz_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	struct s390_idle_data *idle;
	long cpu = (long) hcpu;

	idle = &per_cpu(s390_idle, cpu);
	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		idle->nohz_delay = 0;
	default:
		break;
	}
	return NOTIFY_OK;
}

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
		panic("Couldn't request external interrupt 0x1005");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
	cpu_notifier(s390_nohz_notify, 0);
}