/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>

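/*
 * Per-CPU interrupt statistics.  The counters below are bumped from the
 * interrupt entry paths via inc_irq_stat() and reported through
 * arch_irq_stat_cpu()/arch_irq_stat() (e.g. for /proc/interrupts and the
 * "intr" line in /proc/stat).  The structure is ____cacheline_aligned so
 * that each CPU's counters sit in their own cache line and updates avoid
 * false sharing.
 */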
typedef struct {
	u16 __softirq_pending;
#if IS_ENABLED(CONFIG_KVM_INTEL)
	u8 kvm_cpu_l1tf_flush_l1d;
#endif
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
#ifdef CONFIG_HAVE_KVM
	unsigned int kvm_posted_intr_ipis;
	unsigned int kvm_posted_intr_wakeup_ipis;
	unsigned int kvm_posted_intr_nested_ipis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
#endif
	unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
	unsigned int irq_deferred_error_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	unsigned int irq_hv_callback_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	unsigned int irq_hv_reenlightenment_count;
	unsigned int hyperv_stimer0_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
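
/*
 * inc_irq_stat() is the usual way handlers bump these counters, e.g. the
 * reschedule IPI handler does inc_irq_stat(irq_resched_count) and the
 * local APIC timer path does inc_irq_stat(apic_timer_irqs).
 */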

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat
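
/*
 * Defining arch_irq_stat_cpu/arch_irq_stat tells the generic /proc/stat
 * code that this architecture contributes extra interrupt counts (the
 * fields above) on top of the per-IRQ kstat totals.
 */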
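/*
 * L1TF mitigation hook for KVM/VMX: interrupt entry paths set the per-CPU
 * kvm_cpu_l1tf_flush_l1d flag via kvm_set_cpu_l1tf_flush_l1d(), and the
 * VMX VM-entry code reads and clears it (helpers below) to decide whether
 * an L1D cache flush is needed before entering the guest.  When
 * CONFIG_KVM_INTEL is disabled, only an empty setter stub is provided so
 * callers need no #ifdefs.
 */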
#if IS_ENABLED(CONFIG_KVM_INTEL)
static inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */

#endif /* _ASM_X86_HARDIRQ_H */