1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_HARDIRQ_H
3#define _ASM_X86_HARDIRQ_H
4
5#include <linux/threads.h>
6
7typedef struct {
8 u16 __softirq_pending;
9#if IS_ENABLED(CONFIG_KVM_INTEL)
10 u8 kvm_cpu_l1tf_flush_l1d;
11#endif
12 unsigned int __nmi_count; /* arch dependent */
13#ifdef CONFIG_X86_LOCAL_APIC
14 unsigned int apic_timer_irqs; /* arch dependent */
15 unsigned int irq_spurious_count;
16 unsigned int icr_read_retry_count;
17#endif
18#ifdef CONFIG_HAVE_KVM
19 unsigned int kvm_posted_intr_ipis;
20 unsigned int kvm_posted_intr_wakeup_ipis;
21 unsigned int kvm_posted_intr_nested_ipis;
22#endif
23 unsigned int x86_platform_ipis; /* arch dependent */
24 unsigned int apic_perf_irqs;
25 unsigned int apic_irq_work_irqs;
26#ifdef CONFIG_SMP
27 unsigned int irq_resched_count;
28 unsigned int irq_call_count;
29#endif
30 unsigned int irq_tlb_count;
31#ifdef CONFIG_X86_THERMAL_VECTOR
32 unsigned int irq_thermal_count;
33#endif
34#ifdef CONFIG_X86_MCE_THRESHOLD
35 unsigned int irq_threshold_count;
36#endif
37#ifdef CONFIG_X86_MCE_AMD
38 unsigned int irq_deferred_error_count;
39#endif
40#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
41 unsigned int irq_hv_callback_count;
42#endif
43#if IS_ENABLED(CONFIG_HYPERV)
44 unsigned int irq_hv_reenlightenment_count;
45 unsigned int hyperv_stimer0_count;
46#endif
47} ____cacheline_aligned irq_cpustat_t;
48
49DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
50
51#define __ARCH_IRQ_STAT
52
53#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
54
55extern void ack_bad_irq(unsigned int irq);
56
57extern u64 arch_irq_stat_cpu(unsigned int cpu);
58#define arch_irq_stat_cpu arch_irq_stat_cpu
59
60extern u64 arch_irq_stat(void);
61#define arch_irq_stat arch_irq_stat
62
63
64#if IS_ENABLED(CONFIG_KVM_INTEL)
65static inline void kvm_set_cpu_l1tf_flush_l1d(void)
66{
67 __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
68}
69
70static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
71{
72 __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
73}
74
75static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
76{
77 return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
78}
79#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
80static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
81#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
82
83#endif /* _ASM_X86_HARDIRQ_H */
/*
 * NOTE(review): a second, older revision of this header (still using a
 * 32-bit __softirq_pending, with MAX_HARDIRQS_PER_CPU and the
 * set_softirq_pending()/or_softirq_pending() macros, and without the
 * KVM/MCE/Hyper-V counters) had been concatenated after the #endif
 * above. The _ASM_X86_HARDIRQ_H include guard made that copy dead text
 * in every build; it has been removed. The live definitions are the
 * ones earlier in this file.
 */