1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright 2016,2017 IBM Corporation.
4 */
5#ifndef __XIVE_INTERNAL_H
6#define __XIVE_INTERNAL_H
7
8/*
9 * A "disabled" interrupt should never fire, to catch problems
10 * we set its logical number to this
11 */
12#define XIVE_BAD_IRQ 0x7fffffff
13#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
14
15/* Each CPU carry one of these with various per-CPU state */
/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	/* Chip this CPU belongs to (NUMA/allocation hint for the backend) */
	int chip_id;

	/* Queue datas. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR (current processor priority register) */
	u8 cppr;
};
38
39/* Backend ops */
/*
 * Backend ops. One implementation per platform flavour; selected at
 * boot and passed to xive_core_init().
 */
struct xive_ops {
	/* Fill @data with the HW information for source @hw_irq */
	int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	/* Route @hw_irq to @target at @prio; @sw_irq is the Linux irq number */
	int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	/* Read back the current routing of @hw_irq */
	int (*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
			      u32 *sw_irq);
	/* Per-CPU event queue setup/teardown for one priority */
	int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	/* CPU hotplug/bring-up hooks */
	void (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	/* Probe: does this backend drive the controller described by @np? */
	bool (*match)(struct device_node *np);
	void (*shutdown)(void);

	/* Refresh xc->pending_prio from the HW queues */
	void (*update_pending)(struct xive_cpu *xc);
	/* NOTE(review): presumably synchronizes in-flight events for
	 * @hw_irq — confirm against backend implementations */
	void (*sync_source)(u32 hw_irq);
	/* ESB (event state buffer) MMIO access for @hw_irq at @offset */
	u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	/* Allocate/release the per-CPU IPI HW interrupt */
	int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	/* Optional debugfs hooks */
	int (*debug_show)(struct seq_file *m, void *private);
	int (*debug_create)(struct dentry *xive_dir);
	/* Backend name, used in log messages */
	const char *name;
};
64
/*
 * Register @ops as the active backend and initialise the XIVE core.
 * @area/@offset describe the TIMA MMIO mapping — TODO confirm against
 * the core implementation. Returns true on success.
 */
bool xive_core_init(struct device_node *np, const struct xive_ops *ops,
		    void __iomem *area, u32 offset, u8 max_prio);
/* Allocate a zeroed queue page of 2^queue_shift bytes for @cpu */
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
/* Create the core debugfs entries; returns 0 or a negative errno */
int xive_core_debug_init(void);
69
70static inline u32 xive_alloc_order(u32 queue_shift)
71{
72 return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
73}
74
/* Set when XIVE is disabled on the kernel command line — TODO confirm */
extern bool xive_cmdline_disabled;
/* NOTE(review): presumably indicates HW save/restore of OS context is
 * supported — verify against the core/backends */
extern bool xive_has_save_restore;
77
78#endif /* __XIVE_INTERNAL_H */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * NOTE(review): everything from here to the end of the file is a stale,
 * older revision of this header that has been appended to the file. It
 * is skipped by the preprocessor because __XIVE_INTERNAL_H is already
 * defined by the copy above; consider deleting this duplicate.
 */
5#ifndef __XIVE_INTERNAL_H
6#define __XIVE_INTERNAL_H
7
8/* Each CPU carry one of these with various per-CPU state */
/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	/* Chip this CPU belongs to (NUMA/allocation hint for the backend) */
	int chip_id;

	/* Queue datas. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR (current processor priority register) */
	u8 cppr;
};
31
32/* Backend ops */
/*
 * Backend ops. One implementation per platform flavour; selected at
 * boot and passed to xive_core_init().
 */
struct xive_ops {
	/* Fill @data with the HW information for source @hw_irq */
	int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	/* Route @hw_irq to @target at @prio; @sw_irq is the Linux irq number */
	int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	/* Read back the current routing of @hw_irq */
	int (*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
			      u32 *sw_irq);
	/* Per-CPU event queue setup/teardown for one priority */
	int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	/* CPU bring-up/teardown hooks */
	void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	/* Probe: does this backend drive the controller described by @np? */
	bool (*match)(struct device_node *np);
	void (*shutdown)(void);

	/* Refresh xc->pending_prio from the HW queues */
	void (*update_pending)(struct xive_cpu *xc);
	/* End-of-interrupt for source @hw_irq */
	void (*eoi)(u32 hw_irq);
	/* NOTE(review): presumably synchronizes in-flight events for
	 * @hw_irq — confirm against backend implementations */
	void (*sync_source)(u32 hw_irq);
	/* ESB (event state buffer) MMIO access for @hw_irq at @offset */
	u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	/* Allocate/release the per-CPU IPI HW interrupt */
	int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	/* Backend name, used in log messages */
	const char *name;
};
55
/*
 * Register @ops as the active backend and initialise the XIVE core.
 * @area/@offset describe the TIMA MMIO mapping — TODO confirm against
 * the core implementation. Returns true on success.
 */
bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
/* Allocate a zeroed queue page of 2^queue_shift bytes for @cpu */
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
59
60static inline u32 xive_alloc_order(u32 queue_shift)
61{
62 return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
63}
64
/* Set when XIVE is disabled on the kernel command line — TODO confirm */
extern bool xive_cmdline_disabled;
66
67#endif /* __XIVE_INTERNAL_H */