/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
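/*
 * Re-target an already-programmed MSI at a different CPU: patch the
 * physical destination ID into address_lo and the (re-)assigned vector
 * into the data word of the cached message, then write it back to the
 * device and record the new affinity.
 */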
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
                                     const cpumask_t *cpu_mask, bool force)
{
        struct msi_msg msg;
        u32 addr, data;
        int cpu = first_cpu(*cpu_mask);
        unsigned int irq = idata->irq;

        if (!cpu_online(cpu))
                return -1;

        if (irq_prepare_move(irq, cpu))
                return -1;

        get_cached_msi_msg(irq, &msg);

        addr = msg.address_lo;
        addr &= MSI_ADDR_DEST_ID_MASK;
        addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
        msg.address_lo = addr;

        data = msg.data;
        data &= MSI_DATA_VECTOR_MASK;
        data |= MSI_DATA_VECTOR(irq_to_vector(irq));
        msg.data = data;

        write_msi_msg(irq, &msg);
        cpumask_copy(idata->affinity, cpumask_of(cpu));

        return 0;
}
#endif /* CONFIG_SMP */

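/*
 * Allocate an irq and vector for the MSI descriptor, aim the message at
 * one online CPU in the irq's domain, program the device, and hook the
 * irq up to the generic edge handler.
 */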
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
        struct msi_msg msg;
        unsigned long dest_phys_id;
        int irq, vector;
        cpumask_t mask;

        irq = create_irq();
        if (irq < 0)
                return irq;

        irq_set_msi_desc(irq, desc);
        cpus_and(mask, irq_to_domain(irq), cpu_online_map);
        dest_phys_id = cpu_physical_id(first_cpu(mask));
        vector = irq_to_vector(irq);

        msg.address_hi = 0;
        msg.address_lo =
                MSI_ADDR_HEADER |
                MSI_ADDR_DEST_MODE_PHYS |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID_CPU(dest_phys_id);

        msg.data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(vector);

        write_msi_msg(irq, &msg);
        irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

        return 0;
}

void ia64_teardown_msi_irq(unsigned int irq)
{
        destroy_irq(irq);
}

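/*
 * Ack path shared by the PCI and DMAR MSI chips: finish any pending
 * vector migration for this irq, then signal end-of-interrupt to the
 * local CPU.
 */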
static void ia64_ack_msi_irq(struct irq_data *data)
{
        irq_complete_move(data->irq);
        irq_move_irq(data);
        ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
        unsigned int vector = irq_to_vector(data->irq);
        ia64_resend_irq(vector);

        return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
        .name              = "PCI-MSI",
        .irq_mask          = mask_msi_irq,
        .irq_unmask        = unmask_msi_irq,
        .irq_ack           = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity  = ia64_set_msi_irq_affinity,
#endif
        .irq_retrigger     = ia64_msi_retrigger_irq,
};

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
        if (platform_setup_msi_irq)
                return platform_setup_msi_irq(pdev, desc);

        return ia64_setup_msi_irq(pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
        if (platform_teardown_msi_irq)
                return platform_teardown_msi_irq(irq);

        return ia64_teardown_msi_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
                                 const struct cpumask *mask, bool force)
{
        unsigned int irq = data->irq;
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
        int cpu = cpumask_first(mask);

        if (!cpu_online(cpu))
                return -1;

        if (irq_prepare_move(irq, cpu))
                return -1;

        dmar_msi_read(irq, &msg);

        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

        dmar_msi_write(irq, &msg);
        cpumask_copy(data->affinity, mask);

        return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
        .name              = "DMAR_MSI",
        .irq_unmask        = dmar_msi_unmask,
        .irq_mask          = dmar_msi_mask,
        .irq_ack           = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity  = dmar_msi_set_affinity,
#endif
        .irq_retrigger     = ia64_msi_retrigger_irq,
};

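/*
 * Compose the message for the DMAR (IOMMU) MSI: physical destination
 * mode, edge trigger, fixed delivery, using the vector already bound to
 * this irq and one online CPU from its domain as the target.
 */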
static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned dest;
        cpumask_t mask;

        cpus_and(mask, irq_to_domain(irq), cpu_online_map);
        dest = cpu_physical_id(first_cpu(mask));

        msg->address_hi = 0;
        msg->address_lo =
                MSI_ADDR_HEADER |
                MSI_ADDR_DEST_MODE_PHYS |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID_CPU(dest);

        msg->data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(cfg->vector);
        return 0;
}

int arch_setup_dmar_msi(unsigned int irq)
{
        int ret;
        struct msi_msg msg;

        ret = msi_compose_msg(NULL, irq, &msg);
        if (ret < 0)
                return ret;
        dmar_msi_write(irq, &msg);
        irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
                                      "edge");
        return 0;
}
#endif /* CONFIG_DMAR */

// SPDX-License-Identifier: GPL-2.0
/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
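/*
 * Re-target an MSI: pick the first online CPU in the requested mask,
 * rewrite the destination ID and vector in the message cached in the
 * irq's MSI descriptor, then push it to the device and update the
 * recorded affinity.
 */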
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
                                     const cpumask_t *cpu_mask, bool force)
{
        struct msi_msg msg;
        u32 addr, data;
        int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
        unsigned int irq = idata->irq;

        if (irq_prepare_move(irq, cpu))
                return -1;

        __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

        addr = msg.address_lo;
        addr &= MSI_ADDR_DEST_ID_MASK;
        addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
        msg.address_lo = addr;

        data = msg.data;
        data &= MSI_DATA_VECTOR_MASK;
        data |= MSI_DATA_VECTOR(irq_to_vector(irq));
        msg.data = data;

        pci_write_msi_msg(irq, &msg);
        irq_data_update_affinity(idata, cpumask_of(cpu));

        return 0;
}
#endif /* CONFIG_SMP */

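/*
 * Arch hook called by the PCI/MSI core for each MSI descriptor: allocate
 * an irq, compose an edge-triggered, fixed-delivery message aimed at one
 * online CPU in the irq's domain, write it to the device, and install
 * the PCI-MSI irq_chip with the edge flow handler.
 */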
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
        struct msi_msg msg;
        unsigned long dest_phys_id;
        int irq, vector;

        irq = create_irq();
        if (irq < 0)
                return irq;

        irq_set_msi_desc(irq, desc);
        dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
                                                       cpu_online_mask));
        vector = irq_to_vector(irq);

        msg.address_hi = 0;
        msg.address_lo =
                MSI_ADDR_HEADER |
                MSI_ADDR_DEST_MODE_PHYS |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID_CPU(dest_phys_id);

        msg.data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(vector);

        pci_write_msi_msg(irq, &msg);
        irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

        return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
        destroy_irq(irq);
}

static void ia64_ack_msi_irq(struct irq_data *data)
{
        irq_complete_move(data->irq);
        irq_move_irq(data);
        ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
        unsigned int vector = irq_to_vector(data->irq);
        ia64_resend_irq(vector);

        return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
        .name              = "PCI-MSI",
        .irq_mask          = pci_msi_mask_irq,
        .irq_unmask        = pci_msi_unmask_irq,
        .irq_ack           = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity  = ia64_set_msi_irq_affinity,
#endif
        .irq_retrigger     = ia64_msi_retrigger_irq,
};

#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
                                 const struct cpumask *mask, bool force)
{
        unsigned int irq = data->irq;
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
        int cpu = cpumask_first_and(mask, cpu_online_mask);

        if (irq_prepare_move(irq, cpu))
                return -1;

        dmar_msi_read(irq, &msg);

        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

        dmar_msi_write(irq, &msg);
        irq_data_update_affinity(data, mask);

        return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
        .name              = "DMAR_MSI",
        .irq_unmask        = dmar_msi_unmask,
        .irq_mask          = dmar_msi_mask,
        .irq_ack           = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity  = dmar_msi_set_affinity,
#endif
        .irq_retrigger     = ia64_msi_retrigger_irq,
};

static void
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned dest;

        dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
                                                 cpu_online_mask));

        msg->address_hi = 0;
        msg->address_lo =
                MSI_ADDR_HEADER |
                MSI_ADDR_DEST_MODE_PHYS |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID_CPU(dest);

        msg->data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(cfg->vector);
}

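/*
 * Allocate and program the irq used for DMAR (IOMMU) fault events: on
 * success, stash the caller's cookie as handler data, install the
 * DMAR_MSI chip with the edge flow handler, and write the composed
 * message to the DMAR hardware.
 */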
int dmar_alloc_hwirq(int id, int node, void *arg)
{
        int irq;
        struct msi_msg msg;

        irq = create_irq();
        if (irq > 0) {
                irq_set_handler_data(irq, arg);
                irq_set_chip_and_handler_name(irq, &dmar_msi_type,
                                              handle_edge_irq, "edge");
                msi_compose_msg(NULL, irq, &msg);
                dmar_msi_write(irq, &msg);
        }

        return irq;
}

void dmar_free_hwirq(int irq)
{
        irq_set_handler_data(irq, NULL);
        destroy_irq(irq);
}
#endif /* CONFIG_INTEL_IOMMU */