/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
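/*
 * Retarget an MSI to the first CPU in the requested mask (bailing out if
 * that CPU is offline): rewrite the destination ID in the cached address
 * and the vector in the cached data, then push the updated message to
 * the device.
 */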
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(*cpu_mask);
	unsigned int irq = idata->irq;

	if (!cpu_online(cpu))
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	get_cached_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(idata->affinity, cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */

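/*
 * Allocate an irq/vector for a new MSI, compose the initial message
 * (physical destination mode, fixed delivery, edge trigger) targeting an
 * online CPU in the irq's domain, and write it to the device.
 */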
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}

void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

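/*
 * Ack by completing any pending vector migration and issuing an EOI.
 */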
static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	unsigned int vector = irq_to_vector(data->irq);
	ia64_resend_irq(vector);

	return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name		= "PCI-MSI",
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger	= ia64_msi_retrigger_irq,
};


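/*
 * A machine vector can override these hooks (platform_setup_msi_irq /
 * platform_teardown_msi_irq); otherwise fall back to the generic IA64
 * implementation above.
 */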
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}

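/* MSI message handling for the DMAR (Intel IOMMU) interrupt. */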
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(data->affinity, mask);

	return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};

static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;
	cpumask_t mask;

	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest = cpu_physical_id(first_cpu(mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
	return 0;
}

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif /* CONFIG_DMAR */

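/*
 * A later revision of the same code follows, using the updated MSI and
 * cpumask APIs (pci_write_msi_msg(), pci_msi_mask_irq()/pci_msi_unmask_irq(),
 * cpumask_first_and()) and the dmar_alloc_hwirq()/dmar_free_hwirq()
 * interface under CONFIG_INTEL_IOMMU.
 */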
/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
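/*
 * Retarget an MSI: pick the first online CPU in the requested mask,
 * patch the cached MSI message (destination ID and vector), and write
 * it back to the device.
 */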
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */

int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;

	irq = create_irq();
	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);
	dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
						       cpu_online_mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}

void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	unsigned int vector = irq_to_vector(data->irq);
	ia64_resend_irq(vector);

	return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name		= "PCI-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_ack	= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger	= ia64_msi_retrigger_irq,
};


int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}

#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), mask);

	return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};

static void
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;

	dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
						 cpu_online_mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
}

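/*
 * Allocate and program an irq for a DMAR unit; @arg is stashed as the
 * irq handler data.  The @id and @node hints are unused on ia64.
 */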
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	int irq;
	struct msi_msg msg;

	irq = create_irq();
	if (irq > 0) {
		irq_set_handler_data(irq, arg);
		irq_set_chip_and_handler_name(irq, &dmar_msi_type,
					      handle_edge_irq, "edge");
		msi_compose_msg(NULL, irq, &msg);
		dmar_msi_write(irq, &msg);
	}

	return irq;
}

void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
#endif /* CONFIG_INTEL_IOMMU */