/* NOTE(review): stray "Loading..." text — page-scrape artifact, not part of the source. */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * i8259 interrupt controller driver.
4 */
5#undef DEBUG
6
7#include <linux/ioport.h>
8#include <linux/interrupt.h>
9#include <linux/irqdomain.h>
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <asm/io.h>
13#include <asm/i8259.h>
14
/* Optional PCI host-bridge int-ack register; a read returns the active vector. */
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */

/* Cached copies of the two mask registers; all sources masked at boot (0xff). */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])	/* slave mask image (port 0xA1) */
#define cached_21 (cached_8259[1])	/* master mask image (port 0x21) */

/* Serializes all accesses to the controller ports and the cached masks. */
static DEFINE_RAW_SPINLOCK(i8259_lock);

/* Legacy irq domain registered by i8259_init(); NULL until then. */
static struct irq_domain *i8259_host;
24
25/*
26 * Acknowledge the IRQ using either the PCI host bridge's interrupt
27 * acknowledge feature or poll. How i8259_init() is called determines
28 * which is called. It should be noted that polling is broken on some
29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
30 */
/*
 * Return the number of the pending interrupt, or 0 when the interrupt
 * turns out to be spurious (IRQ7 with ISR bit 7 clear, or an 0xff
 * int-ack read).
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		/* Poll mode touches the controller ports; serialize. */
		raw_spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1. */
		outb(0x0C, 0x20);		/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2 ) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		if (!pci_intack)
			outb(0x0B, 0x20); /* ISR register */
		if(~inb(0x20) & 0x80)
			irq = 0;
	} else if (irq == 0xff)
		irq = 0;

	if (lock)
		raw_spin_unlock(&i8259_lock);
	return irq;
}
75
/*
 * Mask the source and acknowledge it.  A slave source (irq 8-15) needs
 * an EOI on both controllers: the slave first, then the cascade input
 * on the master.  The order of the port writes below is significant.
 */
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq > 7) {
		cached_A1 |= 1 << (d->irq-8);
		inb(0xA1); 	/* DUMMY - presumably a settle delay; predates this review */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << d->irq;
		inb(0x21); 	/* DUMMY - presumably a settle delay; predates this review */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
95
/*
 * Flush both cached mask images out to the controllers.  @irq_nr is
 * unused (both bytes are always rewritten).  Caller holds i8259_lock.
 */
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1,0xA1);
	outb(cached_21,0x21);
}
101
102static void i8259_mask_irq(struct irq_data *d)
103{
104 unsigned long flags;
105
106 pr_debug("i8259_mask_irq(%d)\n", d->irq);
107
108 raw_spin_lock_irqsave(&i8259_lock, flags);
109 if (d->irq < 8)
110 cached_21 |= 1 << d->irq;
111 else
112 cached_A1 |= 1 << (d->irq-8);
113 i8259_set_irq_mask(d->irq);
114 raw_spin_unlock_irqrestore(&i8259_lock, flags);
115}
116
117static void i8259_unmask_irq(struct irq_data *d)
118{
119 unsigned long flags;
120
121 pr_debug("i8259_unmask_irq(%d)\n", d->irq);
122
123 raw_spin_lock_irqsave(&i8259_lock, flags);
124 if (d->irq < 8)
125 cached_21 &= ~(1 << d->irq);
126 else
127 cached_A1 &= ~(1 << (d->irq-8));
128 i8259_set_irq_mask(d->irq);
129 raw_spin_unlock_irqrestore(&i8259_lock, flags);
130}
131
/* irq_chip backing every virq mapped through this domain. */
static struct irq_chip i8259_pic = {
	.name = "i8259",
	.irq_mask = i8259_mask_irq,
	.irq_disable = i8259_mask_irq,	/* disable == mask for this PIC */
	.irq_unmask = i8259_unmask_irq,
	.irq_mask_ack = i8259_mask_and_ack_irq,
};
139
/* Legacy ISA I/O port ranges, claimed from ioport_resource in i8259_init(). */
static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
160
161static int i8259_host_match(struct irq_domain *h, struct device_node *node,
162 enum irq_domain_bus_token bus_token)
163{
164 struct device_node *of_node = irq_domain_get_of_node(h);
165 return of_node == NULL || of_node == node;
166}
167
/*
 * Domain map callback: wire a freshly-created virq to the i8259 chip.
 * hwirq 2 is the internal master->slave cascade and is made
 * unrequestable so drivers cannot claim it.
 */
static int i8259_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* We use the level handler only for now, we might want to
	 * be more cautious here but that works for now
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
184
185static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
186 const u32 *intspec, unsigned int intsize,
187 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
188{
189 static unsigned char map_isa_senses[4] = {
190 IRQ_TYPE_LEVEL_LOW,
191 IRQ_TYPE_LEVEL_HIGH,
192 IRQ_TYPE_EDGE_FALLING,
193 IRQ_TYPE_EDGE_RISING,
194 };
195
196 *out_hwirq = intspec[0];
197 if (intsize > 1 && intspec[1] < 4)
198 *out_flags = map_isa_senses[intspec[1]];
199 else
200 *out_flags = IRQ_TYPE_NONE;
201
202 return 0;
203}
204
/* irq_domain callbacks for the legacy i8259 host. */
static const struct irq_domain_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.xlate = i8259_host_xlate,
};
210
/* Return the domain registered by i8259_init(); NULL before init. */
struct irq_domain *__init i8259_get_host(void)
{
	return i8259_host;
}
215
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259
 *
 * Programs both controllers' init sequences, unmasks only the internal
 * cascade, registers the legacy irq domain and reserves the classic
 * ISA port ranges.  The port-write order below follows the chip's
 * required initialization protocol and must not be reordered.
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	raw_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	raw_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host */
	i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
					   &i8259_host_ops, NULL);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that ? it seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
/*
 * i8259 interrupt controller driver.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * NOTE(review): from here down this file contains a second, older
 * concatenated copy of the same driver (pre-SPDX, NO_IRQ-based) —
 * apparently a scrape/merge artifact.  Only one copy can be compiled.
 */
9#undef DEBUG
10
11#include <linux/init.h>
12#include <linux/ioport.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/delay.h>
16#include <asm/io.h>
17#include <asm/i8259.h>
18#include <asm/prom.h>
19
/* Optional PCI host-bridge int-ack register; a read returns the active vector. */
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */

/* Cached copies of the two mask registers; all sources masked at boot (0xff). */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])	/* slave mask image (port 0xA1) */
#define cached_21 (cached_8259[1])	/* master mask image (port 0x21) */

/* Serializes all accesses to the controller ports and the cached masks. */
static DEFINE_RAW_SPINLOCK(i8259_lock);

/* Legacy irq domain registered by i8259_init(); NULL until then. */
static struct irq_domain *i8259_host;
29
30/*
31 * Acknowledge the IRQ using either the PCI host bridge's interrupt
32 * acknowledge feature or poll. How i8259_init() is called determines
33 * which is called. It should be noted that polling is broken on some
34 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
35 */
/*
 * Return the number of the pending interrupt, or NO_IRQ when the
 * interrupt turns out to be spurious (IRQ7 with ISR bit 7 clear, or an
 * 0xff int-ack read).
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		/* Poll mode touches the controller ports; serialize. */
		raw_spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1. */
		outb(0x0C, 0x20);		/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2 ) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		if (!pci_intack)
			outb(0x0B, 0x20); /* ISR register */
		if(~inb(0x20) & 0x80)
			irq = NO_IRQ;
	} else if (irq == 0xff)
		irq = NO_IRQ;

	if (lock)
		raw_spin_unlock(&i8259_lock);
	return irq;
}
80
/*
 * Mask the source and acknowledge it.  A slave source (irq 8-15) needs
 * an EOI on both controllers: the slave first, then the cascade input
 * on the master.  The order of the port writes below is significant.
 */
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq > 7) {
		cached_A1 |= 1 << (d->irq-8);
		inb(0xA1); 	/* DUMMY - presumably a settle delay; predates this review */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << d->irq;
		inb(0x21); 	/* DUMMY - presumably a settle delay; predates this review */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
100
/*
 * Flush both cached mask images out to the controllers.  @irq_nr is
 * unused (both bytes are always rewritten).  Caller holds i8259_lock.
 */
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1,0xA1);
	outb(cached_21,0x21);
}
106
/* Set the source's bit in the cached mask image and push it to the PIC. */
static void i8259_mask_irq(struct irq_data *d)
{
	unsigned long flags;

	pr_debug("i8259_mask_irq(%d)\n", d->irq);

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq < 8)
		cached_21 |= 1 << d->irq;
	else
		cached_A1 |= 1 << (d->irq-8);
	i8259_set_irq_mask(d->irq);
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
121
/* Clear the source's bit in the cached mask image and push it to the PIC. */
static void i8259_unmask_irq(struct irq_data *d)
{
	unsigned long flags;

	pr_debug("i8259_unmask_irq(%d)\n", d->irq);

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq < 8)
		cached_21 &= ~(1 << d->irq);
	else
		cached_A1 &= ~(1 << (d->irq-8));
	i8259_set_irq_mask(d->irq);
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
136
/* irq_chip backing every virq mapped through this domain. */
static struct irq_chip i8259_pic = {
	.name = "i8259",
	.irq_mask = i8259_mask_irq,
	.irq_disable = i8259_mask_irq,	/* disable == mask for this PIC */
	.irq_unmask = i8259_unmask_irq,
	.irq_mask_ack = i8259_mask_and_ack_irq,
};
144
145static struct resource pic1_iores = {
146 .name = "8259 (master)",
147 .start = 0x20,
148 .end = 0x21,
149 .flags = IORESOURCE_BUSY,
150};
151
152static struct resource pic2_iores = {
153 .name = "8259 (slave)",
154 .start = 0xa0,
155 .end = 0xa1,
156 .flags = IORESOURCE_BUSY,
157};
158
159static struct resource pic_edgectrl_iores = {
160 .name = "8259 edge control",
161 .start = 0x4d0,
162 .end = 0x4d1,
163 .flags = IORESOURCE_BUSY,
164};
165
166static int i8259_host_match(struct irq_domain *h, struct device_node *node)
167{
168 return h->of_node == NULL || h->of_node == node;
169}
170
/*
 * Domain map callback: wire a freshly-created virq to the i8259 chip.
 * hwirq 2 is the internal master->slave cascade and is made
 * unrequestable so drivers cannot claim it.
 */
static int i8259_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* We use the level handler only for now, we might want to
	 * be more cautious here but that works for now
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
187
/*
 * Domain xlate callback: decode an OF interrupt specifier.  Cell 0 is
 * the hw irq number; optional cell 1 is an ISA sense code (0-3) mapped
 * to a linux trigger type, defaulting to IRQ_TYPE_NONE.
 */
static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
			    const u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	static unsigned char map_isa_senses[4] = {
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
	};

	*out_hwirq = intspec[0];
	if (intsize > 1 && intspec[1] < 4)
		*out_flags = map_isa_senses[intspec[1]];
	else
		*out_flags = IRQ_TYPE_NONE;

	return 0;
}
207
/* irq_domain callbacks for the legacy i8259 host. */
static struct irq_domain_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.xlate = i8259_host_xlate,
};
213
/* Return the domain registered by i8259_init(); NULL before init. */
struct irq_domain *i8259_get_host(void)
{
	return i8259_host;
}
218
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259
 *
 * Programs both controllers' init sequences, unmasks only the internal
 * cascade, registers the legacy ISA irq domain and reserves the classic
 * ISA port ranges.  The port-write order below follows the chip's
 * required initialization protocol and must not be reordered.
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	raw_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	raw_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host */
	i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that ? it seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}