/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author: Nicolas Pitre
 * Created: June 18, 2007
 * Copyright: MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
static int process_sdio_pending_irqs(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        int i, ret, count;
        unsigned char pending;
        struct sdio_func *func;

        /*
         * Optimization: if there is only one function interrupt registered
         * and we know an IRQ was signaled, call the irq handler directly.
         * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
        if (func && host->sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }

        ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
        if (ret) {
                pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
                         mmc_card_id(card), ret);
                return ret;
        }

        if (pending && mmc_card_broken_irq_polling(card) &&
            !(host->caps & MMC_CAP_SDIO_IRQ)) {
                unsigned char dummy;

                /* A fake interrupt could be created when we poll the
                 * SDIO_CCCR_INTx register with a Marvell SD8797 card.
                 * A dummy CMD52 read to function 0 register 0xff can
                 * avoid this.
                 */
                mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
        }

        count = 0;
        for (i = 1; i <= 7; i++) {
                if (pending & (1 << i)) {
                        func = card->sdio_func[i - 1];
                        if (!func) {
                                pr_warn("%s: pending IRQ for non-existent function\n",
                                        mmc_card_id(card));
                                ret = -EINVAL;
                        } else if (func->irq_handler) {
                                func->irq_handler(func);
                                count++;
                        } else {
                                pr_warn("%s: pending IRQ with no handler\n",
                                        sdio_func_id(func));
                                ret = -EINVAL;
                        }
                }
        }

        if (count)
                return count;

        return ret;
}

void sdio_run_irqs(struct mmc_host *host)
{
        mmc_claim_host(host);
        host->sdio_irq_pending = true;
        process_sdio_pending_irqs(host);
        mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);
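
/*
 * Illustrative usage sketch, not part of this file: a host controller driver
 * that sets MMC_CAP2_SDIO_IRQ_NOTHREAD may call sdio_run_irqs() from its own
 * threaded interrupt handler instead of relying on the ksdioirqd polling
 * thread below.  The foo_* driver, its registers and field names are
 * hypothetical; only sdio_run_irqs() and the MMC capability flags come from
 * the core API.
 */

#define FOO_INT_STATUS  0x08            /* hypothetical status register */
#define FOO_INT_SDIO    BIT(16)         /* hypothetical card-interrupt bit */

struct foo_mmc_host {                   /* hypothetical driver state */
        struct mmc_host *mmc;
        void __iomem    *base;
};

static irqreturn_t foo_mmc_hardirq(int irq, void *dev_id)
{
        struct foo_mmc_host *priv = dev_id;

        /* Wake the threaded handler when the controller reports a card IRQ */
        if (readl(priv->base + FOO_INT_STATUS) & FOO_INT_SDIO)
                return IRQ_WAKE_THREAD;

        return IRQ_NONE;
}

static irqreturn_t foo_mmc_threaded_irq(int irq, void *dev_id)
{
        struct foo_mmc_host *priv = dev_id;

        /* Claims the host, reads SDIO_CCCR_INTx and dispatches to the
         * handlers registered with sdio_claim_irq() (see above). */
        sdio_run_irqs(priv->mmc);

        return IRQ_HANDLED;
}

/*
 * In probe such a driver would advertise, e.g.:
 *
 *      mmc->caps |= MMC_CAP_SDIO_IRQ;
 *      mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
 *      ret = request_threaded_irq(irq, foo_mmc_hardirq, foo_mmc_threaded_irq,
 *                                 IRQF_ONESHOT, "foo-mmc", priv);
 */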

static int sdio_irq_thread(void *_host)
{
        struct mmc_host *host = _host;
        struct sched_param param = { .sched_priority = 1 };
        unsigned long period, idle_period;
        int ret;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /*
         * We want to allow SDIO cards to work even on hosts that are not
         * SDIO aware.  One thing a non-SDIO-aware host cannot do is
         * asynchronous notification of pending SDIO card interrupts,
         * hence we poll for them in that case.
         */
        idle_period = msecs_to_jiffies(10);
        period = (host->caps & MMC_CAP_SDIO_IRQ) ?
                MAX_SCHEDULE_TIMEOUT : idle_period;

        pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
                 mmc_hostname(host), period);

        do {
                /*
                 * We claim the host here on the drivers' behalf for a couple
                 * of reasons:
                 *
                 * 1) it is already needed to retrieve the CCCR_INTx;
                 * 2) we want the driver(s) to clear the IRQ condition ASAP;
                 * 3) we need to control the abort condition locally.
                 *
                 * Just like traditional hard IRQ handlers, we expect SDIO
                 * IRQ handlers to be quick and to the point, so that the
                 * holding of the host lock does not cover too much work
                 * that doesn't require that lock to be held.
                 */
                ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
                if (ret)
                        break;
                ret = process_sdio_pending_irqs(host);
                host->sdio_irq_pending = false;
                mmc_release_host(host);

                /*
                 * Give other threads a chance to run in the presence of
                 * errors.
                 */
                if (ret < 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule_timeout(HZ);
                        set_current_state(TASK_RUNNING);
                }

                /*
                 * Adaptive polling frequency based on the assumption
                 * that an interrupt will be closely followed by more.
                 * This has a substantial benefit for network devices.
                 */
                if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
                        if (ret > 0)
                                period /= 2;
                        else {
                                period++;
                                if (period > idle_period)
                                        period = idle_period;
                        }
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
                if (!kthread_should_stop())
                        schedule_timeout(period);
                set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        if (host->caps & MMC_CAP_SDIO_IRQ)
                host->ops->enable_sdio_irq(host, 0);

        pr_debug("%s: IRQ thread exiting with code %d\n",
                 mmc_hostname(host), ret);

        return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);

        if (!host->sdio_irqs++) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 0);
                        host->sdio_irq_thread =
                                kthread_run(sdio_irq_thread, host,
                                            "ksdioirqd/%s", mmc_hostname(host));
                        if (IS_ERR(host->sdio_irq_thread)) {
                                int err = PTR_ERR(host->sdio_irq_thread);
                                host->sdio_irqs--;
                                return err;
                        }
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 1);
                }
        }

        return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);
        BUG_ON(host->sdio_irqs < 1);

        if (!--host->sdio_irqs) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 1);
                        kthread_stop(host->sdio_irq_thread);
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 0);
                }
        }

        return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
        struct sdio_func *func;
        int i;

        card->sdio_single_irq = NULL;
        if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
            card->host->sdio_irqs == 1)
                for (i = 0; i < card->sdio_funcs; i++) {
                        func = card->sdio_func[i];
                        if (func && func->irq_handler) {
                                card->sdio_single_irq = func;
                                break;
                        }
                }
}

/**
 * sdio_claim_irq - claim the IRQ for an SDIO function
 * @func: SDIO function
 * @handler: IRQ handler callback
 *
 * Claim and activate the IRQ for the given SDIO function. The provided
 * handler will be called when that IRQ is asserted. The host is always
 * claimed already when the handler is called so the handler must not
 * call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
        int ret;
        unsigned char reg;

        BUG_ON(!func);
        BUG_ON(!func->card);

        pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
                return -EBUSY;
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg |= 1 << func->num;

        reg |= 1; /* Master interrupt enable */

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        func->irq_handler = handler;
        ret = sdio_card_irq_get(func->card);
        if (ret)
                func->irq_handler = NULL;
        sdio_single_irq_set(func->card);

        return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);

/**
 * sdio_release_irq - release the IRQ for an SDIO function
 * @func: SDIO function
 *
 * Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
        int ret;
        unsigned char reg;

        BUG_ON(!func);
        BUG_ON(!func->card);

        pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                func->irq_handler = NULL;
                sdio_card_irq_put(func->card);
                sdio_single_irq_set(func->card);
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg &= ~(1 << func->num);

        /* Disable master interrupt with the last function interrupt */
        if (!(reg & 0xFE))
                reg = 0;

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);

/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author: Nicolas Pitre
 * Created: June 18, 2007
 * Copyright: MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
#include "core.h"
#include "card.h"
static int process_sdio_pending_irqs(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        int i, ret, count;
        unsigned char pending;
        struct sdio_func *func;

        /*
         * Optimization: if there is only one function interrupt registered
         * and we know an IRQ was signaled, call the irq handler directly.
         * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
        if (func && host->sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }

        ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
        if (ret) {
                pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
                         mmc_card_id(card), ret);
                return ret;
        }

        if (pending && mmc_card_broken_irq_polling(card) &&
            !(host->caps & MMC_CAP_SDIO_IRQ)) {
                unsigned char dummy;

                /* A fake interrupt could be created when we poll the
                 * SDIO_CCCR_INTx register with a Marvell SD8797 card.
                 * A dummy CMD52 read to function 0 register 0xff can
                 * avoid this.
                 */
                mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
        }

        count = 0;
        for (i = 1; i <= 7; i++) {
                if (pending & (1 << i)) {
                        func = card->sdio_func[i - 1];
                        if (!func) {
                                pr_warn("%s: pending IRQ for non-existent function\n",
                                        mmc_card_id(card));
                                ret = -EINVAL;
                        } else if (func->irq_handler) {
                                func->irq_handler(func);
                                count++;
                        } else {
                                pr_warn("%s: pending IRQ with no handler\n",
                                        sdio_func_id(func));
                                ret = -EINVAL;
                        }
                }
        }

        if (count)
                return count;

        return ret;
}

void sdio_run_irqs(struct mmc_host *host)
{
        mmc_claim_host(host);
        if (host->sdio_irqs) {
                host->sdio_irq_pending = true;
                process_sdio_pending_irqs(host);
                if (host->ops->ack_sdio_irq)
                        host->ops->ack_sdio_irq(host);
        }
        mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);

void sdio_irq_work(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, sdio_irq_work.work);

        sdio_run_irqs(host);
}

void sdio_signal_irq(struct mmc_host *host)
{
        queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
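
/*
 * Illustrative usage sketch, not part of this file: with
 * MMC_CAP2_SDIO_IRQ_NOTHREAD, a host controller driver can report a card
 * interrupt from its hard IRQ handler by calling sdio_signal_irq(), which
 * schedules sdio_irq_work() above.  Once the pending function interrupts have
 * been dispatched, sdio_run_irqs() invokes the driver's ->ack_sdio_irq()
 * callback so the card interrupt source can be re-enabled.  The foo_* driver,
 * its registers and field names are hypothetical; only the core calls and the
 * mmc_host_ops members are real.
 */

#define FOO_INT_STATUS  0x08            /* hypothetical status register */
#define FOO_INT_MASK    0x0c            /* hypothetical mask register */
#define FOO_INT_SDIO    BIT(16)         /* hypothetical card-interrupt bit */

struct foo_mmc_host {                   /* hypothetical driver state */
        struct mmc_host *mmc;
        void __iomem    *base;
};

static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
{
        struct foo_mmc_host *priv = dev_id;

        if (readl(priv->base + FOO_INT_STATUS) & FOO_INT_SDIO) {
                /* Mask the card interrupt until the core has handled it */
                writel(FOO_INT_SDIO, priv->base + FOO_INT_MASK);
                sdio_signal_irq(priv->mmc);
        }

        return IRQ_HANDLED;
}

static void foo_mmc_ack_sdio_irq(struct mmc_host *mmc)
{
        struct foo_mmc_host *priv = mmc_priv(mmc);

        /* Called from sdio_run_irqs() after the handlers have run */
        writel(0, priv->base + FOO_INT_MASK);
}

/*
 * The callback is hooked up alongside ->enable_sdio_irq(), and the host
 * advertises MMC_CAP_SDIO_IRQ together with MMC_CAP2_SDIO_IRQ_NOTHREAD:
 *
 *      static const struct mmc_host_ops foo_mmc_ops = {
 *              .enable_sdio_irq = foo_mmc_enable_sdio_irq,
 *              .ack_sdio_irq    = foo_mmc_ack_sdio_irq,
 *              ...
 *      };
 */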

static int sdio_irq_thread(void *_host)
{
        struct mmc_host *host = _host;
        struct sched_param param = { .sched_priority = 1 };
        unsigned long period, idle_period;
        int ret;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /*
         * We want to allow SDIO cards to work even on hosts that are not
         * SDIO aware.  One thing a non-SDIO-aware host cannot do is
         * asynchronous notification of pending SDIO card interrupts,
         * hence we poll for them in that case.
         */
        idle_period = msecs_to_jiffies(10);
        period = (host->caps & MMC_CAP_SDIO_IRQ) ?
                MAX_SCHEDULE_TIMEOUT : idle_period;

        pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
                 mmc_hostname(host), period);

        do {
                /*
                 * We claim the host here on the drivers' behalf for a couple
                 * of reasons:
                 *
                 * 1) it is already needed to retrieve the CCCR_INTx;
                 * 2) we want the driver(s) to clear the IRQ condition ASAP;
                 * 3) we need to control the abort condition locally.
                 *
                 * Just like traditional hard IRQ handlers, we expect SDIO
                 * IRQ handlers to be quick and to the point, so that the
                 * holding of the host lock does not cover too much work
                 * that doesn't require that lock to be held.
                 */
                ret = __mmc_claim_host(host, NULL,
                                       &host->sdio_irq_thread_abort);
                if (ret)
                        break;
                ret = process_sdio_pending_irqs(host);
                host->sdio_irq_pending = false;
                mmc_release_host(host);

                /*
                 * Give other threads a chance to run in the presence of
                 * errors.
                 */
                if (ret < 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule_timeout(HZ);
                        set_current_state(TASK_RUNNING);
                }

                /*
                 * Adaptive polling frequency based on the assumption
                 * that an interrupt will be closely followed by more.
                 * This has a substantial benefit for network devices.
                 */
                if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
                        if (ret > 0)
                                period /= 2;
                        else {
                                period++;
                                if (period > idle_period)
                                        period = idle_period;
                        }
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
                if (!kthread_should_stop())
                        schedule_timeout(period);
                set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        if (host->caps & MMC_CAP_SDIO_IRQ)
                host->ops->enable_sdio_irq(host, 0);

        pr_debug("%s: IRQ thread exiting with code %d\n",
                 mmc_hostname(host), ret);

        return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);

        if (!host->sdio_irqs++) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 0);
                        host->sdio_irq_thread =
                                kthread_run(sdio_irq_thread, host,
                                            "ksdioirqd/%s", mmc_hostname(host));
                        if (IS_ERR(host->sdio_irq_thread)) {
                                int err = PTR_ERR(host->sdio_irq_thread);
                                host->sdio_irqs--;
                                return err;
                        }
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 1);
                }
        }

        return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);

        if (host->sdio_irqs < 1)
                return -EINVAL;

        if (!--host->sdio_irqs) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 1);
                        kthread_stop(host->sdio_irq_thread);
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 0);
                }
        }

        return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
        struct sdio_func *func;
        int i;

        card->sdio_single_irq = NULL;
        if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
            card->host->sdio_irqs == 1)
                for (i = 0; i < card->sdio_funcs; i++) {
                        func = card->sdio_func[i];
                        if (func && func->irq_handler) {
                                card->sdio_single_irq = func;
                                break;
                        }
                }
}

/**
 * sdio_claim_irq - claim the IRQ for an SDIO function
 * @func: SDIO function
 * @handler: IRQ handler callback
 *
 * Claim and activate the IRQ for the given SDIO function. The provided
 * handler will be called when that IRQ is asserted. The host is always
 * claimed already when the handler is called so the handler should not
 * call sdio_claim_host() or sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
        int ret;
        unsigned char reg;

        if (!func)
                return -EINVAL;

        pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
                return -EBUSY;
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg |= 1 << func->num;

        reg |= 1; /* Master interrupt enable */

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        func->irq_handler = handler;
        ret = sdio_card_irq_get(func->card);
        if (ret)
                func->irq_handler = NULL;
        sdio_single_irq_set(func->card);

        return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);
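
/*
 * Illustrative usage sketch, not part of this file: an SDIO function driver
 * registers its interrupt handler from probe, with the host claimed around
 * the call as required above.  The foo_* names are hypothetical; the sdio_*
 * calls are the real core API.
 */

static void foo_sdio_irq(struct sdio_func *func)
{
        /*
         * Runs with the host already claimed: read and acknowledge the
         * card's own interrupt status (e.g. with sdio_readb()) and defer
         * any heavy lifting, but never call sdio_claim_host() here.
         */
}

static int foo_sdio_probe(struct sdio_func *func,
                          const struct sdio_device_id *id)
{
        int ret;

        sdio_claim_host(func);

        ret = sdio_enable_func(func);
        if (ret)
                goto out;

        ret = sdio_claim_irq(func, foo_sdio_irq);
        if (ret)
                sdio_disable_func(func);

out:
        sdio_release_host(func);
        return ret;
}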

/**
 * sdio_release_irq - release the IRQ for an SDIO function
 * @func: SDIO function
 *
 * Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
        int ret;
        unsigned char reg;

        if (!func)
                return -EINVAL;

        pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                func->irq_handler = NULL;
                sdio_card_irq_put(func->card);
                sdio_single_irq_set(func->card);
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg &= ~(1 << func->num);

        /* Disable master interrupt with the last function interrupt */
        if (!(reg & 0xFE))
                reg = 0;

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
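
/*
 * Illustrative usage sketch, not part of this file: the matching remove path
 * of the hypothetical foo_* function driver above releases the IRQ before
 * disabling the function, again with the host claimed.
 */

static void foo_sdio_remove(struct sdio_func *func)
{
        sdio_claim_host(func);
        sdio_release_irq(func);
        sdio_disable_func(func);
        sdio_release_host(func);
}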