Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * MMC software queue support based on command queue interfaces
5 *
6 * Copyright (C) 2019 Linaro, Inc.
7 * Author: Baolin Wang <baolin.wang@linaro.org>
8 */
9
10#include <linux/mmc/card.h>
11#include <linux/mmc/host.h>
12#include <linux/module.h>
13
14#include "mmc_hsq.h"
15
/*
 * Retry a request that got -EBUSY from ->request_atomic(), from process
 * (workqueue) context where the host driver is allowed to sleep.
 */
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}
23
24static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
25{
26 struct mmc_host *mmc = hsq->mmc;
27 struct hsq_slot *slot;
28 unsigned long flags;
29 int ret = 0;
30
31 spin_lock_irqsave(&hsq->lock, flags);
32
33 /* Make sure we are not already running a request now */
34 if (hsq->mrq || hsq->recovery_halt) {
35 spin_unlock_irqrestore(&hsq->lock, flags);
36 return;
37 }
38
39 /* Make sure there are remain requests need to pump */
40 if (!hsq->qcnt || !hsq->enabled) {
41 spin_unlock_irqrestore(&hsq->lock, flags);
42 return;
43 }
44
45 slot = &hsq->slot[hsq->next_tag];
46 hsq->mrq = slot->mrq;
47 hsq->qcnt--;
48
49 spin_unlock_irqrestore(&hsq->lock, flags);
50
51 if (mmc->ops->request_atomic)
52 ret = mmc->ops->request_atomic(mmc, hsq->mrq);
53 else
54 mmc->ops->request(mmc, hsq->mrq);
55
56 /*
57 * If returning BUSY from request_atomic(), which means the card
58 * may be busy now, and we should change to non-atomic context to
59 * try again for this unusual case, to avoid time-consuming operations
60 * in the atomic context.
61 *
62 * Note: we just give a warning for other error cases, since the host
63 * driver will handle them.
64 */
65 if (ret == -EBUSY)
66 schedule_work(&hsq->retry_work);
67 else
68 WARN_ON_ONCE(ret);
69}
70
71static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
72{
73 int tag;
74
75 /*
76 * If there are no remain requests in software queue, then set a invalid
77 * tag.
78 */
79 if (!remains) {
80 hsq->next_tag = HSQ_INVALID_TAG;
81 hsq->tail_tag = HSQ_INVALID_TAG;
82 return;
83 }
84
85 tag = hsq->tag_slot[hsq->next_tag];
86 hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
87 hsq->next_tag = tag;
88}
89
90static void mmc_hsq_post_request(struct mmc_hsq *hsq)
91{
92 unsigned long flags;
93 int remains;
94
95 spin_lock_irqsave(&hsq->lock, flags);
96
97 remains = hsq->qcnt;
98 hsq->mrq = NULL;
99
100 /* Update the next available tag to be queued. */
101 mmc_hsq_update_next_tag(hsq, remains);
102
103 if (hsq->waiting_for_idle && !remains) {
104 hsq->waiting_for_idle = false;
105 wake_up(&hsq->wait_queue);
106 }
107
108 /* Do not pump new request in recovery mode. */
109 if (hsq->recovery_halt) {
110 spin_unlock_irqrestore(&hsq->lock, flags);
111 return;
112 }
113
114 spin_unlock_irqrestore(&hsq->lock, flags);
115
116 /*
117 * Try to pump new request to host controller as fast as possible,
118 * after completing previous request.
119 */
120 if (remains > 0)
121 mmc_hsq_pump_requests(hsq);
122}
123
124/**
125 * mmc_hsq_finalize_request - finalize one request if the request is done
126 * @mmc: the host controller
127 * @mrq: the request need to be finalized
128 *
129 * Return true if we finalized the corresponding request in software queue,
130 * otherwise return false.
131 */
132bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
133{
134 struct mmc_hsq *hsq = mmc->cqe_private;
135 unsigned long flags;
136
137 spin_lock_irqsave(&hsq->lock, flags);
138
139 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
140 spin_unlock_irqrestore(&hsq->lock, flags);
141 return false;
142 }
143
144 /*
145 * Clear current completed slot request to make a room for new request.
146 */
147 hsq->slot[hsq->next_tag].mrq = NULL;
148
149 spin_unlock_irqrestore(&hsq->lock, flags);
150
151 mmc_cqe_request_done(mmc, hsq->mrq);
152
153 mmc_hsq_post_request(hsq);
154
155 return true;
156}
157EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
158
159static void mmc_hsq_recovery_start(struct mmc_host *mmc)
160{
161 struct mmc_hsq *hsq = mmc->cqe_private;
162 unsigned long flags;
163
164 spin_lock_irqsave(&hsq->lock, flags);
165
166 hsq->recovery_halt = true;
167
168 spin_unlock_irqrestore(&hsq->lock, flags);
169}
170
171static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
172{
173 struct mmc_hsq *hsq = mmc->cqe_private;
174 int remains;
175
176 spin_lock_irq(&hsq->lock);
177
178 hsq->recovery_halt = false;
179 remains = hsq->qcnt;
180
181 spin_unlock_irq(&hsq->lock);
182
183 /*
184 * Try to pump new request if there are request pending in software
185 * queue after finishing recovery.
186 */
187 if (remains > 0)
188 mmc_hsq_pump_requests(hsq);
189}
190
191static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
192{
193 struct mmc_hsq *hsq = mmc->cqe_private;
194 int tag = mrq->tag;
195
196 spin_lock_irq(&hsq->lock);
197
198 if (!hsq->enabled) {
199 spin_unlock_irq(&hsq->lock);
200 return -ESHUTDOWN;
201 }
202
203 /* Do not queue any new requests in recovery mode. */
204 if (hsq->recovery_halt) {
205 spin_unlock_irq(&hsq->lock);
206 return -EBUSY;
207 }
208
209 hsq->slot[tag].mrq = mrq;
210
211 /*
212 * Set the next tag as current request tag if no available
213 * next tag.
214 */
215 if (hsq->next_tag == HSQ_INVALID_TAG) {
216 hsq->next_tag = tag;
217 hsq->tail_tag = tag;
218 hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
219 } else {
220 hsq->tag_slot[hsq->tail_tag] = tag;
221 hsq->tail_tag = tag;
222 }
223
224 hsq->qcnt++;
225
226 spin_unlock_irq(&hsq->lock);
227
228 mmc_hsq_pump_requests(hsq);
229
230 return 0;
231}
232
233static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
234{
235 if (mmc->ops->post_req)
236 mmc->ops->post_req(mmc, mrq, 0);
237}
238
239static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
240{
241 bool is_idle;
242
243 spin_lock_irq(&hsq->lock);
244
245 is_idle = (!hsq->mrq && !hsq->qcnt) ||
246 hsq->recovery_halt;
247
248 *ret = hsq->recovery_halt ? -EBUSY : 0;
249 hsq->waiting_for_idle = !is_idle;
250
251 spin_unlock_irq(&hsq->lock);
252
253 return is_idle;
254}
255
256static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
257{
258 struct mmc_hsq *hsq = mmc->cqe_private;
259 int ret;
260
261 wait_event(hsq->wait_queue,
262 mmc_hsq_queue_is_idle(hsq, &ret));
263
264 return ret;
265}
266
267static void mmc_hsq_disable(struct mmc_host *mmc)
268{
269 struct mmc_hsq *hsq = mmc->cqe_private;
270 u32 timeout = 500;
271 int ret;
272
273 spin_lock_irq(&hsq->lock);
274
275 if (!hsq->enabled) {
276 spin_unlock_irq(&hsq->lock);
277 return;
278 }
279
280 spin_unlock_irq(&hsq->lock);
281
282 ret = wait_event_timeout(hsq->wait_queue,
283 mmc_hsq_queue_is_idle(hsq, &ret),
284 msecs_to_jiffies(timeout));
285 if (ret == 0) {
286 pr_warn("could not stop mmc software queue\n");
287 return;
288 }
289
290 spin_lock_irq(&hsq->lock);
291
292 hsq->enabled = false;
293
294 spin_unlock_irq(&hsq->lock);
295}
296
297static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
298{
299 struct mmc_hsq *hsq = mmc->cqe_private;
300
301 spin_lock_irq(&hsq->lock);
302
303 if (hsq->enabled) {
304 spin_unlock_irq(&hsq->lock);
305 return -EBUSY;
306 }
307
308 hsq->enabled = true;
309
310 spin_unlock_irq(&hsq->lock);
311
312 return 0;
313}
314
/* Hook the software queue into the generic MMC CQE interface. */
static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
324
/*
 * mmc_hsq_init - initialize the software queue and attach it to @mmc
 *
 * Allocates the slot array (devm-managed, lives as long as the host
 * device), resets the tag FIFO to empty, and installs mmc_hsq_ops as the
 * host's CQE operations. Returns 0 on success or -ENOMEM.
 */
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	int i;
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;
	hsq->tail_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	/* Empty tag FIFO: every link points nowhere. */
	for (i = 0; i < HSQ_NUM_SLOTS; i++)
		hsq->tag_slot[i] = HSQ_INVALID_TAG;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
351
/* Drain and disable the software queue for system/runtime suspend. */
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
357
/* Re-enable the software queue after resume; -EBUSY if already enabled. */
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
363
364MODULE_DESCRIPTION("MMC Host Software Queue support");
365MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * MMC software queue support based on command queue interfaces
5 *
6 * Copyright (C) 2019 Linaro, Inc.
7 * Author: Baolin Wang <baolin.wang@linaro.org>
8 */
9
10#include <linux/mmc/card.h>
11#include <linux/mmc/host.h>
12#include <linux/module.h>
13
14#include "mmc_hsq.h"
15
16#define HSQ_NUM_SLOTS 64
17#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
18
/*
 * Retry a request that got -EBUSY from ->request_atomic(), from process
 * (workqueue) context where the host driver is allowed to sleep.
 */
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}
26
27static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
28{
29 struct mmc_host *mmc = hsq->mmc;
30 struct hsq_slot *slot;
31 unsigned long flags;
32 int ret = 0;
33
34 spin_lock_irqsave(&hsq->lock, flags);
35
36 /* Make sure we are not already running a request now */
37 if (hsq->mrq) {
38 spin_unlock_irqrestore(&hsq->lock, flags);
39 return;
40 }
41
42 /* Make sure there are remain requests need to pump */
43 if (!hsq->qcnt || !hsq->enabled) {
44 spin_unlock_irqrestore(&hsq->lock, flags);
45 return;
46 }
47
48 slot = &hsq->slot[hsq->next_tag];
49 hsq->mrq = slot->mrq;
50 hsq->qcnt--;
51
52 spin_unlock_irqrestore(&hsq->lock, flags);
53
54 if (mmc->ops->request_atomic)
55 ret = mmc->ops->request_atomic(mmc, hsq->mrq);
56 else
57 mmc->ops->request(mmc, hsq->mrq);
58
59 /*
60 * If returning BUSY from request_atomic(), which means the card
61 * may be busy now, and we should change to non-atomic context to
62 * try again for this unusual case, to avoid time-consuming operations
63 * in the atomic context.
64 *
65 * Note: we just give a warning for other error cases, since the host
66 * driver will handle them.
67 */
68 if (ret == -EBUSY)
69 schedule_work(&hsq->retry_work);
70 else
71 WARN_ON_ONCE(ret);
72}
73
74static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
75{
76 struct hsq_slot *slot;
77 int tag;
78
79 /*
80 * If there are no remain requests in software queue, then set a invalid
81 * tag.
82 */
83 if (!remains) {
84 hsq->next_tag = HSQ_INVALID_TAG;
85 return;
86 }
87
88 /*
89 * Increasing the next tag and check if the corresponding request is
90 * available, if yes, then we found a candidate request.
91 */
92 if (++hsq->next_tag != HSQ_INVALID_TAG) {
93 slot = &hsq->slot[hsq->next_tag];
94 if (slot->mrq)
95 return;
96 }
97
98 /* Othersie we should iterate all slots to find a available tag. */
99 for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
100 slot = &hsq->slot[tag];
101 if (slot->mrq)
102 break;
103 }
104
105 if (tag == HSQ_NUM_SLOTS)
106 tag = HSQ_INVALID_TAG;
107
108 hsq->next_tag = tag;
109}
110
/*
 * Book-keeping after a request is finalized: clears the in-flight
 * request, picks the next tag to dispatch, wakes any idle waiter, and
 * pumps the next queued request unless recovery is in progress.
 */
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	/* Wake up mmc_hsq_wait_for_idle()/mmc_hsq_disable() waiters. */
	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump new request to host controller as fast as possible,
	 * after completing previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
144
145/**
146 * mmc_hsq_finalize_request - finalize one request if the request is done
147 * @mmc: the host controller
148 * @mrq: the request need to be finalized
149 *
150 * Return true if we finalized the corresponding request in software queue,
151 * otherwise return false.
152 */
153bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
154{
155 struct mmc_hsq *hsq = mmc->cqe_private;
156 unsigned long flags;
157
158 spin_lock_irqsave(&hsq->lock, flags);
159
160 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
161 spin_unlock_irqrestore(&hsq->lock, flags);
162 return false;
163 }
164
165 /*
166 * Clear current completed slot request to make a room for new request.
167 */
168 hsq->slot[hsq->next_tag].mrq = NULL;
169
170 spin_unlock_irqrestore(&hsq->lock, flags);
171
172 mmc_cqe_request_done(mmc, hsq->mrq);
173
174 mmc_hsq_post_request(hsq);
175
176 return true;
177}
178EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
179
/*
 * Enter recovery mode: halt queueing of new requests until
 * mmc_hsq_recovery_finish() is called.
 */
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}
191
/*
 * Leave recovery mode and restart the pump if requests piled up in the
 * software queue while it was halted.
 */
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new request if there are request pending in software
	 * queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
211
212static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
213{
214 struct mmc_hsq *hsq = mmc->cqe_private;
215 int tag = mrq->tag;
216
217 spin_lock_irq(&hsq->lock);
218
219 if (!hsq->enabled) {
220 spin_unlock_irq(&hsq->lock);
221 return -ESHUTDOWN;
222 }
223
224 /* Do not queue any new requests in recovery mode. */
225 if (hsq->recovery_halt) {
226 spin_unlock_irq(&hsq->lock);
227 return -EBUSY;
228 }
229
230 hsq->slot[tag].mrq = mrq;
231
232 /*
233 * Set the next tag as current request tag if no available
234 * next tag.
235 */
236 if (hsq->next_tag == HSQ_INVALID_TAG)
237 hsq->next_tag = tag;
238
239 hsq->qcnt++;
240
241 spin_unlock_irq(&hsq->lock);
242
243 mmc_hsq_pump_requests(hsq);
244
245 return 0;
246}
247
/* Let the host driver post-process (e.g. unmap) a request, if supported. */
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}
253
/*
 * Check, under the queue lock, whether no request is in flight and the
 * software queue is empty. Recovery mode reports "idle" with *ret set to
 * -EBUSY so waiters are not blocked on a halted queue. Also arms the
 * waiting_for_idle flag consumed by mmc_hsq_post_request().
 */
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}
270
/* Sleep until every queued request has been dispatched and completed. */
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}
281
282static void mmc_hsq_disable(struct mmc_host *mmc)
283{
284 struct mmc_hsq *hsq = mmc->cqe_private;
285 u32 timeout = 500;
286 int ret;
287
288 spin_lock_irq(&hsq->lock);
289
290 if (!hsq->enabled) {
291 spin_unlock_irq(&hsq->lock);
292 return;
293 }
294
295 spin_unlock_irq(&hsq->lock);
296
297 ret = wait_event_timeout(hsq->wait_queue,
298 mmc_hsq_queue_is_idle(hsq, &ret),
299 msecs_to_jiffies(timeout));
300 if (ret == 0) {
301 pr_warn("could not stop mmc software queue\n");
302 return;
303 }
304
305 spin_lock_irq(&hsq->lock);
306
307 hsq->enabled = false;
308
309 spin_unlock_irq(&hsq->lock);
310}
311
/* Enable the software queue; returns -EBUSY if it is already enabled. */
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}
329
/* Hook the software queue into the generic MMC CQE interface. */
static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
339
/*
 * mmc_hsq_init - initialize the software queue and attach it to @mmc
 *
 * Allocates the slot array (devm-managed, lives as long as the host
 * device) and installs mmc_hsq_ops as the host's CQE operations.
 * Returns 0 on success or -ENOMEM.
 */
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
361
/* Drain and disable the software queue for system/runtime suspend. */
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
367
/* Re-enable the software queue after resume; -EBUSY if already enabled. */
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
373
374MODULE_DESCRIPTION("MMC Host Software Queue support");
375MODULE_LICENSE("GPL v2");