// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

static void mmc_hsq_retry_handler(struct work_struct *work)
{
        struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
        struct mmc_host *mmc = hsq->mmc;

        mmc->ops->request(mmc, hsq->mrq);
}

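/*
 * Adjust the advertised software queue depth to the pending workload: start
 * from HSQ_NORMAL_DEPTH, and once at least two 4KB write requests are sitting
 * in the slots, raise mmc->hsq_depth to HSQ_PERFORMANCE_DEPTH so that more
 * requests can be kept queued for dispatch.
 */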
static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct mmc_request *mrq;
        unsigned int tag, need_change = 0;

        mmc->hsq_depth = HSQ_NORMAL_DEPTH;
        for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
                mrq = hsq->slot[tag].mrq;
                if (mrq && mrq->data &&
                    (mrq->data->blksz * mrq->data->blocks == 4096) &&
                    (mrq->data->flags & MMC_DATA_WRITE) &&
                    (++need_change == 2)) {
                        mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
                        break;
                }
        }
}

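/*
 * Dispatch the next queued request to the host controller, preferring the
 * host's request_atomic() hook when one is provided.
 */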
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Make sure we are not already running a request now */
        if (hsq->mrq || hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Make sure there are remaining requests that need to be pumped */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        mmc_hsq_modify_threshold(hsq);

        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        if (mmc->ops->request_atomic)
                ret = mmc->ops->request_atomic(mmc, hsq->mrq);
        else
                mmc->ops->request(mmc, hsq->mrq);

        /*
         * If request_atomic() returns -EBUSY, the card may be busy now, so
         * switch to a non-atomic context to retry this unusual case, which
         * avoids time-consuming operations in the atomic context.
         *
         * Note: we just give a warning for other error cases, since the host
         * driver will handle them.
         */
        if (ret == -EBUSY)
                schedule_work(&hsq->retry_work);
        else
                WARN_ON_ONCE(ret);
}

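/*
 * The tag_slot[] array holds the "next" links of the pending-tag FIFO:
 * tag_slot[t] is the tag queued after tag t, with the list running from
 * next_tag to tail_tag. Advance next_tag to the following entry, or mark
 * the queue empty when nothing remains.
 */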
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        int tag;

        /*
         * If there are no remaining requests in the software queue, then set
         * an invalid tag.
         */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                hsq->tail_tag = HSQ_INVALID_TAG;
                return;
        }

        tag = hsq->tag_slot[hsq->next_tag];
        hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
        hsq->next_tag = tag;
}

static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Update the next available tag to be queued. */
        mmc_hsq_update_next_tag(hsq, remains);

        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Do not pump new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        /*
         * Try to pump a new request to the host controller as fast as
         * possible after completing the previous request.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /*
         * Clear the completed slot's request to make room for a new request.
         */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /*
         * Try to pump new requests if there are requests pending in the
         * software queue after finishing recovery.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

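/*
 * Queue one request into the software queue: store it in the slot indexed
 * by mrq->tag, link the tag at the tail of the pending-tag FIFO, and then
 * try to pump it to the host controller.
 */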
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Do not queue any new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /*
         * Set the next tag to the current request's tag if there is no
         * available next tag.
         */
        if (hsq->next_tag == HSQ_INVALID_TAG) {
                hsq->next_tag = tag;
                hsq->tail_tag = tag;
                hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
        } else {
                hsq->tag_slot[hsq->tail_tag] = tag;
                hsq->tail_tag = tag;
        }

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        mmc_hsq_pump_requests(hsq);

        return 0;
}

static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        if (mmc->ops->post_req)
                mmc->ops->post_req(mmc, mrq, 0);
}

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                hsq->recovery_halt;

        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}

static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
        .cqe_enable = mmc_hsq_enable,
        .cqe_disable = mmc_hsq_disable,
        .cqe_request = mmc_hsq_request,
        .cqe_post_req = mmc_hsq_post_req,
        .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
        .cqe_recovery_start = mmc_hsq_recovery_start,
        .cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        int i;

        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;
        hsq->tail_tag = HSQ_INVALID_TAG;

        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        mmc->cqe_ops = &mmc_hsq_ops;
        mmc->hsq_depth = HSQ_NORMAL_DEPTH;

        for (i = 0; i < HSQ_NUM_SLOTS; i++)
                hsq->tag_slot[i] = HSQ_INVALID_TAG;

        INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

373
374void mmc_hsq_suspend(struct mmc_host *mmc)
375{
376 mmc_hsq_disable(mmc);
377}
378EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
379
380int mmc_hsq_resume(struct mmc_host *mmc)
381{
382 return mmc_hsq_enable(mmc, NULL);
383}
384EXPORT_SYMBOL_GPL(mmc_hsq_resume);
385
386MODULE_DESCRIPTION("MMC Host Software Queue support");
387MODULE_LICENSE("GPL v2");
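
/*
 * A minimal sketch of how a host controller driver might wire up the
 * software queue, kept inside a comment so it does not affect this file's
 * build. The my_* names are hypothetical; only the mmc_hsq_* calls below
 * are part of the API exported by this file.
 *
 *	static void my_host_request_done(struct my_host *host,
 *					 struct mmc_request *mrq)
 *	{
 *		// Let the software queue finalize its own requests first.
 *		if (mmc_hsq_finalize_request(host->mmc, mrq))
 *			return;
 *
 *		mmc_request_done(host->mmc, mrq);
 *	}
 *
 *	static int my_host_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_hsq *hsq;
 *		...
 *		hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *		if (!hsq)
 *			return -ENOMEM;
 *
 *		// Installs mmc_hsq_ops as the host's cqe_ops.
 *		return mmc_hsq_init(hsq, host->mmc);
 *	}
 *
 * System suspend/resume callbacks would call mmc_hsq_suspend() and
 * mmc_hsq_resume() to quiesce and re-enable the queue.
 */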