v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *
  4 * MMC software queue support based on command queue interfaces
  5 *
  6 * Copyright (C) 2019 Linaro, Inc.
  7 * Author: Baolin Wang <baolin.wang@linaro.org>
  8 */
  9
 10#include <linux/mmc/card.h>
 11#include <linux/mmc/host.h>
 12#include <linux/module.h>
 13
 14#include "mmc_hsq.h"
 15
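/*
 * Retry the current request from process context; scheduled when
 * request_atomic() reported that the card was busy (-EBUSY).
 */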
 16static void mmc_hsq_retry_handler(struct work_struct *work)
 17{
 18	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
 19	struct mmc_host *mmc = hsq->mmc;
 20
 21	mmc->ops->request(mmc, hsq->mrq);
 22}
 23
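/*
 * Tune the queue depth: bump mmc->hsq_depth from HSQ_NORMAL_DEPTH to
 * HSQ_PERFORMANCE_DEPTH when at least two queued 4KB write requests
 * are pending.
 */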
 24static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
 25{
 26	struct mmc_host *mmc = hsq->mmc;
 27	struct mmc_request *mrq;
 28	unsigned int tag, need_change = 0;
 29
 30	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
 31	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
 32		mrq = hsq->slot[tag].mrq;
 33		if (mrq && mrq->data &&
 34		   (mrq->data->blksz * mrq->data->blocks == 4096) &&
 35		   (mrq->data->flags & MMC_DATA_WRITE) &&
 36		   (++need_change == 2)) {
 37			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
 38			break;
 39		}
 40	}
 41}
 42
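/*
 * Dispatch the request at next_tag to the host controller, unless a
 * request is already in flight, the queue is empty or disabled, or the
 * queue is halted for recovery.
 */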
 43static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 44{
 45	struct mmc_host *mmc = hsq->mmc;
 46	struct hsq_slot *slot;
 47	unsigned long flags;
 48	int ret = 0;
 49
 50	spin_lock_irqsave(&hsq->lock, flags);
 51
 52	/* Make sure we are not already running a request now */
 53	if (hsq->mrq || hsq->recovery_halt) {
 54		spin_unlock_irqrestore(&hsq->lock, flags);
 55		return;
 56	}
 57
 58	/* Make sure there are remaining requests that need to be pumped */
 59	if (!hsq->qcnt || !hsq->enabled) {
 60		spin_unlock_irqrestore(&hsq->lock, flags);
 61		return;
 62	}
 63
 64	mmc_hsq_modify_threshold(hsq);
 65
 66	slot = &hsq->slot[hsq->next_tag];
 67	hsq->mrq = slot->mrq;
 68	hsq->qcnt--;
 69
 70	spin_unlock_irqrestore(&hsq->lock, flags);
 71
 72	if (mmc->ops->request_atomic)
 73		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
 74	else
 75		mmc->ops->request(mmc, hsq->mrq);
 76
 77	/*
 78	 * If request_atomic() returns BUSY, the card may be busy now, so we
 79	 * should switch to a non-atomic context and retry this unusual case
 80	 * there, to avoid doing time-consuming operations in the atomic
 81	 * context.
 82	 *
 83	 * Note: we just give a warning for other error cases, since the host
 84	 * driver will handle them.
 85	 */
 86	if (ret == -EBUSY)
 87		schedule_work(&hsq->retry_work);
 88	else
 89		WARN_ON_ONCE(ret);
 90}
 91
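/*
 * Advance next_tag to the tag linked after it in tag_slot[] (requests
 * are dispatched in submission order), or mark the queue empty with
 * HSQ_INVALID_TAG when nothing remains.
 */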
 92static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
 93{
 94	int tag;
 95
 96	/*
 97	 * If there are no remaining requests in the software queue, then set
 98	 * an invalid tag.
 99	 */
100	if (!remains) {
101		hsq->next_tag = HSQ_INVALID_TAG;
102		hsq->tail_tag = HSQ_INVALID_TAG;
103		return;
104	}
105
106	tag = hsq->tag_slot[hsq->next_tag];
107	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
108	hsq->next_tag = tag;
109}
110
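/*
 * Called after a request has been finalized: pick the next tag, wake up
 * anyone waiting for the queue to go idle, and pump the next request
 * unless recovery is in progress.
 */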
111static void mmc_hsq_post_request(struct mmc_hsq *hsq)
112{
113	unsigned long flags;
114	int remains;
115
116	spin_lock_irqsave(&hsq->lock, flags);
117
118	remains = hsq->qcnt;
119	hsq->mrq = NULL;
120
121	/* Update the next available tag to be queued. */
122	mmc_hsq_update_next_tag(hsq, remains);
123
124	if (hsq->waiting_for_idle && !remains) {
125		hsq->waiting_for_idle = false;
126		wake_up(&hsq->wait_queue);
127	}
128
129	/* Do not pump new requests in recovery mode. */
130	if (hsq->recovery_halt) {
131		spin_unlock_irqrestore(&hsq->lock, flags);
132		return;
133	}
134
135	spin_unlock_irqrestore(&hsq->lock, flags);
136
137	/*
138	 * Try to pump a new request to the host controller as fast as
139	 * possible after completing the previous request.
140	 */
141	if (remains > 0)
142		mmc_hsq_pump_requests(hsq);
143}
144
145/**
146 * mmc_hsq_finalize_request - finalize one request if the request is done
147 * @mmc: the host controller
148 * @mrq: the request to be finalized
149 *
150 * Return true if we finalized the corresponding request in the software
151 * queue, otherwise return false.
152 */
153bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
154{
155	struct mmc_hsq *hsq = mmc->cqe_private;
156	unsigned long flags;
157
158	spin_lock_irqsave(&hsq->lock, flags);
159
160	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
161		spin_unlock_irqrestore(&hsq->lock, flags);
162		return false;
163	}
164
165	/*
166	 * Clear the completed slot's request to make room for a new request.
167	 */
168	hsq->slot[hsq->next_tag].mrq = NULL;
169
170	spin_unlock_irqrestore(&hsq->lock, flags);
171
172	mmc_cqe_request_done(mmc, hsq->mrq);
173
174	mmc_hsq_post_request(hsq);
175
176	return true;
177}
178EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
179
180static void mmc_hsq_recovery_start(struct mmc_host *mmc)
181{
182	struct mmc_hsq *hsq = mmc->cqe_private;
183	unsigned long flags;
184
185	spin_lock_irqsave(&hsq->lock, flags);
186
187	hsq->recovery_halt = true;
188
189	spin_unlock_irqrestore(&hsq->lock, flags);
190}
191
192static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
193{
194	struct mmc_hsq *hsq = mmc->cqe_private;
195	int remains;
196
197	spin_lock_irq(&hsq->lock);
198
199	hsq->recovery_halt = false;
200	remains = hsq->qcnt;
201
202	spin_unlock_irq(&hsq->lock);
203
204	/*
205	 * Try to pump new requests if there are requests pending in the
206	 * software queue after finishing recovery.
207	 */
208	if (remains > 0)
209		mmc_hsq_pump_requests(hsq);
210}
211
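/*
 * ->cqe_request() hook: park the request in its slot, append its tag to
 * the dispatch list and try to pump it immediately.
 */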
212static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
213{
214	struct mmc_hsq *hsq = mmc->cqe_private;
215	int tag = mrq->tag;
216
217	spin_lock_irq(&hsq->lock);
218
219	if (!hsq->enabled) {
220		spin_unlock_irq(&hsq->lock);
221		return -ESHUTDOWN;
222	}
223
224	/* Do not queue any new requests in recovery mode. */
225	if (hsq->recovery_halt) {
226		spin_unlock_irq(&hsq->lock);
227		return -EBUSY;
228	}
229
230	hsq->slot[tag].mrq = mrq;
231
232	/*
233	 * Set the next tag to the current request's tag if no next tag is
234	 * available.
235	 */
236	if (hsq->next_tag == HSQ_INVALID_TAG) {
237		hsq->next_tag = tag;
238		hsq->tail_tag = tag;
239		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
240	} else {
241		hsq->tag_slot[hsq->tail_tag] = tag;
242		hsq->tail_tag = tag;
243	}
244
245	hsq->qcnt++;
246
247	spin_unlock_irq(&hsq->lock);
248
249	mmc_hsq_pump_requests(hsq);
250
251	return 0;
252}
253
254static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
255{
256	if (mmc->ops->post_req)
257		mmc->ops->post_req(mmc, mrq, 0);
258}
259
260static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
261{
262	bool is_idle;
263
264	spin_lock_irq(&hsq->lock);
265
266	is_idle = (!hsq->mrq && !hsq->qcnt) ||
267		hsq->recovery_halt;
268
269	*ret = hsq->recovery_halt ? -EBUSY : 0;
270	hsq->waiting_for_idle = !is_idle;
271
272	spin_unlock_irq(&hsq->lock);
273
274	return is_idle;
275}
276
277static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
278{
279	struct mmc_hsq *hsq = mmc->cqe_private;
280	int ret;
281
282	wait_event(hsq->wait_queue,
283		   mmc_hsq_queue_is_idle(hsq, &ret));
284
285	return ret;
286}
287
288static void mmc_hsq_disable(struct mmc_host *mmc)
289{
290	struct mmc_hsq *hsq = mmc->cqe_private;
291	u32 timeout = 500;
292	int ret;
293
294	spin_lock_irq(&hsq->lock);
295
296	if (!hsq->enabled) {
297		spin_unlock_irq(&hsq->lock);
298		return;
299	}
300
301	spin_unlock_irq(&hsq->lock);
302
303	ret = wait_event_timeout(hsq->wait_queue,
304				 mmc_hsq_queue_is_idle(hsq, &ret),
305				 msecs_to_jiffies(timeout));
306	if (ret == 0) {
307		pr_warn("could not stop mmc software queue\n");
308		return;
309	}
310
311	spin_lock_irq(&hsq->lock);
312
313	hsq->enabled = false;
314
315	spin_unlock_irq(&hsq->lock);
316}
317
318static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
319{
320	struct mmc_hsq *hsq = mmc->cqe_private;
321
322	spin_lock_irq(&hsq->lock);
323
324	if (hsq->enabled) {
325		spin_unlock_irq(&hsq->lock);
326		return -EBUSY;
327	}
328
329	hsq->enabled = true;
330
331	spin_unlock_irq(&hsq->lock);
332
333	return 0;
334}
335
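/* Software-queue implementation of the CQE host operations. */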
336static const struct mmc_cqe_ops mmc_hsq_ops = {
337	.cqe_enable = mmc_hsq_enable,
338	.cqe_disable = mmc_hsq_disable,
339	.cqe_request = mmc_hsq_request,
340	.cqe_post_req = mmc_hsq_post_req,
341	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
342	.cqe_recovery_start = mmc_hsq_recovery_start,
343	.cqe_recovery_finish = mmc_hsq_recovery_finish,
344};
345
346int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
347{
348	int i;
349	hsq->num_slots = HSQ_NUM_SLOTS;
350	hsq->next_tag = HSQ_INVALID_TAG;
351	hsq->tail_tag = HSQ_INVALID_TAG;
352
353	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
354				 sizeof(struct hsq_slot), GFP_KERNEL);
355	if (!hsq->slot)
356		return -ENOMEM;
357
358	hsq->mmc = mmc;
359	hsq->mmc->cqe_private = hsq;
360	mmc->cqe_ops = &mmc_hsq_ops;
361	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
362
363	for (i = 0; i < HSQ_NUM_SLOTS; i++)
364		hsq->tag_slot[i] = HSQ_INVALID_TAG;
365
366	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
367	spin_lock_init(&hsq->lock);
368	init_waitqueue_head(&hsq->wait_queue);
369
370	return 0;
371}
372EXPORT_SYMBOL_GPL(mmc_hsq_init);
373
374void mmc_hsq_suspend(struct mmc_host *mmc)
375{
376	mmc_hsq_disable(mmc);
377}
378EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
379
380int mmc_hsq_resume(struct mmc_host *mmc)
381{
382	return mmc_hsq_enable(mmc, NULL);
383}
384EXPORT_SYMBOL_GPL(mmc_hsq_resume);
385
386MODULE_DESCRIPTION("MMC Host Software Queue support");
387MODULE_LICENSE("GPL v2");
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *
  4 * MMC software queue support based on command queue interfaces
  5 *
  6 * Copyright (C) 2019 Linaro, Inc.
  7 * Author: Baolin Wang <baolin.wang@linaro.org>
  8 */
  9
 10#include <linux/mmc/card.h>
 11#include <linux/mmc/host.h>
 12#include <linux/module.h>
 13
 14#include "mmc_hsq.h"
 15
 16#define HSQ_NUM_SLOTS	64
 17#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS
 18
 19static void mmc_hsq_retry_handler(struct work_struct *work)
 20{
 21	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
 22	struct mmc_host *mmc = hsq->mmc;
 23
 24	mmc->ops->request(mmc, hsq->mrq);
 25}
 26
 27static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 28{
 29	struct mmc_host *mmc = hsq->mmc;
 30	struct hsq_slot *slot;
 31	unsigned long flags;
 32	int ret = 0;
 33
 34	spin_lock_irqsave(&hsq->lock, flags);
 35
 36	/* Make sure we are not already running a request now */
 37	if (hsq->mrq) {
 38		spin_unlock_irqrestore(&hsq->lock, flags);
 39		return;
 40	}
 41
 42	/* Make sure there are remaining requests that need to be pumped */
 43	if (!hsq->qcnt || !hsq->enabled) {
 44		spin_unlock_irqrestore(&hsq->lock, flags);
 45		return;
 46	}
 47
 48	slot = &hsq->slot[hsq->next_tag];
 49	hsq->mrq = slot->mrq;
 50	hsq->qcnt--;
 51
 52	spin_unlock_irqrestore(&hsq->lock, flags);
 53
 54	if (mmc->ops->request_atomic)
 55		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
 56	else
 57		mmc->ops->request(mmc, hsq->mrq);
 58
 59	/*
 60	 * If request_atomic() returns BUSY, the card may be busy now, so we
 61	 * should switch to a non-atomic context and retry this unusual case
 62	 * there, to avoid doing time-consuming operations in the atomic
 63	 * context.
 64	 *
 65	 * Note: we just give a warning for other error cases, since the host
 66	 * driver will handle them.
 67	 */
 68	if (ret == -EBUSY)
 69		schedule_work(&hsq->retry_work);
 70	else
 71		WARN_ON_ONCE(ret);
 72}
 73
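/*
 * In this version the next tag is found by trying next_tag + 1 first and
 * otherwise linearly scanning all slots for an occupied one; the v6.8
 * code above replaces this with the tag_slot[] list, which preserves
 * submission order.
 */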
 74static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
 75{
 76	struct hsq_slot *slot;
 77	int tag;
 78
 79	/*
 80	 * If there are no remaining requests in the software queue, then set
 81	 * an invalid tag.
 82	 */
 83	if (!remains) {
 84		hsq->next_tag = HSQ_INVALID_TAG;
 85		return;
 86	}
 87
 88	/*
 89	 * Increase the next tag and check whether the corresponding request
 90	 * is available; if so, we have found a candidate request.
 91	 */
 92	if (++hsq->next_tag != HSQ_INVALID_TAG) {
 93		slot = &hsq->slot[hsq->next_tag];
 94		if (slot->mrq)
 95			return;
 96	}
 97
 98	/* Otherwise we should iterate over all slots to find an available tag. */
 99	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
100		slot = &hsq->slot[tag];
101		if (slot->mrq)
102			break;
103	}
104
105	if (tag == HSQ_NUM_SLOTS)
106		tag = HSQ_INVALID_TAG;
107
108	hsq->next_tag = tag;
109}
110
111static void mmc_hsq_post_request(struct mmc_hsq *hsq)
112{
113	unsigned long flags;
114	int remains;
115
116	spin_lock_irqsave(&hsq->lock, flags);
117
118	remains = hsq->qcnt;
119	hsq->mrq = NULL;
120
121	/* Update the next available tag to be queued. */
122	mmc_hsq_update_next_tag(hsq, remains);
123
124	if (hsq->waiting_for_idle && !remains) {
125		hsq->waiting_for_idle = false;
126		wake_up(&hsq->wait_queue);
127	}
128
129	/* Do not pump new requests in recovery mode. */
130	if (hsq->recovery_halt) {
131		spin_unlock_irqrestore(&hsq->lock, flags);
132		return;
133	}
134
135	spin_unlock_irqrestore(&hsq->lock, flags);
136
137	/*
138	 * Try to pump a new request to the host controller as fast as
139	 * possible after completing the previous request.
140	 */
141	if (remains > 0)
142		mmc_hsq_pump_requests(hsq);
143}
144
145/**
146 * mmc_hsq_finalize_request - finalize one request if the request is done
147 * @mmc: the host controller
148 * @mrq: the request to be finalized
149 *
150 * Return true if we finalized the corresponding request in the software
151 * queue, otherwise return false.
152 */
153bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
154{
155	struct mmc_hsq *hsq = mmc->cqe_private;
156	unsigned long flags;
157
158	spin_lock_irqsave(&hsq->lock, flags);
159
160	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
161		spin_unlock_irqrestore(&hsq->lock, flags);
162		return false;
163	}
164
165	/*
166	 * Clear the completed slot's request to make room for a new request.
167	 */
168	hsq->slot[hsq->next_tag].mrq = NULL;
169
170	spin_unlock_irqrestore(&hsq->lock, flags);
171
172	mmc_cqe_request_done(mmc, hsq->mrq);
173
174	mmc_hsq_post_request(hsq);
175
176	return true;
177}
178EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
179
180static void mmc_hsq_recovery_start(struct mmc_host *mmc)
181{
182	struct mmc_hsq *hsq = mmc->cqe_private;
183	unsigned long flags;
184
185	spin_lock_irqsave(&hsq->lock, flags);
186
187	hsq->recovery_halt = true;
188
189	spin_unlock_irqrestore(&hsq->lock, flags);
190}
191
192static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
193{
194	struct mmc_hsq *hsq = mmc->cqe_private;
195	int remains;
196
197	spin_lock_irq(&hsq->lock);
198
199	hsq->recovery_halt = false;
200	remains = hsq->qcnt;
201
202	spin_unlock_irq(&hsq->lock);
203
204	/*
205	 * Try to pump new requests if there are requests pending in the
206	 * software queue after finishing recovery.
207	 */
208	if (remains > 0)
209		mmc_hsq_pump_requests(hsq);
210}
211
212static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
213{
214	struct mmc_hsq *hsq = mmc->cqe_private;
215	int tag = mrq->tag;
216
217	spin_lock_irq(&hsq->lock);
218
219	if (!hsq->enabled) {
220		spin_unlock_irq(&hsq->lock);
221		return -ESHUTDOWN;
222	}
223
224	/* Do not queue any new requests in recovery mode. */
225	if (hsq->recovery_halt) {
226		spin_unlock_irq(&hsq->lock);
227		return -EBUSY;
228	}
229
230	hsq->slot[tag].mrq = mrq;
231
232	/*
233	 * Set the next tag to the current request's tag if no next tag is
234	 * available.
235	 */
236	if (hsq->next_tag == HSQ_INVALID_TAG)
237		hsq->next_tag = tag;
238
239	hsq->qcnt++;
240
241	spin_unlock_irq(&hsq->lock);
242
243	mmc_hsq_pump_requests(hsq);
244
245	return 0;
246}
247
248static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
249{
250	if (mmc->ops->post_req)
251		mmc->ops->post_req(mmc, mrq, 0);
252}
253
254static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
255{
256	bool is_idle;
257
258	spin_lock_irq(&hsq->lock);
259
260	is_idle = (!hsq->mrq && !hsq->qcnt) ||
261		hsq->recovery_halt;
262
263	*ret = hsq->recovery_halt ? -EBUSY : 0;
264	hsq->waiting_for_idle = !is_idle;
265
266	spin_unlock_irq(&hsq->lock);
267
268	return is_idle;
269}
270
271static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
272{
273	struct mmc_hsq *hsq = mmc->cqe_private;
274	int ret;
275
276	wait_event(hsq->wait_queue,
277		   mmc_hsq_queue_is_idle(hsq, &ret));
278
279	return ret;
280}
281
282static void mmc_hsq_disable(struct mmc_host *mmc)
283{
284	struct mmc_hsq *hsq = mmc->cqe_private;
285	u32 timeout = 500;
286	int ret;
287
288	spin_lock_irq(&hsq->lock);
289
290	if (!hsq->enabled) {
291		spin_unlock_irq(&hsq->lock);
292		return;
293	}
294
295	spin_unlock_irq(&hsq->lock);
296
297	ret = wait_event_timeout(hsq->wait_queue,
298				 mmc_hsq_queue_is_idle(hsq, &ret),
299				 msecs_to_jiffies(timeout));
300	if (ret == 0) {
301		pr_warn("could not stop mmc software queue\n");
302		return;
303	}
304
305	spin_lock_irq(&hsq->lock);
306
307	hsq->enabled = false;
308
309	spin_unlock_irq(&hsq->lock);
310}
311
312static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
313{
314	struct mmc_hsq *hsq = mmc->cqe_private;
315
316	spin_lock_irq(&hsq->lock);
317
318	if (hsq->enabled) {
319		spin_unlock_irq(&hsq->lock);
320		return -EBUSY;
321	}
322
323	hsq->enabled = true;
324
325	spin_unlock_irq(&hsq->lock);
326
327	return 0;
328}
329
330static const struct mmc_cqe_ops mmc_hsq_ops = {
331	.cqe_enable = mmc_hsq_enable,
332	.cqe_disable = mmc_hsq_disable,
333	.cqe_request = mmc_hsq_request,
334	.cqe_post_req = mmc_hsq_post_req,
335	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
336	.cqe_recovery_start = mmc_hsq_recovery_start,
337	.cqe_recovery_finish = mmc_hsq_recovery_finish,
338};
339
340int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
341{
342	hsq->num_slots = HSQ_NUM_SLOTS;
343	hsq->next_tag = HSQ_INVALID_TAG;
344
345	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
346				 sizeof(struct hsq_slot), GFP_KERNEL);
347	if (!hsq->slot)
348		return -ENOMEM;
349
350	hsq->mmc = mmc;
351	hsq->mmc->cqe_private = hsq;
352	mmc->cqe_ops = &mmc_hsq_ops;
353
354	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
355	spin_lock_init(&hsq->lock);
356	init_waitqueue_head(&hsq->wait_queue);
357
358	return 0;
359}
360EXPORT_SYMBOL_GPL(mmc_hsq_init);
361
362void mmc_hsq_suspend(struct mmc_host *mmc)
363{
364	mmc_hsq_disable(mmc);
365}
366EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
367
368int mmc_hsq_resume(struct mmc_host *mmc)
369{
370	return mmc_hsq_enable(mmc, NULL);
371}
372EXPORT_SYMBOL_GPL(mmc_hsq_resume);
373
374MODULE_DESCRIPTION("MMC Host Software Queue support");
375MODULE_LICENSE("GPL v2");
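For reference, here is a minimal sketch of how a host controller driver might hook into the interfaces exported by this file (mmc_hsq_init(), mmc_hsq_finalize_request(), mmc_hsq_suspend() and mmc_hsq_resume()). The foo_* names are purely illustrative, and a real driver's capability, clock and DMA setup is omitted.

/* Hypothetical host driver glue; only the HSQ hook-up points are shown. */
#include <linux/device.h>
#include <linux/mmc/host.h>

#include "mmc_hsq.h"

struct foo_host {
	struct mmc_host *mmc;
	struct mmc_hsq *hsq;
};

/* Probe-time setup: allocate the queue and register it as the host's CQE. */
static int foo_setup_hsq(struct foo_host *foo)
{
	struct mmc_hsq *hsq;

	hsq = devm_kzalloc(mmc_dev(foo->mmc), sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	foo->hsq = hsq;

	/* Sets mmc->cqe_ops to mmc_hsq_ops and mmc->cqe_private to hsq. */
	return mmc_hsq_init(hsq, foo->mmc);
}

/* Completion path: let the software queue finalize and pump the next request. */
static void foo_request_done(struct foo_host *foo, struct mmc_request *mrq)
{
	if (mmc_hsq_finalize_request(foo->mmc, mrq))
		return;

	/* Not a software-queue request (or HSQ disabled): complete it normally. */
	mmc_request_done(foo->mmc, mrq);
}

/* System PM: drain the queue before suspend, re-enable it on resume. */
static int foo_suspend(struct foo_host *foo)
{
	mmc_hsq_suspend(foo->mmc);
	return 0;
}

static int foo_resume(struct foo_host *foo)
{
	return mmc_hsq_resume(foo->mmc);
}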