v3.5.6
  1/*
  2 *  linux/drivers/mmc/card/queue.c
  3 *
  4 *  Copyright (C) 2003 Russell King, All Rights Reserved.
  5 *  Copyright 2006-2007 Pierre Ossman
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 */
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/blkdev.h>
 15#include <linux/freezer.h>
 16#include <linux/kthread.h>
 17#include <linux/scatterlist.h>
 18
 19#include <linux/mmc/card.h>
 20#include <linux/mmc/host.h>
 21#include "queue.h"
 22
 23#define MMC_QUEUE_BOUNCESZ	65536
 24
 25#define MMC_QUEUE_SUSPENDED	(1 << 0)
 26
 27/*
 28 * Prepare a MMC request. This just filters out odd stuff.
 29 */
 30static int mmc_prep_request(struct request_queue *q, struct request *req)
 31{
 32	struct mmc_queue *mq = q->queuedata;
 33
 34	/*
 35	 * We only like normal block requests and discards.
 36	 */
 37	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
 38		blk_dump_rq_flags(req, "MMC bad request");
 39		return BLKPREP_KILL;
 40	}
 41
 42	if (mq && mmc_card_removed(mq->card))
 43		return BLKPREP_KILL;
 44
 45	req->cmd_flags |= REQ_DONTPREP;
 46
 47	return BLKPREP_OK;
 48}
 49
 50static int mmc_queue_thread(void *d)
 51{
 52	struct mmc_queue *mq = d;
 53	struct request_queue *q = mq->queue;
 54
 55	current->flags |= PF_MEMALLOC;
 56
 57	down(&mq->thread_sem);
 58	do {
 59		struct request *req = NULL;
 60		struct mmc_queue_req *tmp;
 61
 62		spin_lock_irq(q->queue_lock);
 63		set_current_state(TASK_INTERRUPTIBLE);
 64		req = blk_fetch_request(q);
 65		mq->mqrq_cur->req = req;
 66		spin_unlock_irq(q->queue_lock);
 67
 68		if (req || mq->mqrq_prev->req) {
 69			set_current_state(TASK_RUNNING);
 70			mq->issue_fn(mq, req);
 71		} else {
 72			if (kthread_should_stop()) {
 73				set_current_state(TASK_RUNNING);
 74				break;
 75			}
 76			up(&mq->thread_sem);
 77			schedule();
 78			down(&mq->thread_sem);
 79		}
 80
 81		/* Current request becomes previous request and vice versa. */
 82		mq->mqrq_prev->brq.mrq.data = NULL;
 83		mq->mqrq_prev->req = NULL;
 84		tmp = mq->mqrq_prev;
 85		mq->mqrq_prev = mq->mqrq_cur;
 86		mq->mqrq_cur = tmp;
 87	} while (1);
 88	up(&mq->thread_sem);
 89
 90	return 0;
 91}
 92
 93/*
 94 * Generic MMC request handler.  This is called for any queue on a
 95 * particular host.  When the host is not busy, we look for a request
 96 * on any queue on this host, and attempt to issue it.  This may
 97 * not be the queue we were asked to process.
 98 */
 99static void mmc_request_fn(struct request_queue *q)
100{
101	struct mmc_queue *mq = q->queuedata;
102	struct request *req;
103
104	if (!mq) {
105		while ((req = blk_fetch_request(q)) != NULL) {
106			req->cmd_flags |= REQ_QUIET;
107			__blk_end_request_all(req, -EIO);
108		}
109		return;
110	}
111
112	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
113		wake_up_process(mq->thread);
114}
115
116static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
117{
118	struct scatterlist *sg;
119
120	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
121	if (!sg)
122		*err = -ENOMEM;
123	else {
124		*err = 0;
125		sg_init_table(sg, sg_len);
126	}
127
128	return sg;
129}
130
131static void mmc_queue_setup_discard(struct request_queue *q,
132				    struct mmc_card *card)
133{
134	unsigned max_discard;
135
136	max_discard = mmc_calc_max_discard(card);
137	if (!max_discard)
138		return;
139
140	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
141	q->limits.max_discard_sectors = max_discard;
142	if (card->erased_byte == 0 && !mmc_can_discard(card))
143		q->limits.discard_zeroes_data = 1;
144	q->limits.discard_granularity = card->pref_erase << 9;
145	/* granularity must not be greater than max. discard */
146	if (card->pref_erase > max_discard)
147		q->limits.discard_granularity = 0;
148	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
149		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
150}
151
152/**
153 * mmc_init_queue - initialise a queue structure.
154 * @mq: mmc queue
155 * @card: mmc card to attach this queue
156 * @lock: queue lock
157 * @subname: partition subname
158 *
159 * Initialise a MMC card request queue.
160 */
161int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
162		   spinlock_t *lock, const char *subname)
163{
164	struct mmc_host *host = card->host;
165	u64 limit = BLK_BOUNCE_HIGH;
166	int ret;
167	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
168	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
169
170	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
171		limit = *mmc_dev(host)->dma_mask;
172
173	mq->card = card;
174	mq->queue = blk_init_queue(mmc_request_fn, lock);
175	if (!mq->queue)
176		return -ENOMEM;
177
178	mq->mqrq_cur = mqrq_cur;
179	mq->mqrq_prev = mqrq_prev;
180	mq->queue->queuedata = mq;
181
182	blk_queue_prep_rq(mq->queue, mmc_prep_request);
183	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
184	if (mmc_can_erase(card))
185		mmc_queue_setup_discard(mq->queue, card);
186
187#ifdef CONFIG_MMC_BLOCK_BOUNCE
188	if (host->max_segs == 1) {
189		unsigned int bouncesz;
190
191		bouncesz = MMC_QUEUE_BOUNCESZ;
192
193		if (bouncesz > host->max_req_size)
194			bouncesz = host->max_req_size;
195		if (bouncesz > host->max_seg_size)
196			bouncesz = host->max_seg_size;
197		if (bouncesz > (host->max_blk_count * 512))
198			bouncesz = host->max_blk_count * 512;
199
200		if (bouncesz > 512) {
201			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
202			if (!mqrq_cur->bounce_buf) {
203				pr_warning("%s: unable to "
204					"allocate bounce cur buffer\n",
205					mmc_card_name(card));
206			}
207			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
208			if (!mqrq_prev->bounce_buf) {
209				pr_warning("%s: unable to "
210					"allocate bounce prev buffer\n",
211					mmc_card_name(card));
212				kfree(mqrq_cur->bounce_buf);
213				mqrq_cur->bounce_buf = NULL;
214			}
215		}
216
217		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
218			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
219			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
220			blk_queue_max_segments(mq->queue, bouncesz / 512);
221			blk_queue_max_segment_size(mq->queue, bouncesz);
222
223			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
224			if (ret)
225				goto cleanup_queue;
226
227			mqrq_cur->bounce_sg =
228				mmc_alloc_sg(bouncesz / 512, &ret);
229			if (ret)
230				goto cleanup_queue;
231
232			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
233			if (ret)
234				goto cleanup_queue;
235
236			mqrq_prev->bounce_sg =
237				mmc_alloc_sg(bouncesz / 512, &ret);
238			if (ret)
239				goto cleanup_queue;
240		}
241	}
242#endif
243
244	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
245		blk_queue_bounce_limit(mq->queue, limit);
246		blk_queue_max_hw_sectors(mq->queue,
247			min(host->max_blk_count, host->max_req_size / 512));
248		blk_queue_max_segments(mq->queue, host->max_segs);
249		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
250
251		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
252		if (ret)
253			goto cleanup_queue;
254
255
256		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
257		if (ret)
258			goto cleanup_queue;
259	}
260
261	sema_init(&mq->thread_sem, 1);
262
263	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
264		host->index, subname ? subname : "");
265
266	if (IS_ERR(mq->thread)) {
267		ret = PTR_ERR(mq->thread);
268		goto free_bounce_sg;
269	}
270
271	return 0;
272 free_bounce_sg:
273	kfree(mqrq_cur->bounce_sg);
274	mqrq_cur->bounce_sg = NULL;
275	kfree(mqrq_prev->bounce_sg);
276	mqrq_prev->bounce_sg = NULL;
277
278 cleanup_queue:
279	kfree(mqrq_cur->sg);
280	mqrq_cur->sg = NULL;
281	kfree(mqrq_cur->bounce_buf);
282	mqrq_cur->bounce_buf = NULL;
283
284	kfree(mqrq_prev->sg);
285	mqrq_prev->sg = NULL;
286	kfree(mqrq_prev->bounce_buf);
287	mqrq_prev->bounce_buf = NULL;
288
289	blk_cleanup_queue(mq->queue);
290	return ret;
291}
292
293void mmc_cleanup_queue(struct mmc_queue *mq)
294{
295	struct request_queue *q = mq->queue;
296	unsigned long flags;
297	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
298	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
299
300	/* Make sure the queue isn't suspended, as that will deadlock */
301	mmc_queue_resume(mq);
302
303	/* Then terminate our worker thread */
304	kthread_stop(mq->thread);
305
306	/* Empty the queue */
307	spin_lock_irqsave(q->queue_lock, flags);
308	q->queuedata = NULL;
309	blk_start_queue(q);
310	spin_unlock_irqrestore(q->queue_lock, flags);
311
312	kfree(mqrq_cur->bounce_sg);
313	mqrq_cur->bounce_sg = NULL;
314
315	kfree(mqrq_cur->sg);
316	mqrq_cur->sg = NULL;
317
318	kfree(mqrq_cur->bounce_buf);
319	mqrq_cur->bounce_buf = NULL;
320
321	kfree(mqrq_prev->bounce_sg);
322	mqrq_prev->bounce_sg = NULL;
323
324	kfree(mqrq_prev->sg);
325	mqrq_prev->sg = NULL;
326
327	kfree(mqrq_prev->bounce_buf);
328	mqrq_prev->bounce_buf = NULL;
329
330	mq->card = NULL;
331}
332EXPORT_SYMBOL(mmc_cleanup_queue);
333
334/**
335 * mmc_queue_suspend - suspend a MMC request queue
336 * @mq: MMC queue to suspend
337 *
338 * Stop the block request queue, and wait for our thread to
339 * complete any outstanding requests.  This ensures that we
340 * won't suspend while a request is being processed.
341 */
342void mmc_queue_suspend(struct mmc_queue *mq)
343{
344	struct request_queue *q = mq->queue;
345	unsigned long flags;
346
347	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
348		mq->flags |= MMC_QUEUE_SUSPENDED;
349
350		spin_lock_irqsave(q->queue_lock, flags);
351		blk_stop_queue(q);
352		spin_unlock_irqrestore(q->queue_lock, flags);
353
354		down(&mq->thread_sem);
355	}
356}
357
358/**
359 * mmc_queue_resume - resume a previously suspended MMC request queue
360 * @mq: MMC queue to resume
361 */
362void mmc_queue_resume(struct mmc_queue *mq)
363{
364	struct request_queue *q = mq->queue;
365	unsigned long flags;
366
367	if (mq->flags & MMC_QUEUE_SUSPENDED) {
368		mq->flags &= ~MMC_QUEUE_SUSPENDED;
369
370		up(&mq->thread_sem);
371
372		spin_lock_irqsave(q->queue_lock, flags);
373		blk_start_queue(q);
374		spin_unlock_irqrestore(q->queue_lock, flags);
375	}
376}
377
378/*
379 * Prepare the sg list(s) to be handed off to the host driver
380 */
381unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
382{
383	unsigned int sg_len;
384	size_t buflen;
385	struct scatterlist *sg;
386	int i;
387
388	if (!mqrq->bounce_buf)
389		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
390
391	BUG_ON(!mqrq->bounce_sg);
392
393	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
394
395	mqrq->bounce_sg_len = sg_len;
396
397	buflen = 0;
398	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
399		buflen += sg->length;
400
401	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
402
403	return 1;
404}
405
406/*
407 * If writing, bounce the data to the buffer before the request
408 * is sent to the host driver
409 */
410void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
411{
412	if (!mqrq->bounce_buf)
413		return;
414
415	if (rq_data_dir(mqrq->req) != WRITE)
416		return;
417
418	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
419		mqrq->bounce_buf, mqrq->sg[0].length);
420}
421
422/*
423 * If reading, bounce the data from the buffer after the request
424 * has been handled by the host driver
425 */
426void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
427{
428	if (!mqrq->bounce_buf)
429		return;
430
431	if (rq_data_dir(mqrq->req) != READ)
432		return;
433
434	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
435		mqrq->bounce_buf, mqrq->sg[0].length);
436}
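
The bounce-buffer helpers at the end of the v3.5.6 listing above bracket a single data transfer: mmc_queue_map_sg() builds the scatterlist (collapsing it to one segment covering the bounce buffer when one is in use), mmc_queue_bounce_pre() stages write data into that contiguous buffer before the request is issued, and mmc_queue_bounce_post() copies read data back out afterwards. The fragment below is a minimal sketch of that call order only, loosely modelled on the block driver's issue path; example_issue_data_request() and the elided issuing step are illustrative, not upstream code.

/* Illustrative sketch, not upstream code: the intended call order around
 * one data request when a bounce buffer is in use.  Assumes the same
 * headers as queue.c ("queue.h", <linux/blkdev.h>, <linux/scatterlist.h>). */
static void example_issue_data_request(struct mmc_queue *mq,
				       struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;

	/* Map the block request; with a bounce buffer this yields a single
	 * segment over mqrq->bounce_buf, while bounce_sg_len records the
	 * length of the real per-page list. */
	sg_len = mmc_queue_map_sg(mq, mqrq);

	/* Writes: stage the payload into the contiguous bounce buffer first. */
	mmc_queue_bounce_pre(mqrq);

	/* ... hand the request off to the host controller and wait here;
	 * a real caller plugs sg_len and mqrq->sg into the mmc_data it issues ... */

	/* Reads: copy the completed data back out of the bounce buffer. */
	mmc_queue_bounce_post(mqrq);
}

The single-segment collapse is what lets hosts limited to one segment (the host->max_segs == 1 case guarded by CONFIG_MMC_BLOCK_BOUNCE above) still service multi-segment block requests.
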
v4.6
  1/*
  2 *  linux/drivers/mmc/card/queue.c
  3 *
  4 *  Copyright (C) 2003 Russell King, All Rights Reserved.
  5 *  Copyright 2006-2007 Pierre Ossman
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 */
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/blkdev.h>
 15#include <linux/freezer.h>
 16#include <linux/kthread.h>
 17#include <linux/scatterlist.h>
 18#include <linux/dma-mapping.h>
 19
 20#include <linux/mmc/card.h>
 21#include <linux/mmc/host.h>
 22#include "queue.h"
 23
 24#define MMC_QUEUE_BOUNCESZ	65536
 25
 26/*
 27 * Prepare a MMC request. This just filters out odd stuff.
 28 */
 29static int mmc_prep_request(struct request_queue *q, struct request *req)
 30{
 31	struct mmc_queue *mq = q->queuedata;
 32
 33	/*
 34	 * We only like normal block requests and discards.
 35	 */
 36	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
 37		blk_dump_rq_flags(req, "MMC bad request");
 38		return BLKPREP_KILL;
 39	}
 40
 41	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
 42		return BLKPREP_KILL;
 43
 44	req->cmd_flags |= REQ_DONTPREP;
 45
 46	return BLKPREP_OK;
 47}
 48
 49static int mmc_queue_thread(void *d)
 50{
 51	struct mmc_queue *mq = d;
 52	struct request_queue *q = mq->queue;
 53
 54	current->flags |= PF_MEMALLOC;
 55
 56	down(&mq->thread_sem);
 57	do {
 58		struct request *req = NULL;
 59		unsigned int cmd_flags = 0;
 60
 61		spin_lock_irq(q->queue_lock);
 62		set_current_state(TASK_INTERRUPTIBLE);
 63		req = blk_fetch_request(q);
 64		mq->mqrq_cur->req = req;
 65		spin_unlock_irq(q->queue_lock);
 66
 67		if (req || mq->mqrq_prev->req) {
 68			set_current_state(TASK_RUNNING);
 69			cmd_flags = req ? req->cmd_flags : 0;
 70			mq->issue_fn(mq, req);
 71			cond_resched();
 72			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
 73				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
 74				continue; /* fetch again */
 75			}
 76
 77			/*
 78			 * Current request becomes previous request
 79			 * and vice versa.
 80			 * In case of special requests, current request
 81			 * has been finished. Do not assign it to previous
 82			 * request.
 83			 */
 84			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
 85				mq->mqrq_cur->req = NULL;
 86
 87			mq->mqrq_prev->brq.mrq.data = NULL;
 88			mq->mqrq_prev->req = NULL;
 89			swap(mq->mqrq_prev, mq->mqrq_cur);
 90		} else {
 91			if (kthread_should_stop()) {
 92				set_current_state(TASK_RUNNING);
 93				break;
 94			}
 95			up(&mq->thread_sem);
 96			schedule();
 97			down(&mq->thread_sem);
 98		}
 99	} while (1);
100	up(&mq->thread_sem);
101
102	return 0;
103}
104
105/*
106 * Generic MMC request handler.  This is called for any queue on a
107 * particular host.  When the host is not busy, we look for a request
108 * on any queue on this host, and attempt to issue it.  This may
109 * not be the queue we were asked to process.
110 */
111static void mmc_request_fn(struct request_queue *q)
112{
113	struct mmc_queue *mq = q->queuedata;
114	struct request *req;
115	unsigned long flags;
116	struct mmc_context_info *cntx;
117
118	if (!mq) {
119		while ((req = blk_fetch_request(q)) != NULL) {
120			req->cmd_flags |= REQ_QUIET;
121			__blk_end_request_all(req, -EIO);
122		}
123		return;
124	}
125
126	cntx = &mq->card->host->context_info;
127	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
128		/*
129		 * New MMC request arrived when MMC thread may be
130		 * blocked on the previous request to be complete
131		 * with no current request fetched
132		 */
133		spin_lock_irqsave(&cntx->lock, flags);
134		if (cntx->is_waiting_last_req) {
135			cntx->is_new_req = true;
136			wake_up_interruptible(&cntx->wait);
137		}
138		spin_unlock_irqrestore(&cntx->lock, flags);
139	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
140		wake_up_process(mq->thread);
141}
142
143static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
144{
145	struct scatterlist *sg;
146
147	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
148	if (!sg)
149		*err = -ENOMEM;
150	else {
151		*err = 0;
152		sg_init_table(sg, sg_len);
153	}
154
155	return sg;
156}
157
158static void mmc_queue_setup_discard(struct request_queue *q,
159				    struct mmc_card *card)
160{
161	unsigned max_discard;
162
163	max_discard = mmc_calc_max_discard(card);
164	if (!max_discard)
165		return;
166
167	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
168	blk_queue_max_discard_sectors(q, max_discard);
169	if (card->erased_byte == 0 && !mmc_can_discard(card))
170		q->limits.discard_zeroes_data = 1;
171	q->limits.discard_granularity = card->pref_erase << 9;
172	/* granularity must not be greater than max. discard */
173	if (card->pref_erase > max_discard)
174		q->limits.discard_granularity = 0;
175	if (mmc_can_secure_erase_trim(card))
176		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
177}
178
179/**
180 * mmc_init_queue - initialise a queue structure.
181 * @mq: mmc queue
182 * @card: mmc card to attach this queue
183 * @lock: queue lock
184 * @subname: partition subname
185 *
186 * Initialise a MMC card request queue.
187 */
188int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
189		   spinlock_t *lock, const char *subname)
190{
191	struct mmc_host *host = card->host;
192	u64 limit = BLK_BOUNCE_HIGH;
193	int ret;
194	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
195	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
196
197	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
198		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
199
200	mq->card = card;
201	mq->queue = blk_init_queue(mmc_request_fn, lock);
202	if (!mq->queue)
203		return -ENOMEM;
204
205	mq->mqrq_cur = mqrq_cur;
206	mq->mqrq_prev = mqrq_prev;
207	mq->queue->queuedata = mq;
208
209	blk_queue_prep_rq(mq->queue, mmc_prep_request);
210	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
211	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
212	if (mmc_can_erase(card))
213		mmc_queue_setup_discard(mq->queue, card);
214
215#ifdef CONFIG_MMC_BLOCK_BOUNCE
216	if (host->max_segs == 1) {
217		unsigned int bouncesz;
218
219		bouncesz = MMC_QUEUE_BOUNCESZ;
220
221		if (bouncesz > host->max_req_size)
222			bouncesz = host->max_req_size;
223		if (bouncesz > host->max_seg_size)
224			bouncesz = host->max_seg_size;
225		if (bouncesz > (host->max_blk_count * 512))
226			bouncesz = host->max_blk_count * 512;
227
228		if (bouncesz > 512) {
229			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
230			if (!mqrq_cur->bounce_buf) {
231				pr_warn("%s: unable to allocate bounce cur buffer\n",
232					mmc_card_name(card));
233			} else {
234				mqrq_prev->bounce_buf =
235						kmalloc(bouncesz, GFP_KERNEL);
236				if (!mqrq_prev->bounce_buf) {
237					pr_warn("%s: unable to allocate bounce prev buffer\n",
238						mmc_card_name(card));
239					kfree(mqrq_cur->bounce_buf);
240					mqrq_cur->bounce_buf = NULL;
241				}
242			}
243		}
244
245		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
246			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
247			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
248			blk_queue_max_segments(mq->queue, bouncesz / 512);
249			blk_queue_max_segment_size(mq->queue, bouncesz);
250
251			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
252			if (ret)
253				goto cleanup_queue;
254
255			mqrq_cur->bounce_sg =
256				mmc_alloc_sg(bouncesz / 512, &ret);
257			if (ret)
258				goto cleanup_queue;
259
260			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
261			if (ret)
262				goto cleanup_queue;
263
264			mqrq_prev->bounce_sg =
265				mmc_alloc_sg(bouncesz / 512, &ret);
266			if (ret)
267				goto cleanup_queue;
268		}
269	}
270#endif
271
272	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
273		blk_queue_bounce_limit(mq->queue, limit);
274		blk_queue_max_hw_sectors(mq->queue,
275			min(host->max_blk_count, host->max_req_size / 512));
276		blk_queue_max_segments(mq->queue, host->max_segs);
277		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
278
279		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
280		if (ret)
281			goto cleanup_queue;
282
283
284		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
285		if (ret)
286			goto cleanup_queue;
287	}
288
289	sema_init(&mq->thread_sem, 1);
290
291	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
292		host->index, subname ? subname : "");
293
294	if (IS_ERR(mq->thread)) {
295		ret = PTR_ERR(mq->thread);
296		goto free_bounce_sg;
297	}
298
299	return 0;
300 free_bounce_sg:
301	kfree(mqrq_cur->bounce_sg);
302	mqrq_cur->bounce_sg = NULL;
303	kfree(mqrq_prev->bounce_sg);
304	mqrq_prev->bounce_sg = NULL;
305
306 cleanup_queue:
307	kfree(mqrq_cur->sg);
308	mqrq_cur->sg = NULL;
309	kfree(mqrq_cur->bounce_buf);
310	mqrq_cur->bounce_buf = NULL;
311
312	kfree(mqrq_prev->sg);
313	mqrq_prev->sg = NULL;
314	kfree(mqrq_prev->bounce_buf);
315	mqrq_prev->bounce_buf = NULL;
316
317	blk_cleanup_queue(mq->queue);
318	return ret;
319}
320
321void mmc_cleanup_queue(struct mmc_queue *mq)
322{
323	struct request_queue *q = mq->queue;
324	unsigned long flags;
325	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
326	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
327
328	/* Make sure the queue isn't suspended, as that will deadlock */
329	mmc_queue_resume(mq);
330
331	/* Then terminate our worker thread */
332	kthread_stop(mq->thread);
333
334	/* Empty the queue */
335	spin_lock_irqsave(q->queue_lock, flags);
336	q->queuedata = NULL;
337	blk_start_queue(q);
338	spin_unlock_irqrestore(q->queue_lock, flags);
339
340	kfree(mqrq_cur->bounce_sg);
341	mqrq_cur->bounce_sg = NULL;
342
343	kfree(mqrq_cur->sg);
344	mqrq_cur->sg = NULL;
345
346	kfree(mqrq_cur->bounce_buf);
347	mqrq_cur->bounce_buf = NULL;
348
349	kfree(mqrq_prev->bounce_sg);
350	mqrq_prev->bounce_sg = NULL;
351
352	kfree(mqrq_prev->sg);
353	mqrq_prev->sg = NULL;
354
355	kfree(mqrq_prev->bounce_buf);
356	mqrq_prev->bounce_buf = NULL;
357
358	mq->card = NULL;
359}
360EXPORT_SYMBOL(mmc_cleanup_queue);
361
362int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
363{
364	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
365	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
366	int ret = 0;
367
368
369	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
370	if (!mqrq_cur->packed) {
371		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
372			mmc_card_name(card));
373		ret = -ENOMEM;
374		goto out;
375	}
376
377	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
378	if (!mqrq_prev->packed) {
379		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
380			mmc_card_name(card));
381		kfree(mqrq_cur->packed);
382		mqrq_cur->packed = NULL;
383		ret = -ENOMEM;
384		goto out;
385	}
386
387	INIT_LIST_HEAD(&mqrq_cur->packed->list);
388	INIT_LIST_HEAD(&mqrq_prev->packed->list);
389
390out:
391	return ret;
392}
393
394void mmc_packed_clean(struct mmc_queue *mq)
395{
396	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
397	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
398
399	kfree(mqrq_cur->packed);
400	mqrq_cur->packed = NULL;
401	kfree(mqrq_prev->packed);
402	mqrq_prev->packed = NULL;
403}
404
405/**
406 * mmc_queue_suspend - suspend a MMC request queue
407 * @mq: MMC queue to suspend
408 *
409 * Stop the block request queue, and wait for our thread to
410 * complete any outstanding requests.  This ensures that we
411 * won't suspend while a request is being processed.
412 */
413void mmc_queue_suspend(struct mmc_queue *mq)
414{
415	struct request_queue *q = mq->queue;
416	unsigned long flags;
417
418	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
419		mq->flags |= MMC_QUEUE_SUSPENDED;
420
421		spin_lock_irqsave(q->queue_lock, flags);
422		blk_stop_queue(q);
423		spin_unlock_irqrestore(q->queue_lock, flags);
424
425		down(&mq->thread_sem);
426	}
427}
428
429/**
430 * mmc_queue_resume - resume a previously suspended MMC request queue
431 * @mq: MMC queue to resume
432 */
433void mmc_queue_resume(struct mmc_queue *mq)
434{
435	struct request_queue *q = mq->queue;
436	unsigned long flags;
437
438	if (mq->flags & MMC_QUEUE_SUSPENDED) {
439		mq->flags &= ~MMC_QUEUE_SUSPENDED;
440
441		up(&mq->thread_sem);
442
443		spin_lock_irqsave(q->queue_lock, flags);
444		blk_start_queue(q);
445		spin_unlock_irqrestore(q->queue_lock, flags);
446	}
447}
448
449static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
450					    struct mmc_packed *packed,
451					    struct scatterlist *sg,
452					    enum mmc_packed_type cmd_type)
453{
454	struct scatterlist *__sg = sg;
455	unsigned int sg_len = 0;
456	struct request *req;
457
458	if (mmc_packed_wr(cmd_type)) {
459		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
460		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
461		unsigned int len, remain, offset = 0;
462		u8 *buf = (u8 *)packed->cmd_hdr;
463
464		remain = hdr_sz;
465		do {
466			len = min(remain, max_seg_sz);
467			sg_set_buf(__sg, buf + offset, len);
468			offset += len;
469			remain -= len;
470			sg_unmark_end(__sg++);
471			sg_len++;
472		} while (remain);
473	}
474
475	list_for_each_entry(req, &packed->list, queuelist) {
476		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
477		__sg = sg + (sg_len - 1);
478		sg_unmark_end(__sg++);
479	}
480	sg_mark_end(sg + (sg_len - 1));
481	return sg_len;
482}
483
484/*
485 * Prepare the sg list(s) to be handed off to the host driver
486 */
487unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
488{
489	unsigned int sg_len;
490	size_t buflen;
491	struct scatterlist *sg;
492	enum mmc_packed_type cmd_type;
493	int i;
494
495	cmd_type = mqrq->cmd_type;
496
497	if (!mqrq->bounce_buf) {
498		if (mmc_packed_cmd(cmd_type))
499			return mmc_queue_packed_map_sg(mq, mqrq->packed,
500						       mqrq->sg, cmd_type);
501		else
502			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
503	}
504
505	BUG_ON(!mqrq->bounce_sg);
506
507	if (mmc_packed_cmd(cmd_type))
508		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
509						 mqrq->bounce_sg, cmd_type);
510	else
511		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
512
513	mqrq->bounce_sg_len = sg_len;
514
515	buflen = 0;
516	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
517		buflen += sg->length;
518
519	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
520
521	return 1;
522}
523
524/*
525 * If writing, bounce the data to the buffer before the request
526 * is sent to the host driver
527 */
528void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
529{
530	if (!mqrq->bounce_buf)
531		return;
532
533	if (rq_data_dir(mqrq->req) != WRITE)
534		return;
535
536	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
537		mqrq->bounce_buf, mqrq->sg[0].length);
538}
539
540/*
541 * If reading, bounce the data from the buffer after the request
542 * has been handled by the host driver
543 */
544void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
545{
546	if (!mqrq->bounce_buf)
547		return;
548
549	if (rq_data_dir(mqrq->req) != READ)
550		return;
551
552	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
553		mqrq->bounce_buf, mqrq->sg[0].length);
554}
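
For reference, the lifecycle entry points in both listings (mmc_init_queue(), mmc_queue_suspend(), mmc_queue_resume(), mmc_cleanup_queue()) are driven from the MMC block driver, which supplies the per-device lock and the issue_fn callback that mmc_queue_thread() invokes for each fetched request. The sketch below shows that wiring under stated assumptions: my_blk_data, my_issue_fn and the probe/remove wrappers are illustrative stand-ins, not the actual structures in drivers/mmc/card/block.c.

/* Illustrative consumer of the queue API above; not upstream code. */
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/mmc/card.h>
#include "queue.h"

struct my_blk_data {
	spinlock_t		lock;	/* becomes the request_queue lock */
	struct mmc_queue	queue;
};

static int my_issue_fn(struct mmc_queue *mq, struct request *req)
{
	/* ... translate the block request into MMC commands here ... */
	return 0;
}

static int my_probe_queue(struct my_blk_data *md, struct mmc_card *card)
{
	int ret;

	spin_lock_init(&md->lock);
	ret = mmc_init_queue(&md->queue, card, &md->lock, NULL);
	if (ret)
		return ret;

	/* The mmcqd thread calls back through issue_fn for every request. */
	md->queue.issue_fn = my_issue_fn;
	md->queue.data = md;		/* private pointer back to the driver */
	return 0;
}

static void my_suspend(struct my_blk_data *md)
{
	mmc_queue_suspend(&md->queue);	/* quiesce before the host suspends */
}

static void my_resume(struct my_blk_data *md)
{
	mmc_queue_resume(&md->queue);	/* restart the queue after resume */
}

static void my_remove(struct my_blk_data *md)
{
	mmc_cleanup_queue(&md->queue);	/* stop mmcqd and drain the queue */
}
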