block/blk-map.c at v3.15
  1/*
  2 * Functions related to mapping data to requests
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/bio.h>
  7#include <linux/blkdev.h>
  8#include <scsi/sg.h>		/* for struct sg_iovec */
  9
 10#include "blk.h"
 11
 12int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 13		      struct bio *bio)
 14{
 15	if (!rq->bio)
 16		blk_rq_bio_prep(q, rq, bio);
 17	else if (!ll_back_merge_fn(q, rq, bio))
 18		return -EINVAL;
 19	else {
 20		rq->biotail->bi_next = bio;
 21		rq->biotail = bio;
 22
 23		rq->__data_len += bio->bi_iter.bi_size;
 24	}
 25	return 0;
 26}
 27
 28static int __blk_rq_unmap_user(struct bio *bio)
 29{
 30	int ret = 0;
 31
 32	if (bio) {
 33		if (bio_flagged(bio, BIO_USER_MAPPED))
 34			bio_unmap_user(bio);
 35		else
 36			ret = bio_uncopy_user(bio);
 37	}
 38
 39	return ret;
 40}
 41
 42static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 43			     struct rq_map_data *map_data, void __user *ubuf,
 44			     unsigned int len, gfp_t gfp_mask)
 45{
 46	unsigned long uaddr;
 47	struct bio *bio, *orig_bio;
 48	int reading, ret;
 49
 50	reading = rq_data_dir(rq) == READ;
 51
 52	/*
 53	 * if alignment requirement is satisfied, map in user pages for
 54	 * direct dma. else, set up kernel bounce buffers
 55	 */
 56	uaddr = (unsigned long) ubuf;
 57	if (blk_rq_aligned(q, uaddr, len) && !map_data)
 58		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 59	else
 60		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 61
 62	if (IS_ERR(bio))
 63		return PTR_ERR(bio);
 64
 65	if (map_data && map_data->null_mapped)
 66		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
 67
 68	orig_bio = bio;
 69	blk_queue_bounce(q, &bio);
 70
 71	/*
 72	 * We link the bounce buffer in and could have to traverse it
 73	 * later so we have to get a ref to prevent it from being freed
 74	 */
 75	bio_get(bio);
 76
 77	ret = blk_rq_append_bio(q, rq, bio);
 78	if (!ret)
 79		return bio->bi_iter.bi_size;
 80
 81	/* if it was bounced we must call the end io function */
 82	bio_endio(bio, 0);
 83	__blk_rq_unmap_user(orig_bio);
 84	bio_put(bio);
 85	return ret;
 86}
 87
 88/**
 89 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 90 * @q:		request queue where request should be inserted
 91 * @rq:		request structure to fill
 92 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 93 * @ubuf:	the user buffer
 94 * @len:	length of user data
 95 * @gfp_mask:	memory allocation flags
 96 *
 97 * Description:
 98 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 99 *    a kernel bounce buffer is used.
100 *
101 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
102 *    still in process context.
103 *
104 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
105 *    before being submitted to the device, as pages mapped may be out of
106 *    reach. It's the caller's responsibility to make sure this happens. The
107 *    original bio must be passed back in to blk_rq_unmap_user() for proper
108 *    unmapping.
109 */
110int blk_rq_map_user(struct request_queue *q, struct request *rq,
111		    struct rq_map_data *map_data, void __user *ubuf,
112		    unsigned long len, gfp_t gfp_mask)
113{
114	unsigned long bytes_read = 0;
115	struct bio *bio = NULL;
116	int ret;
117
118	if (len > (queue_max_hw_sectors(q) << 9))
119		return -EINVAL;
120	if (!len)
121		return -EINVAL;
122
123	if (!ubuf && (!map_data || !map_data->null_mapped))
124		return -EINVAL;
125
126	while (bytes_read != len) {
127		unsigned long map_len, end, start;
128
129		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
130		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
131								>> PAGE_SHIFT;
132		start = (unsigned long)ubuf >> PAGE_SHIFT;
133
134		/*
135		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
136		 * pages. If this happens we just lower the requested
137		 * mapping len by a page so that we can fit
138		 */
139		if (end - start > BIO_MAX_PAGES)
140			map_len -= PAGE_SIZE;
141
142		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
143					gfp_mask);
144		if (ret < 0)
145			goto unmap_rq;
146		if (!bio)
147			bio = rq->bio;
148		bytes_read += ret;
149		ubuf += ret;
150
151		if (map_data)
152			map_data->offset += ret;
153	}
154
155	if (!bio_flagged(bio, BIO_USER_MAPPED))
156		rq->cmd_flags |= REQ_COPY_USER;
157
158	rq->buffer = NULL;
159	return 0;
160unmap_rq:
161	blk_rq_unmap_user(bio);
162	rq->bio = NULL;
163	return ret;
164}
165EXPORT_SYMBOL(blk_rq_map_user);
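A typical consumer of blk_rq_map_user() is an SG_IO-style passthrough path: allocate a REQ_TYPE_BLOCK_PC request, map the user buffer, execute the request, then unmap the bio that was saved before submission (completion may change rq->bio, as the kernel-doc above notes). The sketch below is illustrative only and is written against the v3.15-era API; the INQUIRY command bytes, the omitted sense buffer and the trimmed error handling are simplifications, not part of blk-map.c.

#include <linux/blkdev.h>

/* Hypothetical example, not kernel source: issue a 6-byte INQUIRY through a
 * passthrough request, mapping the user buffer with blk_rq_map_user(). A
 * real caller would also supply a sense buffer and validate lengths.
 */
static int example_inquiry(struct request_queue *q, struct gendisk *disk,
                           void __user *ubuf, unsigned int len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd[0] = 0x12;              /* INQUIRY */
        rq->cmd[4] = len;               /* allocation length, assumes len <= 255 */
        rq->cmd_len = 6;
        rq->timeout = 10 * HZ;

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out;

        bio = rq->bio;                  /* save: completion may change rq->bio */
        ret = blk_execute_rq(q, disk, rq, 0);
        blk_rq_unmap_user(bio);
out:
        blk_put_request(rq);
        return ret;
}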
166
167/**
168 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
169 * @q:		request queue where request should be inserted
170 * @rq:		request to map data to
171 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
172 * @iov:	pointer to the iovec
173 * @iov_count:	number of elements in the iovec
174 * @len:	I/O byte count
175 * @gfp_mask:	memory allocation flags
176 *
177 * Description:
178 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
179 *    a kernel bounce buffer is used.
180 *
181 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
182 *    still in process context.
183 *
184 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
185 *    before being submitted to the device, as pages mapped may be out of
186 *    reach. It's the caller's responsibility to make sure this happens. The
187 *    original bio must be passed back in to blk_rq_unmap_user() for proper
188 *    unmapping.
189 */
190int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
191			struct rq_map_data *map_data, const struct sg_iovec *iov,
192			int iov_count, unsigned int len, gfp_t gfp_mask)
193{
194	struct bio *bio;
195	int i, read = rq_data_dir(rq) == READ;
196	int unaligned = 0;
197
198	if (!iov || iov_count <= 0)
199		return -EINVAL;
200
201	for (i = 0; i < iov_count; i++) {
202		unsigned long uaddr = (unsigned long)iov[i].iov_base;
203
204		if (!iov[i].iov_len)
205			return -EINVAL;
206
207		/*
208		 * Keep going so we check length of all segments
209		 */
210		if (uaddr & queue_dma_alignment(q))
211			unaligned = 1;
212	}
213
214	if (unaligned || (q->dma_pad_mask & len) || map_data)
215		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
216					gfp_mask);
217	else
218		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
219
220	if (IS_ERR(bio))
221		return PTR_ERR(bio);
222
223	if (bio->bi_iter.bi_size != len) {
224		/*
225		 * Grab an extra reference to this bio, as bio_unmap_user()
226		 * expects to be able to drop it twice as it happens on the
227		 * normal IO completion path
228		 */
229		bio_get(bio);
230		bio_endio(bio, 0);
231		__blk_rq_unmap_user(bio);
232		return -EINVAL;
233	}
234
235	if (!bio_flagged(bio, BIO_USER_MAPPED))
236		rq->cmd_flags |= REQ_COPY_USER;
237
238	blk_queue_bounce(q, &bio);
239	bio_get(bio);
240	blk_rq_bio_prep(q, rq, bio);
241	rq->buffer = NULL;
242	return 0;
243}
244EXPORT_SYMBOL(blk_rq_map_user_iov);
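When the user data is scattered across several segments (as in the iovec form of SG_IO), the caller builds an sg_iovec array and hands it to blk_rq_map_user_iov() instead. A minimal sketch under the same v3.15-era assumptions, with hypothetical buffer names:

/* Hypothetical example: map a two-segment user scatter list into an already
 * prepared REQ_TYPE_BLOCK_PC request (see the previous sketch). Unmapping
 * still goes through blk_rq_unmap_user() on the saved rq->bio.
 */
static int example_map_user_iov(struct request_queue *q, struct request *rq,
                                void __user *hdr, size_t hdr_len,
                                void __user *data, size_t data_len)
{
        struct sg_iovec iov[2] = {
                { .iov_base = hdr,  .iov_len = hdr_len  },
                { .iov_base = data, .iov_len = data_len },
        };

        return blk_rq_map_user_iov(q, rq, NULL, iov, 2, hdr_len + data_len,
                                   GFP_KERNEL);
}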
245
246/**
247 * blk_rq_unmap_user - unmap a request with user data
248 * @bio:	       start of bio list
249 *
250 * Description:
251 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
252 *    supply the original rq->bio from the blk_rq_map_user() return, since
253 *    the I/O completion may have changed rq->bio.
254 */
255int blk_rq_unmap_user(struct bio *bio)
256{
257	struct bio *mapped_bio;
258	int ret = 0, ret2;
259
260	while (bio) {
261		mapped_bio = bio;
262		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
263			mapped_bio = bio->bi_private;
264
265		ret2 = __blk_rq_unmap_user(mapped_bio);
266		if (ret2 && !ret)
267			ret = ret2;
268
269		mapped_bio = bio;
270		bio = bio->bi_next;
271		bio_put(mapped_bio);
272	}
273
274	return ret;
275}
276EXPORT_SYMBOL(blk_rq_unmap_user);
277
278/**
279 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
280 * @q:		request queue where request should be inserted
281 * @rq:		request to fill
282 * @kbuf:	the kernel buffer
283 * @len:	length of user data
284 * @gfp_mask:	memory allocation flags
285 *
286 * Description:
287 *    Data will be mapped directly if possible. Otherwise a bounce
288 *    buffer is used. Can be called multiple times to append multiple
289 *    buffers.
290 */
291int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
292		    unsigned int len, gfp_t gfp_mask)
293{
294	int reading = rq_data_dir(rq) == READ;
295	unsigned long addr = (unsigned long) kbuf;
296	int do_copy = 0;
297	struct bio *bio;
298	int ret;
299
300	if (len > (queue_max_hw_sectors(q) << 9))
301		return -EINVAL;
302	if (!len || !kbuf)
303		return -EINVAL;
304
305	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
306	if (do_copy)
307		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
308	else
309		bio = bio_map_kern(q, kbuf, len, gfp_mask);
310
311	if (IS_ERR(bio))
312		return PTR_ERR(bio);
313
314	if (!reading)
315		bio->bi_rw |= REQ_WRITE;
316
317	if (do_copy)
318		rq->cmd_flags |= REQ_COPY_USER;
319
320	ret = blk_rq_append_bio(q, rq, bio);
321	if (unlikely(ret)) {
322		/* request is too big */
323		bio_put(bio);
324		return ret;
325	}
326
327	blk_queue_bounce(q, &rq->bio);
328	rq->buffer = NULL;
329	return 0;
330}
331EXPORT_SYMBOL(blk_rq_map_kern);
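blk_rq_map_kern() is the kernel-buffer analogue, used around v3.15 by helpers such as scsi_execute(). No unmap call is needed afterwards because the bio's completion handler releases any bounce pages. A hedged sketch with hypothetical naming and the command setup omitted:

/* Hypothetical example: map a driver-owned buffer and execute the request.
 * See the first sketch above for the BLOCK_PC fields a real caller fills in.
 */
static int example_map_kern(struct request_queue *q, struct gendisk *disk,
                            void *buf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_NOIO);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
        if (!ret)
                ret = blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return ret;
}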
block/blk-map.c at v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to mapping data to requests
  4 */
  5#include <linux/kernel.h>
  6#include <linux/sched/task_stack.h>
  7#include <linux/module.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/uio.h>
 11
 12#include "blk.h"
 13
 14struct bio_map_data {
 15	bool is_our_pages : 1;
 16	bool is_null_mapped : 1;
 17	struct iov_iter iter;
 18	struct iovec iov[];
 19};
 20
 21static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
 22					       gfp_t gfp_mask)
 23{
 24	struct bio_map_data *bmd;
 25
 26	if (data->nr_segs > UIO_MAXIOV)
 27		return NULL;
 28
 29	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
 30	if (!bmd)
 31		return NULL;
 32	bmd->iter = *data;
 33	if (iter_is_iovec(data)) {
 34		memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
 35		bmd->iter.__iov = bmd->iov;
 36	}
 37	return bmd;
 38}
 39
 40/**
 41 * bio_copy_from_iter - copy all pages from iov_iter to bio
 42 * @bio: The &struct bio which describes the I/O as destination
 43 * @iter: iov_iter as source
 44 *
 45 * Copy all pages from iov_iter to bio.
 46 * Returns 0 on success, or error on failure.
 47 */
 48static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
 49{
 50	struct bio_vec *bvec;
 51	struct bvec_iter_all iter_all;
 52
 53	bio_for_each_segment_all(bvec, bio, iter_all) {
 54		ssize_t ret;
 55
 56		ret = copy_page_from_iter(bvec->bv_page,
 57					  bvec->bv_offset,
 58					  bvec->bv_len,
 59					  iter);
 60
 61		if (!iov_iter_count(iter))
 62			break;
 63
 64		if (ret < bvec->bv_len)
 65			return -EFAULT;
 66	}
 67
 68	return 0;
 69}
 70
 71/**
 72 * bio_copy_to_iter - copy all pages from bio to iov_iter
 73 * @bio: The &struct bio which describes the I/O as source
 74 * @iter: iov_iter as destination
 75 *
 76 * Copy all pages from bio to iov_iter.
 77 * Returns 0 on success, or error on failure.
 78 */
 79static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
 80{
 81	struct bio_vec *bvec;
 82	struct bvec_iter_all iter_all;
 83
 84	bio_for_each_segment_all(bvec, bio, iter_all) {
 85		ssize_t ret;
 86
 87		ret = copy_page_to_iter(bvec->bv_page,
 88					bvec->bv_offset,
 89					bvec->bv_len,
 90					&iter);
 91
 92		if (!iov_iter_count(&iter))
 93			break;
 94
 95		if (ret < bvec->bv_len)
 96			return -EFAULT;
 97	}
 98
 99	return 0;
100}
101
102/**
103 *	bio_uncopy_user	-	finish previously mapped bio
104 *	@bio: bio being terminated
105 *
106 *	Free pages allocated from bio_copy_user_iov() and write back data
107 *	to user space in case of a read.
108 */
109static int bio_uncopy_user(struct bio *bio)
110{
111	struct bio_map_data *bmd = bio->bi_private;
112	int ret = 0;
113
114	if (!bmd->is_null_mapped) {
115		/*
116		 * if we're in a workqueue, the request is orphaned, so
117		 * don't copy into a random user address space, just free
118		 * and return -EINTR so user space doesn't expect any data.
119		 */
120		if (!current->mm)
121			ret = -EINTR;
122		else if (bio_data_dir(bio) == READ)
123			ret = bio_copy_to_iter(bio, bmd->iter);
124		if (bmd->is_our_pages)
125			bio_free_pages(bio);
126	}
127	kfree(bmd);
128	return ret;
129}
130
131static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
132		struct iov_iter *iter, gfp_t gfp_mask)
133{
134	struct bio_map_data *bmd;
135	struct page *page;
136	struct bio *bio;
137	int i = 0, ret;
138	int nr_pages;
139	unsigned int len = iter->count;
140	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
141
142	bmd = bio_alloc_map_data(iter, gfp_mask);
143	if (!bmd)
144		return -ENOMEM;
145
146	/*
147	 * We need to do a deep copy of the iov_iter including the iovecs.
148	 * The caller provided iov might point to an on-stack or otherwise
149	 * shortlived one.
150	 */
151	bmd->is_our_pages = !map_data;
152	bmd->is_null_mapped = (map_data && map_data->null_mapped);
153
154	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
155
156	ret = -ENOMEM;
157	bio = bio_kmalloc(nr_pages, gfp_mask);
158	if (!bio)
159		goto out_bmd;
160	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
161
162	if (map_data) {
163		nr_pages = 1U << map_data->page_order;
164		i = map_data->offset / PAGE_SIZE;
165	}
166	while (len) {
167		unsigned int bytes = PAGE_SIZE;
168
169		bytes -= offset;
170
171		if (bytes > len)
172			bytes = len;
173
174		if (map_data) {
175			if (i == map_data->nr_entries * nr_pages) {
176				ret = -ENOMEM;
177				goto cleanup;
178			}
179
180			page = map_data->pages[i / nr_pages];
181			page += (i % nr_pages);
182
183			i++;
184		} else {
185			page = alloc_page(GFP_NOIO | gfp_mask);
186			if (!page) {
187				ret = -ENOMEM;
188				goto cleanup;
189			}
190		}
191
192		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
193			if (!map_data)
194				__free_page(page);
195			break;
196		}
197
198		len -= bytes;
199		offset = 0;
200	}
201
202	if (map_data)
203		map_data->offset += bio->bi_iter.bi_size;
204
205	/*
206	 * success
207	 */
208	if (iov_iter_rw(iter) == WRITE &&
209	     (!map_data || !map_data->null_mapped)) {
210		ret = bio_copy_from_iter(bio, iter);
211		if (ret)
212			goto cleanup;
213	} else if (map_data && map_data->from_user) {
214		struct iov_iter iter2 = *iter;
215
216		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
217		iter2.data_source = ITER_SOURCE;
218		ret = bio_copy_from_iter(bio, &iter2);
219		if (ret)
220			goto cleanup;
221	} else {
222		if (bmd->is_our_pages)
223			zero_fill_bio(bio);
224		iov_iter_advance(iter, bio->bi_iter.bi_size);
225	}
226
227	bio->bi_private = bmd;
228
229	ret = blk_rq_append_bio(rq, bio);
230	if (ret)
231		goto cleanup;
232	return 0;
233cleanup:
234	if (!map_data)
235		bio_free_pages(bio);
236	bio_uninit(bio);
237	kfree(bio);
238out_bmd:
239	kfree(bmd);
240	return ret;
241}
242
243static void blk_mq_map_bio_put(struct bio *bio)
244{
245	if (bio->bi_opf & REQ_ALLOC_CACHE) {
246		bio_put(bio);
247	} else {
248		bio_uninit(bio);
249		kfree(bio);
250	}
251}
252
253static struct bio *blk_rq_map_bio_alloc(struct request *rq,
254		unsigned int nr_vecs, gfp_t gfp_mask)
255{
256	struct bio *bio;
257
258	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
259		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
260					&fs_bio_set);
261		if (!bio)
262			return NULL;
263	} else {
264		bio = bio_kmalloc(nr_vecs, gfp_mask);
265		if (!bio)
266			return NULL;
267		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
268	}
269	return bio;
270}
271
272static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
273		gfp_t gfp_mask)
274{
275	iov_iter_extraction_t extraction_flags = 0;
276	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
277	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
278	struct bio *bio;
279	int ret;
280	int j;
281
282	if (!iov_iter_count(iter))
283		return -EINVAL;
284
285	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
286	if (bio == NULL)
287		return -ENOMEM;
288
289	if (blk_queue_pci_p2pdma(rq->q))
290		extraction_flags |= ITER_ALLOW_P2PDMA;
291	if (iov_iter_extract_will_pin(iter))
292		bio_set_flag(bio, BIO_PAGE_PINNED);
293
294	while (iov_iter_count(iter)) {
295		struct page *stack_pages[UIO_FASTIOV];
296		struct page **pages = stack_pages;
297		ssize_t bytes;
298		size_t offs;
299		int npages;
300
301		if (nr_vecs > ARRAY_SIZE(stack_pages))
302			pages = NULL;
303
304		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
305					       nr_vecs, extraction_flags, &offs);
306		if (unlikely(bytes <= 0)) {
307			ret = bytes ? bytes : -EFAULT;
308			goto out_unmap;
309		}
310
311		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
312
313		if (unlikely(offs & queue_dma_alignment(rq->q)))
314			j = 0;
315		else {
316			for (j = 0; j < npages; j++) {
317				struct page *page = pages[j];
318				unsigned int n = PAGE_SIZE - offs;
319				bool same_page = false;
320
321				if (n > bytes)
322					n = bytes;
323
324				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
325						     max_sectors, &same_page))
326					break;
327
328				if (same_page)
329					bio_release_page(bio, page);
330				bytes -= n;
331				offs = 0;
332			}
333		}
334		/*
335		 * release the pages we didn't map into the bio, if any
336		 */
337		while (j < npages)
338			bio_release_page(bio, pages[j++]);
339		if (pages != stack_pages)
340			kvfree(pages);
341		/* couldn't stuff something into bio? */
342		if (bytes) {
343			iov_iter_revert(iter, bytes);
344			break;
345		}
346	}
347
348	ret = blk_rq_append_bio(rq, bio);
349	if (ret)
350		goto out_unmap;
351	return 0;
352
353 out_unmap:
354	bio_release_pages(bio, false);
355	blk_mq_map_bio_put(bio);
356	return ret;
357}
358
359static void bio_invalidate_vmalloc_pages(struct bio *bio)
360{
361#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
362	if (bio->bi_private && !op_is_write(bio_op(bio))) {
363		unsigned long i, len = 0;
364
365		for (i = 0; i < bio->bi_vcnt; i++)
366			len += bio->bi_io_vec[i].bv_len;
367		invalidate_kernel_vmap_range(bio->bi_private, len);
368	}
369#endif
370}
371
372static void bio_map_kern_endio(struct bio *bio)
373{
374	bio_invalidate_vmalloc_pages(bio);
375	bio_uninit(bio);
376	kfree(bio);
377}
378
379/**
380 *	bio_map_kern	-	map kernel address into bio
381 *	@q: the struct request_queue for the bio
382 *	@data: pointer to buffer to map
383 *	@len: length in bytes
384 *	@gfp_mask: allocation flags for bio allocation
385 *
386 *	Map the kernel address into a bio suitable for io to a block
387 *	device. Returns an error pointer in case of error.
388 */
389static struct bio *bio_map_kern(struct request_queue *q, void *data,
390		unsigned int len, gfp_t gfp_mask)
391{
392	unsigned long kaddr = (unsigned long)data;
393	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
394	unsigned long start = kaddr >> PAGE_SHIFT;
395	const int nr_pages = end - start;
396	bool is_vmalloc = is_vmalloc_addr(data);
397	struct page *page;
398	int offset, i;
399	struct bio *bio;
400
401	bio = bio_kmalloc(nr_pages, gfp_mask);
402	if (!bio)
403		return ERR_PTR(-ENOMEM);
404	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
405
406	if (is_vmalloc) {
407		flush_kernel_vmap_range(data, len);
408		bio->bi_private = data;
409	}
410
411	offset = offset_in_page(kaddr);
412	for (i = 0; i < nr_pages; i++) {
413		unsigned int bytes = PAGE_SIZE - offset;
414
415		if (len <= 0)
416			break;
417
418		if (bytes > len)
419			bytes = len;
420
421		if (!is_vmalloc)
422			page = virt_to_page(data);
423		else
424			page = vmalloc_to_page(data);
425		if (bio_add_pc_page(q, bio, page, bytes,
426				    offset) < bytes) {
427			/* we don't support partial mappings */
428			bio_uninit(bio);
429			kfree(bio);
430			return ERR_PTR(-EINVAL);
431		}
432
433		data += bytes;
434		len -= bytes;
435		offset = 0;
436	}
437
438	bio->bi_end_io = bio_map_kern_endio;
439	return bio;
440}
441
442static void bio_copy_kern_endio(struct bio *bio)
443{
444	bio_free_pages(bio);
445	bio_uninit(bio);
446	kfree(bio);
447}
448
449static void bio_copy_kern_endio_read(struct bio *bio)
450{
451	char *p = bio->bi_private;
452	struct bio_vec *bvec;
453	struct bvec_iter_all iter_all;
454
455	bio_for_each_segment_all(bvec, bio, iter_all) {
456		memcpy_from_bvec(p, bvec);
457		p += bvec->bv_len;
458	}
459
460	bio_copy_kern_endio(bio);
461}
462
463/**
464 *	bio_copy_kern	-	copy kernel address into bio
465 *	@q: the struct request_queue for the bio
466 *	@data: pointer to buffer to copy
467 *	@len: length in bytes
468 *	@gfp_mask: allocation flags for bio and page allocation
469 *	@reading: data direction is READ
470 *
471 *	copy the kernel address into a bio suitable for io to a block
472 *	device. Returns an error pointer in case of error.
473 */
474static struct bio *bio_copy_kern(struct request_queue *q, void *data,
475		unsigned int len, gfp_t gfp_mask, int reading)
476{
477	unsigned long kaddr = (unsigned long)data;
478	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
479	unsigned long start = kaddr >> PAGE_SHIFT;
480	struct bio *bio;
481	void *p = data;
482	int nr_pages = 0;
483
484	/*
485	 * Overflow, abort
486	 */
487	if (end < start)
488		return ERR_PTR(-EINVAL);
489
490	nr_pages = end - start;
491	bio = bio_kmalloc(nr_pages, gfp_mask);
492	if (!bio)
493		return ERR_PTR(-ENOMEM);
494	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
495
496	while (len) {
497		struct page *page;
498		unsigned int bytes = PAGE_SIZE;
499
500		if (bytes > len)
501			bytes = len;
502
503		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
504		if (!page)
505			goto cleanup;
506
507		if (!reading)
508			memcpy(page_address(page), p, bytes);
509
510		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
511			break;
512
513		len -= bytes;
514		p += bytes;
515	}
516
517	if (reading) {
518		bio->bi_end_io = bio_copy_kern_endio_read;
519		bio->bi_private = data;
520	} else {
521		bio->bi_end_io = bio_copy_kern_endio;
522	}
523
524	return bio;
525
526cleanup:
527	bio_free_pages(bio);
528	bio_uninit(bio);
529	kfree(bio);
530	return ERR_PTR(-ENOMEM);
531}
532
533/*
534 * Append a bio to a passthrough request.  Only works if the bio can be merged
535 * into the request based on the driver constraints.
536 */
537int blk_rq_append_bio(struct request *rq, struct bio *bio)
538{
539	struct bvec_iter iter;
540	struct bio_vec bv;
541	unsigned int nr_segs = 0;
542
543	bio_for_each_bvec(bv, bio, iter)
544		nr_segs++;
545
546	if (!rq->bio) {
547		blk_rq_bio_prep(rq, bio, nr_segs);
548	} else {
549		if (!ll_back_merge_fn(rq, bio, nr_segs))
550			return -EINVAL;
551		rq->biotail->bi_next = bio;
552		rq->biotail = bio;
553		rq->__data_len += (bio)->bi_iter.bi_size;
554		bio_crypt_free_ctx(bio);
555	}
556
557	return 0;
558}
559EXPORT_SYMBOL(blk_rq_append_bio);
560
561/* Prepare bio for passthrough IO given ITER_BVEC iter */
562static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
563{
564	const struct queue_limits *lim = &rq->q->limits;
565	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
566	unsigned int nsegs;
567	struct bio *bio;
568	int ret;
569
570	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
571		return -EINVAL;
572
573	/* reuse the bvecs from the iterator instead of allocating new ones */
574	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
575	if (!bio)
576		return -ENOMEM;
577	bio_iov_bvec_set(bio, iter);
578
579	/* check that the data layout matches the hardware restrictions */
580	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
581	if (ret) {
582		/* if we would have to split the bio, copy instead */
583		if (ret > 0)
584			ret = -EREMOTEIO;
585		blk_mq_map_bio_put(bio);
586		return ret;
587	}
588
589	blk_rq_bio_prep(rq, bio, nsegs);
590	return 0;
591}
592
593/**
594 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
595 * @q:		request queue where request should be inserted
596 * @rq:		request to map data to
597 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
598 * @iter:	iovec iterator
599 * @gfp_mask:	memory allocation flags
600 *
601 * Description:
602 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
603 *    a kernel bounce buffer is used.
604 *
605 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
606 *    still in process context.
607 */
608int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
609			struct rq_map_data *map_data,
610			const struct iov_iter *iter, gfp_t gfp_mask)
611{
612	bool copy = false, map_bvec = false;
613	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
614	struct bio *bio = NULL;
615	struct iov_iter i;
616	int ret = -EINVAL;
617
618	if (map_data)
619		copy = true;
620	else if (blk_queue_may_bounce(q))
621		copy = true;
622	else if (iov_iter_alignment(iter) & align)
623		copy = true;
624	else if (iov_iter_is_bvec(iter))
625		map_bvec = true;
626	else if (!user_backed_iter(iter))
627		copy = true;
628	else if (queue_virt_boundary(q))
629		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
630
631	if (map_bvec) {
632		ret = blk_rq_map_user_bvec(rq, iter);
633		if (!ret)
634			return 0;
635		if (ret != -EREMOTEIO)
636			goto fail;
637		/* fall back to copying the data on limits mismatches */
638		copy = true;
639	}
640
641	i = *iter;
642	do {
643		if (copy)
644			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
645		else
646			ret = bio_map_user_iov(rq, &i, gfp_mask);
647		if (ret)
648			goto unmap_rq;
649		if (!bio)
650			bio = rq->bio;
651	} while (iov_iter_count(&i));
652
653	return 0;
654
655unmap_rq:
656	blk_rq_unmap_user(bio);
657fail:
658	rq->bio = NULL;
659	return ret;
660}
661EXPORT_SYMBOL(blk_rq_map_user_iov);
662
663int blk_rq_map_user(struct request_queue *q, struct request *rq,
664		    struct rq_map_data *map_data, void __user *ubuf,
665		    unsigned long len, gfp_t gfp_mask)
666{
667	struct iov_iter i;
668	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
669
670	if (unlikely(ret < 0))
671		return ret;
672
673	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
674}
675EXPORT_SYMBOL(blk_rq_map_user);
676
677int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
678		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
679		bool vec, int iov_count, bool check_iter_count, int rw)
680{
681	int ret = 0;
682
683	if (vec) {
684		struct iovec fast_iov[UIO_FASTIOV];
685		struct iovec *iov = fast_iov;
686		struct iov_iter iter;
687
688		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
689				UIO_FASTIOV, &iov, &iter);
690		if (ret < 0)
691			return ret;
692
693		if (iov_count) {
694			/* SG_IO howto says that the shorter of the two wins */
695			iov_iter_truncate(&iter, buf_len);
696			if (check_iter_count && !iov_iter_count(&iter)) {
697				kfree(iov);
698				return -EINVAL;
699			}
700		}
701
702		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
703				gfp_mask);
704		kfree(iov);
705	} else if (buf_len) {
706		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
707				gfp_mask);
708	}
709	return ret;
710}
711EXPORT_SYMBOL(blk_rq_map_user_io);
712
713/**
714 * blk_rq_unmap_user - unmap a request with user data
715 * @bio:	       start of bio list
716 *
717 * Description:
718 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
719 *    supply the original rq->bio from the blk_rq_map_user() return, since
720 *    the I/O completion may have changed rq->bio.
721 */
722int blk_rq_unmap_user(struct bio *bio)
723{
724	struct bio *next_bio;
725	int ret = 0, ret2;
726
727	while (bio) {
728		if (bio->bi_private) {
729			ret2 = bio_uncopy_user(bio);
730			if (ret2 && !ret)
731				ret = ret2;
732		} else {
733			bio_release_pages(bio, bio_data_dir(bio) == READ);
734		}
735
736		if (bio_integrity(bio))
737			bio_integrity_unmap_user(bio);
738
739		next_bio = bio;
740		bio = bio->bi_next;
741		blk_mq_map_bio_put(next_bio);
742	}
743
744	return ret;
745}
746EXPORT_SYMBOL(blk_rq_unmap_user);
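On the v6.13 API the map/execute/unmap cycle keeps the same shape, but passthrough requests come from blk_mq_alloc_request() and blk_execute_rq() now takes only the request and an at_head flag. The sketch below is an illustration rather than a drop-in driver: the REQ_OP_DRV_IN opcode and the bare queue are assumptions, and a real driver (compare the NVMe or sg passthrough code) also initialises its command before submitting.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical example: map a user buffer into a passthrough request on the
 * v6.13 API, execute it, then unmap the bio saved before submission.
 */
static int example_user_passthrough(struct request_queue *q,
                                    void __user *ubuf, unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out_free;

        bio = rq->bio;                  /* save: completion may change rq->bio */
        ret = blk_status_to_errno(blk_execute_rq(rq, false));
        blk_rq_unmap_user(bio);
out_free:
        blk_mq_free_request(rq);
        return ret;
}

Vectored or fixed-buffer callers would use blk_rq_map_user_io() or blk_rq_map_user_iov() at the same point; the unmap side is unchanged.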
747
748/**
749 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
750 * @q:		request queue where request should be inserted
751 * @rq:		request to fill
752 * @kbuf:	the kernel buffer
753 * @len:	length of user data
754 * @gfp_mask:	memory allocation flags
755 *
756 * Description:
757 *    Data will be mapped directly if possible. Otherwise a bounce
758 *    buffer is used. Can be called multiple times to append multiple
759 *    buffers.
760 */
761int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
762		    unsigned int len, gfp_t gfp_mask)
763{
764	int reading = rq_data_dir(rq) == READ;
765	unsigned long addr = (unsigned long) kbuf;
766	struct bio *bio;
767	int ret;
768
769	if (len > (queue_max_hw_sectors(q) << 9))
770		return -EINVAL;
771	if (!len || !kbuf)
772		return -EINVAL;
773
774	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
775	    blk_queue_may_bounce(q))
776		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
777	else
778		bio = bio_map_kern(q, kbuf, len, gfp_mask);
779
780	if (IS_ERR(bio))
781		return PTR_ERR(bio);
782
783	bio->bi_opf &= ~REQ_OP_MASK;
784	bio->bi_opf |= req_op(rq);
785
786	ret = blk_rq_append_bio(rq, bio);
787	if (unlikely(ret)) {
788		bio_uninit(bio);
789		kfree(bio);
790	}
791	return ret;
792}
793EXPORT_SYMBOL(blk_rq_map_kern);
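Finally, the kernel-buffer case on the v6.13 API, again as a hedged sketch with hypothetical names. As in v3.15, no explicit unmap is needed: bio_map_kern_endio() or bio_copy_kern_endio() frees the bio, and any copied pages, when the request completes.

/* Hypothetical example: map a kernel buffer into a passthrough request on
 * the v6.13 API. Same headers and assumptions as the sketch above.
 */
static int example_kern_passthrough(struct request_queue *q,
                                    void *buf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
        if (!ret)
                ret = blk_status_to_errno(blk_execute_rq(rq, false));

        blk_mq_free_request(rq);
        return ret;
}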