// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
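
/*
 * Illustrative sketch, not part of the original file: one way a
 * passthrough driver could build a bio from a kernel buffer and attach
 * it with blk_rq_append_bio().  The helper name and its error handling
 * are assumptions for the example, not kernel API.
 */
static int __maybe_unused example_append_kern_buf(struct request *rq,
		void *buf, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	bio = bio_map_kern(rq->q, buf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* blk_rq_append_bio() may swap *bio for a bounce bio */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret)
		bio_put(bio);	/* could not merge; drop our reference */
	return ret;
}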

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
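
/*
 * Illustrative sketch, not part of the original file: mapping a
 * user-space iovec array onto a passthrough request via import_iovec().
 * Assumes <linux/slab.h> is reachable for kfree(); the ioctl plumbing
 * that supplies uvec/nr_segs is left to the caller.
 */
static int __maybe_unused example_map_user_iovec(struct request_queue *q,
		struct request *rq, const struct iovec __user *uvec,
		unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	/* import_iovec() NULLs iov when the on-stack array was used */
	kfree(iov);
	return ret;
}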

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
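
/*
 * Illustrative sketch, not part of the original file: the full
 * map/execute/unmap lifecycle for one user buffer.  rq->bio is saved
 * before blk_execute_rq() because completion may change it, and the
 * saved pointer is what blk_rq_unmap_user() expects.
 */
static int __maybe_unused example_user_io(struct request_queue *q,
		struct request *rq, void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;			/* save the original bio chain */
	blk_execute_rq(q, NULL, rq, 0);	/* synchronous passthrough I/O */

	return blk_rq_unmap_user(bio);	/* copy back (if bounced) and free */
}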

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
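
/*
 * Illustrative sketch, not part of the original file: issuing a
 * driver-private command backed by a kernel buffer.  REQ_OP_DRV_IN
 * marks a device-to-host transfer; the driver-specific command setup a
 * real caller would do between allocation and execution is omitted.
 */
static int __maybe_unused example_kern_io(struct request_queue *q,
		void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}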