// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

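/*
 * One context is attached per bio (via bio->bi_private) when decryption
 * and/or verity verification must run after the raw read completes.
 * enabled_steps is a bitmask over enum bio_post_read_step; cur_step is
 * the step currently being processed.
 */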
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

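/*
 * Final completion: mark each page Uptodate (or clear it on error) and
 * unlock it, then free the post-read context, if any, and the bio itself.
 */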
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

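/*
 * Decryption step, run from the fscrypt workqueue.  On success, hand the
 * bio to the next enabled post-read step; on failure (fscrypt marks the
 * bio failed), complete it so the pages are left !Uptodate.
 */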
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

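/*
 * bi_private holds a post-read context only when decryption and/or
 * verity is enabled for this bio; skip post-processing on I/O errors.
 */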
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

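/*
 * Only data pages below i_size are verified: on ext4 the verity metadata
 * (the Merkle tree) is stored past EOF and must be read without being
 * verified itself.
 */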
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

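/*
 * Reads are normally limited to i_size, but when verity is enabled (or
 * is being enabled) the Merkle tree pages stored past EOF must be
 * readable too, so allow anything up to s_maxbytes.
 */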
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

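/*
 * Serves both the read_folio and readahead paths.  Callers (see the
 * address_space operations in fs/ext4/inode.c) pass either a single
 * locked page with rac == NULL, or a readahead_control with
 * page == NULL; roughly:
 *
 *	ext4_mpage_readpages(inode, NULL, page);   read one locked page
 *	ext4_mpage_readpages(inode, rac, NULL);    readahead a batch
 */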
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

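	/*
	 * One iteration per page.  blocks[] collects the physical block
	 * numbers backing the page; the page is added to a bio only when
	 * they are contiguous, otherwise we fall back to the buffer_head
	 * path at "confused" below.
	 */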
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
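		/*
		 * Zero any tail hole in the page.  If the whole page is a
		 * hole it never reaches a bio, so it must be verified
		 * against fs-verity (it is all zeroes) and completed here.
		 */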
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}

		/*
		 * This page will go into the BIO.  Send the current BIO off
		 * first if the new page is not physically contiguous with
		 * it, or if fscrypt would not allow the two to be merged.
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
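			/* bi_sector is in 512-byte sectors, hence blkbits - 9 */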
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
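			/*
			 * Readahead I/O is advisory; REQ_RAHEAD lets lower
			 * layers deprioritize or fail it under load.
			 */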
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

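		/*
		 * Submit now if the mapping ended at a boundary block (the
		 * next read will likely need metadata first) or if the page
		 * ends in a hole, so nothing contiguous can follow anyway.
		 */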
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
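	/*
	 * Fall back to the buffer_head-based path for any page we cannot
	 * handle with a single contiguous bio.
	 */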
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_folio(page_folio(page), ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

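/*
 * Preallocate enough contexts that mempool_alloc() in
 * ext4_set_bio_post_read_ctx() can always make forward progress, even
 * under memory pressure.
 */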
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}