// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files. It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */
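
/*
 * For instance (illustrative sizes only): with 1k blocks and 4k pages,
 * the last page of a 5k file maps to one real block followed by three
 * blocks past EOF.  The mapped block goes through the bio path below
 * and the tail of the folio is simply zeroed in place.
 */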

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

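/*
 * Everything a read bio still owes after the device I/O itself is done.
 * enabled_steps is a bitmask of bio_post_read_step bits, e.g.
 * (1 << STEP_DECRYPT) | (1 << STEP_VERITY) for an encrypted verity file,
 * and cur_step records how far the bio has advanced through them.
 */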
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

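/*
 * Finish the read: mark each folio uptodate on success (or leave it
 * !uptodate on error) and unlock it, then free the post-read context,
 * if one was attached, and the bio itself.
 */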
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == 0);
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

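/*
 * Work item: decrypt the bio's folios, then hand the bio to the next
 * post-read step.  fscrypt_decrypt_bio() returns false on failure and
 * sets bio->bi_status, so __read_end_io() sees the error.
 */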
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated. So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

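/*
 * Advance to the next enabled step for this bio: decryption first, then
 * verity, each queued on its own workqueue; once no enabled steps
 * remain, complete the read.
 */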
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

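/*
 * Only folios that lie within i_size are verity-protected; folios past
 * EOF hold the fs-verity metadata (the Merkle tree) and must be read
 * without being verified themselves.
 */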
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

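/*
 * Decide which post-read steps this bio needs and attach a context if
 * there are any.  Inodes using inline (blk-crypto) encryption are
 * excluded by fscrypt_inode_uses_fs_layer_crypto(), since for them the
 * block layer decrypts the data before the bio completes.
 */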
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

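/*
 * Reads of a verity file may extend past i_size (up to s_maxbytes) so
 * that the Merkle tree folios stored past EOF can be pulled in through
 * the page cache.
 */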
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

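/*
 * Read one folio (rac == NULL) or a whole readahead batch (folio ==
 * NULL), packing runs of physically contiguous blocks into as few read
 * bios as possible.  ext4 wires this up as both its ->read_folio and
 * ->readahead address_space operations (see fs/ext4/inode.c), roughly:
 *
 *	->read_folio:	ext4_mpage_readpages(inode, NULL, folio);
 *	->readahead:	ext4_mpage_readpages(inode, rac, NULL);
 */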
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			first_block = map.m_pblk + map_offset;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (!page_block)
				first_block = map.m_pblk;
			else if (first_block + page_block != map.m_pblk)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_end_read(folio, true);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != first_block - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
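			/* One block is 2^(blkbits - 9) 512-byte sectors. */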
			bio->bi_iter.bi_sector = first_block << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

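		/*
		 * Submit the bio right away if the extent ends on a mapping
		 * boundary (the following block is unlikely to be contiguous
		 * on disk) or the folio has a tail hole, since no further
		 * folio could be appended to this bio anyway.
		 */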
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = first_block + blocks_per_page - 1;
		continue;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

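/*
 * Create the slab cache and mempool backing bio_post_read_ctx
 * allocations.  The NUM_PREALLOC_POST_READ_CTXS preallocated entries
 * are what lets ext4_set_bio_post_read_ctx() treat mempool_alloc() as
 * infallible.
 */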
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}