// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The intent is for the ext4_mpage_readpages() function here to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit when
 * page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end of the file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

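/*
 * Complete a read bio once any post-read processing has finished: mark each
 * page uptodate on success, or leave it !Uptodate (so it will be re-read
 * later) on failure, then unlock the page.  Frees the post-read context, if
 * any, and drops the bio reference.
 */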
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

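/* Work item: decrypt the bio's data in place, then run the next post-read step. */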
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

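/* Work item: verify the bio's data with fs-verity, then run the next post-read step. */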
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fsverity_verify_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

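/*
 * A bio needs post-read processing if it completed successfully and a
 * bio_post_read_ctx was attached to it at submission time.
 */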
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

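/*
 * Data pages of a verity file need verification only if they lie within
 * i_size; pages at or beyond the last data page are left unverified.
 */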
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

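/*
 * Allocate a post-read context for @bio if its data will need decryption
 * and/or verity verification after the read completes.  Returns NULL if no
 * post-read step is needed, or an ERR_PTR on allocation failure.
 */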
static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
						       struct bio *bio,
						       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;
	struct bio_post_read_ctx *ctx = NULL;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
	return ctx;
}

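/*
 * fs-verity files may be read beyond i_size (up to s_maxbytes), both while
 * verity is being enabled and afterwards; all other files are limited to
 * i_size.
 */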
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

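/*
 * Read pages either from the @pages list (readahead; @page is NULL) or the
 * single locked @page (->readpage; @pages is NULL).  Pages mapping to
 * contiguous runs of blocks are batched into large read bios; anything
 * unusual (see the comment at the top of this file) falls back to
 * block_read_full_page() via the "confused" path.
 */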
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (pages) {
			page = lru_to_page(pages);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct bio_post_read_ctx *ctx;

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio)
				goto set_error_page;
			ctx = get_bio_post_read_ctx(inode, bio, page->index);
			if (IS_ERR(ctx)) {
				bio_put(bio);
				bio = NULL;
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
						is_readahead ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}

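/*
 * Create the slab cache and mempool used to allocate bio_post_read_ctx
 * structures for encrypted and verity reads.
 */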
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

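/* Tear down the bio_post_read_ctx mempool and slab cache. */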
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}