v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

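/*
 * One bio_post_read_ctx is allocated (from the mempool above) for each read
 * bio that needs post-I/O processing.  ->enabled_steps is a bitmask of the
 * STEP_* values that apply to the bio, and ->cur_step tracks how far the bio
 * has advanced through them.  For example, a file that is both encrypted and
 * verity-protected ends up with
 *
 *	enabled_steps == (1 << STEP_DECRYPT) | (1 << STEP_VERITY)
 *
 * and its completion path runs decrypt_work() and then verity_work() before
 * the pages are finally unlocked in __read_end_io().
 */
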
/*
 * Mark each page in the bio uptodate (or not, on error) and unlock it, then
 * free the post-read context, if any, along with the bio itself.
 */
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}
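
/*
 * For, e.g., an encrypted verity file, the completion flow is:
 *
 *	mpage_end_io()
 *	  -> bio_post_read_processing()   cur_step: INITIAL -> DECRYPT
 *	     -> decrypt_work()            (on the fscrypt workqueue)
 *	        -> bio_post_read_processing()  cur_step: DECRYPT -> VERITY
 *	           -> verity_work()       (on the fsverity workqueue)
 *	              -> __read_end_io()
 *
 * Steps that are not set in ->enabled_steps are simply skipped over.
 */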

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
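
/*
 * Only pages that lie within i_size hold file data that needs fs-verity
 * verification; pages past EOF (which ext4 uses to store the Merkle tree)
 * are still read through this file but must not be passed to
 * fsverity_verify_page().
 */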

static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}
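
/*
 * Note: for verity files the read limit is s_maxbytes rather than i_size,
 * so that the verity metadata stored beyond EOF can be read through the
 * page cache (e.g. by ext4_read_merkle_tree_page()).
 */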

int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}
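		/*
		 * (A single ext4_map_blocks() call can return an extent
		 * covering many pages, so later pages in the readahead
		 * window can often be mapped from the cached result without
		 * querying the extent tree again.)
		 */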

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_folio(page_folio(page), ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		submit_bio(bio);
	return 0;
}
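
/*
 * Note that a bio is kept open across loop iterations and only submitted
 * when the next page's first block is not contiguous with the last block
 * already in the bio, when the crypto contexts cannot be merged, when a
 * hole or extent boundary ends the page, or when bio_add_page() runs out
 * of room; this is what batches adjacent pages into large read bios.
 */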

int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
v4.10.11
 
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readpages() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}
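
/*
 * In this version, bio->bi_private is non-NULL only when it carries the
 * struct fscrypt_ctx that was attached at submission time for an encrypted
 * regular file, so its presence is what flags a bio as needing decryption.
 */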

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (ext4_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
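
/*
 * On a successful read of an encrypted bio, fscrypt_decrypt_bio_pages()
 * takes over completion: it defers to the fscrypt workqueue, which decrypts
 * each page in place and then unlocks it, which is why mpage_end_io()
 * returns early in that case.
 */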

int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}
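		/*
		 * A cleancache hit fills the page without any disk I/O; the
		 * page is now uptodate, so the "confused" path below just
		 * unlocks it instead of issuing a read.
		 */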

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}