v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * linux/fs/ext4/page-io.c
  4 *
  5 * This contains the new page_io functions for ext4
  6 *
  7 * Written by Theodore Ts'o, 2010.
  8 */
  9
 10#include <linux/fs.h>
 11#include <linux/time.h>
 12#include <linux/highuid.h>
 13#include <linux/pagemap.h>
 14#include <linux/quotaops.h>
 15#include <linux/string.h>
 16#include <linux/buffer_head.h>
 17#include <linux/writeback.h>
 18#include <linux/pagevec.h>
 19#include <linux/mpage.h>
 20#include <linux/namei.h>
 21#include <linux/uio.h>
 22#include <linux/bio.h>
 23#include <linux/workqueue.h>
 24#include <linux/kernel.h>
 25#include <linux/slab.h>
 26#include <linux/mm.h>
 27#include <linux/sched/mm.h>
 28
 29#include "ext4_jbd2.h"
 30#include "xattr.h"
 31#include "acl.h"
 32
 33static struct kmem_cache *io_end_cachep;
 34static struct kmem_cache *io_end_vec_cachep;
 35
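/*
 * Slab caches for the write-back completion machinery: io_end_cachep backs
 * the ext4_io_end_t structures themselves, io_end_vec_cachep backs the
 * per-range ext4_io_end_vec entries hung off io_end->list_vec.  Both are
 * set up at module init and destroyed again in ext4_exit_pageio().
 */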
 36int __init ext4_init_pageio(void)
 37{
 38	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
 39	if (io_end_cachep == NULL)
 40		return -ENOMEM;
 41
 42	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
 43	if (io_end_vec_cachep == NULL) {
 44		kmem_cache_destroy(io_end_cachep);
 45		return -ENOMEM;
 46	}
 47	return 0;
 48}
 49
 50void ext4_exit_pageio(void)
 51{
 52	kmem_cache_destroy(io_end_cachep);
 53	kmem_cache_destroy(io_end_vec_cachep);
 54}
 55
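/*
 * Allocate a new io_end_vec and link it at the tail of io_end->list_vec.
 * The caller is expected to fill in the logical range it covers.  GFP_NOFS
 * is used because this runs from the writeback path; ERR_PTR(-ENOMEM) is
 * returned on allocation failure.
 */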
 56struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
 57{
 58	struct ext4_io_end_vec *io_end_vec;
 59
 60	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
 61	if (!io_end_vec)
 62		return ERR_PTR(-ENOMEM);
 63	INIT_LIST_HEAD(&io_end_vec->list);
 64	list_add_tail(&io_end_vec->list, &io_end->list_vec);
 65	return io_end_vec;
 66}
 67
 68static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
 69{
 70	struct ext4_io_end_vec *io_end_vec, *tmp;
 71
 72	if (list_empty(&io_end->list_vec))
 73		return;
 74	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
 75		list_del(&io_end_vec->list);
 76		kmem_cache_free(io_end_vec_cachep, io_end_vec);
 77	}
 78}
 79
 80struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
 81{
 82	BUG_ON(list_empty(&io_end->list_vec));
 83	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
 84}
 85
 86/*
  87 * Print a buffer I/O error compatible with fs/buffer.c.  This
 88 * provides compatibility with dmesg scrapers that look for a specific
 89 * buffer I/O error message.  We really need a unified error reporting
 90 * structure to userspace ala Digital Unix's uerf system, but it's
 91 * probably not going to happen in my lifetime, due to LKML politics...
 92 */
 93static void buffer_io_error(struct buffer_head *bh)
 94{
 95	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
 96		       bh->b_bdev,
 97			(unsigned long long)bh->b_blocknr);
 98}
 99
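/*
 * Per-bio completion work: for every segment, map an fscrypt bounce page
 * back to its pagecache page, clear the async_write flag on the buffers
 * the segment covers (under b_uptodate_lock, since several bios can
 * complete against the same page concurrently), record any I/O error, and
 * end page writeback once no buffer on the page is still under async write.
 */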
100static void ext4_finish_bio(struct bio *bio)
101{
102	struct bio_vec *bvec;
103	struct bvec_iter_all iter_all;
104
105	bio_for_each_segment_all(bvec, bio, iter_all) {
106		struct page *page = bvec->bv_page;
107		struct page *bounce_page = NULL;
108		struct buffer_head *bh, *head;
109		unsigned bio_start = bvec->bv_offset;
110		unsigned bio_end = bio_start + bvec->bv_len;
111		unsigned under_io = 0;
112		unsigned long flags;
113
114		if (fscrypt_is_bounce_page(page)) {
115			bounce_page = page;
116			page = fscrypt_pagecache_page(bounce_page);
117		}
118
119		if (bio->bi_status) {
120			SetPageError(page);
121			mapping_set_error(page->mapping, -EIO);
122		}
123		bh = head = page_buffers(page);
124		/*
125		 * We check all buffers in the page under b_uptodate_lock
126		 * to avoid races with other end io clearing async_write flags
127		 */
128		spin_lock_irqsave(&head->b_uptodate_lock, flags);
129		do {
130			if (bh_offset(bh) < bio_start ||
131			    bh_offset(bh) + bh->b_size > bio_end) {
132				if (buffer_async_write(bh))
133					under_io++;
134				continue;
135			}
136			clear_buffer_async_write(bh);
137			if (bio->bi_status) {
138				set_buffer_write_io_error(bh);
139				buffer_io_error(bh);
140			}
141		} while ((bh = bh->b_this_page) != head);
142		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
143		if (!under_io) {
144			fscrypt_free_bounce_page(bounce_page);
145			end_page_writeback(page);
146		}
147	}
148}
149
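/*
 * Final teardown of an io_end: finish and free the bios that ext4_end_bio()
 * chained through bi_private, free the io_end_vec list and return the
 * io_end itself to its slab cache.  Callers must already have dropped the
 * EXT4_IO_END_UNWRITTEN state and used up the handle.
 */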
150static void ext4_release_io_end(ext4_io_end_t *io_end)
151{
152	struct bio *bio, *next_bio;
153
154	BUG_ON(!list_empty(&io_end->list));
155	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
156	WARN_ON(io_end->handle);
157
158	for (bio = io_end->bio; bio; bio = next_bio) {
159		next_bio = bio->bi_private;
160		ext4_finish_bio(bio);
161		bio_put(bio);
162	}
163	ext4_free_io_end_vec(io_end);
164	kmem_cache_free(io_end_cachep, io_end);
165}
166
167/*
168 * Check a range of space and convert unwritten extents to written. Note that
169 * we are protected from truncate touching same part of extent tree by the
170 * fact that truncate code waits for all DIO to finish (thus exclusion from
171 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
172 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
173 * completed (happens from ext4_free_ioend()).
174 */
175static int ext4_end_io_end(ext4_io_end_t *io_end)
176{
177	struct inode *inode = io_end->inode;
178	handle_t *handle = io_end->handle;
179	int ret = 0;
180
181	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
182		   "list->prev 0x%p\n",
183		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
184
185	io_end->handle = NULL;	/* Following call will use up the handle */
186	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
187	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
188		ext4_msg(inode->i_sb, KERN_EMERG,
189			 "failed to convert unwritten extents to written "
190			 "extents -- potential data loss!  "
191			 "(inode %lu, error %d)", inode->i_ino, ret);
192	}
193	ext4_clear_io_unwritten_flag(io_end);
194	ext4_release_io_end(io_end);
195	return ret;
196}
197
198static void dump_completed_IO(struct inode *inode, struct list_head *head)
199{
200#ifdef	EXT4FS_DEBUG
201	struct list_head *cur, *before, *after;
202	ext4_io_end_t *io_end, *io_end0, *io_end1;
203
204	if (list_empty(head))
205		return;
206
207	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
208	list_for_each_entry(io_end, head, list) {
209		cur = &io_end->list;
210		before = cur->prev;
211		io_end0 = container_of(before, ext4_io_end_t, list);
212		after = cur->next;
213		io_end1 = container_of(after, ext4_io_end_t, list);
214
215		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
216			    io_end, inode->i_ino, io_end0, io_end1);
217	}
218#endif
219}
220
221/* Add the io_end to per-inode completed end_io list. */
222static void ext4_add_complete_io(ext4_io_end_t *io_end)
223{
224	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
225	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
226	struct workqueue_struct *wq;
227	unsigned long flags;
228
229	/* Only reserved conversions from writeback should enter here */
230	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
231	WARN_ON(!io_end->handle && sbi->s_journal);
232	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
233	wq = sbi->rsv_conversion_wq;
234	if (list_empty(&ei->i_rsv_conversion_list))
235		queue_work(wq, &ei->i_rsv_conversion_work);
236	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
237	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
238}
239
240static int ext4_do_flush_completed_IO(struct inode *inode,
241				      struct list_head *head)
242{
243	ext4_io_end_t *io_end;
244	struct list_head unwritten;
245	unsigned long flags;
246	struct ext4_inode_info *ei = EXT4_I(inode);
247	int err, ret = 0;
248
249	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
250	dump_completed_IO(inode, head);
251	list_replace_init(head, &unwritten);
252	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
253
254	while (!list_empty(&unwritten)) {
255		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
256		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
257		list_del_init(&io_end->list);
258
259		err = ext4_end_io_end(io_end);
260		if (unlikely(!ret && err))
261			ret = err;
262	}
263	return ret;
264}
265
266/*
267 * work on completed IO, to convert unwritten extents to written extents
268 */
269void ext4_end_io_rsv_work(struct work_struct *work)
270{
271	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
272						  i_rsv_conversion_work);
273	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
274}
275
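/*
 * Allocate and initialise an io_end, returning it with a single reference
 * held by the caller.  As a rough usage sketch only (the calling code lives
 * in inode.c, not in this file, and the details below are illustrative
 * assumptions), buffered writeback is expected to drive this API roughly
 * like so:
 *
 *	struct ext4_io_submit io;
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_KERNEL);
 *	...
 *	ext4_bio_write_page(&io, page, len);	(once per dirty page)
 *	...
 *	ext4_io_submit(&io);			(flush the pending bio)
 *	ext4_put_io_end(io.io_end);		(or ext4_put_io_end_defer())
 */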
276ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
277{
278	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);
279
280	if (io_end) {
281		io_end->inode = inode;
282		INIT_LIST_HEAD(&io_end->list);
283		INIT_LIST_HEAD(&io_end->list_vec);
284		refcount_set(&io_end->count, 1);
285	}
286	return io_end;
287}
288
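/*
 * Drop a reference without doing extent conversion in the caller's context.
 * On the final put the io_end is either released immediately (nothing to
 * convert) or handed to ext4_add_complete_io() so the conversion happens
 * later from the reserved-conversion workqueue.
 */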
289void ext4_put_io_end_defer(ext4_io_end_t *io_end)
290{
291	if (refcount_dec_and_test(&io_end->count)) {
292		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
293				list_empty(&io_end->list_vec)) {
294			ext4_release_io_end(io_end);
295			return;
296		}
297		ext4_add_complete_io(io_end);
298	}
299}
300
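/*
 * Synchronous counterpart of ext4_put_io_end_defer(): on the final put any
 * unwritten extents are converted right here using io_end->handle, and the
 * conversion result is returned after the io_end is released.
 */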
301int ext4_put_io_end(ext4_io_end_t *io_end)
302{
303	int err = 0;
304
305	if (refcount_dec_and_test(&io_end->count)) {
306		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
307			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
308								io_end);
309			io_end->handle = NULL;
310			ext4_clear_io_unwritten_flag(io_end);
311		}
312		ext4_release_io_end(io_end);
313	}
314	return err;
315}
316
317ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
318{
319	refcount_inc(&io_end->count);
320	return io_end;
321}
322
323/* BIO completion function for page writeback */
324static void ext4_end_bio(struct bio *bio)
325{
326	ext4_io_end_t *io_end = bio->bi_private;
327	sector_t bi_sector = bio->bi_iter.bi_sector;
328
329	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
330		      bio->bi_bdev,
331		      (long long) bio->bi_iter.bi_sector,
332		      (unsigned) bio_sectors(bio),
333		      bio->bi_status)) {
334		ext4_finish_bio(bio);
335		bio_put(bio);
336		return;
337	}
338	bio->bi_end_io = NULL;
339
340	if (bio->bi_status) {
341		struct inode *inode = io_end->inode;
342
343		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
344			     "starting block %llu)",
345			     bio->bi_status, inode->i_ino,
346			     (unsigned long long)
347			     bi_sector >> (inode->i_blkbits - 9));
348		mapping_set_error(inode->i_mapping,
349				blk_status_to_errno(bio->bi_status));
350	}
351
352	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
353		/*
354		 * Link bio into list hanging from io_end. We have to do it
355		 * atomically as bio completions can be racing against each
356		 * other.
357		 */
358		bio->bi_private = xchg(&io_end->bio, bio);
359		ext4_put_io_end_defer(io_end);
360	} else {
361		/*
362		 * Drop io_end reference early. Inode can get freed once
363		 * we finish the bio.
364		 */
365		ext4_put_io_end_defer(io_end);
366		ext4_finish_bio(bio);
367		bio_put(bio);
368	}
369}
370
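/*
 * Submit the bio accumulated in io->io_bio, tagging it REQ_SYNC for
 * WB_SYNC_ALL writeback, and clear io->io_bio so that the next
 * io_submit_add_bh() starts a fresh bio.
 */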
371void ext4_io_submit(struct ext4_io_submit *io)
372{
373	struct bio *bio = io->io_bio;
374
375	if (bio) {
376		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
377			io->io_bio->bi_opf |= REQ_SYNC;
378		submit_bio(io->io_bio);
379	}
380	io->io_bio = NULL;
381}
382
383void ext4_io_submit_init(struct ext4_io_submit *io,
384			 struct writeback_control *wbc)
385{
386	io->io_wbc = wbc;
387	io->io_bio = NULL;
388	io->io_end = NULL;
389}
390
391static void io_submit_init_bio(struct ext4_io_submit *io,
392			       struct buffer_head *bh)
393{
394	struct bio *bio;
395
396	/*
397	 * bio_alloc will _always_ be able to allocate a bio if
398	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
399	 */
400	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
401	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
402	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
403	bio->bi_end_io = ext4_end_bio;
404	bio->bi_private = ext4_get_io_end(io->io_end);
405	io->io_bio = bio;
406	io->io_next_block = bh->b_blocknr;
407	wbc_init_bio(io->io_wbc, bio);
408}
409
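/*
 * Queue one buffer for write-out.  The pending bio is submitted and a new
 * one started whenever the buffer is not contiguous with the previous
 * block, the fscrypt contexts cannot be merged, or bio_add_page() cannot
 * take the whole buffer (in which case the add is retried on a fresh bio).
 */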
410static void io_submit_add_bh(struct ext4_io_submit *io,
411			     struct inode *inode,
412			     struct page *page,
413			     struct buffer_head *bh)
414{
415	int ret;
416
417	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
418			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
419submit_and_retry:
420		ext4_io_submit(io);
421	}
422	if (io->io_bio == NULL)
423		io_submit_init_bio(io, bh);
424	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
425	if (ret != bh->b_size)
426		goto submit_and_retry;
427	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
428	io->io_next_block++;
429}
430
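/*
 * Write out the dirty buffers of one locked page: zero the tail beyond
 * 'len', mark the writable buffers async_write, encrypt them into a bounce
 * page when the inode uses fs-layer fscrypt, set page writeback and feed
 * each marked buffer to io_submit_add_bh().  The resulting bio stays
 * pending in 'io' until ext4_io_submit() is called.
 */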
431int ext4_bio_write_page(struct ext4_io_submit *io,
432			struct page *page,
433			int len)
434{
435	struct page *bounce_page = NULL;
436	struct inode *inode = page->mapping->host;
437	unsigned block_start;
438	struct buffer_head *bh, *head;
439	int ret = 0;
440	int nr_to_submit = 0;
441	struct writeback_control *wbc = io->io_wbc;
442	bool keep_towrite = false;
443
444	BUG_ON(!PageLocked(page));
445	BUG_ON(PageWriteback(page));
446
447	ClearPageError(page);
448
449	/*
450	 * Comments copied from block_write_full_page:
451	 *
452	 * The page straddles i_size.  It must be zeroed out on each and every
453	 * writepage invocation because it may be mmapped.  "A file is mapped
454	 * in multiples of the page size.  For a file that is not a multiple of
455	 * the page size, the remaining memory is zeroed when mapped, and
456	 * writes to that region are not written out to the file."
457	 */
458	if (len < PAGE_SIZE)
459		zero_user_segment(page, len, PAGE_SIZE);
460	/*
461	 * In the first loop we prepare and mark buffers to submit. We have to
462	 * mark all buffers in the page before submitting so that
463	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
464	 * on the first buffer finishes and we are still working on submitting
465	 * the second buffer.
466	 */
467	bh = head = page_buffers(page);
468	do {
469		block_start = bh_offset(bh);
470		if (block_start >= len) {
471			clear_buffer_dirty(bh);
472			set_buffer_uptodate(bh);
473			continue;
474		}
475		if (!buffer_dirty(bh) || buffer_delay(bh) ||
476		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
477			/* A hole? We can safely clear the dirty bit */
478			if (!buffer_mapped(bh))
479				clear_buffer_dirty(bh);
480			/*
481			 * Keeping dirty some buffer we cannot write? Make sure
482			 * to redirty the page and keep TOWRITE tag so that
483			 * racing WB_SYNC_ALL writeback does not skip the page.
484			 * This happens e.g. when doing writeout for
485			 * transaction commit.
486			 */
487			if (buffer_dirty(bh)) {
488				if (!PageDirty(page))
489					redirty_page_for_writepage(wbc, page);
490				keep_towrite = true;
491			}
492			continue;
493		}
494		if (buffer_new(bh))
495			clear_buffer_new(bh);
496		set_buffer_async_write(bh);
497		clear_buffer_dirty(bh);
498		nr_to_submit++;
499	} while ((bh = bh->b_this_page) != head);
500
501	/* Nothing to submit? Just unlock the page... */
502	if (!nr_to_submit)
503		goto unlock;
504
505	bh = head = page_buffers(page);
506
507	/*
508	 * If any blocks are being written to an encrypted file, encrypt them
509	 * into a bounce page.  For simplicity, just encrypt until the last
510	 * block which might be needed.  This may cause some unneeded blocks
511	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
512	 * can't happen in the common case of blocksize == PAGE_SIZE.
513	 */
514	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
515		gfp_t gfp_flags = GFP_NOFS;
516		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
517
518		/*
519		 * Since bounce page allocation uses a mempool, we can only use
520		 * a waiting mask (i.e. request guaranteed allocation) on the
521		 * first page of the bio.  Otherwise it can deadlock.
522		 */
523		if (io->io_bio)
524			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
525	retry_encrypt:
526		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
527							       0, gfp_flags);
528		if (IS_ERR(bounce_page)) {
529			ret = PTR_ERR(bounce_page);
530			if (ret == -ENOMEM &&
531			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
532				gfp_t new_gfp_flags = GFP_NOFS;
533				if (io->io_bio)
534					ext4_io_submit(io);
535				else
536					new_gfp_flags |= __GFP_NOFAIL;
537				memalloc_retry_wait(gfp_flags);
538				gfp_flags = new_gfp_flags;
539				goto retry_encrypt;
540			}
541
542			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
543			redirty_page_for_writepage(wbc, page);
544			do {
545				if (buffer_async_write(bh)) {
546					clear_buffer_async_write(bh);
547					set_buffer_dirty(bh);
548				}
549				bh = bh->b_this_page;
550			} while (bh != head);
551			goto unlock;
552		}
553	}
554
555	if (keep_towrite)
556		set_page_writeback_keepwrite(page);
557	else
558		set_page_writeback(page);
559
560	/* Now submit buffers to write */
561	do {
562		if (!buffer_async_write(bh))
563			continue;
564		io_submit_add_bh(io, inode,
565				 bounce_page ? bounce_page : page, bh);
566	} while ((bh = bh->b_this_page) != head);
567unlock:
568	unlock_page(page);
569	return ret;
570}
v4.6
 
  1/*
  2 * linux/fs/ext4/page-io.c
  3 *
  4 * This contains the new page_io functions for ext4
  5 *
  6 * Written by Theodore Ts'o, 2010.
  7 */
  8
  9#include <linux/fs.h>
 10#include <linux/time.h>
 11#include <linux/highuid.h>
 12#include <linux/pagemap.h>
 13#include <linux/quotaops.h>
 14#include <linux/string.h>
 15#include <linux/buffer_head.h>
 16#include <linux/writeback.h>
 17#include <linux/pagevec.h>
 18#include <linux/mpage.h>
 19#include <linux/namei.h>
 20#include <linux/uio.h>
 21#include <linux/bio.h>
 22#include <linux/workqueue.h>
 23#include <linux/kernel.h>
 24#include <linux/slab.h>
 25#include <linux/mm.h>
 26#include <linux/backing-dev.h>
 27
 28#include "ext4_jbd2.h"
 29#include "xattr.h"
 30#include "acl.h"
 31
 32static struct kmem_cache *io_end_cachep;
 33
 34int __init ext4_init_pageio(void)
 35{
 36	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
 37	if (io_end_cachep == NULL)
 38		return -ENOMEM;
 39	return 0;
 40}
 41
 42void ext4_exit_pageio(void)
 43{
 44	kmem_cache_destroy(io_end_cachep);
 45}
 46
 47/*
 48 * Print a buffer I/O error compatible with fs/buffer.c.  This
 49 * provides compatibility with dmesg scrapers that look for a specific
 50 * buffer I/O error message.  We really need a unified error reporting
 51 * structure to userspace ala Digital Unix's uerf system, but it's
 52 * probably not going to happen in my lifetime, due to LKML politics...
 53 */
 54static void buffer_io_error(struct buffer_head *bh)
 55{
 56	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
 57		       bh->b_bdev,
 58			(unsigned long long)bh->b_blocknr);
 59}
 60
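/*
 * v4.6 variant of the completion path above: buffers are checked under
 * bit_spin_lock(BH_Uptodate_Lock) with local interrupts disabled rather
 * than the dedicated b_uptodate_lock spinlock, and encrypted writes are
 * unwound via CONFIG_EXT4_FS_ENCRYPTION control pages instead of the
 * fscrypt bounce-page helpers.
 */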
 61static void ext4_finish_bio(struct bio *bio)
 62{
 63	int i;
 64	struct bio_vec *bvec;
 65
 66	bio_for_each_segment_all(bvec, bio, i) {
 67		struct page *page = bvec->bv_page;
 68#ifdef CONFIG_EXT4_FS_ENCRYPTION
 69		struct page *data_page = NULL;
 70		struct ext4_crypto_ctx *ctx = NULL;
 71#endif
 72		struct buffer_head *bh, *head;
 73		unsigned bio_start = bvec->bv_offset;
 74		unsigned bio_end = bio_start + bvec->bv_len;
 75		unsigned under_io = 0;
 76		unsigned long flags;
 77
 78		if (!page)
 79			continue;
 80
 81#ifdef CONFIG_EXT4_FS_ENCRYPTION
 82		if (!page->mapping) {
 83			/* The bounce data pages are unmapped. */
 84			data_page = page;
 85			ctx = (struct ext4_crypto_ctx *)page_private(data_page);
 86			page = ctx->w.control_page;
 87		}
 88#endif
 89
 90		if (bio->bi_error) {
 91			SetPageError(page);
 92			set_bit(AS_EIO, &page->mapping->flags);
 93		}
 94		bh = head = page_buffers(page);
 95		/*
 96		 * We check all buffers in the page under BH_Uptodate_Lock
 97		 * to avoid races with other end io clearing async_write flags
 98		 */
 99		local_irq_save(flags);
100		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
101		do {
102			if (bh_offset(bh) < bio_start ||
103			    bh_offset(bh) + bh->b_size > bio_end) {
104				if (buffer_async_write(bh))
105					under_io++;
106				continue;
107			}
108			clear_buffer_async_write(bh);
109			if (bio->bi_error)
110				buffer_io_error(bh);
111		} while ((bh = bh->b_this_page) != head);
112		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
113		local_irq_restore(flags);
114		if (!under_io) {
115#ifdef CONFIG_EXT4_FS_ENCRYPTION
116			if (ctx)
117				ext4_restore_control_page(data_page);
118#endif
119			end_page_writeback(page);
120		}
121	}
122}
123
124static void ext4_release_io_end(ext4_io_end_t *io_end)
125{
126	struct bio *bio, *next_bio;
127
128	BUG_ON(!list_empty(&io_end->list));
129	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
130	WARN_ON(io_end->handle);
131
132	for (bio = io_end->bio; bio; bio = next_bio) {
133		next_bio = bio->bi_private;
134		ext4_finish_bio(bio);
135		bio_put(bio);
136	}
137	kmem_cache_free(io_end_cachep, io_end);
138}
139
140/*
141 * Check a range of space and convert unwritten extents to written. Note that
142 * we are protected from truncate touching same part of extent tree by the
143 * fact that truncate code waits for all DIO to finish (thus exclusion from
144 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
145 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
146 * completed (happens from ext4_free_ioend()).
147 */
148static int ext4_end_io(ext4_io_end_t *io)
149{
150	struct inode *inode = io->inode;
151	loff_t offset = io->offset;
152	ssize_t size = io->size;
153	handle_t *handle = io->handle;
154	int ret = 0;
155
156	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
157		   "list->prev 0x%p\n",
158		   io, inode->i_ino, io->list.next, io->list.prev);
159
160	io->handle = NULL;	/* Following call will use up the handle */
161	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
162	if (ret < 0) {
163		ext4_msg(inode->i_sb, KERN_EMERG,
164			 "failed to convert unwritten extents to written "
165			 "extents -- potential data loss!  "
166			 "(inode %lu, offset %llu, size %zd, error %d)",
167			 inode->i_ino, offset, size, ret);
168	}
169	ext4_clear_io_unwritten_flag(io);
170	ext4_release_io_end(io);
171	return ret;
172}
173
174static void dump_completed_IO(struct inode *inode, struct list_head *head)
175{
176#ifdef	EXT4FS_DEBUG
177	struct list_head *cur, *before, *after;
178	ext4_io_end_t *io, *io0, *io1;
179
180	if (list_empty(head))
181		return;
182
183	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
184	list_for_each_entry(io, head, list) {
185		cur = &io->list;
186		before = cur->prev;
187		io0 = container_of(before, ext4_io_end_t, list);
188		after = cur->next;
189		io1 = container_of(after, ext4_io_end_t, list);
190
191		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
192			    io, inode->i_ino, io0, io1);
193	}
194#endif
195}
196
197/* Add the io_end to per-inode completed end_io list. */
198static void ext4_add_complete_io(ext4_io_end_t *io_end)
199{
200	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
201	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
202	struct workqueue_struct *wq;
203	unsigned long flags;
204
205	/* Only reserved conversions from writeback should enter here */
206	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
207	WARN_ON(!io_end->handle && sbi->s_journal);
208	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
209	wq = sbi->rsv_conversion_wq;
210	if (list_empty(&ei->i_rsv_conversion_list))
211		queue_work(wq, &ei->i_rsv_conversion_work);
212	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
213	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
214}
215
216static int ext4_do_flush_completed_IO(struct inode *inode,
217				      struct list_head *head)
218{
219	ext4_io_end_t *io;
220	struct list_head unwritten;
221	unsigned long flags;
222	struct ext4_inode_info *ei = EXT4_I(inode);
223	int err, ret = 0;
224
225	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
226	dump_completed_IO(inode, head);
227	list_replace_init(head, &unwritten);
228	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
229
230	while (!list_empty(&unwritten)) {
231		io = list_entry(unwritten.next, ext4_io_end_t, list);
232		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
233		list_del_init(&io->list);
234
235		err = ext4_end_io(io);
236		if (unlikely(!ret && err))
237			ret = err;
238	}
239	return ret;
240}
241
242/*
243 * work on completed IO, to convert unwritten extents to written extents
244 */
245void ext4_end_io_rsv_work(struct work_struct *work)
246{
247	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
248						  i_rsv_conversion_work);
249	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
250}
251
252ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
253{
254	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
255	if (io) {
256		io->inode = inode;
257		INIT_LIST_HEAD(&io->list);
258		atomic_set(&io->count, 1);
259	}
260	return io;
261}
262
263void ext4_put_io_end_defer(ext4_io_end_t *io_end)
264{
265	if (atomic_dec_and_test(&io_end->count)) {
266		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
267			ext4_release_io_end(io_end);
268			return;
269		}
270		ext4_add_complete_io(io_end);
271	}
272}
273
274int ext4_put_io_end(ext4_io_end_t *io_end)
275{
276	int err = 0;
277
278	if (atomic_dec_and_test(&io_end->count)) {
279		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
280			err = ext4_convert_unwritten_extents(io_end->handle,
281						io_end->inode, io_end->offset,
282						io_end->size);
283			io_end->handle = NULL;
284			ext4_clear_io_unwritten_flag(io_end);
285		}
286		ext4_release_io_end(io_end);
287	}
288	return err;
289}
290
291ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
292{
293	atomic_inc(&io_end->count);
294	return io_end;
295}
296
297/* BIO completion function for page writeback */
298static void ext4_end_bio(struct bio *bio)
299{
300	ext4_io_end_t *io_end = bio->bi_private;
301	sector_t bi_sector = bio->bi_iter.bi_sector;
302
303	BUG_ON(!io_end);
304	bio->bi_end_io = NULL;
305
306	if (bio->bi_error) {
307		struct inode *inode = io_end->inode;
308
309		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
310			     "(offset %llu size %ld starting block %llu)",
311			     bio->bi_error, inode->i_ino,
312			     (unsigned long long) io_end->offset,
313			     (long) io_end->size,
314			     (unsigned long long)
315			     bi_sector >> (inode->i_blkbits - 9));
316		mapping_set_error(inode->i_mapping, bio->bi_error);
317	}
318
319	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
320		/*
321		 * Link bio into list hanging from io_end. We have to do it
322		 * atomically as bio completions can be racing against each
323		 * other.
324		 */
325		bio->bi_private = xchg(&io_end->bio, bio);
326		ext4_put_io_end_defer(io_end);
327	} else {
328		/*
329		 * Drop io_end reference early. Inode can get freed once
330		 * we finish the bio.
331		 */
332		ext4_put_io_end_defer(io_end);
333		ext4_finish_bio(bio);
334		bio_put(bio);
335	}
336}
337
338void ext4_io_submit(struct ext4_io_submit *io)
339{
340	struct bio *bio = io->io_bio;
341
342	if (bio) {
343		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
344			    WRITE_SYNC : WRITE;
345		bio_get(io->io_bio);
346		submit_bio(io_op, io->io_bio);
347		bio_put(io->io_bio);
348	}
349	io->io_bio = NULL;
350}
351
352void ext4_io_submit_init(struct ext4_io_submit *io,
353			 struct writeback_control *wbc)
354{
355	io->io_wbc = wbc;
356	io->io_bio = NULL;
357	io->io_end = NULL;
358}
359
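/*
 * v4.6 variant: bio_alloc() failure is checked and -ENOMEM propagated so
 * that ext4_bio_write_page() can redirty the page; the newer version above
 * instead relies on bio_alloc() never failing when __GFP_DIRECT_RECLAIM is
 * set, as its comment notes.
 */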
360static int io_submit_init_bio(struct ext4_io_submit *io,
361			      struct buffer_head *bh)
362{
363	struct bio *bio;
364
365	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
366	if (!bio)
367		return -ENOMEM;
368	wbc_init_bio(io->io_wbc, bio);
369	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
370	bio->bi_bdev = bh->b_bdev;
371	bio->bi_end_io = ext4_end_bio;
372	bio->bi_private = ext4_get_io_end(io->io_end);
373	io->io_bio = bio;
374	io->io_next_block = bh->b_blocknr;
375	return 0;
376}
377
378static int io_submit_add_bh(struct ext4_io_submit *io,
379			    struct inode *inode,
380			    struct page *page,
381			    struct buffer_head *bh)
382{
383	int ret;
384
385	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
386submit_and_retry:
387		ext4_io_submit(io);
388	}
389	if (io->io_bio == NULL) {
390		ret = io_submit_init_bio(io, bh);
391		if (ret)
392			return ret;
393	}
394	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
395	if (ret != bh->b_size)
396		goto submit_and_retry;
397	wbc_account_io(io->io_wbc, page, bh->b_size);
398	io->io_next_block++;
399	return 0;
400}
401
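/*
 * v4.6 variant: wbc and keep_towrite are passed in by the caller, page
 * writeback is set before the buffers are prepared, and encryption uses
 * ext4_encrypt()/ext4_restore_control_page() with a congestion_wait()
 * retry on ENOMEM rather than the fscrypt bounce-page path used above.
 */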
402int ext4_bio_write_page(struct ext4_io_submit *io,
403			struct page *page,
404			int len,
405			struct writeback_control *wbc,
406			bool keep_towrite)
407{
408	struct page *data_page = NULL;
409	struct inode *inode = page->mapping->host;
410	unsigned block_start, blocksize;
411	struct buffer_head *bh, *head;
412	int ret = 0;
413	int nr_submitted = 0;
414	int nr_to_submit = 0;
415
416	blocksize = 1 << inode->i_blkbits;
417
418	BUG_ON(!PageLocked(page));
419	BUG_ON(PageWriteback(page));
420
421	if (keep_towrite)
422		set_page_writeback_keepwrite(page);
423	else
424		set_page_writeback(page);
425	ClearPageError(page);
426
427	/*
428	 * Comments copied from block_write_full_page:
429	 *
430	 * The page straddles i_size.  It must be zeroed out on each and every
431	 * writepage invocation because it may be mmapped.  "A file is mapped
432	 * in multiples of the page size.  For a file that is not a multiple of
433	 * the page size, the remaining memory is zeroed when mapped, and
434	 * writes to that region are not written out to the file."
435	 */
436	if (len < PAGE_SIZE)
437		zero_user_segment(page, len, PAGE_SIZE);
438	/*
439	 * In the first loop we prepare and mark buffers to submit. We have to
440	 * mark all buffers in the page before submitting so that
441	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
442	 * on the first buffer finishes and we are still working on submitting
443	 * the second buffer.
444	 */
445	bh = head = page_buffers(page);
446	do {
447		block_start = bh_offset(bh);
448		if (block_start >= len) {
449			clear_buffer_dirty(bh);
450			set_buffer_uptodate(bh);
451			continue;
452		}
453		if (!buffer_dirty(bh) || buffer_delay(bh) ||
454		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
455			/* A hole? We can safely clear the dirty bit */
456			if (!buffer_mapped(bh))
457				clear_buffer_dirty(bh);
458			if (io->io_bio)
459				ext4_io_submit(io);
460			continue;
461		}
462		if (buffer_new(bh)) {
463			clear_buffer_new(bh);
464			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
465		}
466		set_buffer_async_write(bh);
467		nr_to_submit++;
468	} while ((bh = bh->b_this_page) != head);
469
470	bh = head = page_buffers(page);
471
472	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
473	    nr_to_submit) {
474		gfp_t gfp_flags = GFP_NOFS;
475
476	retry_encrypt:
477		data_page = ext4_encrypt(inode, page, gfp_flags);
478		if (IS_ERR(data_page)) {
479			ret = PTR_ERR(data_page);
480			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
481				if (io->io_bio) {
482					ext4_io_submit(io);
483					congestion_wait(BLK_RW_ASYNC, HZ/50);
484				}
485				gfp_flags |= __GFP_NOFAIL;
486				goto retry_encrypt;
487			}
488			data_page = NULL;
489			goto out;
490		}
491	}
492
493	/* Now submit buffers to write */
494	do {
495		if (!buffer_async_write(bh))
496			continue;
497		ret = io_submit_add_bh(io, inode,
498				       data_page ? data_page : page, bh);
499		if (ret) {
500			/*
501			 * We only get here on ENOMEM.  Not much else
502			 * we can do but mark the page as dirty, and
503			 * better luck next time.
504			 */
505			break;
506		}
507		nr_submitted++;
508		clear_buffer_dirty(bh);
509	} while ((bh = bh->b_this_page) != head);
510
511	/* Error stopped previous loop? Clean up buffers... */
512	if (ret) {
513	out:
514		if (data_page)
515			ext4_restore_control_page(data_page);
516		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
517		redirty_page_for_writepage(wbc, page);
518		do {
519			clear_buffer_async_write(bh);
520			bh = bh->b_this_page;
521		} while (bh != head);
522	}
523	unlock_page(page);
524	/* Nothing submitted - we have to end page writeback */
525	if (!nr_submitted)
526		end_page_writeback(page);
527	return ret;
528}