   1/*
   2 *  linux/fs/ext4/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  16 *	(jj@sunsite.ms.mff.cuni.cz)
  17 *
  18 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  19 */
  20
  21#include <linux/fs.h>
  22#include <linux/time.h>
  23#include <linux/jbd2.h>
  24#include <linux/highuid.h>
  25#include <linux/pagemap.h>
  26#include <linux/quotaops.h>
  27#include <linux/string.h>
  28#include <linux/buffer_head.h>
  29#include <linux/writeback.h>
  30#include <linux/pagevec.h>
  31#include <linux/mpage.h>
  32#include <linux/namei.h>
  33#include <linux/uio.h>
  34#include <linux/bio.h>
  35#include <linux/workqueue.h>
  36#include <linux/kernel.h>
  37#include <linux/printk.h>
  38#include <linux/slab.h>
  39#include <linux/ratelimit.h>
  40
  41#include "ext4_jbd2.h"
  42#include "xattr.h"
  43#include "acl.h"
  44#include "truncate.h"
  45
  46#include <trace/events/ext4.h>
  47
  48#define MPAGE_DA_EXTENT_TAIL 0x01
  49
  50static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  51			      struct ext4_inode_info *ei)
  52{
  53	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  54	__u16 csum_lo;
  55	__u16 csum_hi = 0;
  56	__u32 csum;
  57
  58	csum_lo = raw->i_checksum_lo;
  59	raw->i_checksum_lo = 0;
  60	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  61	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  62		csum_hi = raw->i_checksum_hi;
  63		raw->i_checksum_hi = 0;
  64	}
  65
  66	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
  67			   EXT4_INODE_SIZE(inode->i_sb));
  68
  69	raw->i_checksum_lo = csum_lo;
  70	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  71	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  72		raw->i_checksum_hi = csum_hi;
  73
  74	return csum;
  75}
  76
  77static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  78				  struct ext4_inode_info *ei)
  79{
  80	__u32 provided, calculated;
  81
  82	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  83	    cpu_to_le32(EXT4_OS_LINUX) ||
  84	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
  85		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
  86		return 1;
  87
  88	provided = le16_to_cpu(raw->i_checksum_lo);
  89	calculated = ext4_inode_csum(inode, raw, ei);
  90	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  91	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  92		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
  93	else
  94		calculated &= 0xFFFF;
  95
  96	return provided == calculated;
  97}
  98
  99static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 100				struct ext4_inode_info *ei)
 101{
 102	__u32 csum;
 103
 104	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 105	    cpu_to_le32(EXT4_OS_LINUX) ||
 106	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
 107		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
 108		return;
 109
 110	csum = ext4_inode_csum(inode, raw, ei);
 111	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 112	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 113	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 114		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 115}
 116
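/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above split one 32-bit checksum across the two 16-bit on-disk fields.
 * The hi half is stored only when the large inode carries i_checksum_hi.
 */
static inline void example_store_csum(struct ext4_inode *raw, __u32 csum,
				      int has_hi)
{
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (has_hi)
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}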
 117static inline int ext4_begin_ordered_truncate(struct inode *inode,
 118					      loff_t new_size)
 119{
 120	trace_ext4_begin_ordered_truncate(inode, new_size);
 121	/*
 122	 * If jinode is zero, then we never opened the file for
 123	 * writing, so there's no need to call
  124	 * jbd2_journal_begin_ordered_truncate() since there are no
  125	 * outstanding writes we need to flush.
 126	 */
 127	if (!EXT4_I(inode)->jinode)
 128		return 0;
 129	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 130						   EXT4_I(inode)->jinode,
 131						   new_size);
 132}
 133
 134static void ext4_invalidatepage(struct page *page, unsigned long offset);
 135static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
 136				   struct buffer_head *bh_result, int create);
 137static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
 138static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
 139static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 140static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 141static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
 142		struct inode *inode, struct page *page, loff_t from,
 143		loff_t length, int flags);
 144
 145/*
 146 * Test whether an inode is a fast symlink.
 147 */
 148static int ext4_inode_is_fast_symlink(struct inode *inode)
 149{
 150	int ea_blocks = EXT4_I(inode)->i_file_acl ?
 151		(inode->i_sb->s_blocksize >> 9) : 0;
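	/*
	 * Note: i_blocks counts 512-byte sectors, hence the s_blocksize >> 9
	 * conversion above; a fast symlink keeps its target inside i_data,
	 * so it owns no data blocks beyond a possible xattr block.
	 */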
 152
 153	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 154}
 155
 156/*
 157 * Restart the transaction associated with *handle.  This does a commit,
 158 * so before we call here everything must be consistently dirtied against
 159 * this transaction.
 160 */
 161int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 162				 int nblocks)
 163{
 164	int ret;
 165
 166	/*
 167	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
 168	 * moment, get_block can be called only for blocks inside i_size since
  169	 * the page cache has already been dropped and writes are blocked by
 170	 * i_mutex. So we can safely drop the i_data_sem here.
 171	 */
 172	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 173	jbd_debug(2, "restarting handle %p\n", handle);
 174	up_write(&EXT4_I(inode)->i_data_sem);
 175	ret = ext4_journal_restart(handle, nblocks);
 176	down_write(&EXT4_I(inode)->i_data_sem);
 177	ext4_discard_preallocations(inode);
 178
 179	return ret;
 180}
 181
 182/*
 183 * Called at the last iput() if i_nlink is zero.
 184 */
 185void ext4_evict_inode(struct inode *inode)
 186{
 187	handle_t *handle;
 188	int err;
 189
 190	trace_ext4_evict_inode(inode);
 191
 192	ext4_ioend_wait(inode);
 193
 194	if (inode->i_nlink) {
 195		/*
 196		 * When journalling data dirty buffers are tracked only in the
 197		 * journal. So although mm thinks everything is clean and
 198		 * ready for reaping the inode might still have some pages to
 199		 * write in the running transaction or waiting to be
 200		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
 201		 * (via truncate_inode_pages()) to discard these buffers can
 202		 * cause data loss. Also even if we did not discard these
 203		 * buffers, we would have no way to find them after the inode
 204		 * is reaped and thus user could see stale data if he tries to
 205		 * read them before the transaction is checkpointed. So be
 206		 * careful and force everything to disk here... We use
 207		 * ei->i_datasync_tid to store the newest transaction
 208		 * containing inode's data.
 209		 *
 210		 * Note that directories do not have this problem because they
 211		 * don't use page cache.
 212		 */
 213		if (ext4_should_journal_data(inode) &&
 214		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
 215			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 216			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 217
 218			jbd2_log_start_commit(journal, commit_tid);
 219			jbd2_log_wait_commit(journal, commit_tid);
 220			filemap_write_and_wait(&inode->i_data);
 221		}
 222		truncate_inode_pages(&inode->i_data, 0);
 223		goto no_delete;
 224	}
 225
 226	if (!is_bad_inode(inode))
 227		dquot_initialize(inode);
 228
 229	if (ext4_should_order_data(inode))
 230		ext4_begin_ordered_truncate(inode, 0);
 231	truncate_inode_pages(&inode->i_data, 0);
 232
 233	if (is_bad_inode(inode))
 234		goto no_delete;
 235
 236	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
 237	if (IS_ERR(handle)) {
 238		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 239		/*
 240		 * If we're going to skip the normal cleanup, we still need to
 241		 * make sure that the in-core orphan linked list is properly
 242		 * cleaned up.
 243		 */
 244		ext4_orphan_del(NULL, inode);
 245		goto no_delete;
 246	}
 247
 248	if (IS_SYNC(inode))
 249		ext4_handle_sync(handle);
 250	inode->i_size = 0;
 251	err = ext4_mark_inode_dirty(handle, inode);
 252	if (err) {
 253		ext4_warning(inode->i_sb,
 254			     "couldn't mark inode dirty (err %d)", err);
 255		goto stop_handle;
 256	}
 257	if (inode->i_blocks)
 258		ext4_truncate(inode);
 259
 260	/*
 261	 * ext4_ext_truncate() doesn't reserve any slop when it
 262	 * restarts journal transactions; therefore there may not be
 263	 * enough credits left in the handle to remove the inode from
 264	 * the orphan list and set the dtime field.
 265	 */
 266	if (!ext4_handle_has_enough_credits(handle, 3)) {
 267		err = ext4_journal_extend(handle, 3);
 268		if (err > 0)
 269			err = ext4_journal_restart(handle, 3);
 270		if (err != 0) {
 271			ext4_warning(inode->i_sb,
 272				     "couldn't extend journal (err %d)", err);
 273		stop_handle:
 274			ext4_journal_stop(handle);
 275			ext4_orphan_del(NULL, inode);
 276			goto no_delete;
 277		}
 278	}
 279
 280	/*
 281	 * Kill off the orphan record which ext4_truncate created.
 282	 * AKPM: I think this can be inside the above `if'.
 283	 * Note that ext4_orphan_del() has to be able to cope with the
 284	 * deletion of a non-existent orphan - this is because we don't
 285	 * know if ext4_truncate() actually created an orphan record.
 286	 * (Well, we could do this if we need to, but heck - it works)
 287	 */
 288	ext4_orphan_del(handle, inode);
 289	EXT4_I(inode)->i_dtime	= get_seconds();
 290
 291	/*
 292	 * One subtle ordering requirement: if anything has gone wrong
 293	 * (transaction abort, IO errors, whatever), then we can still
 294	 * do these next steps (the fs will already have been marked as
 295	 * having errors), but we can't free the inode if the mark_dirty
 296	 * fails.
 297	 */
 298	if (ext4_mark_inode_dirty(handle, inode))
 299		/* If that failed, just do the required in-core inode clear. */
 300		ext4_clear_inode(inode);
 301	else
 302		ext4_free_inode(handle, inode);
 303	ext4_journal_stop(handle);
 304	return;
 305no_delete:
 306	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
 307}
 308
 309#ifdef CONFIG_QUOTA
 310qsize_t *ext4_get_reserved_space(struct inode *inode)
 311{
 312	return &EXT4_I(inode)->i_reserved_quota;
 313}
 314#endif
 315
 316/*
  317 * Calculate the number of metadata blocks that need to be reserved
  318 * in order to allocate a block located at @lblock.
 319 */
 320static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 321{
 322	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 323		return ext4_ext_calc_metadata_amount(inode, lblock);
 324
 325	return ext4_ind_calc_metadata_amount(inode, lblock);
 326}
 327
 328/*
 329 * Called with i_data_sem down, which is important since we can call
 330 * ext4_discard_preallocations() from here.
 331 */
 332void ext4_da_update_reserve_space(struct inode *inode,
 333					int used, int quota_claim)
 334{
 335	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 336	struct ext4_inode_info *ei = EXT4_I(inode);
 337
 338	spin_lock(&ei->i_block_reservation_lock);
 339	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 340	if (unlikely(used > ei->i_reserved_data_blocks)) {
 341		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
 342			 "with only %d reserved data blocks",
 343			 __func__, inode->i_ino, used,
 344			 ei->i_reserved_data_blocks);
 345		WARN_ON(1);
 346		used = ei->i_reserved_data_blocks;
 347	}
 348
 349	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
 350		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
 351			 "with only %d reserved metadata blocks\n", __func__,
 352			 inode->i_ino, ei->i_allocated_meta_blocks,
 353			 ei->i_reserved_meta_blocks);
 354		WARN_ON(1);
 355		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
 356	}
 357
 358	/* Update per-inode reservations */
 359	ei->i_reserved_data_blocks -= used;
 360	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
 361	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 362			   used + ei->i_allocated_meta_blocks);
 363	ei->i_allocated_meta_blocks = 0;
 364
 365	if (ei->i_reserved_data_blocks == 0) {
 366		/*
 367		 * We can release all of the reserved metadata blocks
 368		 * only when we have written all of the delayed
 369		 * allocation blocks.
 370		 */
 371		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 372				   ei->i_reserved_meta_blocks);
 373		ei->i_reserved_meta_blocks = 0;
 374		ei->i_da_metadata_calc_len = 0;
 375	}
 376	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 377
 378	/* Update quota subsystem for data blocks */
 379	if (quota_claim)
 380		dquot_claim_block(inode, EXT4_C2B(sbi, used));
 381	else {
 382		/*
 383		 * We did fallocate with an offset that is already delayed
 384		 * allocated. So on delayed allocated writeback we should
 385		 * not re-claim the quota for fallocated blocks.
 386		 */
 387		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 388	}
 389
 390	/*
 391	 * If we have done all the pending block allocations and if
 392	 * there aren't any writers on the inode, we can discard the
 393	 * inode's preallocations.
 394	 */
 395	if ((ei->i_reserved_data_blocks == 0) &&
 396	    (atomic_read(&inode->i_writecount) == 0))
 397		ext4_discard_preallocations(inode);
 398}
 399
 400static int __check_block_validity(struct inode *inode, const char *func,
 401				unsigned int line,
 402				struct ext4_map_blocks *map)
 403{
 404	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 405				   map->m_len)) {
 406		ext4_error_inode(inode, func, line, map->m_pblk,
 407				 "lblock %lu mapped to illegal pblock "
 408				 "(length %d)", (unsigned long) map->m_lblk,
 409				 map->m_len);
 410		return -EIO;
 411	}
 412	return 0;
 413}
 414
 415#define check_block_validity(inode, map)	\
 416	__check_block_validity((inode), __func__, __LINE__, (map))
 417
 418/*
 419 * Return the number of contiguous dirty pages in a given inode
 420 * starting at page frame idx.
 421 */
 422static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
 423				    unsigned int max_pages)
 424{
 425	struct address_space *mapping = inode->i_mapping;
 426	pgoff_t	index;
 427	struct pagevec pvec;
 428	pgoff_t num = 0;
 429	int i, nr_pages, done = 0;
 430
 431	if (max_pages == 0)
 432		return 0;
 433	pagevec_init(&pvec, 0);
 434	while (!done) {
 435		index = idx;
 436		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 437					      PAGECACHE_TAG_DIRTY,
 438					      (pgoff_t)PAGEVEC_SIZE);
 439		if (nr_pages == 0)
 440			break;
 441		for (i = 0; i < nr_pages; i++) {
 442			struct page *page = pvec.pages[i];
 443			struct buffer_head *bh, *head;
 444
 445			lock_page(page);
 446			if (unlikely(page->mapping != mapping) ||
 447			    !PageDirty(page) ||
 448			    PageWriteback(page) ||
 449			    page->index != idx) {
 450				done = 1;
 451				unlock_page(page);
 452				break;
 453			}
 454			if (page_has_buffers(page)) {
 455				bh = head = page_buffers(page);
 456				do {
 457					if (!buffer_delay(bh) &&
 458					    !buffer_unwritten(bh))
 459						done = 1;
 460					bh = bh->b_this_page;
 461				} while (!done && (bh != head));
 462			}
 463			unlock_page(page);
 464			if (done)
 465				break;
 466			idx++;
 467			num++;
 468			if (num >= max_pages) {
 469				done = 1;
 470				break;
 471			}
 472		}
 473		pagevec_release(&pvec);
 474	}
 475	return num;
 476}
 477
 478/*
 479 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 480 */
 481static void set_buffers_da_mapped(struct inode *inode,
 482				   struct ext4_map_blocks *map)
 483{
 484	struct address_space *mapping = inode->i_mapping;
 485	struct pagevec pvec;
 486	int i, nr_pages;
 487	pgoff_t index, end;
 488
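	/*
	 * (PAGE_CACHE_SHIFT - i_blkbits) converts logical blocks to page
	 * indices: e.g. with 4K pages and 1K blocks the shift is 2, so
	 * four blocks share a page and logical block 7 lands in page 1.
	 */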
 489	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
 490	end = (map->m_lblk + map->m_len - 1) >>
 491		(PAGE_CACHE_SHIFT - inode->i_blkbits);
 492
 493	pagevec_init(&pvec, 0);
 494	while (index <= end) {
 495		nr_pages = pagevec_lookup(&pvec, mapping, index,
 496					  min(end - index + 1,
 497					      (pgoff_t)PAGEVEC_SIZE));
 498		if (nr_pages == 0)
 499			break;
 500		for (i = 0; i < nr_pages; i++) {
 501			struct page *page = pvec.pages[i];
 502			struct buffer_head *bh, *head;
 503
 504			if (unlikely(page->mapping != mapping) ||
 505			    !PageDirty(page))
 506				break;
 507
 508			if (page_has_buffers(page)) {
 509				bh = head = page_buffers(page);
 510				do {
 511					set_buffer_da_mapped(bh);
 512					bh = bh->b_this_page;
 513				} while (bh != head);
 514			}
 515			index++;
 516		}
 517		pagevec_release(&pvec);
 518	}
 519}
 520
  521/*
  522 * The ext4_map_blocks() function tries to look up the requested blocks,
  523 * and returns if the blocks are already mapped.
  524 *
  525 * Otherwise it takes the write lock of the i_data_sem and allocates blocks
  526 * and stores the allocated blocks in the result buffer head and marks it
  527 * mapped.
  528 *
  529 * If the file is extent based, it will call ext4_ext_map_blocks();
  530 * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapped
  531 * files.
  532 *
  533 * On success, it returns the number of blocks being mapped or allocated.
  534 * If create==0 and the blocks are pre-allocated and uninitialized,
  535 * the result buffer head is unmapped. If create==1, it will make sure
  536 * the buffer head is mapped.
  537 *
  538 * It returns 0 if a plain lookup failed (blocks have not been allocated);
  539 * in that case, the buffer head is unmapped.
  540 *
  541 * It returns the error in case of allocation failure.
  542 */
 543int ext4_map_blocks(handle_t *handle, struct inode *inode,
 544		    struct ext4_map_blocks *map, int flags)
 545{
 546	int retval;
 547
 548	map->m_flags = 0;
 549	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
 550		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
 551		  (unsigned long) map->m_lblk);
 552	/*
 553	 * Try to see if we can get the block without requesting a new
 554	 * file system block.
 555	 */
 556	down_read((&EXT4_I(inode)->i_data_sem));
 557	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 558		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 559					     EXT4_GET_BLOCKS_KEEP_SIZE);
 560	} else {
 561		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 562					     EXT4_GET_BLOCKS_KEEP_SIZE);
 563	}
 564	up_read((&EXT4_I(inode)->i_data_sem));
 565
 566	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 567		int ret = check_block_validity(inode, map);
 568		if (ret != 0)
 569			return ret;
 570	}
 571
 572	/* If it is only a block(s) look up */
 573	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 574		return retval;
 575
  576	/*
  577	 * Return if the blocks have already been allocated.
  578	 *
  579	 * Note that if blocks have been preallocated,
  580	 * ext4_ext_get_block() returns create = 0
  581	 * with the buffer head unmapped.
  582	 */
 583	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 584		return retval;
 585
 586	/*
 587	 * When we call get_blocks without the create flag, the
 588	 * BH_Unwritten flag could have gotten set if the blocks
  589	 * requested were part of an uninitialized extent.  We need to
 590	 * clear this flag now that we are committed to convert all or
 591	 * part of the uninitialized extent to be an initialized
 592	 * extent.  This is because we need to avoid the combination
 593	 * of BH_Unwritten and BH_Mapped flags being simultaneously
 594	 * set on the buffer_head.
 595	 */
 596	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
 597
 598	/*
  599	 * Allocating new blocks and/or writing to an uninitialized extent
 600	 * will possibly result in updating i_data, so we take
 601	 * the write lock of i_data_sem, and call get_blocks()
 602	 * with create == 1 flag.
 603	 */
 604	down_write((&EXT4_I(inode)->i_data_sem));
 605
 606	/*
  607	 * If the caller is from the delayed allocation writeout path,
  608	 * we have already reserved fs blocks for allocation, so
  609	 * let the underlying get_block() function know to
  610	 * avoid double accounting.
 611	 */
 612	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
 613		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 614	/*
 615	 * We need to check for EXT4 here because migrate
 616	 * could have changed the inode type in between
 617	 */
 618	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 619		retval = ext4_ext_map_blocks(handle, inode, map, flags);
 620	} else {
 621		retval = ext4_ind_map_blocks(handle, inode, map, flags);
 622
 623		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 624			/*
 625			 * We allocated new blocks which will result in
 626			 * i_data's format changing.  Force the migrate
 627			 * to fail by clearing migrate flags
 628			 */
 629			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 630		}
 631
 632		/*
 633		 * Update reserved blocks/metadata blocks after successful
 634		 * block allocation which had been deferred till now. We don't
 635		 * support fallocate for non extent files. So we can update
 636		 * reserve space here.
 637		 */
 638		if ((retval > 0) &&
 639			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 640			ext4_da_update_reserve_space(inode, retval, 1);
 641	}
 642	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
 643		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 644
  645		/* If we have successfully mapped the delayed allocated blocks,
  646		 * set the BH_Da_Mapped bit on them. It's important to do this
  647		 * under the protection of i_data_sem.
  648		 */
 649		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 650			set_buffers_da_mapped(inode, map);
 651	}
 652
 653	up_write((&EXT4_I(inode)->i_data_sem));
 654	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 655		int ret = check_block_validity(inode, map);
 656		if (ret != 0)
 657			return ret;
 658	}
 659	return retval;
 660}
 661
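/*
 * Illustrative sketch (not part of the original file): a minimal lookup of
 * a single block with ext4_map_blocks().  The handle may be NULL and flags
 * may be 0 because no allocation is requested; returning -ENOENT for a
 * hole is this example's own convention.
 */
static int example_lookup_block(struct inode *inode, ext4_lblk_t lblk,
				ext4_fsblk_t *pblk)
{
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;
	map.m_len = 1;
	ret = ext4_map_blocks(NULL, inode, &map, 0);	/* lookup only */
	if (ret < 0)
		return ret;		/* I/O or validity error */
	if (ret == 0)
		return -ENOENT;		/* hole: nothing mapped */
	*pblk = map.m_pblk;
	return 0;
}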
 662/* Maximum number of blocks we map for direct IO at once. */
 663#define DIO_MAX_BLOCKS 4096
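/*
 * Capping the per-call mapping keeps the journal credits requested in
 * _ext4_get_block() below (via ext4_chunk_trans_blocks()) bounded per
 * transaction, instead of sizing one handle to the whole DIO request.
 */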
 664
 665static int _ext4_get_block(struct inode *inode, sector_t iblock,
 666			   struct buffer_head *bh, int flags)
 667{
 668	handle_t *handle = ext4_journal_current_handle();
 669	struct ext4_map_blocks map;
 670	int ret = 0, started = 0;
 671	int dio_credits;
 672
 673	map.m_lblk = iblock;
 674	map.m_len = bh->b_size >> inode->i_blkbits;
 675
 676	if (flags && !handle) {
 677		/* Direct IO write... */
 678		if (map.m_len > DIO_MAX_BLOCKS)
 679			map.m_len = DIO_MAX_BLOCKS;
 680		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
 681		handle = ext4_journal_start(inode, dio_credits);
 682		if (IS_ERR(handle)) {
 683			ret = PTR_ERR(handle);
 684			return ret;
 685		}
 686		started = 1;
 687	}
 688
 689	ret = ext4_map_blocks(handle, inode, &map, flags);
 690	if (ret > 0) {
 691		map_bh(bh, inode->i_sb, map.m_pblk);
 692		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
 693		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 694		ret = 0;
 695	}
 696	if (started)
 697		ext4_journal_stop(handle);
 698	return ret;
 699}
 700
 701int ext4_get_block(struct inode *inode, sector_t iblock,
 702		   struct buffer_head *bh, int create)
 703{
 704	return _ext4_get_block(inode, iblock, bh,
 705			       create ? EXT4_GET_BLOCKS_CREATE : 0);
 706}
 707
 708/*
 709 * `handle' can be NULL if create is zero
 710 */
 711struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 712				ext4_lblk_t block, int create, int *errp)
 713{
 714	struct ext4_map_blocks map;
 715	struct buffer_head *bh;
 716	int fatal = 0, err;
 717
 718	J_ASSERT(handle != NULL || create == 0);
 719
 720	map.m_lblk = block;
 721	map.m_len = 1;
 722	err = ext4_map_blocks(handle, inode, &map,
 723			      create ? EXT4_GET_BLOCKS_CREATE : 0);
 724
 725	if (err < 0)
 726		*errp = err;
 727	if (err <= 0)
 728		return NULL;
 729	*errp = 0;
 730
 731	bh = sb_getblk(inode->i_sb, map.m_pblk);
 732	if (!bh) {
 733		*errp = -EIO;
 734		return NULL;
 735	}
 736	if (map.m_flags & EXT4_MAP_NEW) {
 737		J_ASSERT(create != 0);
 738		J_ASSERT(handle != NULL);
 739
 740		/*
 741		 * Now that we do not always journal data, we should
 742		 * keep in mind whether this should always journal the
 743		 * new buffer as metadata.  For now, regular file
 744		 * writes use ext4_get_block instead, so it's not a
 745		 * problem.
 746		 */
 747		lock_buffer(bh);
 748		BUFFER_TRACE(bh, "call get_create_access");
 749		fatal = ext4_journal_get_create_access(handle, bh);
 750		if (!fatal && !buffer_uptodate(bh)) {
 751			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 752			set_buffer_uptodate(bh);
 753		}
 754		unlock_buffer(bh);
 755		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 756		err = ext4_handle_dirty_metadata(handle, inode, bh);
 757		if (!fatal)
 758			fatal = err;
 759	} else {
 760		BUFFER_TRACE(bh, "not a new buffer");
 761	}
 762	if (fatal) {
 763		*errp = fatal;
 764		brelse(bh);
 765		bh = NULL;
 766	}
 767	return bh;
 768}
 769
 770struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 771			       ext4_lblk_t block, int create, int *err)
 772{
 773	struct buffer_head *bh;
 774
 775	bh = ext4_getblk(handle, inode, block, create, err);
 776	if (!bh)
 777		return bh;
 778	if (buffer_uptodate(bh))
 779		return bh;
 780	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 781	wait_on_buffer(bh);
 782	if (buffer_uptodate(bh))
 783		return bh;
 784	put_bh(bh);
 785	*err = -EIO;
 786	return NULL;
 787}
 788
 789static int walk_page_buffers(handle_t *handle,
 790			     struct buffer_head *head,
 791			     unsigned from,
 792			     unsigned to,
 793			     int *partial,
 794			     int (*fn)(handle_t *handle,
 795				       struct buffer_head *bh))
 796{
 797	struct buffer_head *bh;
 798	unsigned block_start, block_end;
 799	unsigned blocksize = head->b_size;
 800	int err, ret = 0;
 801	struct buffer_head *next;
 802
 803	for (bh = head, block_start = 0;
 804	     ret == 0 && (bh != head || !block_start);
 805	     block_start = block_end, bh = next) {
 806		next = bh->b_this_page;
 807		block_end = block_start + blocksize;
 808		if (block_end <= from || block_start >= to) {
 809			if (partial && !buffer_uptodate(bh))
 810				*partial = 1;
 811			continue;
 812		}
 813		err = (*fn)(handle, bh);
 814		if (!ret)
 815			ret = err;
 816	}
 817	return ret;
 818}
 819
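/*
 * Typical use of walk_page_buffers(), as in ext4_write_begin() below:
 *
 *	walk_page_buffers(handle, page_buffers(page), from, to, NULL,
 *			  do_journal_get_write_access);
 *
 * i.e. apply a journaling callback to every buffer_head overlapping the
 * byte range [from, to) within one page.
 */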
 820/*
 821 * To preserve ordering, it is essential that the hole instantiation and
 822 * the data write be encapsulated in a single transaction.  We cannot
 823 * close off a transaction and start a new one between the ext4_get_block()
 824 * and the commit_write().  So doing the jbd2_journal_start at the start of
 825 * prepare_write() is the right place.
 826 *
 827 * Also, this function can nest inside ext4_writepage() ->
 828 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 829 * has generated enough buffer credits to do the whole page.  So we won't
 830 * block on the journal in that case, which is good, because the caller may
 831 * be PF_MEMALLOC.
 832 *
 833 * By accident, ext4 can be reentered when a transaction is open via
 834 * quota file writes.  If we were to commit the transaction while thus
 835 * reentered, there can be a deadlock - we would be holding a quota
 836 * lock, and the commit would never complete if another thread had a
 837 * transaction open and was blocking on the quota lock - a ranking
 838 * violation.
 839 *
 840 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 841 * will _not_ run commit under these circumstances because handle->h_ref
 842 * is elevated.  We'll still have enough credits for the tiny quotafile
 843 * write.
 844 */
 845static int do_journal_get_write_access(handle_t *handle,
 846				       struct buffer_head *bh)
 847{
 848	int dirty = buffer_dirty(bh);
 849	int ret;
 850
 851	if (!buffer_mapped(bh) || buffer_freed(bh))
 852		return 0;
 853	/*
 854	 * __block_write_begin() could have dirtied some buffers. Clean
 855	 * the dirty bit as jbd2_journal_get_write_access() could complain
 856	 * otherwise about fs integrity issues. Setting of the dirty bit
 857	 * by __block_write_begin() isn't a real problem here as we clear
 858	 * the bit before releasing a page lock and thus writeback cannot
 859	 * ever write the buffer.
 860	 */
 861	if (dirty)
 862		clear_buffer_dirty(bh);
 863	ret = ext4_journal_get_write_access(handle, bh);
 864	if (!ret && dirty)
 865		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
 866	return ret;
 867}
 868
 869static int ext4_get_block_write(struct inode *inode, sector_t iblock,
 870		   struct buffer_head *bh_result, int create);
 871static int ext4_write_begin(struct file *file, struct address_space *mapping,
 872			    loff_t pos, unsigned len, unsigned flags,
 873			    struct page **pagep, void **fsdata)
 874{
 875	struct inode *inode = mapping->host;
 876	int ret, needed_blocks;
 877	handle_t *handle;
 878	int retries = 0;
 879	struct page *page;
 880	pgoff_t index;
 881	unsigned from, to;
 882
 883	trace_ext4_write_begin(inode, pos, len, flags);
 884	/*
 885	 * Reserve one block more for addition to orphan list in case
 886	 * we allocate blocks but write fails for some reason
 887	 */
 888	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
 889	index = pos >> PAGE_CACHE_SHIFT;
 890	from = pos & (PAGE_CACHE_SIZE - 1);
 891	to = from + len;
 892
 893retry:
 894	handle = ext4_journal_start(inode, needed_blocks);
 895	if (IS_ERR(handle)) {
 896		ret = PTR_ERR(handle);
 897		goto out;
 898	}
 899
 900	/* We cannot recurse into the filesystem as the transaction is already
 901	 * started */
 902	flags |= AOP_FLAG_NOFS;
 903
 904	page = grab_cache_page_write_begin(mapping, index, flags);
 905	if (!page) {
 906		ext4_journal_stop(handle);
 907		ret = -ENOMEM;
 908		goto out;
 909	}
 910	*pagep = page;
 911
 912	if (ext4_should_dioread_nolock(inode))
 913		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
 914	else
 915		ret = __block_write_begin(page, pos, len, ext4_get_block);
 916
 917	if (!ret && ext4_should_journal_data(inode)) {
 918		ret = walk_page_buffers(handle, page_buffers(page),
 919				from, to, NULL, do_journal_get_write_access);
 920	}
 921
 922	if (ret) {
 923		unlock_page(page);
 924		page_cache_release(page);
 925		/*
 926		 * __block_write_begin may have instantiated a few blocks
 927		 * outside i_size.  Trim these off again. Don't need
 928		 * i_size_read because we hold i_mutex.
 929		 *
 930		 * Add inode to orphan list in case we crash before
 931		 * truncate finishes
 932		 */
 933		if (pos + len > inode->i_size && ext4_can_truncate(inode))
 934			ext4_orphan_add(handle, inode);
 935
 936		ext4_journal_stop(handle);
 937		if (pos + len > inode->i_size) {
 938			ext4_truncate_failed_write(inode);
 939			/*
 940			 * If truncate failed early the inode might
 941			 * still be on the orphan list; we need to
 942			 * make sure the inode is removed from the
 943			 * orphan list in that case.
 944			 */
 945			if (inode->i_nlink)
 946				ext4_orphan_del(NULL, inode);
 947		}
 948	}
 949
 950	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 951		goto retry;
 952out:
 953	return ret;
 954}
 955
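/*
 * The retry idiom above: on -ENOSPC, ext4_should_retry_alloc() forces a
 * journal commit (which may release space held by the running transaction)
 * and allows a bounded number of retries before the error is returned.
 */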
 956/* For write_end() in data=journal mode */
 957static int write_end_fn(handle_t *handle, struct buffer_head *bh)
 958{
 959	if (!buffer_mapped(bh) || buffer_freed(bh))
 960		return 0;
 961	set_buffer_uptodate(bh);
 962	return ext4_handle_dirty_metadata(handle, NULL, bh);
 963}
 964
 965static int ext4_generic_write_end(struct file *file,
 966				  struct address_space *mapping,
 967				  loff_t pos, unsigned len, unsigned copied,
 968				  struct page *page, void *fsdata)
 969{
 970	int i_size_changed = 0;
 971	struct inode *inode = mapping->host;
 972	handle_t *handle = ext4_journal_current_handle();
 973
 974	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 975
 976	/*
 977	 * No need to use i_size_read() here, the i_size
 978	 * cannot change under us because we hold i_mutex.
 979	 *
 980	 * But it's important to update i_size while still holding page lock:
 981	 * page writeout could otherwise come in and zero beyond i_size.
 982	 */
 983	if (pos + copied > inode->i_size) {
 984		i_size_write(inode, pos + copied);
 985		i_size_changed = 1;
 986	}
 987
 988	if (pos + copied >  EXT4_I(inode)->i_disksize) {
  989		/* We need to mark the inode dirty even if
  990		 * new_i_size is less than inode->i_size
  991		 * but greater than i_disksize (hint: delalloc).
  992		 */
 993		ext4_update_i_disksize(inode, (pos + copied));
 994		i_size_changed = 1;
 995	}
 996	unlock_page(page);
 997	page_cache_release(page);
 998
 999	/*
1000	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1001	 * makes the holding time of page lock longer. Second, it forces lock
1002	 * ordering of page lock and transaction start for journaling
1003	 * filesystems.
1004	 */
1005	if (i_size_changed)
1006		ext4_mark_inode_dirty(handle, inode);
1007
1008	return copied;
1009}
1010
1011/*
1012 * We need to pick up the new inode size which generic_commit_write gave us
1013 * `file' can be NULL - eg, when called from page_symlink().
1014 *
1015 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1016 * buffers are managed internally.
1017 */
1018static int ext4_ordered_write_end(struct file *file,
1019				  struct address_space *mapping,
1020				  loff_t pos, unsigned len, unsigned copied,
1021				  struct page *page, void *fsdata)
1022{
1023	handle_t *handle = ext4_journal_current_handle();
1024	struct inode *inode = mapping->host;
1025	int ret = 0, ret2;
1026
1027	trace_ext4_ordered_write_end(inode, pos, len, copied);
1028	ret = ext4_jbd2_file_inode(handle, inode);
1029
1030	if (ret == 0) {
1031		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1032							page, fsdata);
1033		copied = ret2;
1034		if (pos + len > inode->i_size && ext4_can_truncate(inode))
 1035			/* If we have allocated more blocks than we copied,
 1036			 * we will have blocks allocated outside
 1037			 * inode->i_size, so truncate them.
 1038			 */
1039			ext4_orphan_add(handle, inode);
1040		if (ret2 < 0)
1041			ret = ret2;
1042	} else {
1043		unlock_page(page);
1044		page_cache_release(page);
1045	}
1046
1047	ret2 = ext4_journal_stop(handle);
1048	if (!ret)
1049		ret = ret2;
1050
1051	if (pos + len > inode->i_size) {
1052		ext4_truncate_failed_write(inode);
1053		/*
1054		 * If truncate failed early the inode might still be
1055		 * on the orphan list; we need to make sure the inode
1056		 * is removed from the orphan list in that case.
1057		 */
1058		if (inode->i_nlink)
1059			ext4_orphan_del(NULL, inode);
1060	}
1061
1062
1063	return ret ? ret : copied;
1064}
1065
1066static int ext4_writeback_write_end(struct file *file,
1067				    struct address_space *mapping,
1068				    loff_t pos, unsigned len, unsigned copied,
1069				    struct page *page, void *fsdata)
1070{
1071	handle_t *handle = ext4_journal_current_handle();
1072	struct inode *inode = mapping->host;
1073	int ret = 0, ret2;
1074
1075	trace_ext4_writeback_write_end(inode, pos, len, copied);
1076	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1077							page, fsdata);
1078	copied = ret2;
1079	if (pos + len > inode->i_size && ext4_can_truncate(inode))
 1080		/* If we have allocated more blocks than we copied,
 1081		 * we will have blocks allocated outside
 1082		 * inode->i_size, so truncate them.
 1083		 */
1084		ext4_orphan_add(handle, inode);
1085
1086	if (ret2 < 0)
1087		ret = ret2;
1088
1089	ret2 = ext4_journal_stop(handle);
1090	if (!ret)
1091		ret = ret2;
1092
1093	if (pos + len > inode->i_size) {
1094		ext4_truncate_failed_write(inode);
1095		/*
1096		 * If truncate failed early the inode might still be
1097		 * on the orphan list; we need to make sure the inode
1098		 * is removed from the orphan list in that case.
1099		 */
1100		if (inode->i_nlink)
1101			ext4_orphan_del(NULL, inode);
1102	}
1103
1104	return ret ? ret : copied;
1105}
1106
1107static int ext4_journalled_write_end(struct file *file,
1108				     struct address_space *mapping,
1109				     loff_t pos, unsigned len, unsigned copied,
1110				     struct page *page, void *fsdata)
1111{
1112	handle_t *handle = ext4_journal_current_handle();
1113	struct inode *inode = mapping->host;
1114	int ret = 0, ret2;
1115	int partial = 0;
1116	unsigned from, to;
1117	loff_t new_i_size;
1118
1119	trace_ext4_journalled_write_end(inode, pos, len, copied);
1120	from = pos & (PAGE_CACHE_SIZE - 1);
1121	to = from + len;
1122
1123	BUG_ON(!ext4_handle_valid(handle));
1124
1125	if (copied < len) {
1126		if (!PageUptodate(page))
1127			copied = 0;
1128		page_zero_new_buffers(page, from+copied, to);
1129	}
1130
1131	ret = walk_page_buffers(handle, page_buffers(page), from,
1132				to, &partial, write_end_fn);
1133	if (!partial)
1134		SetPageUptodate(page);
1135	new_i_size = pos + copied;
1136	if (new_i_size > inode->i_size)
1137		i_size_write(inode, pos+copied);
1138	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1139	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1140	if (new_i_size > EXT4_I(inode)->i_disksize) {
1141		ext4_update_i_disksize(inode, new_i_size);
1142		ret2 = ext4_mark_inode_dirty(handle, inode);
1143		if (!ret)
1144			ret = ret2;
1145	}
1146
1147	unlock_page(page);
1148	page_cache_release(page);
1149	if (pos + len > inode->i_size && ext4_can_truncate(inode))
 1150		/* If we have allocated more blocks than we copied,
 1151		 * we will have blocks allocated outside
 1152		 * inode->i_size, so truncate them.
 1153		 */
1154		ext4_orphan_add(handle, inode);
1155
1156	ret2 = ext4_journal_stop(handle);
1157	if (!ret)
1158		ret = ret2;
1159	if (pos + len > inode->i_size) {
1160		ext4_truncate_failed_write(inode);
1161		/*
1162		 * If truncate failed early the inode might still be
1163		 * on the orphan list; we need to make sure the inode
1164		 * is removed from the orphan list in that case.
1165		 */
1166		if (inode->i_nlink)
1167			ext4_orphan_del(NULL, inode);
1168	}
1169
1170	return ret ? ret : copied;
1171}
1172
1173/*
1174 * Reserve a single cluster located at lblock
1175 */
1176static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1177{
1178	int retries = 0;
1179	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1180	struct ext4_inode_info *ei = EXT4_I(inode);
1181	unsigned int md_needed;
1182	int ret;
1183	ext4_lblk_t save_last_lblock;
1184	int save_len;
1185
1186	/*
1187	 * We will charge metadata quota at writeout time; this saves
1188	 * us from metadata over-estimation, though we may go over by
1189	 * a small amount in the end.  Here we just reserve for data.
1190	 */
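	/*
	 * EXT4_C2B() converts clusters to filesystem blocks: with the
	 * default cluster ratio of 1 this reserves quota for one block;
	 * with bigalloc (e.g. s_cluster_bits == 4) one cluster charges
	 * 16 blocks of quota.
	 */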
1191	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1192	if (ret)
1193		return ret;
1194
1195	/*
 1196	 * Recalculate the number of metadata blocks to reserve
 1197	 * in order to allocate nrblocks;
 1198	 * the worst case is one extent per block.
1199	 */
1200repeat:
1201	spin_lock(&ei->i_block_reservation_lock);
1202	/*
1203	 * ext4_calc_metadata_amount() has side effects, which we have
1204	 * to be prepared undo if we fail to claim space.
1205	 */
1206	save_len = ei->i_da_metadata_calc_len;
1207	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1208	md_needed = EXT4_NUM_B2C(sbi,
1209				 ext4_calc_metadata_amount(inode, lblock));
1210	trace_ext4_da_reserve_space(inode, md_needed);
1211
1212	/*
1213	 * We do still charge estimated metadata to the sb though;
1214	 * we cannot afford to run out of free blocks.
1215	 */
1216	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
1217		ei->i_da_metadata_calc_len = save_len;
1218		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1219		spin_unlock(&ei->i_block_reservation_lock);
1220		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1221			yield();
1222			goto repeat;
1223		}
1224		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1225		return -ENOSPC;
1226	}
1227	ei->i_reserved_data_blocks++;
1228	ei->i_reserved_meta_blocks += md_needed;
1229	spin_unlock(&ei->i_block_reservation_lock);
1230
1231	return 0;       /* success */
1232}
1233
1234static void ext4_da_release_space(struct inode *inode, int to_free)
1235{
1236	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1237	struct ext4_inode_info *ei = EXT4_I(inode);
1238
1239	if (!to_free)
1240		return;		/* Nothing to release, exit */
1241
1242	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1243
1244	trace_ext4_da_release_space(inode, to_free);
1245	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1246		/*
1247		 * if there aren't enough reserved blocks, then the
1248		 * counter is messed up somewhere.  Since this
1249		 * function is called from invalidate page, it's
1250		 * harmless to return without any action.
1251		 */
1252		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
1253			 "ino %lu, to_free %d with only %d reserved "
1254			 "data blocks", inode->i_ino, to_free,
1255			 ei->i_reserved_data_blocks);
1256		WARN_ON(1);
1257		to_free = ei->i_reserved_data_blocks;
1258	}
1259	ei->i_reserved_data_blocks -= to_free;
1260
1261	if (ei->i_reserved_data_blocks == 0) {
1262		/*
1263		 * We can release all of the reserved metadata blocks
1264		 * only when we have written all of the delayed
1265		 * allocation blocks.
1266		 * Note that in case of bigalloc, i_reserved_meta_blocks,
1267		 * i_reserved_data_blocks, etc. refer to number of clusters.
1268		 */
1269		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
1270				   ei->i_reserved_meta_blocks);
1271		ei->i_reserved_meta_blocks = 0;
1272		ei->i_da_metadata_calc_len = 0;
1273	}
1274
1275	/* update fs dirty data blocks counter */
1276	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1277
1278	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1279
1280	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1281}
1282
1283static void ext4_da_page_release_reservation(struct page *page,
1284					     unsigned long offset)
1285{
1286	int to_release = 0;
1287	struct buffer_head *head, *bh;
1288	unsigned int curr_off = 0;
1289	struct inode *inode = page->mapping->host;
1290	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1291	int num_clusters;
1292
1293	head = page_buffers(page);
1294	bh = head;
1295	do {
1296		unsigned int next_off = curr_off + bh->b_size;
1297
1298		if ((offset <= curr_off) && (buffer_delay(bh))) {
1299			to_release++;
1300			clear_buffer_delay(bh);
1301			clear_buffer_da_mapped(bh);
1302		}
1303		curr_off = next_off;
1304	} while ((bh = bh->b_this_page) != head);
1305
1306	/* If we have released all the blocks belonging to a cluster, then we
1307	 * need to release the reserved space for that cluster. */
1308	num_clusters = EXT4_NUM_B2C(sbi, to_release);
1309	while (num_clusters > 0) {
1310		ext4_fsblk_t lblk;
1311		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
1312			((num_clusters - 1) << sbi->s_cluster_bits);
1313		if (sbi->s_cluster_ratio == 1 ||
1314		    !ext4_find_delalloc_cluster(inode, lblk, 1))
1315			ext4_da_release_space(inode, 1);
1316
1317		num_clusters--;
1318	}
1319}
1320
1321/*
1322 * Delayed allocation stuff
1323 */
1324
1325/*
1326 * mpage_da_submit_io - walks through extent of pages and try to write
1327 * them with writepage() call back
1328 *
1329 * @mpd->inode: inode
1330 * @mpd->first_page: first page of the extent
1331 * @mpd->next_page: page after the last page of the extent
1332 *
1333 * By the time mpage_da_submit_io() is called we expect all blocks
 1334 * to be allocated. This may be wrong if allocation failed.
1335 *
1336 * As pages are already locked by write_cache_pages(), we can't use it
1337 */
1338static int mpage_da_submit_io(struct mpage_da_data *mpd,
1339			      struct ext4_map_blocks *map)
1340{
1341	struct pagevec pvec;
1342	unsigned long index, end;
1343	int ret = 0, err, nr_pages, i;
1344	struct inode *inode = mpd->inode;
1345	struct address_space *mapping = inode->i_mapping;
1346	loff_t size = i_size_read(inode);
1347	unsigned int len, block_start;
1348	struct buffer_head *bh, *page_bufs = NULL;
1349	int journal_data = ext4_should_journal_data(inode);
1350	sector_t pblock = 0, cur_logical = 0;
1351	struct ext4_io_submit io_submit;
1352
1353	BUG_ON(mpd->next_page <= mpd->first_page);
1354	memset(&io_submit, 0, sizeof(io_submit));
1355	/*
1356	 * We need to start from the first_page to the next_page - 1
1357	 * to make sure we also write the mapped dirty buffer_heads.
1358	 * If we look at mpd->b_blocknr we would only be looking
1359	 * at the currently mapped buffer_heads.
1360	 */
1361	index = mpd->first_page;
1362	end = mpd->next_page - 1;
1363
1364	pagevec_init(&pvec, 0);
1365	while (index <= end) {
1366		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1367		if (nr_pages == 0)
1368			break;
1369		for (i = 0; i < nr_pages; i++) {
1370			int commit_write = 0, skip_page = 0;
1371			struct page *page = pvec.pages[i];
1372
1373			index = page->index;
1374			if (index > end)
1375				break;
1376
1377			if (index == size >> PAGE_CACHE_SHIFT)
1378				len = size & ~PAGE_CACHE_MASK;
1379			else
1380				len = PAGE_CACHE_SIZE;
1381			if (map) {
1382				cur_logical = index << (PAGE_CACHE_SHIFT -
1383							inode->i_blkbits);
1384				pblock = map->m_pblk + (cur_logical -
1385							map->m_lblk);
1386			}
1387			index++;
1388
1389			BUG_ON(!PageLocked(page));
1390			BUG_ON(PageWriteback(page));
1391
1392			/*
1393			 * If the page does not have buffers (for
1394			 * whatever reason), try to create them using
1395			 * __block_write_begin.  If this fails,
1396			 * skip the page and move on.
1397			 */
1398			if (!page_has_buffers(page)) {
1399				if (__block_write_begin(page, 0, len,
1400						noalloc_get_block_write)) {
1401				skip_page:
1402					unlock_page(page);
1403					continue;
1404				}
1405				commit_write = 1;
1406			}
1407
1408			bh = page_bufs = page_buffers(page);
1409			block_start = 0;
1410			do {
1411				if (!bh)
1412					goto skip_page;
1413				if (map && (cur_logical >= map->m_lblk) &&
1414				    (cur_logical <= (map->m_lblk +
1415						     (map->m_len - 1)))) {
1416					if (buffer_delay(bh)) {
1417						clear_buffer_delay(bh);
1418						bh->b_blocknr = pblock;
1419					}
1420					if (buffer_da_mapped(bh))
1421						clear_buffer_da_mapped(bh);
1422					if (buffer_unwritten(bh) ||
1423					    buffer_mapped(bh))
1424						BUG_ON(bh->b_blocknr != pblock);
1425					if (map->m_flags & EXT4_MAP_UNINIT)
1426						set_buffer_uninit(bh);
1427					clear_buffer_unwritten(bh);
1428				}
1429
1430				/*
1431				 * skip page if block allocation undone and
1432				 * block is dirty
1433				 */
1434				if (ext4_bh_delay_or_unwritten(NULL, bh))
1435					skip_page = 1;
1436				bh = bh->b_this_page;
1437				block_start += bh->b_size;
1438				cur_logical++;
1439				pblock++;
1440			} while (bh != page_bufs);
1441
1442			if (skip_page)
1443				goto skip_page;
1444
1445			if (commit_write)
1446				/* mark the buffer_heads as dirty & uptodate */
1447				block_commit_write(page, 0, len);
1448
1449			clear_page_dirty_for_io(page);
1450			/*
1451			 * Delalloc doesn't support data journalling,
1452			 * but eventually maybe we'll lift this
1453			 * restriction.
1454			 */
1455			if (unlikely(journal_data && PageChecked(page)))
1456				err = __ext4_journalled_writepage(page, len);
1457			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
1458				err = ext4_bio_write_page(&io_submit, page,
1459							  len, mpd->wbc);
1460			else if (buffer_uninit(page_bufs)) {
1461				ext4_set_bh_endio(page_bufs, inode);
1462				err = block_write_full_page_endio(page,
1463					noalloc_get_block_write,
1464					mpd->wbc, ext4_end_io_buffer_write);
1465			} else
1466				err = block_write_full_page(page,
1467					noalloc_get_block_write, mpd->wbc);
1468
1469			if (!err)
1470				mpd->pages_written++;
1471			/*
1472			 * In error case, we have to continue because
1473			 * remaining pages are still locked
1474			 */
1475			if (ret == 0)
1476				ret = err;
1477		}
1478		pagevec_release(&pvec);
1479	}
1480	ext4_io_submit(&io_submit);
1481	return ret;
1482}
1483
1484static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
1485{
1486	int nr_pages, i;
1487	pgoff_t index, end;
1488	struct pagevec pvec;
1489	struct inode *inode = mpd->inode;
1490	struct address_space *mapping = inode->i_mapping;
1491
1492	index = mpd->first_page;
 1493	end   = mpd->next_page - 1;
	/* Note: v3.5 omits pagevec_init(&pvec, 0) here; later kernels add it,
	 * since pagevec_release() otherwise reads the pagevec's
	 * uninitialized 'cold' flag. */
	pagevec_init(&pvec, 0);
1494	while (index <= end) {
1495		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1496		if (nr_pages == 0)
1497			break;
1498		for (i = 0; i < nr_pages; i++) {
1499			struct page *page = pvec.pages[i];
1500			if (page->index > end)
1501				break;
1502			BUG_ON(!PageLocked(page));
1503			BUG_ON(PageWriteback(page));
1504			block_invalidatepage(page, 0);
1505			ClearPageUptodate(page);
1506			unlock_page(page);
1507		}
1508		index = pvec.pages[nr_pages - 1]->index + 1;
1509		pagevec_release(&pvec);
1510	}
1511	return;
1512}
1513
1514static void ext4_print_free_blocks(struct inode *inode)
1515{
1516	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1517	struct super_block *sb = inode->i_sb;
1518
1519	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1520	       EXT4_C2B(EXT4_SB(inode->i_sb),
1521			ext4_count_free_clusters(inode->i_sb)));
1522	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1523	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1524	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
1525		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1526	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1527	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
1528		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1529	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1530	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1531		 EXT4_I(inode)->i_reserved_data_blocks);
1532	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1533	       EXT4_I(inode)->i_reserved_meta_blocks);
1534	return;
1535}
1536
1537/*
1538 * mpage_da_map_and_submit - go through given space, map them
1539 *       if necessary, and then submit them for I/O
1540 *
1541 * @mpd - bh describing space
1542 *
1543 * The function skips space we know is already mapped to disk blocks.
1544 *
1545 */
1546static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
1547{
1548	int err, blks, get_blocks_flags;
1549	struct ext4_map_blocks map, *mapp = NULL;
1550	sector_t next = mpd->b_blocknr;
1551	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
1552	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
1553	handle_t *handle = NULL;
1554
1555	/*
1556	 * If the blocks are mapped already, or we couldn't accumulate
1557	 * any blocks, then proceed immediately to the submission stage.
1558	 */
1559	if ((mpd->b_size == 0) ||
1560	    ((mpd->b_state  & (1 << BH_Mapped)) &&
1561	     !(mpd->b_state & (1 << BH_Delay)) &&
1562	     !(mpd->b_state & (1 << BH_Unwritten))))
1563		goto submit_io;
1564
1565	handle = ext4_journal_current_handle();
1566	BUG_ON(!handle);
1567
1568	/*
1569	 * Call ext4_map_blocks() to allocate any delayed allocation
1570	 * blocks, or to convert an uninitialized extent to be
1571	 * initialized (in the case where we have written into
1572	 * one or more preallocated blocks).
1573	 *
1574	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
1575	 * indicate that we are on the delayed allocation path.  This
1576	 * affects functions in many different parts of the allocation
1577	 * call path.  This flag exists primarily because we don't
1578	 * want to change *many* call functions, so ext4_map_blocks()
1579	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
1580	 * inode's allocation semaphore is taken.
1581	 *
1582	 * If the blocks in questions were delalloc blocks, set
1583	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
1584	 * variables are updated after the blocks have been allocated.
1585	 */
1586	map.m_lblk = next;
1587	map.m_len = max_blocks;
1588	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
1589	if (ext4_should_dioread_nolock(mpd->inode))
1590		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
1591	if (mpd->b_state & (1 << BH_Delay))
1592		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
1593
1594	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
1595	if (blks < 0) {
1596		struct super_block *sb = mpd->inode->i_sb;
1597
1598		err = blks;
1599		/*
1600		 * If get block returns EAGAIN or ENOSPC and there
 1601		 * appear to be free blocks, we will just let
1602		 * mpage_da_submit_io() unlock all of the pages.
1603		 */
1604		if (err == -EAGAIN)
1605			goto submit_io;
1606
1607		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
1608			mpd->retval = err;
1609			goto submit_io;
1610		}
1611
1612		/*
1613		 * get block failure will cause us to loop in
1614		 * writepages, because a_ops->writepage won't be able
1615		 * to make progress. The page will be redirtied by
1616		 * writepage and writepages will again try to write
1617		 * the same.
1618		 */
1619		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
1620			ext4_msg(sb, KERN_CRIT,
1621				 "delayed block allocation failed for inode %lu "
1622				 "at logical offset %llu with max blocks %zd "
1623				 "with error %d", mpd->inode->i_ino,
1624				 (unsigned long long) next,
1625				 mpd->b_size >> mpd->inode->i_blkbits, err);
1626			ext4_msg(sb, KERN_CRIT,
1627				"This should not happen!! Data will be lost\n");
1628			if (err == -ENOSPC)
1629				ext4_print_free_blocks(mpd->inode);
1630		}
1631		/* invalidate all the pages */
1632		ext4_da_block_invalidatepages(mpd);
1633
1634		/* Mark this page range as having been completed */
1635		mpd->io_done = 1;
1636		return;
1637	}
1638	BUG_ON(blks == 0);
1639
1640	mapp = &map;
1641	if (map.m_flags & EXT4_MAP_NEW) {
1642		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
1643		int i;
1644
1645		for (i = 0; i < map.m_len; i++)
1646			unmap_underlying_metadata(bdev, map.m_pblk + i);
1647
1648		if (ext4_should_order_data(mpd->inode)) {
1649			err = ext4_jbd2_file_inode(handle, mpd->inode);
1650			if (err) {
1651				/* Only if the journal is aborted */
1652				mpd->retval = err;
1653				goto submit_io;
1654			}
1655		}
1656	}
1657
1658	/*
1659	 * Update on-disk size along with block allocation.
1660	 */
1661	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
1662	if (disksize > i_size_read(mpd->inode))
1663		disksize = i_size_read(mpd->inode);
1664	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
1665		ext4_update_i_disksize(mpd->inode, disksize);
1666		err = ext4_mark_inode_dirty(handle, mpd->inode);
1667		if (err)
1668			ext4_error(mpd->inode->i_sb,
1669				   "Failed to mark inode %lu dirty",
1670				   mpd->inode->i_ino);
1671	}
1672
1673submit_io:
1674	mpage_da_submit_io(mpd, mapp);
1675	mpd->io_done = 1;
1676}
1677
1678#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1679		(1 << BH_Delay) | (1 << BH_Unwritten))
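/*
 * BH_FLAGS masks the buffer-state bits that mpage_add_bh_to_extent()
 * tracks in mpd->b_state; a new block merges into the current extent
 * only when these bits match (see the b_state comparison below).
 */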
1680
 1681/*
 1682 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 1683 *
 1684 * @mpd->lbh - extent of blocks
 1685 * @logical - logical number of the block in the file
 1686 * @bh - bh of the block (used to access the block's state)
 1687 *
 1688 * The function is used to collect contiguous blocks in the same state.
 1689 */
1690static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
1691				   sector_t logical, size_t b_size,
1692				   unsigned long b_state)
1693{
1694	sector_t next;
1695	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
1696
1697	/*
1698	 * XXX Don't go larger than mballoc is willing to allocate.
1699	 * This is a stopgap solution.  We eventually need to fold
1700	 * mpage_da_submit_io() into this function and then call
1701	 * ext4_map_blocks() multiple times in a loop
1702	 */
1703	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1704		goto flush_it;
1705
1706	/* check if the reserved journal credits might overflow */
1707	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1708		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1709			/*
1710			 * With non-extent format we are limited by the journal
1711			 * credit available.  Total credit needed to insert
1712			 * nrblocks contiguous blocks is dependent on the
1713			 * nrblocks.  So limit nrblocks.
1714			 */
1715			goto flush_it;
1716		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1717				EXT4_MAX_TRANS_DATA) {
1718			/*
1719			 * Adding the new buffer_head would make it cross the
1720			 * allowed limit for which we have journal credit
1721			 * reserved. So limit the new bh->b_size
1722			 */
1723			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1724						mpd->inode->i_blkbits;
1725			/* we will do mpage_da_submit_io in the next loop */
1726		}
1727	}
1728	/*
1729	 * First block in the extent
1730	 */
1731	if (mpd->b_size == 0) {
1732		mpd->b_blocknr = logical;
1733		mpd->b_size = b_size;
1734		mpd->b_state = b_state & BH_FLAGS;
1735		return;
1736	}
1737
1738	next = mpd->b_blocknr + nrblocks;
1739	/*
1740	 * Can we merge the block to our big extent?
1741	 */
1742	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
1743		mpd->b_size += b_size;
1744		return;
1745	}
1746
1747flush_it:
1748	/*
1749	 * We couldn't merge the block to our extent, so we
1750	 * need to flush the current extent and start a new one
1751	 */
1752	mpage_da_map_and_submit(mpd);
1753	return;
1754}
1755
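/*
 * walk_page_buffers() callback: true for dirty buffers that are
 * delayed-allocated or unwritten, i.e. the ones that still need block
 * allocation before they can be written out.
 */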
1756static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1757{
1758	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1759}
1760
1761/*
1762 * This function grabs code from the very beginning of
1763 * ext4_map_blocks, but assumes that the caller is on the delayed
1764 * write path. This function looks up the requested blocks and sets the
1765 * buffer delay bit under the protection of i_data_sem.
1766 */
1767static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1768			      struct ext4_map_blocks *map,
1769			      struct buffer_head *bh)
1770{
1771	int retval;
1772	sector_t invalid_block = ~((sector_t) 0xffff);
1773
1774	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1775		invalid_block = ~0;
1776
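	/*
	 * invalid_block is the sentinel "physical" block used below to map
	 * delayed buffers; it is chosen to lie beyond the end of the
	 * filesystem, falling back to ~0 when the default would be a real
	 * block number.
	 */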
1777	map->m_flags = 0;
1778	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1779		  "logical block %lu\n", inode->i_ino, map->m_len,
1780		  (unsigned long) map->m_lblk);
1781	/*
1782	 * Try to see if we can get the block without requesting a new
1783	 * file system block.
1784	 */
1785	down_read((&EXT4_I(inode)->i_data_sem));
1786	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1787		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1788	else
1789		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1790
1791	if (retval == 0) {
1792		/*
1793		 * XXX: __block_prepare_write() unmaps passed block,
1794		 * is it OK?
1795		 */
1796		/* If the block was allocated from a previously allocated cluster,
1797		 * then we don't need to reserve it again. */
1798		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1799			retval = ext4_da_reserve_space(inode, iblock);
1800			if (retval)
1801				/* not enough space to reserve */
1802				goto out_unlock;
1803		}
1804
1805		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1806		 * and it should not appear on the bh->b_state.
1807		 */
1808		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1809
1810		map_bh(bh, inode->i_sb, invalid_block);
1811		set_buffer_new(bh);
1812		set_buffer_delay(bh);
1813	}
1814
1815out_unlock:
1816	up_read((&EXT4_I(inode)->i_data_sem));
1817
1818	return retval;
1819}
1820
1821/*
1822 * This is a special get_blocks_t callback which is used by
1823 * ext4_da_write_begin().  It will either return a mapped block or
1824 * reserve space for a single block.
1825 *
1826 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1827 * We also have b_blocknr = -1 and b_bdev initialized properly
1828 *
1829 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1830 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1831 * initialized properly.
1832 */
1833static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1834				  struct buffer_head *bh, int create)
1835{
1836	struct ext4_map_blocks map;
1837	int ret = 0;
1838
1839	BUG_ON(create == 0);
1840	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1841
1842	map.m_lblk = iblock;
1843	map.m_len = 1;
1844
1845	/*
1846	 * first, we need to know whether the block is allocated already;
1847	 * preallocated blocks are unmapped but should be treated
1848	 * the same as allocated blocks.
1849	 */
1850	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1851	if (ret <= 0)
1852		return ret;
1853
1854	map_bh(bh, inode->i_sb, map.m_pblk);
1855	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1856
1857	if (buffer_unwritten(bh)) {
1858		/* A delayed write to unwritten bh should be marked
1859		 * new and mapped.  Mapped ensures that we don't do
1860		 * get_block multiple times when we write to the same
1861		 * offset and new ensures that we do proper zero out
1862		 * for partial write.
1863		 */
1864		set_buffer_new(bh);
1865		set_buffer_mapped(bh);
1866	}
1867	return 0;
1868}
1869
1870/*
1871 * This function is used as a standard get_block_t callback function
1872 * when there is no desire to allocate any blocks.  It is used as a
1873 * callback function for block_write_begin() and block_write_full_page().
1874 * These functions should only try to map a single block at a time.
1875 *
1876 * Since this function doesn't do block allocations even if the caller
1877 * requests it by passing in create=1, it is critically important that
1878 * any caller checks to make sure that any buffer heads returned by
1879 * this function are either all already mapped or marked for
1880 * delayed allocation before calling block_write_full_page().  Otherwise,
1881 * b_blocknr could be left uninitialized, and the page write functions will
1882 * be taken by surprise.
1883 */
1884static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1885				   struct buffer_head *bh_result, int create)
1886{
1887	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1888	return _ext4_get_block(inode, iblock, bh_result, 0);
1889}
1890
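/*
 * Trivial walk_page_buffers() callbacks used by
 * __ext4_journalled_writepage() below: bget_one() takes an extra
 * reference on each buffer head and bput_one() drops it, so the buffers
 * cannot go away while the page is unlocked.
 */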
1891static int bget_one(handle_t *handle, struct buffer_head *bh)
1892{
1893	get_bh(bh);
1894	return 0;
1895}
1896
1897static int bput_one(handle_t *handle, struct buffer_head *bh)
1898{
1899	put_bh(bh);
1900	return 0;
1901}
1902
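/*
 * Write one page in data=journal mode: pin the page's buffers, drop the
 * page lock, start a transaction, and journal each buffer in the range
 * via do_journal_get_write_access() and write_end_fn().
 */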
1903static int __ext4_journalled_writepage(struct page *page,
1904				       unsigned int len)
1905{
1906	struct address_space *mapping = page->mapping;
1907	struct inode *inode = mapping->host;
1908	struct buffer_head *page_bufs;
1909	handle_t *handle = NULL;
1910	int ret = 0;
1911	int err;
1912
1913	ClearPageChecked(page);
1914	page_bufs = page_buffers(page);
1915	BUG_ON(!page_bufs);
1916	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1917	/* As soon as we unlock the page, it can go away, but we have
1918	 * references to buffers so we are safe */
1919	unlock_page(page);
1920
1921	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1922	if (IS_ERR(handle)) {
1923		ret = PTR_ERR(handle);
1924		goto out;
1925	}
1926
1927	BUG_ON(!ext4_handle_valid(handle));
1928
1929	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1930				do_journal_get_write_access);
1931
1932	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1933				write_end_fn);
1934	if (ret == 0)
1935		ret = err;
1936	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1937	err = ext4_journal_stop(handle);
1938	if (!ret)
1939		ret = err;
1940
1941	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1942	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1943out:
1944	return ret;
1945}
1946
1947static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1948static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1949
1950/*
1951 * Note that we don't need to start a transaction unless we're journaling data
1952 * because we should have holes filled from ext4_page_mkwrite(). We even don't
1953 * need to file the inode to the transaction's list in ordered mode because if
1954 * we are writing back data added by write(), the inode is already there and if
1955 * we are writing back data modified via mmap(), no one guarantees in which
1956 * transaction the data will hit the disk. In case we are journaling data, we
1957 * cannot start transaction directly because transaction start ranks above page
1958 * lock so we have to do some magic.
1959 *
1960 * This function can get called via...
1961 *   - ext4_da_writepages after taking page lock (have journal handle)
1962 *   - journal_submit_inode_data_buffers (no journal handle)
1963 *   - shrink_page_list via pdflush (no journal handle)
1964 *   - grab_page_cache when doing write_begin (have journal handle)
1965 *
1966 * We don't do any block allocation in this function. If we have page with
1967 * multiple blocks we need to write those buffer_heads that are mapped. This
1968 * is important for mmap-based writes. So if we do, with blocksize 1K,
1969 * truncate(f, 1024);
1970 * a = mmap(f, 0, 4096);
1971 * a[0] = 'a';
1972 * truncate(f, 4096);
1973 * we have in the page first buffer_head mapped via page_mkwrite call back
1974 * but other buffer_heads would be unmapped but dirty (dirty done via the
1975 * do_wp_page). So writepage should write the first block. If we modify
1976 * the mmap area beyond 1024 we will again get a page_fault and the
1977 * page_mkwrite callback will do the block allocation and mark the
1978 * buffer_heads mapped.
1979 *
1980 * We redirty the page if we have any buffer_heads that are either delayed
1981 * or unwritten in the page.
1982 *
1983 * We can get recursively called as shown below.
1984 *
1985 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1986 *		ext4_writepage()
1987 *
1988 * But since we don't do any block allocation we should not deadlock.
1989 * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
1990 */
1991static int ext4_writepage(struct page *page,
1992			  struct writeback_control *wbc)
1993{
1994	int ret = 0, commit_write = 0;
1995	loff_t size;
1996	unsigned int len;
1997	struct buffer_head *page_bufs = NULL;
1998	struct inode *inode = page->mapping->host;
1999
2000	trace_ext4_writepage(page);
2001	size = i_size_read(inode);
2002	if (page->index == size >> PAGE_CACHE_SHIFT)
2003		len = size & ~PAGE_CACHE_MASK;
2004	else
2005		len = PAGE_CACHE_SIZE;
2006
2007	/*
2008	 * If the page does not have buffers (for whatever reason),
2009	 * try to create them using __block_write_begin.  If this
2010	 * fails, redirty the page and move on.
2011	 */
2012	if (!page_has_buffers(page)) {
2013		if (__block_write_begin(page, 0, len,
2014					noalloc_get_block_write)) {
2015		redirty_page:
2016			redirty_page_for_writepage(wbc, page);
2017			unlock_page(page);
2018			return 0;
2019		}
2020		commit_write = 1;
2021	}
2022	page_bufs = page_buffers(page);
2023	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2024			      ext4_bh_delay_or_unwritten)) {
2025		/*
2026		 * We don't want to do block allocation, so redirty
2027		 * the page and return.  We may reach here when we do
2028		 * a journal commit via journal_submit_inode_data_buffers.
2029		 * We can also reach here via shrink_page_list but it
2030		 * should never be for direct reclaim so warn if that
2031		 * happens
2032		 */
2033		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2034								PF_MEMALLOC);
2035		goto redirty_page;
2036	}
2037	if (commit_write)
2038		/* now mark the buffer_heads as dirty and uptodate */
2039		block_commit_write(page, 0, len);
2040
2041	if (PageChecked(page) && ext4_should_journal_data(inode))
2042		/*
2043		 * It's mmapped pagecache.  Add buffers and journal it.  There
2044		 * doesn't seem much point in redirtying the page here.
2045		 */
2046		return __ext4_journalled_writepage(page, len);
2047
2048	if (buffer_uninit(page_bufs)) {
2049		ext4_set_bh_endio(page_bufs, inode);
2050		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2051					    wbc, ext4_end_io_buffer_write);
2052	} else
2053		ret = block_write_full_page(page, noalloc_get_block_write,
2054					    wbc);
2055
2056	return ret;
2057}
2058
2059/*
2060 * This is called via ext4_da_writepages() to
2061 * calculate the total number of credits to reserve to fit
2062 * a single extent allocation into a single transaction;
2063 * ext4_da_writepages() will loop calling this before
2064 * the block allocation.
2065 */
2066
2067static int ext4_da_writepages_trans_blocks(struct inode *inode)
2068{
2069	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2070
2071	/*
2072	 * With non-extent format the journal credit needed to
2073	 * insert nrblocks contiguous blocks is dependent on the
2074	 * number of contiguous blocks. So we will limit the
2075	 * number of contiguous blocks to a sane value.
2076	 */
2077	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2078	    (max_blocks > EXT4_MAX_TRANS_DATA))
2079		max_blocks = EXT4_MAX_TRANS_DATA;
2080
2081	return ext4_chunk_trans_blocks(inode, max_blocks);
2082}
2083
2084/*
2085 * write_cache_pages_da - walk the list of dirty pages of the given
2086 * address space and accumulate pages that need writing, and call
2087 * mpage_da_map_and_submit to map a single contiguous memory region
2088 * and then write them.
2089 */
2090static int write_cache_pages_da(struct address_space *mapping,
2091				struct writeback_control *wbc,
2092				struct mpage_da_data *mpd,
2093				pgoff_t *done_index)
2094{
2095	struct buffer_head	*bh, *head;
2096	struct inode		*inode = mapping->host;
2097	struct pagevec		pvec;
2098	unsigned int		nr_pages;
2099	sector_t		logical;
2100	pgoff_t			index, end;
2101	long			nr_to_write = wbc->nr_to_write;
2102	int			i, tag, ret = 0;
2103
2104	memset(mpd, 0, sizeof(struct mpage_da_data));
2105	mpd->wbc = wbc;
2106	mpd->inode = inode;
2107	pagevec_init(&pvec, 0);
2108	index = wbc->range_start >> PAGE_CACHE_SHIFT;
2109	end = wbc->range_end >> PAGE_CACHE_SHIFT;
2110
2111	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2112		tag = PAGECACHE_TAG_TOWRITE;
2113	else
2114		tag = PAGECACHE_TAG_DIRTY;
2115
2116	*done_index = index;
2117	while (index <= end) {
2118		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2119			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2120		if (nr_pages == 0)
2121			return 0;
2122
2123		for (i = 0; i < nr_pages; i++) {
2124			struct page *page = pvec.pages[i];
2125
2126			/*
2127			 * At this point, the page may be truncated or
2128			 * invalidated (changing page->mapping to NULL), or
2129			 * even swizzled back from swapper_space to tmpfs file
2130			 * mapping. However, page->index will not change
2131			 * because we have a reference on the page.
2132			 */
2133			if (page->index > end)
2134				goto out;
2135
2136			*done_index = page->index + 1;
2137
2138			/*
2139			 * If we can't merge this page, and we have
2140			 * accumulated a contiguous region, write it
2141			 */
2142			if ((mpd->next_page != page->index) &&
2143			    (mpd->next_page != mpd->first_page)) {
2144				mpage_da_map_and_submit(mpd);
2145				goto ret_extent_tail;
2146			}
2147
2148			lock_page(page);
2149
2150			/*
2151			 * If the page is no longer dirty, or its
2152			 * mapping no longer corresponds to inode we
2153			 * are writing (which means it has been
2154			 * truncated or invalidated), or the page is
2155			 * already under writeback and we are not
2156			 * doing a data integrity writeback, skip the page
2157			 */
2158			if (!PageDirty(page) ||
2159			    (PageWriteback(page) &&
2160			     (wbc->sync_mode == WB_SYNC_NONE)) ||
2161			    unlikely(page->mapping != mapping)) {
2162				unlock_page(page);
2163				continue;
2164			}
2165
2166			wait_on_page_writeback(page);
2167			BUG_ON(PageWriteback(page));
2168
2169			if (mpd->next_page != page->index)
2170				mpd->first_page = page->index;
2171			mpd->next_page = page->index + 1;
2172			logical = (sector_t) page->index <<
2173				(PAGE_CACHE_SHIFT - inode->i_blkbits);
2174
2175			if (!page_has_buffers(page)) {
2176				mpage_add_bh_to_extent(mpd, logical,
2177						       PAGE_CACHE_SIZE,
2178						       (1 << BH_Dirty) | (1 << BH_Uptodate));
2179				if (mpd->io_done)
2180					goto ret_extent_tail;
2181			} else {
2182				/*
2183				 * Page with regular buffer heads,
2184				 * just add all dirty ones
2185				 */
2186				head = page_buffers(page);
2187				bh = head;
2188				do {
2189					BUG_ON(buffer_locked(bh));
2190					/*
2191					 * We need to try to allocate
2192					 * unmapped blocks in the same page.
2193					 * Otherwise we won't make progress
2194					 * with the page in ext4_writepage
2195					 */
2196					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2197						mpage_add_bh_to_extent(mpd, logical,
2198								       bh->b_size,
2199								       bh->b_state);
2200						if (mpd->io_done)
2201							goto ret_extent_tail;
2202					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2203						/*
2204						 * mapped dirty buffer. We need
2205						 * to update the b_state
2206						 * because we look at b_state
2207						 * in mpage_da_map_blocks.  We
2208						 * don't update b_size because
2209						 * if we find an unmapped
2210						 * buffer_head later we need to
2211						 * use the b_state flag of that
2212						 * buffer_head.
2213						 */
2214						if (mpd->b_size == 0)
2215							mpd->b_state = bh->b_state & BH_FLAGS;
2216					}
2217					logical++;
2218				} while ((bh = bh->b_this_page) != head);
2219			}
2220
2221			if (nr_to_write > 0) {
2222				nr_to_write--;
2223				if (nr_to_write == 0 &&
2224				    wbc->sync_mode == WB_SYNC_NONE)
2225					/*
2226					 * We stop writing back only if we are
2227					 * not doing integrity sync. In case of
2228					 * integrity sync we have to keep going
2229					 * because someone may be concurrently
2230					 * dirtying pages, and we might have
2231					 * synced a lot of newly appeared dirty
2232					 * pages, but have not synced all of the
2233					 * old dirty pages.
2234					 */
2235					goto out;
2236			}
2237		}
2238		pagevec_release(&pvec);
2239		cond_resched();
2240	}
2241	return 0;
2242ret_extent_tail:
2243	ret = MPAGE_DA_EXTENT_TAIL;
2244out:
2245	pagevec_release(&pvec);
2246	cond_resched();
2247	return ret;
2248}
2249
2250
2251static int ext4_da_writepages(struct address_space *mapping,
2252			      struct writeback_control *wbc)
2253{
2254	pgoff_t	index;
2255	int range_whole = 0;
2256	handle_t *handle = NULL;
2257	struct mpage_da_data mpd;
2258	struct inode *inode = mapping->host;
2259	int pages_written = 0;
2260	unsigned int max_pages;
2261	int range_cyclic, cycled = 1, io_done = 0;
2262	int needed_blocks, ret = 0;
2263	long desired_nr_to_write, nr_to_writebump = 0;
2264	loff_t range_start = wbc->range_start;
2265	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2266	pgoff_t done_index = 0;
2267	pgoff_t end;
2268	struct blk_plug plug;
2269
2270	trace_ext4_da_writepages(inode, wbc);
2271
2272	/*
2273	 * No pages to write? This is mainly a kludge to avoid starting
2274	 * a transaction for special inodes like journal inode on last iput()
2275	 * because that could violate lock ordering on umount
2276	 */
2277	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2278		return 0;
2279
2280	/*
2281	 * If the filesystem has aborted, it is read-only, so return
2282	 * right away instead of dumping stack traces later on that
2283	 * will obscure the real source of the problem.  We test
2284	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2285	 * the latter could be true if the filesystem is mounted
2286	 * read-only, and in that case, ext4_da_writepages should
2287	 * *never* be called, so if that ever happens, we would want
2288	 * the stack trace.
2289	 */
2290	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2291		return -EROFS;
2292
2293	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2294		range_whole = 1;
2295
2296	range_cyclic = wbc->range_cyclic;
2297	if (wbc->range_cyclic) {
2298		index = mapping->writeback_index;
2299		if (index)
2300			cycled = 0;
2301		wbc->range_start = index << PAGE_CACHE_SHIFT;
2302		wbc->range_end  = LLONG_MAX;
2303		wbc->range_cyclic = 0;
2304		end = -1;
2305	} else {
2306		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2307		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2308	}
2309
2310	/*
2311	 * This works around two forms of stupidity.  The first is in
2312	 * the writeback code, which caps the maximum number of pages
2313	 * written to be 1024 pages.  This is wrong on multiple
2314 * levels; different architectures have a different page size,
2315	 * which changes the maximum amount of data which gets
2316	 * written.  Secondly, 4 megabytes is way too small.  XFS
2317	 * forces this value to be 16 megabytes by multiplying
2318	 * nr_to_write parameter by four, and then relies on its
2319	 * allocator to allocate larger extents to make them
2320	 * contiguous.  Unfortunately this brings us to the second
2321	 * stupidity, which is that ext4's mballoc code only allocates
2322	 * at most 2048 blocks.  So we force contiguous writes up to
2323	 * the number of dirty blocks in the inode, or
2324	 * sbi->max_writeback_mb_bump whichever is smaller.
2325	 */
2326	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2327	if (!range_cyclic && range_whole) {
2328		if (wbc->nr_to_write == LONG_MAX)
2329			desired_nr_to_write = wbc->nr_to_write;
2330		else
2331			desired_nr_to_write = wbc->nr_to_write * 8;
2332	} else
2333		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2334							   max_pages);
2335	if (desired_nr_to_write > max_pages)
2336		desired_nr_to_write = max_pages;
2337
2338	if (wbc->nr_to_write < desired_nr_to_write) {
2339		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2340		wbc->nr_to_write = desired_nr_to_write;
2341	}
2342
2343retry:
2344	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2345		tag_pages_for_writeback(mapping, index, end);
2346
2347	blk_start_plug(&plug);
2348	while (!ret && wbc->nr_to_write > 0) {
2349
2350		/*
2351		 * we insert one extent at a time, so we need the
2352		 * credits needed for a single extent allocation.
2353		 * journalled mode is currently not supported
2354		 * by delalloc
2355		 */
2356		BUG_ON(ext4_should_journal_data(inode));
2357		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2358
2359		/* start a new transaction*/
2360		handle = ext4_journal_start(inode, needed_blocks);
2361		if (IS_ERR(handle)) {
2362			ret = PTR_ERR(handle);
2363			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2364			       "%ld pages, ino %lu; err %d", __func__,
2365				wbc->nr_to_write, inode->i_ino, ret);
2366			blk_finish_plug(&plug);
2367			goto out_writepages;
2368		}
2369
2370		/*
2371		 * Now call write_cache_pages_da() to find the next
2372		 * contiguous region of logical blocks that need
2373		 * blocks to be allocated by ext4 and submit them.
2374		 */
2375		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2376		/*
2377		 * If we have a contiguous extent of pages and we
2378		 * haven't done the I/O yet, map the blocks and submit
2379		 * them for I/O.
2380		 */
2381		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2382			mpage_da_map_and_submit(&mpd);
2383			ret = MPAGE_DA_EXTENT_TAIL;
2384		}
2385		trace_ext4_da_write_pages(inode, &mpd);
2386		wbc->nr_to_write -= mpd.pages_written;
2387
2388		ext4_journal_stop(handle);
2389
2390		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2391			/* commit the transaction which would
2392			 * free blocks released in the transaction
2393			 * and try again
2394			 */
2395			jbd2_journal_force_commit_nested(sbi->s_journal);
2396			ret = 0;
2397		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2398			/*
2399			 * Got one extent now try with rest of the pages.
2400			 * If mpd.retval is set -EIO, journal is aborted.
2401			 * So we don't need to write any more.
2402			 */
2403			pages_written += mpd.pages_written;
2404			ret = mpd.retval;
2405			io_done = 1;
2406		} else if (wbc->nr_to_write)
2407			/*
2408			 * There is no more writeout needed,
2409			 * or we requested a non-blocking writeout
2410			 * and found the device congested
2411			 */
2412			break;
2413	}
2414	blk_finish_plug(&plug);
2415	if (!io_done && !cycled) {
2416		cycled = 1;
2417		index = 0;
2418		wbc->range_start = index << PAGE_CACHE_SHIFT;
2419		wbc->range_end  = mapping->writeback_index - 1;
2420		goto retry;
2421	}
2422
2423	/* Update index */
2424	wbc->range_cyclic = range_cyclic;
2425	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2426		/*
2427		 * set the writeback_index so that range_cyclic
2428		 * mode will write it back later
2429		 */
2430		mapping->writeback_index = done_index;
2431
2432out_writepages:
2433	wbc->nr_to_write -= nr_to_writebump;
2434	wbc->range_start = range_start;
2435	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2436	return ret;
2437}
2438
2439#define FALL_BACK_TO_NONDELALLOC 1
2440static int ext4_nonda_switch(struct super_block *sb)
2441{
2442	s64 free_blocks, dirty_blocks;
2443	struct ext4_sb_info *sbi = EXT4_SB(sb);
2444
2445	/*
2446	 * switch to non delalloc mode if we are running low
2447	 * on free blocks. The free block accounting via percpu
2448	 * counters can get slightly wrong with percpu_counter_batch getting
2449	 * accumulated on each CPU without updating global counters.
2450	 * Delalloc needs an accurate free block accounting. So switch
2451	 * to non delalloc when we are near the error range.
2452	 */
2453	free_blocks  = EXT4_C2B(sbi,
2454		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2455	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2456	if (2 * free_blocks < 3 * dirty_blocks ||
2457		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
2458		/*
2459		 * free block count is less than 150% of dirty blocks
2460		 * or free blocks are less than the watermark
2461		 */
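		/* e.g. free_blocks = 100, dirty_blocks = 80:
		 * 2 * 100 < 3 * 80, so we switch to non-delalloc */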
2462		return 1;
2463	}
2464	/*
2465	 * Even if we don't switch but are nearing capacity,
2466	 * start pushing delalloc when 1/2 of free blocks are dirty.
2467	 */
2468	if (free_blocks < 2 * dirty_blocks)
2469		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2470
2471	return 0;
2472}
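/*
 * Delayed-allocation flavour of ->write_begin(): reserve space via
 * ext4_da_get_block_prep() instead of allocating blocks, falling back
 * to the non-delalloc path when ext4_nonda_switch() says free space is
 * running low.
 */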
2473
2474static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2475			       loff_t pos, unsigned len, unsigned flags,
2476			       struct page **pagep, void **fsdata)
2477{
2478	int ret, retries = 0;
2479	struct page *page;
2480	pgoff_t index;
2481	struct inode *inode = mapping->host;
2482	handle_t *handle;
2483
2484	index = pos >> PAGE_CACHE_SHIFT;
2485
2486	if (ext4_nonda_switch(inode->i_sb)) {
2487		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2488		return ext4_write_begin(file, mapping, pos,
2489					len, flags, pagep, fsdata);
2490	}
2491	*fsdata = (void *)0;
2492	trace_ext4_da_write_begin(inode, pos, len, flags);
2493retry:
2494	/*
2495	 * With delayed allocation, we don't log the i_disksize update
2496	 * if there is delayed block allocation. But we still need
2497	 * to journal the i_disksize update if we write to the end
2498	 * of a file which has an already mapped buffer.
2499	 */
2500	handle = ext4_journal_start(inode, 1);
2501	if (IS_ERR(handle)) {
2502		ret = PTR_ERR(handle);
2503		goto out;
2504	}
2505	/* We cannot recurse into the filesystem as the transaction is already
2506	 * started */
2507	flags |= AOP_FLAG_NOFS;
2508
2509	page = grab_cache_page_write_begin(mapping, index, flags);
2510	if (!page) {
2511		ext4_journal_stop(handle);
2512		ret = -ENOMEM;
2513		goto out;
2514	}
2515	*pagep = page;
2516
2517	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2518	if (ret < 0) {
2519		unlock_page(page);
2520		ext4_journal_stop(handle);
2521		page_cache_release(page);
2522		/*
2523		 * block_write_begin may have instantiated a few blocks
2524		 * outside i_size.  Trim these off again. Don't need
2525		 * i_size_read because we hold i_mutex.
2526		 */
2527		if (pos + len > inode->i_size)
2528			ext4_truncate_failed_write(inode);
2529	}
2530
2531	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2532		goto retry;
2533out:
2534	return ret;
2535}
2536
2537/*
2538 * Check if we should update i_disksize
2539 * when write to the end of file but not require block allocation
2540 */
2541static int ext4_da_should_update_i_disksize(struct page *page,
2542					    unsigned long offset)
2543{
2544	struct buffer_head *bh;
2545	struct inode *inode = page->mapping->host;
2546	unsigned int idx;
2547	int i;
2548
2549	bh = page_buffers(page);
2550	idx = offset >> inode->i_blkbits;
2551
2552	for (i = 0; i < idx; i++)
2553		bh = bh->b_this_page;
2554
2555	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2556		return 0;
2557	return 1;
2558}
2559
2560static int ext4_da_write_end(struct file *file,
2561			     struct address_space *mapping,
2562			     loff_t pos, unsigned len, unsigned copied,
2563			     struct page *page, void *fsdata)
2564{
2565	struct inode *inode = mapping->host;
2566	int ret = 0, ret2;
2567	handle_t *handle = ext4_journal_current_handle();
2568	loff_t new_i_size;
2569	unsigned long start, end;
2570	int write_mode = (int)(unsigned long)fsdata;
2571
2572	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2573		switch (ext4_inode_journal_mode(inode)) {
2574		case EXT4_INODE_ORDERED_DATA_MODE:
2575			return ext4_ordered_write_end(file, mapping, pos,
2576					len, copied, page, fsdata);
2577		case EXT4_INODE_WRITEBACK_DATA_MODE:
2578			return ext4_writeback_write_end(file, mapping, pos,
2579					len, copied, page, fsdata);
2580		default:
2581			BUG();
2582		}
2583	}
2584
2585	trace_ext4_da_write_end(inode, pos, len, copied);
2586	start = pos & (PAGE_CACHE_SIZE - 1);
2587	end = start + copied - 1;
2588
2589	/*
2590	 * generic_write_end() will run mark_inode_dirty() if i_size
2591	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
2592	 * into that.
2593	 */
2594
2595	new_i_size = pos + copied;
2596	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2597		if (ext4_da_should_update_i_disksize(page, end)) {
2598			down_write(&EXT4_I(inode)->i_data_sem);
2599			if (new_i_size > EXT4_I(inode)->i_disksize) {
2600				/*
2601				 * Updating i_disksize when extending file
2602				 * without needing block allocation
2603				 */
2604				if (ext4_should_order_data(inode))
2605					ret = ext4_jbd2_file_inode(handle,
2606								   inode);
2607
2608				EXT4_I(inode)->i_disksize = new_i_size;
2609			}
2610			up_write(&EXT4_I(inode)->i_data_sem);
2611			/* We need to mark the inode dirty even if
2612			 * new_i_size is less than inode->i_size
2613			 * but greater than i_disksize. (hint: delalloc)
2614			 */
2615			ext4_mark_inode_dirty(handle, inode);
2616		}
2617	}
2618	ret2 = generic_write_end(file, mapping, pos, len, copied,
2619							page, fsdata);
2620	copied = ret2;
2621	if (ret2 < 0)
2622		ret = ret2;
2623	ret2 = ext4_journal_stop(handle);
2624	if (!ret)
2625		ret = ret2;
2626
2627	return ret ? ret : copied;
2628}
2629
2630static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2631{
2632	/*
2633	 * Drop reserved blocks
2634	 */
2635	BUG_ON(!PageLocked(page));
2636	if (!page_has_buffers(page))
2637		goto out;
2638
2639	ext4_da_page_release_reservation(page, offset);
2640
2641out:
2642	ext4_invalidatepage(page, offset);
2643
2644	return;
2645}
2646
2647/*
2648 * Force all delayed allocation blocks to be allocated for a given inode.
2649 */
2650int ext4_alloc_da_blocks(struct inode *inode)
2651{
2652	trace_ext4_alloc_da_blocks(inode);
2653
2654	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2655	    !EXT4_I(inode)->i_reserved_meta_blocks)
2656		return 0;
2657
2658	/*
2659	 * We do something simple for now.  The filemap_flush() will
2660	 * also start triggering a write of the data blocks, which is
2661	 * not strictly speaking necessary (and for users of
2662	 * laptop_mode, not even desirable).  However, to do otherwise
2663	 * would require replicating code paths in:
2664	 *
2665	 * ext4_da_writepages() ->
2666	 *    write_cache_pages() ---> (via passed in callback function)
2667	 *        __mpage_da_writepage() -->
2668	 *           mpage_add_bh_to_extent()
2669	 *           mpage_da_map_blocks()
2670	 *
2671	 * The problem is that write_cache_pages(), located in
2672	 * mm/page-writeback.c, marks pages clean in preparation for
2673	 * doing I/O, which is not desirable if we're not planning on
2674	 * doing I/O at all.
2675	 *
2676	 * We could call write_cache_pages(), and then redirty all of
2677	 * the pages by calling redirty_page_for_writepage() but that
2678	 * would be ugly in the extreme.  So instead we would need to
2679	 * replicate parts of the code in the above functions,
2680	 * simplifying them because we wouldn't actually intend to
2681	 * write out the pages, but rather only collect contiguous
2682	 * logical block extents, call the multi-block allocator, and
2683	 * then update the buffer heads with the block allocations.
2684	 *
2685	 * For now, though, we'll cheat by calling filemap_flush(),
2686	 * which will map the blocks, and start the I/O, but not
2687	 * actually wait for the I/O to complete.
2688	 */
2689	return filemap_flush(inode->i_mapping);
2690}
2691
2692/*
2693 * bmap() is special.  It gets used by applications such as lilo and by
2694 * the swapper to find the on-disk block of a specific piece of data.
2695 *
2696 * Naturally, this is dangerous if the block concerned is still in the
2697 * journal.  If somebody makes a swapfile on an ext4 data-journaling
2698 * filesystem and enables swap, then they may get a nasty shock when the
2699 * data getting swapped to that swapfile suddenly gets overwritten by
2700 * the original zeros written out previously to the journal and
2701 * awaiting writeback in the kernel's buffer cache.
2702 *
2703 * So, if we see any bmap calls here on a modified, data-journaled file,
2704 * take extra steps to flush any blocks which might be in the cache.
2705 */
2706static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2707{
2708	struct inode *inode = mapping->host;
2709	journal_t *journal;
2710	int err;
2711
2712	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2713			test_opt(inode->i_sb, DELALLOC)) {
2714		/*
2715		 * With delalloc we want to sync the file
2716		 * so that we can make sure we allocate
2717		 * blocks for the file
2718		 */
2719		filemap_write_and_wait(mapping);
2720	}
2721
2722	if (EXT4_JOURNAL(inode) &&
2723	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2724		/*
2725		 * This is a REALLY heavyweight approach, but the use of
2726		 * bmap on dirty files is expected to be extremely rare:
2727		 * only if we run lilo or swapon on a freshly made file
2728		 * do we expect this to happen.
2729		 *
2730		 * (bmap requires CAP_SYS_RAWIO so this does not
2731		 * represent an unprivileged user DOS attack --- we'd be
2732		 * in trouble if mortal users could trigger this path at
2733		 * will.)
2734		 *
2735		 * NB. EXT4_STATE_JDATA is not set on files other than
2736		 * regular files.  If somebody wants to bmap a directory
2737		 * or symlink and gets confused because the buffer
2738		 * hasn't yet been flushed to disk, they deserve
2739		 * everything they get.
2740		 */
2741
2742		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2743		journal = EXT4_JOURNAL(inode);
2744		jbd2_journal_lock_updates(journal);
2745		err = jbd2_journal_flush(journal);
2746		jbd2_journal_unlock_updates(journal);
2747
2748		if (err)
2749			return 0;
2750	}
2751
2752	return generic_block_bmap(mapping, block, ext4_get_block);
2753}
2754
2755static int ext4_readpage(struct file *file, struct page *page)
2756{
2757	trace_ext4_readpage(page);
2758	return mpage_readpage(page, ext4_get_block);
2759}
2760
2761static int
2762ext4_readpages(struct file *file, struct address_space *mapping,
2763		struct list_head *pages, unsigned nr_pages)
2764{
2765	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2766}
2767
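/*
 * Free any io_end structures attached (for dioread_nolock writeback) to
 * buffers of a page that is being invalidated; their async write will
 * never complete now.
 */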
2768static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2769{
2770	struct buffer_head *head, *bh;
2771	unsigned int curr_off = 0;
2772
2773	if (!page_has_buffers(page))
2774		return;
2775	head = bh = page_buffers(page);
2776	do {
2777		if (offset <= curr_off && test_clear_buffer_uninit(bh)
2778					&& bh->b_private) {
2779			ext4_free_io_end(bh->b_private);
2780			bh->b_private = NULL;
2781			bh->b_end_io = NULL;
2782		}
2783		curr_off = curr_off + bh->b_size;
2784		bh = bh->b_this_page;
2785	} while (bh != head);
2786}
2787
2788static void ext4_invalidatepage(struct page *page, unsigned long offset)
2789{
2790	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2791
2792	trace_ext4_invalidatepage(page, offset);
2793
2794	/*
2795	 * free any io_end structure allocated for buffers to be discarded
2796	 */
2797	if (ext4_should_dioread_nolock(page->mapping->host))
2798		ext4_invalidatepage_free_endio(page, offset);
2799	/*
2800	 * If it's a full truncate we just forget about the pending dirtying
2801	 */
2802	if (offset == 0)
2803		ClearPageChecked(page);
2804
2805	if (journal)
2806		jbd2_journal_invalidatepage(journal, page, offset);
2807	else
2808		block_invalidatepage(page, offset);
2809}
2810
2811static int ext4_releasepage(struct page *page, gfp_t wait)
2812{
2813	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2814
2815	trace_ext4_releasepage(page);
2816
2817	WARN_ON(PageChecked(page));
2818	if (!page_has_buffers(page))
2819		return 0;
2820	if (journal)
2821		return jbd2_journal_try_to_free_buffers(journal, page, wait);
2822	else
2823		return try_to_free_buffers(page);
2824}
2825
2826/*
2827 * ext4_get_block used when preparing for a DIO write or buffer write.
2828 * We allocate an uninitialized extent if blocks haven't been allocated.
2829 * The extent will be converted to initialized after the IO is complete.
2830 */
2831static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2832		   struct buffer_head *bh_result, int create)
2833{
2834	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2835		   inode->i_ino, create);
2836	return _ext4_get_block(inode, iblock, bh_result,
2837			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
2838}
2839
2840static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2841			    ssize_t size, void *private, int ret,
2842			    bool is_async)
2843{
2844	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2845	ext4_io_end_t *io_end = iocb->private;
2846	struct workqueue_struct *wq;
2847	unsigned long flags;
2848	struct ext4_inode_info *ei;
2849
2850	/* if not async direct IO or dio with 0 bytes write, just return */
2851	if (!io_end || !size)
2852		goto out;
2853
2854	ext_debug("ext4_end_io_dio(): io_end 0x%p "
2855		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
2856		  iocb->private, io_end->inode->i_ino, iocb, offset,
2857		  size);
2858
2859	iocb->private = NULL;
2860
2861	/* if not aio dio with unwritten extents, just free io and return */
2862	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2863		ext4_free_io_end(io_end);
2864out:
2865		if (is_async)
2866			aio_complete(iocb, ret, 0);
2867		inode_dio_done(inode);
2868		return;
2869	}
2870
2871	io_end->offset = offset;
2872	io_end->size = size;
2873	if (is_async) {
2874		io_end->iocb = iocb;
2875		io_end->result = ret;
2876	}
2877	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2878
2879	/* Add the io_end to per-inode completed aio dio list*/
2880	ei = EXT4_I(io_end->inode);
2881	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2882	list_add_tail(&io_end->list, &ei->i_completed_io_list);
2883	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2884
2885	/* queue the work to convert unwritten extents to written */
2886	queue_work(wq, &io_end->work);
2887}
2888
2889static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2890{
2891	ext4_io_end_t *io_end = bh->b_private;
2892	struct workqueue_struct *wq;
2893	struct inode *inode;
2894	unsigned long flags;
2895
2896	if (!test_clear_buffer_uninit(bh) || !io_end)
2897		goto out;
2898
2899	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2900		ext4_msg(io_end->inode->i_sb, KERN_INFO,
2901			 "sb umounted, discard end_io request for inode %lu",
2902			 io_end->inode->i_ino);
2903		ext4_free_io_end(io_end);
2904		goto out;
2905	}
2906
2907	/*
2908	 * It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
2909	 * but being more careful is always safe for future changes.
2910	 */
2911	inode = io_end->inode;
2912	ext4_set_io_unwritten_flag(inode, io_end);
2913
2914	/* Add the io_end to per-inode completed io list*/
2915	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2916	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2917	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2918
2919	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2920	/* queue the work to convert unwritten extents to written */
2921	queue_work(wq, &io_end->work);
2922out:
2923	bh->b_private = NULL;
2924	bh->b_end_io = NULL;
2925	clear_buffer_uninit(bh);
2926	end_buffer_async_write(bh, uptodate);
2927}
2928
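/*
 * Attach an io_end to a buffer head so that its completion handler,
 * ext4_end_io_buffer_write() above, can queue the unwritten extent
 * conversion.  Used by ext4_writepage() for dioread_nolock pages whose
 * buffers carry the uninit flag.
 */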
2929static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2930{
2931	ext4_io_end_t *io_end;
2932	struct page *page = bh->b_page;
2933	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2934	size_t size = bh->b_size;
2935
2936retry:
2937	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2938	if (!io_end) {
2939		pr_warn_ratelimited("%s: allocation fail\n", __func__);
2940		schedule();
2941		goto retry;
2942	}
2943	io_end->offset = offset;
2944	io_end->size = size;
2945	/*
2946	 * We need to hold a reference to the page to make sure it
2947	 * doesn't get evicted before ext4_end_io_work() has a chance
2948	 * to convert the extent from unwritten to written.
2949	 */
2950	io_end->page = page;
2951	get_page(io_end->page);
2952
2953	bh->b_private = io_end;
2954	bh->b_end_io = ext4_end_io_buffer_write;
2955	return 0;
2956}
2957
2958/*
2959 * For ext4 extent files, ext4 will do direct-io write to holes,
2960 * preallocated extents, and those writes extend the file; no need to
2961 * fall back to buffered IO.
2962 *
2963 * For holes, we fallocate those blocks and mark them as uninitialized.
2964 * If those blocks were preallocated, we make sure they are split, but
2965 * still keep the range to write as uninitialized.
2966 *
2967 * The unwritten extents will be converted to written when DIO is completed.
2968 * For async direct IO, since the IO may still be pending on return, we
2969 * set up an end_io callback function, which will do the conversion
2970 * when the async direct IO is completed.
2971 *
2972 * If the O_DIRECT write will extend the file then add this inode to the
2973 * orphan list.  So recovery will truncate it back to the original size
2974 * if the machine crashes during the write.
2975 *
2976 */
2977static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2978			      const struct iovec *iov, loff_t offset,
2979			      unsigned long nr_segs)
2980{
2981	struct file *file = iocb->ki_filp;
2982	struct inode *inode = file->f_mapping->host;
2983	ssize_t ret;
2984	size_t count = iov_length(iov, nr_segs);
2985
2986	loff_t final_size = offset + count;
2987	if (rw == WRITE && final_size <= inode->i_size) {
2988		/*
2989		 * We could direct write to holes and fallocate.
2990		 *
2991		 * Allocated blocks to fill the hole are marked as uninitialized
2992		 * to prevent a parallel buffered read from exposing the stale data
2993		 * before DIO completes the data IO.
2994		 *
2995		 * As to previously fallocated extents, ext4 get_block
2996		 * will just simply mark the buffer mapped but still
2997		 * keep the extents uninitialized.
2998		 *
2999		 * for the non AIO case, we will convert those unwritten extents
3000		 * to written after return back from blockdev_direct_IO.
3001		 *
3002		 * for async DIO, the conversion needs to be deferred until
3003		 * the IO is completed. The ext4 end_io callback function
3004		 * will be called to take care of the conversion work.
3005		 * Here for the async case, we allocate an io_end structure to
3006		 * hook to the iocb.
3007		 */
3008		iocb->private = NULL;
3009		EXT4_I(inode)->cur_aio_dio = NULL;
3010		if (!is_sync_kiocb(iocb)) {
3011			ext4_io_end_t *io_end =
3012				ext4_init_io_end(inode, GFP_NOFS);
3013			if (!io_end)
3014				return -ENOMEM;
3015			io_end->flag |= EXT4_IO_END_DIRECT;
3016			iocb->private = io_end;
3017			/*
3018			 * we save the io structure for the current async
3019			 * direct IO, so that later ext4_map_blocks()
3020			 * could flag the io structure when there
3021			 * are unwritten extents that need to be converted
3022			 * when the IO is completed.
3023			 */
3024			EXT4_I(inode)->cur_aio_dio = iocb->private;
3025		}
3026
3027		ret = __blockdev_direct_IO(rw, iocb, inode,
3028					 inode->i_sb->s_bdev, iov,
3029					 offset, nr_segs,
3030					 ext4_get_block_write,
3031					 ext4_end_io_dio,
3032					 NULL,
3033					 DIO_LOCKING);
3034		if (iocb->private)
3035			EXT4_I(inode)->cur_aio_dio = NULL;
3036		/*
3037		 * The io_end structure takes a reference to the inode;
3038		 * that structure needs to be destroyed and the
3039		 * reference to the inode needs to be dropped when the IO is
3040		 * complete, even with a 0 byte write, or if it failed.
3041		 *
3042		 * In the successful AIO DIO case, the io_end structure will be
3043		 * destroyed and the reference to the inode will be dropped
3044		 * after the end_io callback function is called.
3045		 *
3046		 * In the case of a 0 byte write, or the error case, since
3047		 * VFS direct IO won't invoke the end_io callback function,
3048		 * we need to free the end_io structure here.
3049		 */
3050		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3051			ext4_free_io_end(iocb->private);
3052			iocb->private = NULL;
3053		} else if (ret > 0 && ext4_test_inode_state(inode,
3054						EXT4_STATE_DIO_UNWRITTEN)) {
3055			int err;
3056			/*
3057			 * for non AIO case, since the IO is already
3058			 * completed, we could do the conversion right here
3059			 */
3060			err = ext4_convert_unwritten_extents(inode,
3061							     offset, ret);
3062			if (err < 0)
3063				ret = err;
3064			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3065		}
3066		return ret;
3067	}
3068
3069	/* for the write to the end of file case, we fall back to the old way */
3070	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3071}
3072
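/*
 * Dispatch O_DIRECT requests: data journalling does not support
 * O_DIRECT at all, extent-mapped inodes go through ext4_ext_direct_IO()
 * above, and indirect-mapped inodes use ext4_ind_direct_IO().
 */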
3073static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3074			      const struct iovec *iov, loff_t offset,
3075			      unsigned long nr_segs)
3076{
3077	struct file *file = iocb->ki_filp;
3078	struct inode *inode = file->f_mapping->host;
3079	ssize_t ret;
3080
3081	/*
3082	 * If we are doing data journalling we don't support O_DIRECT
3083	 */
3084	if (ext4_should_journal_data(inode))
3085		return 0;
3086
3087	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3088	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3089		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3090	else
3091		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3092	trace_ext4_direct_IO_exit(inode, offset,
3093				iov_length(iov, nr_segs), rw, ret);
3094	return ret;
3095}
3096
3097/*
3098 * Pages can be marked dirty completely asynchronously from ext4's journalling
3099 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3100 * much here because ->set_page_dirty is called under VFS locks.  The page is
3101 * not necessarily locked.
3102 *
3103 * We cannot just dirty the page and leave attached buffers clean, because the
3104 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3105 * or jbddirty because all the journalling code will explode.
3106 *
3107 * So what we do is to mark the page "pending dirty" and next time writepage
3108 * is called, propagate that into the buffers appropriately.
3109 */
3110static int ext4_journalled_set_page_dirty(struct page *page)
3111{
3112	SetPageChecked(page);
3113	return __set_page_dirty_nobuffers(page);
3114}
3115
3116static const struct address_space_operations ext4_ordered_aops = {
3117	.readpage		= ext4_readpage,
3118	.readpages		= ext4_readpages,
3119	.writepage		= ext4_writepage,
3120	.write_begin		= ext4_write_begin,
3121	.write_end		= ext4_ordered_write_end,
3122	.bmap			= ext4_bmap,
3123	.invalidatepage		= ext4_invalidatepage,
3124	.releasepage		= ext4_releasepage,
3125	.direct_IO		= ext4_direct_IO,
3126	.migratepage		= buffer_migrate_page,
3127	.is_partially_uptodate  = block_is_partially_uptodate,
3128	.error_remove_page	= generic_error_remove_page,
3129};
3130
3131static const struct address_space_operations ext4_writeback_aops = {
3132	.readpage		= ext4_readpage,
3133	.readpages		= ext4_readpages,
3134	.writepage		= ext4_writepage,
3135	.write_begin		= ext4_write_begin,
3136	.write_end		= ext4_writeback_write_end,
3137	.bmap			= ext4_bmap,
3138	.invalidatepage		= ext4_invalidatepage,
3139	.releasepage		= ext4_releasepage,
3140	.direct_IO		= ext4_direct_IO,
3141	.migratepage		= buffer_migrate_page,
3142	.is_partially_uptodate  = block_is_partially_uptodate,
3143	.error_remove_page	= generic_error_remove_page,
3144};
3145
3146static const struct address_space_operations ext4_journalled_aops = {
3147	.readpage		= ext4_readpage,
3148	.readpages		= ext4_readpages,
3149	.writepage		= ext4_writepage,
3150	.write_begin		= ext4_write_begin,
3151	.write_end		= ext4_journalled_write_end,
3152	.set_page_dirty		= ext4_journalled_set_page_dirty,
3153	.bmap			= ext4_bmap,
3154	.invalidatepage		= ext4_invalidatepage,
3155	.releasepage		= ext4_releasepage,
3156	.direct_IO		= ext4_direct_IO,
3157	.is_partially_uptodate  = block_is_partially_uptodate,
3158	.error_remove_page	= generic_error_remove_page,
3159};
3160
3161static const struct address_space_operations ext4_da_aops = {
3162	.readpage		= ext4_readpage,
3163	.readpages		= ext4_readpages,
3164	.writepage		= ext4_writepage,
3165	.writepages		= ext4_da_writepages,
3166	.write_begin		= ext4_da_write_begin,
3167	.write_end		= ext4_da_write_end,
3168	.bmap			= ext4_bmap,
3169	.invalidatepage		= ext4_da_invalidatepage,
3170	.releasepage		= ext4_releasepage,
3171	.direct_IO		= ext4_direct_IO,
3172	.migratepage		= buffer_migrate_page,
3173	.is_partially_uptodate  = block_is_partially_uptodate,
3174	.error_remove_page	= generic_error_remove_page,
3175};
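/*
 * Pick the address_space_operations for an inode from its journalling
 * mode and the delalloc mount option: delayed allocation uses
 * ext4_da_aops, data journalling uses ext4_journalled_aops, and the
 * ordered/writeback data modes use their respective tables.
 */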
3176
3177void ext4_set_aops(struct inode *inode)
3178{
3179	switch (ext4_inode_journal_mode(inode)) {
3180	case EXT4_INODE_ORDERED_DATA_MODE:
3181		if (test_opt(inode->i_sb, DELALLOC))
3182			inode->i_mapping->a_ops = &ext4_da_aops;
3183		else
3184			inode->i_mapping->a_ops = &ext4_ordered_aops;
3185		break;
3186	case EXT4_INODE_WRITEBACK_DATA_MODE:
3187		if (test_opt(inode->i_sb, DELALLOC))
3188			inode->i_mapping->a_ops = &ext4_da_aops;
3189		else
3190			inode->i_mapping->a_ops = &ext4_writeback_aops;
3191		break;
3192	case EXT4_INODE_JOURNAL_DATA_MODE:
3193		inode->i_mapping->a_ops = &ext4_journalled_aops;
3194		break;
3195	default:
3196		BUG();
3197	}
3198}
3199
3200
3201/*
3202 * ext4_discard_partial_page_buffers()
3203 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3204 * This function finds and locks the page containing the offset
3205 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3206 * Calling functions that already have the page locked should call
3207 * ext4_discard_partial_page_buffers_no_lock directly.
3208 */
3209int ext4_discard_partial_page_buffers(handle_t *handle,
3210		struct address_space *mapping, loff_t from,
3211		loff_t length, int flags)
3212{
3213	struct inode *inode = mapping->host;
3214	struct page *page;
3215	int err = 0;
3216
3217	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3218				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3219	if (!page)
3220		return -ENOMEM;
3221
3222	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3223		from, length, flags);
3224
3225	unlock_page(page);
3226	page_cache_release(page);
3227	return err;
3228}
3229
3230/*
3231 * ext4_discard_partial_page_buffers_no_lock()
3232 * Zeros a page range of length 'length' starting from offset 'from'.
3233 * Buffer heads that correspond to the block aligned regions of the
3234 * zeroed range will be unmapped.  Non-block-aligned regions
3235 * will have the corresponding buffer head mapped if needed so that
3236 * that region of the page can be updated with the partial zero out.
3237 *
3238 * This function assumes that the page has already been locked.  The
3239 * range to be discarded must be contained within the given page.
3240 * If the specified range exceeds the end of the page it will be shortened
3241 * to the end of the page that corresponds to 'from'.  This function is
3242 * appropriate for updating a page and its buffer heads to be unmapped and
3243 * zeroed for blocks that have been either released, or are going to be
3244 * released.
3245 *
3246 * handle: The journal handle
3247 * inode:  The file's inode
3248 * page:   A locked page that contains the offset "from"
3249 * from:   The starting byte offset (from the beginning of the file)
3250 *         to begin discarding
3251 * len:    The length of bytes to discard
3252 * flags:  Optional flags that may be used:
3253 *
3254 *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
3255 *         Only zero the regions of the page whose buffer heads
3256 *         have already been unmapped.  This flag is appropriate
3257 *         for updating the contents of a page whose blocks may
3258 *         have already been released, and we only want to zero
3259 *         out the regions that correspond to those released blocks.
3260 *
3261 * Returns zero on success or negative on failure.
3262 */
3263static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3264		struct inode *inode, struct page *page, loff_t from,
3265		loff_t length, int flags)
3266{
3267	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3268	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3269	unsigned int blocksize, max, pos;
3270	ext4_lblk_t iblock;
3271	struct buffer_head *bh;
3272	int err = 0;
3273
3274	blocksize = inode->i_sb->s_blocksize;
3275	max = PAGE_CACHE_SIZE - offset;
3276
3277	if (index != page->index)
3278		return -EINVAL;
3279
3280	/*
3281	 * correct length if it does not fall between
3282	 * 'from' and the end of the page
3283	 */
3284	if (length > max || length < 0)
3285		length = max;
3286
3287	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3288
3289	if (!page_has_buffers(page))
3290		create_empty_buffers(page, blocksize, 0);
3291
3292	/* Find the buffer that contains "offset" */
3293	bh = page_buffers(page);
3294	pos = blocksize;
3295	while (offset >= pos) {
3296		bh = bh->b_this_page;
3297		iblock++;
3298		pos += blocksize;
3299	}
3300
3301	pos = offset;
3302	while (pos < offset + length) {
3303		unsigned int end_of_block, range_to_discard;
3304
3305		err = 0;
3306
3307		/* The length of space left to zero and unmap */
3308		range_to_discard = offset + length - pos;
3309
3310		/* The length of space until the end of the block */
3311		end_of_block = blocksize - (pos & (blocksize-1));
3312
3313		/*
3314		 * Do not unmap or zero past end of block
3315		 * for this buffer head
3316		 */
3317		if (range_to_discard > end_of_block)
3318			range_to_discard = end_of_block;
3319
3320
3321		/*
3322		 * Skip this buffer head if we are only zeroing unmapped
3323		 * regions of the page
3324		 */
3325		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3326			buffer_mapped(bh))
3327				goto next;
3328
3329		/* If the range is block aligned, unmap */
3330		if (range_to_discard == blocksize) {
3331			clear_buffer_dirty(bh);
3332			bh->b_bdev = NULL;
3333			clear_buffer_mapped(bh);
3334			clear_buffer_req(bh);
3335			clear_buffer_new(bh);
3336			clear_buffer_delay(bh);
3337			clear_buffer_unwritten(bh);
3338			clear_buffer_uptodate(bh);
3339			zero_user(page, pos, range_to_discard);
3340			BUFFER_TRACE(bh, "Buffer discarded");
3341			goto next;
3342		}
3343
3344		/*
3345		 * If this block is not completely contained in the range
3346		 * to be discarded, then it is not going to be released. Because
3347		 * we need to keep this block, we need to make sure this part
3348		 * of the page is uptodate before we modify it by writing
3349		 * partial zeros on it.
3350		 */
3351		if (!buffer_mapped(bh)) {
3352			/*
3353			 * Buffer head must be mapped before we can read
3354			 * from the block
3355			 */
3356			BUFFER_TRACE(bh, "unmapped");
3357			ext4_get_block(inode, iblock, bh, 0);
3358			/* unmapped? It's a hole - nothing to do */
3359			if (!buffer_mapped(bh)) {
3360				BUFFER_TRACE(bh, "still unmapped");
3361				goto next;
3362			}
3363		}
3364
3365		/* Ok, it's mapped. Make sure it's up-to-date */
3366		if (PageUptodate(page))
3367			set_buffer_uptodate(bh);
3368
3369		if (!buffer_uptodate(bh)) {
3370			err = -EIO;
3371			ll_rw_block(READ, 1, &bh);
3372			wait_on_buffer(bh);
3373			/* Uhhuh. Read error. Complain and punt.*/
3374			if (!buffer_uptodate(bh))
3375				goto next;
3376		}
3377
3378		if (ext4_should_journal_data(inode)) {
3379			BUFFER_TRACE(bh, "get write access");
3380			err = ext4_journal_get_write_access(handle, bh);
3381			if (err)
3382				goto next;
3383		}
3384
3385		zero_user(page, pos, range_to_discard);
3386
3387		err = 0;
3388		if (ext4_should_journal_data(inode)) {
3389			err = ext4_handle_dirty_metadata(handle, inode, bh);
3390		} else
3391			mark_buffer_dirty(bh);
3392
3393		BUFFER_TRACE(bh, "Partial buffer zeroed");
3394next:
3395		bh = bh->b_this_page;
3396		iblock++;
3397		pos += range_to_discard;
3398	}
3399
3400	return err;
3401}
3402
3403int ext4_can_truncate(struct inode *inode)
3404{
3405	if (S_ISREG(inode->i_mode))
3406		return 1;
3407	if (S_ISDIR(inode->i_mode))
3408		return 1;
3409	if (S_ISLNK(inode->i_mode))
3410		return !ext4_inode_is_fast_symlink(inode);
3411	return 0;
3412}
3413
3414/*
3415 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3416 * associated with the given offset and length
3417 *
3418 * @inode:  File inode
3419 * @offset: The offset where the hole will begin
3420 * @len:    The length of the hole
3421 *
3422 * Returns: 0 on success or negative on failure
3423 */
3424
3425int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3426{
3427	struct inode *inode = file->f_path.dentry->d_inode;
3428	if (!S_ISREG(inode->i_mode))
3429		return -EOPNOTSUPP;
3430
3431	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3432		/* TODO: Add support for non extent hole punching */
3433		return -EOPNOTSUPP;
3434	}
3435
3436	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3437		/* TODO: Add support for bigalloc file systems */
3438		return -EOPNOTSUPP;
3439	}
3440
3441	return ext4_ext_punch_hole(file, offset, length);
3442}
3443
3444/*
3445 * ext4_truncate()
3446 *
3447 * We block out ext4_get_block() block instantiations across the entire
3448 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3449 * simultaneously on behalf of the same inode.
3450 *
3451 * As we work through the truncate and commit bits of it to the journal there
3452 * is one core, guiding principle: the file's tree must always be consistent on
3453 * disk.  We must be able to restart the truncate after a crash.
3454 *
3455 * The file's tree may be transiently inconsistent in memory (although it
3456 * probably isn't), but whenever we close off and commit a journal transaction,
3457 * the contents of (the filesystem + the journal) must be consistent and
3458 * restartable.  It's pretty simple, really: bottom up, right to left (although
3459 * left-to-right works OK too).
3460 *
3461 * Note that at recovery time, journal replay occurs *before* the restart of
3462 * truncate against the orphan inode list.
3463 *
3464 * The committed inode has the new, desired i_size (which is the same as
3465 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3466 * that this inode's truncate did not complete and it will again call
3467 * ext4_truncate() to have another go.  So there will be instantiated blocks
3468 * to the right of the truncation point in a crashed ext4 filesystem.  But
3469 * that's fine - as long as they are linked from the inode, the post-crash
3470 * ext4_truncate() run will find them and release them.
3471 */
3472void ext4_truncate(struct inode *inode)
3473{
3474	trace_ext4_truncate_enter(inode);
3475
3476	if (!ext4_can_truncate(inode))
3477		return;
3478
3479	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3480
3481	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3482		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3483
3484	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3485		ext4_ext_truncate(inode);
3486	else
3487		ext4_ind_truncate(inode);
3488
3489	trace_ext4_truncate_exit(inode);
3490}
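/*
 * Hedged illustration (added commentary, not kernel source) of the
 * crash-safe ordering the comment above relies on, abbreviated from the
 * shrinking-size path in ext4_setattr() later in this file:
 *
 *	handle = ext4_journal_start(inode, 3);
 *	ext4_orphan_add(handle, inode);            (survive a mid-truncate crash)
 *	EXT4_I(inode)->i_disksize = attr->ia_size; (the committed size)
 *	ext4_mark_inode_dirty(handle, inode);
 *	ext4_journal_stop(handle);
 *	ext4_truncate(inode);                      (may span many transactions)
 *	ext4_orphan_del(NULL, inode);              (done; leave the orphan list)
 */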
3491
3492/*
3493 * ext4_get_inode_loc returns with an extra refcount against the inode's
3494 * underlying buffer_head on success. If 'in_mem' is true, we have all
3495 * data in memory that is needed to recreate the on-disk version of this
3496 * inode.
3497 */
3498static int __ext4_get_inode_loc(struct inode *inode,
3499				struct ext4_iloc *iloc, int in_mem)
3500{
3501	struct ext4_group_desc	*gdp;
3502	struct buffer_head	*bh;
3503	struct super_block	*sb = inode->i_sb;
3504	ext4_fsblk_t		block;
3505	int			inodes_per_block, inode_offset;
3506
3507	iloc->bh = NULL;
3508	if (!ext4_valid_inum(sb, inode->i_ino))
3509		return -EIO;
3510
3511	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3512	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3513	if (!gdp)
3514		return -EIO;
3515
3516	/*
3517	 * Figure out the offset within the block group inode table
3518	 */
3519	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3520	inode_offset = ((inode->i_ino - 1) %
3521			EXT4_INODES_PER_GROUP(sb));
3522	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3523	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3524
3525	bh = sb_getblk(sb, block);
3526	if (!bh) {
3527		EXT4_ERROR_INODE_BLOCK(inode, block,
3528				       "unable to read itable block");
3529		return -EIO;
3530	}
3531	if (!buffer_uptodate(bh)) {
3532		lock_buffer(bh);
3533
3534		/*
3535		 * If the buffer has the write error flag, we have failed
3536		 * to write out another inode in the same block.  In this
3537		 * case, we don't have to read the block because we may
3538		 * read the old inode data successfully.
3539		 */
3540		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3541			set_buffer_uptodate(bh);
3542
3543		if (buffer_uptodate(bh)) {
3544			/* someone brought it uptodate while we waited */
3545			unlock_buffer(bh);
3546			goto has_buffer;
3547		}
3548
3549		/*
3550		 * If we have all information of the inode in memory and this
3551		 * is the only valid inode in the block, we need not read the
3552		 * block.
3553		 */
3554		if (in_mem) {
3555			struct buffer_head *bitmap_bh;
3556			int i, start;
3557
3558			start = inode_offset & ~(inodes_per_block - 1);
3559
3560			/* Is the inode bitmap in cache? */
3561			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3562			if (!bitmap_bh)
3563				goto make_io;
3564
3565			/*
3566			 * If the inode bitmap isn't in cache then the
3567			 * optimisation may end up performing two reads instead
3568			 * of one, so skip it.
3569			 */
3570			if (!buffer_uptodate(bitmap_bh)) {
3571				brelse(bitmap_bh);
3572				goto make_io;
3573			}
3574			for (i = start; i < start + inodes_per_block; i++) {
3575				if (i == inode_offset)
3576					continue;
3577				if (ext4_test_bit(i, bitmap_bh->b_data))
3578					break;
3579			}
3580			brelse(bitmap_bh);
3581			if (i == start + inodes_per_block) {
3582				/* all other inodes are free, so skip I/O */
3583				memset(bh->b_data, 0, bh->b_size);
3584				set_buffer_uptodate(bh);
3585				unlock_buffer(bh);
3586				goto has_buffer;
3587			}
3588		}
3589
3590make_io:
3591		/*
3592		 * If we need to do any I/O, try to pre-readahead extra
3593		 * blocks from the inode table.
3594		 */
3595		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3596			ext4_fsblk_t b, end, table;
3597			unsigned num;
3598
3599			table = ext4_inode_table(sb, gdp);
3600			/* s_inode_readahead_blks is always a power of 2 */
3601			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3602			if (table > b)
3603				b = table;
3604			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3605			num = EXT4_INODES_PER_GROUP(sb);
3606			if (ext4_has_group_desc_csum(sb))
3607				num -= ext4_itable_unused_count(sb, gdp);
3608			table += num / inodes_per_block;
3609			if (end > table)
3610				end = table;
3611			while (b <= end)
3612				sb_breadahead(sb, b++);
3613		}
3614
3615		/*
3616		 * There are other valid inodes in the buffer, this inode
3617		 * has in-inode xattrs, or we don't have this inode in memory.
3618		 * Read the block from disk.
3619		 */
3620		trace_ext4_load_inode(inode);
3621		get_bh(bh);
3622		bh->b_end_io = end_buffer_read_sync;
3623		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3624		wait_on_buffer(bh);
3625		if (!buffer_uptodate(bh)) {
3626			EXT4_ERROR_INODE_BLOCK(inode, block,
3627					       "unable to read itable block");
3628			brelse(bh);
3629			return -EIO;
3630		}
3631	}
3632has_buffer:
3633	iloc->bh = bh;
3634	return 0;
3635}
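/*
 * Worked example with illustrative numbers (added commentary): 4k
 * blocks with 256-byte inodes give inodes_per_block = 16.  With 8192
 * inodes per group, inode #10 has inode_offset = (10 - 1) % 8192 = 9,
 * so it lives in itable block "table + 9 / 16" (the first one) at byte
 * offset (9 % 16) * 256 = 2304.
 */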
3636
3637int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3638{
3639	/* We have all inode data except xattrs in memory here. */
3640	return __ext4_get_inode_loc(inode, iloc,
3641		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3642}
3643
3644void ext4_set_inode_flags(struct inode *inode)
3645{
3646	unsigned int flags = EXT4_I(inode)->i_flags;
3647
3648	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3649	if (flags & EXT4_SYNC_FL)
3650		inode->i_flags |= S_SYNC;
3651	if (flags & EXT4_APPEND_FL)
3652		inode->i_flags |= S_APPEND;
3653	if (flags & EXT4_IMMUTABLE_FL)
3654		inode->i_flags |= S_IMMUTABLE;
3655	if (flags & EXT4_NOATIME_FL)
3656		inode->i_flags |= S_NOATIME;
3657	if (flags & EXT4_DIRSYNC_FL)
3658		inode->i_flags |= S_DIRSYNC;
3659}
3660
3661/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3662void ext4_get_inode_flags(struct ext4_inode_info *ei)
3663{
3664	unsigned int vfs_fl;
3665	unsigned long old_fl, new_fl;
3666
3667	do {
3668		vfs_fl = ei->vfs_inode.i_flags;
3669		old_fl = ei->i_flags;
3670		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3671				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3672				EXT4_DIRSYNC_FL);
3673		if (vfs_fl & S_SYNC)
3674			new_fl |= EXT4_SYNC_FL;
3675		if (vfs_fl & S_APPEND)
3676			new_fl |= EXT4_APPEND_FL;
3677		if (vfs_fl & S_IMMUTABLE)
3678			new_fl |= EXT4_IMMUTABLE_FL;
3679		if (vfs_fl & S_NOATIME)
3680			new_fl |= EXT4_NOATIME_FL;
3681		if (vfs_fl & S_DIRSYNC)
3682			new_fl |= EXT4_DIRSYNC_FL;
3683	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3684}
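/*
 * Added note: the loop above is a classic lock-free read-modify-write.
 * Reduced to its skeleton (illustrative only), it recomputes the new
 * value from a snapshot and retries until no concurrent writer changed
 * i_flags between the snapshot and the swap:
 *
 *	do {
 *		old = ei->i_flags;
 *		new = recompute(old);
 *	} while (cmpxchg(&ei->i_flags, old, new) != old);
 */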
3685
3686static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3687				  struct ext4_inode_info *ei)
3688{
3689	blkcnt_t i_blocks;
3690	struct inode *inode = &(ei->vfs_inode);
3691	struct super_block *sb = inode->i_sb;
3692
3693	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3694				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3695		/* we are using combined 48 bit field */
3696		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3697					le32_to_cpu(raw_inode->i_blocks_lo);
3698		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3699			/* i_blocks represent file system block size */
3700			return i_blocks  << (inode->i_blkbits - 9);
3701		} else {
3702			return i_blocks;
3703		}
3704	} else {
3705		return le32_to_cpu(raw_inode->i_blocks_lo);
3706	}
3707}
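/*
 * Worked example with illustrative values (added commentary):
 * i_blocks_high = 0x0001 and i_blocks_lo = 0 combine to a 48-bit count
 * of 1 << 32.  Without EXT4_INODE_HUGE_FILE that is 512-byte units;
 * with the flag set on a 4k-block filesystem (i_blkbits = 12) it
 * counts filesystem blocks, scaled above by << (12 - 9) = << 3.
 */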
3708
3709struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3710{
3711	struct ext4_iloc iloc;
3712	struct ext4_inode *raw_inode;
3713	struct ext4_inode_info *ei;
3714	struct inode *inode;
3715	journal_t *journal = EXT4_SB(sb)->s_journal;
3716	long ret;
3717	int block;
3718	uid_t i_uid;
3719	gid_t i_gid;
3720
3721	inode = iget_locked(sb, ino);
3722	if (!inode)
3723		return ERR_PTR(-ENOMEM);
3724	if (!(inode->i_state & I_NEW))
3725		return inode;
3726
3727	ei = EXT4_I(inode);
3728	iloc.bh = NULL;
3729
3730	ret = __ext4_get_inode_loc(inode, &iloc, 0);
3731	if (ret < 0)
3732		goto bad_inode;
3733	raw_inode = ext4_raw_inode(&iloc);
3734
3735	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3736		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3737		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3738		    EXT4_INODE_SIZE(inode->i_sb)) {
3739			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3740				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3741				EXT4_INODE_SIZE(inode->i_sb));
3742			ret = -EIO;
3743			goto bad_inode;
3744		}
3745	} else
3746		ei->i_extra_isize = 0;
3747
3748	/* Precompute checksum seed for inode metadata */
3749	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3750			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3751		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3752		__u32 csum;
3753		__le32 inum = cpu_to_le32(inode->i_ino);
3754		__le32 gen = raw_inode->i_generation;
3755		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3756				   sizeof(inum));
3757		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3758					      sizeof(gen));
3759	}
3760
3761	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3762		EXT4_ERROR_INODE(inode, "checksum invalid");
3763		ret = -EIO;
3764		goto bad_inode;
3765	}
3766
3767	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3768	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3769	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3770	if (!(test_opt(inode->i_sb, NO_UID32))) {
3771		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3772		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3773	}
3774	i_uid_write(inode, i_uid);
3775	i_gid_write(inode, i_gid);
3776	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3777
3778	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
3779	ei->i_dir_start_lookup = 0;
3780	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3781	/* We now have enough fields to check if the inode was active or not.
3782	 * This is needed because nfsd might try to access dead inodes;
3783	 * the test is the same one that e2fsck uses.
3784	 * NeilBrown 1999oct15
3785	 */
3786	if (inode->i_nlink == 0) {
3787		if (inode->i_mode == 0 ||
3788		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3789			/* this inode is deleted */
3790			ret = -ESTALE;
3791			goto bad_inode;
3792		}
3793		/* The only unlinked inodes we let through here have
3794		 * valid i_mode and are being read by the orphan
3795		 * recovery code: that's fine, we're about to complete
3796		 * the process of deleting those. */
3797	}
3798	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3799	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3800	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3801	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3802		ei->i_file_acl |=
3803			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3804	inode->i_size = ext4_isize(raw_inode);
3805	ei->i_disksize = inode->i_size;
3806#ifdef CONFIG_QUOTA
3807	ei->i_reserved_quota = 0;
3808#endif
3809	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3810	ei->i_block_group = iloc.block_group;
3811	ei->i_last_alloc_group = ~0;
3812	/*
3813	 * NOTE! The in-memory inode i_data array is in little-endian order
3814	 * even on big-endian machines: we do NOT byteswap the block numbers!
3815	 */
3816	for (block = 0; block < EXT4_N_BLOCKS; block++)
3817		ei->i_data[block] = raw_inode->i_block[block];
3818	INIT_LIST_HEAD(&ei->i_orphan);
3819
3820	/*
3821	 * Set transaction id's of transactions that have to be committed
3822	 * to finish f[data]sync. We set them to the currently running transaction
3823	 * as we cannot be sure that the inode or some of its metadata isn't
3824	 * part of the transaction - the inode could have been reclaimed and
3825	 * now it is reread from disk.
3826	 */
3827	if (journal) {
3828		transaction_t *transaction;
3829		tid_t tid;
3830
3831		read_lock(&journal->j_state_lock);
3832		if (journal->j_running_transaction)
3833			transaction = journal->j_running_transaction;
3834		else
3835			transaction = journal->j_committing_transaction;
3836		if (transaction)
3837			tid = transaction->t_tid;
3838		else
3839			tid = journal->j_commit_sequence;
3840		read_unlock(&journal->j_state_lock);
3841		ei->i_sync_tid = tid;
3842		ei->i_datasync_tid = tid;
3843	}
3844
3845	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3846		if (ei->i_extra_isize == 0) {
3847			/* The extra space is currently unused. Use it. */
3848			ei->i_extra_isize = sizeof(struct ext4_inode) -
3849					    EXT4_GOOD_OLD_INODE_SIZE;
3850		} else {
3851			__le32 *magic = (void *)raw_inode +
3852					EXT4_GOOD_OLD_INODE_SIZE +
3853					ei->i_extra_isize;
3854			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3855				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3856		}
3857	}
3858
3859	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3860	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3861	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3862	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3863
3864	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3865	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3866		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3867			inode->i_version |=
3868			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3869	}
3870
3871	ret = 0;
3872	if (ei->i_file_acl &&
3873	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3874		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3875				 ei->i_file_acl);
3876		ret = -EIO;
3877		goto bad_inode;
3878	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3879		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3880		    (S_ISLNK(inode->i_mode) &&
3881		     !ext4_inode_is_fast_symlink(inode)))
3882			/* Validate extent which is part of inode */
3883			ret = ext4_ext_check_inode(inode);
3884	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3885		   (S_ISLNK(inode->i_mode) &&
3886		    !ext4_inode_is_fast_symlink(inode))) {
3887		/* Validate block references which are part of inode */
3888		ret = ext4_ind_check_inode(inode);
3889	}
3890	if (ret)
3891		goto bad_inode;
3892
3893	if (S_ISREG(inode->i_mode)) {
3894		inode->i_op = &ext4_file_inode_operations;
3895		inode->i_fop = &ext4_file_operations;
3896		ext4_set_aops(inode);
3897	} else if (S_ISDIR(inode->i_mode)) {
3898		inode->i_op = &ext4_dir_inode_operations;
3899		inode->i_fop = &ext4_dir_operations;
3900	} else if (S_ISLNK(inode->i_mode)) {
3901		if (ext4_inode_is_fast_symlink(inode)) {
3902			inode->i_op = &ext4_fast_symlink_inode_operations;
3903			nd_terminate_link(ei->i_data, inode->i_size,
3904				sizeof(ei->i_data) - 1);
3905		} else {
3906			inode->i_op = &ext4_symlink_inode_operations;
3907			ext4_set_aops(inode);
3908		}
3909	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3910	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3911		inode->i_op = &ext4_special_inode_operations;
3912		if (raw_inode->i_block[0])
3913			init_special_inode(inode, inode->i_mode,
3914			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3915		else
3916			init_special_inode(inode, inode->i_mode,
3917			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3918	} else {
3919		ret = -EIO;
3920		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3921		goto bad_inode;
3922	}
3923	brelse(iloc.bh);
3924	ext4_set_inode_flags(inode);
3925	unlock_new_inode(inode);
3926	return inode;
3927
3928bad_inode:
3929	brelse(iloc.bh);
3930	iget_failed(inode);
3931	return ERR_PTR(ret);
3932}
3933
3934static int ext4_inode_blocks_set(handle_t *handle,
3935				struct ext4_inode *raw_inode,
3936				struct ext4_inode_info *ei)
3937{
3938	struct inode *inode = &(ei->vfs_inode);
3939	u64 i_blocks = inode->i_blocks;
3940	struct super_block *sb = inode->i_sb;
3941
3942	if (i_blocks <= ~0U) {
3943		/*
3944		 * i_blocks can be represented in a 32 bit variable
3945		 * as multiple of 512 bytes
3946		 */
3947		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3948		raw_inode->i_blocks_high = 0;
3949		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3950		return 0;
3951	}
3952	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3953		return -EFBIG;
3954
3955	if (i_blocks <= 0xffffffffffffULL) {
3956		/*
3957		 * i_blocks can be represented in a 48 bit variable
3958		 * as multiple of 512 bytes
3959		 */
3960		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3961		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3962		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3963	} else {
3964		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3965		/* i_block is stored in file system block size */
3966		i_blocks = i_blocks >> (inode->i_blkbits - 9);
3967		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3968		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3969	}
3970	return 0;
3971}
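/*
 * Added summary of the thresholds above: counts up to 0xffffffff
 * 512-byte units (~2 TiB) fit in i_blocks_lo alone; up to
 * 0xffffffffffff they need the 48-bit lo/high pair plus the huge_file
 * feature; beyond that the count is stored in filesystem-block units
 * and EXT4_INODE_HUGE_FILE is set on the inode.
 */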
3972
3973/*
3974 * Post the struct inode info into an on-disk inode location in the
3975 * buffer-cache.  This gobbles the caller's reference to the
3976 * buffer_head in the inode location struct.
3977 *
3978 * The caller must have write access to iloc->bh.
3979 */
3980static int ext4_do_update_inode(handle_t *handle,
3981				struct inode *inode,
3982				struct ext4_iloc *iloc)
3983{
3984	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3985	struct ext4_inode_info *ei = EXT4_I(inode);
3986	struct buffer_head *bh = iloc->bh;
3987	int err = 0, rc, block;
3988	uid_t i_uid;
3989	gid_t i_gid;
3990
3991	/* For fields not tracked in the in-memory inode,
3992	 * initialise them to zero for new inodes. */
3993	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3994		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3995
3996	ext4_get_inode_flags(ei);
3997	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3998	i_uid = i_uid_read(inode);
3999	i_gid = i_gid_read(inode);
4000	if (!(test_opt(inode->i_sb, NO_UID32))) {
4001		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4002		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4003/*
4004 * Fix up interoperability with old kernels. Otherwise, old inodes get
4005 * re-used with the upper 16 bits of the uid/gid intact
4006 */
4007		if (!ei->i_dtime) {
4008			raw_inode->i_uid_high =
4009				cpu_to_le16(high_16_bits(i_uid));
4010			raw_inode->i_gid_high =
4011				cpu_to_le16(high_16_bits(i_gid));
4012		} else {
4013			raw_inode->i_uid_high = 0;
4014			raw_inode->i_gid_high = 0;
4015		}
4016	} else {
4017		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4018		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4019		raw_inode->i_uid_high = 0;
4020		raw_inode->i_gid_high = 0;
4021	}
4022	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4023
4024	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4025	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4026	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4027	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4028
4029	if (ext4_inode_blocks_set(handle, raw_inode, ei))
4030		goto out_brelse;
4031	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4032	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4033	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4034	    cpu_to_le32(EXT4_OS_HURD))
4035		raw_inode->i_file_acl_high =
4036			cpu_to_le16(ei->i_file_acl >> 32);
4037	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4038	ext4_isize_set(raw_inode, ei->i_disksize);
4039	if (ei->i_disksize > 0x7fffffffULL) {
4040		struct super_block *sb = inode->i_sb;
4041		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4042				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4043				EXT4_SB(sb)->s_es->s_rev_level ==
4044				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4045			/* If this is the first large file
4046			 * created, add a flag to the superblock.
4047			 */
4048			err = ext4_journal_get_write_access(handle,
4049					EXT4_SB(sb)->s_sbh);
4050			if (err)
4051				goto out_brelse;
4052			ext4_update_dynamic_rev(sb);
4053			EXT4_SET_RO_COMPAT_FEATURE(sb,
4054					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4055			ext4_handle_sync(handle);
4056			err = ext4_handle_dirty_super_now(handle, sb);
4057		}
4058	}
4059	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4060	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4061		if (old_valid_dev(inode->i_rdev)) {
4062			raw_inode->i_block[0] =
4063				cpu_to_le32(old_encode_dev(inode->i_rdev));
4064			raw_inode->i_block[1] = 0;
4065		} else {
4066			raw_inode->i_block[0] = 0;
4067			raw_inode->i_block[1] =
4068				cpu_to_le32(new_encode_dev(inode->i_rdev));
4069			raw_inode->i_block[2] = 0;
4070		}
4071	} else
4072		for (block = 0; block < EXT4_N_BLOCKS; block++)
4073			raw_inode->i_block[block] = ei->i_data[block];
4074
4075	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4076	if (ei->i_extra_isize) {
4077		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4078			raw_inode->i_version_hi =
4079			cpu_to_le32(inode->i_version >> 32);
4080		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4081	}
4082
4083	ext4_inode_csum_set(inode, raw_inode, ei);
4084
4085	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4086	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4087	if (!err)
4088		err = rc;
4089	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4090
4091	ext4_update_inode_fsync_trans(handle, inode, 0);
4092out_brelse:
4093	brelse(bh);
4094	ext4_std_error(inode->i_sb, err);
4095	return err;
4096}
4097
4098/*
4099 * ext4_write_inode()
4100 *
4101 * We are called from a few places:
4102 *
4103 * - Within generic_file_write() for O_SYNC files.
4104 *   Here, there will be no transaction running. We wait for any running
4105 *   transaction to commit.
4106 *
4107 * - Within sys_sync(), kupdate and such.
4108 *   We wait on commit, if told to.
4109 *
4110 * - Within prune_icache() (PF_MEMALLOC == true)
4111 *   Here we simply return.  We can't afford to block kswapd on the
4112 *   journal commit.
4113 *
4114 * In all cases it is actually safe for us to return without doing anything,
4115 * because the inode has been copied into a raw inode buffer in
4116 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4117 * knfsd.
4118 *
4119 * Note that we are absolutely dependent upon all inode dirtiers doing the
4120 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4121 * which we are interested.
4122 *
4123 * It would be a bug for them to not do this.  The code:
4124 *
4125 *	mark_inode_dirty(inode)
4126 *	stuff();
4127 *	inode->i_size = expr;
4128 *
4129 * is in error because a kswapd-driven write_inode() could occur while
4130 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4131 * will no longer be on the superblock's dirty inode list.
4132 */
4133int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4134{
4135	int err;
4136
4137	if (current->flags & PF_MEMALLOC)
4138		return 0;
4139
4140	if (EXT4_SB(inode->i_sb)->s_journal) {
4141		if (ext4_journal_current_handle()) {
4142			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4143			dump_stack();
4144			return -EIO;
4145		}
4146
4147		if (wbc->sync_mode != WB_SYNC_ALL)
4148			return 0;
4149
4150		err = ext4_force_commit(inode->i_sb);
4151	} else {
4152		struct ext4_iloc iloc;
4153
4154		err = __ext4_get_inode_loc(inode, &iloc, 0);
4155		if (err)
4156			return err;
4157		if (wbc->sync_mode == WB_SYNC_ALL)
4158			sync_dirty_buffer(iloc.bh);
4159		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4160			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4161					 "IO error syncing inode");
4162			err = -EIO;
4163		}
4164		brelse(iloc.bh);
4165	}
4166	return err;
4167}
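/*
 * For contrast with the buggy fragment in the comment above, the
 * correct pattern (illustrative) publishes every change before the
 * final mark:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);
 */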
4168
4169/*
4170 * ext4_setattr()
4171 *
4172 * Called from notify_change.
4173 *
4174 * We want to trap VFS attempts to truncate the file as soon as
4175 * possible.  In particular, we want to make sure that when the VFS
4176 * shrinks i_size, we put the inode on the orphan list and modify
4177 * i_disksize immediately, so that during the subsequent flushing of
4178 * dirty pages and freeing of disk blocks, we can guarantee that any
4179 * commit will leave the blocks being flushed in an unused state on
4180 * disk.  (On recovery, the inode will get truncated and the blocks will
4181 * be freed, so we have a strong guarantee that no future commit will
4182 * leave these blocks visible to the user.)
4183 *
4184 * Another thing we have to assure is that if we are in ordered mode
4185 * and the inode is still attached to the committing transaction, we must
4186 * start writeout of all the dirty pages which are being truncated.
4187 * This way we are sure that all the data written in the previous
4188 * transaction are already on disk (truncate waits for pages under
4189 * writeback).
4190 *
4191 * Called with inode->i_mutex down.
4192 */
4193int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4194{
4195	struct inode *inode = dentry->d_inode;
4196	int error, rc = 0;
4197	int orphan = 0;
4198	const unsigned int ia_valid = attr->ia_valid;
4199
4200	error = inode_change_ok(inode, attr);
4201	if (error)
4202		return error;
4203
4204	if (is_quota_modification(inode, attr))
4205		dquot_initialize(inode);
4206	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4207	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4208		handle_t *handle;
4209
4210		/* (user+group)*(old+new) structure, inode write (sb,
4211		 * inode block, ? - but truncate inode update has it) */
4212		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4213					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4214		if (IS_ERR(handle)) {
4215			error = PTR_ERR(handle);
4216			goto err_out;
4217		}
4218		error = dquot_transfer(inode, attr);
4219		if (error) {
4220			ext4_journal_stop(handle);
4221			return error;
4222		}
4223		/* Update corresponding info in inode so that everything is in
4224		 * one transaction */
4225		if (attr->ia_valid & ATTR_UID)
4226			inode->i_uid = attr->ia_uid;
4227		if (attr->ia_valid & ATTR_GID)
4228			inode->i_gid = attr->ia_gid;
4229		error = ext4_mark_inode_dirty(handle, inode);
4230		ext4_journal_stop(handle);
4231	}
4232
4233	if (attr->ia_valid & ATTR_SIZE) {
4234		inode_dio_wait(inode);
4235
4236		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4237			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4238
4239			if (attr->ia_size > sbi->s_bitmap_maxbytes)
4240				return -EFBIG;
4241		}
4242	}
4243
4244	if (S_ISREG(inode->i_mode) &&
4245	    attr->ia_valid & ATTR_SIZE &&
4246	    (attr->ia_size < inode->i_size)) {
4247		handle_t *handle;
4248
4249		handle = ext4_journal_start(inode, 3);
4250		if (IS_ERR(handle)) {
4251			error = PTR_ERR(handle);
4252			goto err_out;
4253		}
4254		if (ext4_handle_valid(handle)) {
4255			error = ext4_orphan_add(handle, inode);
4256			orphan = 1;
4257		}
4258		EXT4_I(inode)->i_disksize = attr->ia_size;
4259		rc = ext4_mark_inode_dirty(handle, inode);
4260		if (!error)
4261			error = rc;
4262		ext4_journal_stop(handle);
4263
4264		if (ext4_should_order_data(inode)) {
4265			error = ext4_begin_ordered_truncate(inode,
4266							    attr->ia_size);
4267			if (error) {
4268				/* Do as much error cleanup as possible */
4269				handle = ext4_journal_start(inode, 3);
4270				if (IS_ERR(handle)) {
4271					ext4_orphan_del(NULL, inode);
4272					goto err_out;
4273				}
4274				ext4_orphan_del(handle, inode);
4275				orphan = 0;
4276				ext4_journal_stop(handle);
4277				goto err_out;
4278			}
4279		}
4280	}
4281
4282	if (attr->ia_valid & ATTR_SIZE) {
4283		if (attr->ia_size != i_size_read(inode))
4284			truncate_setsize(inode, attr->ia_size);
4285		ext4_truncate(inode);
4286	}
4287
4288	if (!rc) {
4289		setattr_copy(inode, attr);
4290		mark_inode_dirty(inode);
4291	}
4292
4293	/*
4294	 * If the call to ext4_truncate failed to get a transaction handle at
4295	 * all, we need to clean up the in-core orphan list manually.
4296	 */
4297	if (orphan && inode->i_nlink)
4298		ext4_orphan_del(NULL, inode);
4299
4300	if (!rc && (ia_valid & ATTR_MODE))
4301		rc = ext4_acl_chmod(inode);
4302
4303err_out:
4304	ext4_std_error(inode->i_sb, error);
4305	if (!error)
4306		error = rc;
4307	return error;
4308}
4309
4310int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4311		 struct kstat *stat)
4312{
4313	struct inode *inode;
4314	unsigned long delalloc_blocks;
4315
4316	inode = dentry->d_inode;
4317	generic_fillattr(inode, stat);
4318
4319	/*
4320	 * We can't update i_blocks if the block allocation is delayed,
4321	 * otherwise in the case of a system crash before the real block
4322	 * allocation is done, we would have i_blocks inconsistent with
4323	 * on-disk file blocks.
4324	 * We always keep i_blocks updated together with the real
4325	 * allocation.  But to avoid confusing userspace, stat
4326	 * will return the blocks that include the delayed allocation
4327	 * blocks for this file.
4328	 */
4329	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4330				EXT4_I(inode)->i_reserved_data_blocks);
4331
4332	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4333	return 0;
4334}
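/*
 * Worked example (added commentary): on a 4k-block filesystem
 * (s_blocksize_bits = 12), 10 reserved delalloc blocks add
 * (10 << 12) >> 9 = 80 512-byte sectors to stat->blocks, so stat
 * reports the space the file will occupy once the delayed allocation
 * actually happens.
 */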
4335
4336static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4337{
4338	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4339		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4340	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4341}
4342
4343/*
4344 * Account for index blocks, block group bitmaps and block group
4345 * descriptor blocks if we modify data blocks and index blocks.  In the
4346 * worst case, the index blocks are spread over different block groups.
4347 *
4348 * If the data blocks are discontiguous, they may spread over
4349 * different block groups too.  If they are contiguous, with flexbg,
4350 * they could still cross a block group boundary.
4351 *
4352 * Also account for superblock, inode, quota and xattr blocks
4353 */
4354static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4355{
4356	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4357	int gdpblocks;
4358	int idxblocks;
4359	int ret = 0;
4360
4361	/*
4362	 * How many index blocks do we need to touch to modify nrblocks?
4363	 * The "Chunk" flag indicates whether nrblocks is
4364	 * physically contiguous on disk.
4365	 *
4366	 * Direct IO and fallocate call get_block to allocate a
4367	 * single extent at a time, so they can set the "Chunk" flag.
4368	 */
4369	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4370
4371	ret = idxblocks;
4372
4373	/*
4374	 * Now let's see how many group bitmaps and group descriptors need
4375	 * to account
4376	 */
4377	groups = idxblocks;
4378	if (chunk)
4379		groups += 1;
4380	else
4381		groups += nrblocks;
4382
4383	gdpblocks = groups;
4384	if (groups > ngroups)
4385		groups = ngroups;
4386	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4387		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4388
4389	/* bitmaps and block group descriptor blocks */
4390	ret += groups + gdpblocks;
4391
4392	/* Blocks for super block, inode, quota and xattr blocks */
4393	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4394
4395	return ret;
4396}
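/*
 * Worked example (added commentary, illustrative numbers): one
 * contiguous chunk (chunk = 1) needing idxblocks = 2 gives
 * groups = 2 + 1 = 3; if neither ngroups nor s_gdb_count clamps it,
 * gdpblocks = 3 too, so ret = 2 + (3 + 3) + EXT4_META_TRANS_BLOCKS(sb).
 */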
4397
4398/*
4399 * Calculate the total number of credits to reserve to fit
4400 * the modification of a single page into a single transaction,
4401 * which may include multiple chunks of block allocations.
4402 *
4403 * This could be called via ext4_write_begin()
4404 *
4405 * We need to consider the worst case, when
4406 * we allocate one new block per extent.
4407 */
4408int ext4_writepage_trans_blocks(struct inode *inode)
4409{
4410	int bpp = ext4_journal_blocks_per_page(inode);
4411	int ret;
4412
4413	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4414
4415	/* Account for data blocks for journalled mode */
4416	if (ext4_should_journal_data(inode))
4417		ret += bpp;
4418	return ret;
4419}
4420
4421/*
4422 * Calculate the journal credits for a chunk of data modification.
4423 *
4424 * This is called from DIO, fallocate or whoever calling
4425 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4426 *
4427 * journal buffers for data blocks are not included here, as DIO
4428 * and fallocate do not need to journal data buffers.
4429 */
4430int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4431{
4432	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4433}
4434
4435/*
4436 * The caller must have previously called ext4_reserve_inode_write().
4437 * Given this, we know that the caller already has write access to iloc->bh.
4438 */
4439int ext4_mark_iloc_dirty(handle_t *handle,
4440			 struct inode *inode, struct ext4_iloc *iloc)
4441{
4442	int err = 0;
4443
4444	if (IS_I_VERSION(inode))
4445		inode_inc_iversion(inode);
4446
4447	/* the do_update_inode consumes one bh->b_count */
4448	get_bh(iloc->bh);
4449
4450	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4451	err = ext4_do_update_inode(handle, inode, iloc);
4452	put_bh(iloc->bh);
4453	return err;
4454}
4455
4456/*
4457 * On success, we end up with an outstanding reference count against
4458 * iloc->bh.  This _must_ be cleaned up later.
4459 */
4460
4461int
4462ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4463			 struct ext4_iloc *iloc)
4464{
4465	int err;
4466
4467	err = ext4_get_inode_loc(inode, iloc);
4468	if (!err) {
4469		BUFFER_TRACE(iloc->bh, "get_write_access");
4470		err = ext4_journal_get_write_access(handle, iloc->bh);
4471		if (err) {
4472			brelse(iloc->bh);
4473			iloc->bh = NULL;
4474		}
4475	}
4476	ext4_std_error(inode->i_sb, err);
4477	return err;
4478}
4479
4480/*
4481 * Expand an inode by new_extra_isize bytes.
4482 * Returns 0 on success or negative error number on failure.
4483 */
4484static int ext4_expand_extra_isize(struct inode *inode,
4485				   unsigned int new_extra_isize,
4486				   struct ext4_iloc iloc,
4487				   handle_t *handle)
4488{
4489	struct ext4_inode *raw_inode;
4490	struct ext4_xattr_ibody_header *header;
4491
4492	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4493		return 0;
4494
4495	raw_inode = ext4_raw_inode(&iloc);
4496
4497	header = IHDR(inode, raw_inode);
4498
4499	/* No extended attributes present */
4500	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4501	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4502		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4503			new_extra_isize);
4504		EXT4_I(inode)->i_extra_isize = new_extra_isize;
4505		return 0;
4506	}
4507
4508	/* try to expand with EAs present */
4509	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4510					  raw_inode, handle);
4511}
4512
4513/*
4514 * What we do here is to mark the in-core inode as clean with respect to inode
4515 * dirtiness (it may still be data-dirty).
4516 * This means that the in-core inode may be reaped by prune_icache
4517 * without having to perform any I/O.  This is a very good thing,
4518 * because *any* task may call prune_icache - even ones which
4519 * have a transaction open against a different journal.
4520 *
4521 * Is this cheating?  Not really.  Sure, we haven't written the
4522 * inode out, but prune_icache isn't a user-visible syncing function.
4523 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4524 * we start and wait on commits.
4525 *
4526 * Is this efficient/effective?  Well, we're being nice to the system
4527 * by cleaning up our inodes proactively so they can be reaped
4528 * without I/O.  But we are potentially leaving up to five seconds'
4529 * worth of inodes floating about which prune_icache wants us to
4530 * write out.  One way to fix that would be to get prune_icache()
4531 * to do a write_super() to free up some memory.  It has the desired
4532 * effect.
4533 */
4534int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4535{
4536	struct ext4_iloc iloc;
4537	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4538	static unsigned int mnt_count;
4539	int err, ret;
4540
4541	might_sleep();
4542	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4543	err = ext4_reserve_inode_write(handle, inode, &iloc);
4544	if (ext4_handle_valid(handle) &&
4545	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4546	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4547		/*
4548		 * We need extra buffer credits since we may write into EA block
4549		 * with this same handle. If journal_extend fails, then it will
4550		 * only result in a minor loss of functionality for that inode.
4551		 * If this is felt to be critical, then e2fsck should be run to
4552		 * force a large enough s_min_extra_isize.
4553		 */
4554		if ((jbd2_journal_extend(handle,
4555			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4556			ret = ext4_expand_extra_isize(inode,
4557						      sbi->s_want_extra_isize,
4558						      iloc, handle);
4559			if (ret) {
4560				ext4_set_inode_state(inode,
4561						     EXT4_STATE_NO_EXPAND);
4562				if (mnt_count !=
4563					le16_to_cpu(sbi->s_es->s_mnt_count)) {
4564					ext4_warning(inode->i_sb,
4565					"Unable to expand inode %lu. Delete"
4566					" some EAs or run e2fsck.",
4567					inode->i_ino);
4568					mnt_count =
4569					  le16_to_cpu(sbi->s_es->s_mnt_count);
4570				}
4571			}
4572		}
4573	}
4574	if (!err)
4575		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4576	return err;
4577}
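/*
 * Hedged usage sketch, added for illustration (not part of the original
 * file): the canonical pattern for updating inode fields under a
 * transaction.  "example_touch_ctime" is a hypothetical name; a single
 * credit covers the inode block itself.
 */
static int example_touch_ctime(struct inode *inode)
{
	handle_t *handle;
	int err;

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	inode->i_ctime = ext4_current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}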
4578
4579/*
4580 * ext4_dirty_inode() is called from __mark_inode_dirty()
4581 *
4582 * We're really interested in the case where a file is being extended.
4583 * i_size has been changed by generic_commit_write() and we thus need
4584 * to include the updated inode in the current transaction.
4585 *
4586 * Also, dquot_alloc_block() will always dirty the inode when blocks
4587 * are allocated to the file.
4588 *
4589 * If the inode is marked synchronous, we don't honour that here - doing
4590 * so would cause a commit on atime updates, which we don't bother doing.
4591 * We handle synchronous inodes at the highest possible level.
4592 */
4593void ext4_dirty_inode(struct inode *inode, int flags)
4594{
4595	handle_t *handle;
4596
4597	handle = ext4_journal_start(inode, 2);
4598	if (IS_ERR(handle))
4599		goto out;
4600
4601	ext4_mark_inode_dirty(handle, inode);
4602
4603	ext4_journal_stop(handle);
4604out:
4605	return;
4606}
4607
4608#if 0
4609/*
4610 * Bind an inode's backing buffer_head into this transaction, to prevent
4611 * it from being flushed to disk early.  Unlike
4612 * ext4_reserve_inode_write, this leaves behind no bh reference and
4613 * returns no iloc structure, so the caller needs to repeat the iloc
4614 * lookup to mark the inode dirty later.
4615 */
4616static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4617{
4618	struct ext4_iloc iloc;
4619
4620	int err = 0;
4621	if (handle) {
4622		err = ext4_get_inode_loc(inode, &iloc);
4623		if (!err) {
4624			BUFFER_TRACE(iloc.bh, "get_write_access");
4625			err = jbd2_journal_get_write_access(handle, iloc.bh);
4626			if (!err)
4627				err = ext4_handle_dirty_metadata(handle,
4628								 NULL,
4629								 iloc.bh);
4630			brelse(iloc.bh);
4631		}
4632	}
4633	ext4_std_error(inode->i_sb, err);
4634	return err;
4635}
4636#endif
4637
4638int ext4_change_inode_journal_flag(struct inode *inode, int val)
4639{
4640	journal_t *journal;
4641	handle_t *handle;
4642	int err;
4643
4644	/*
4645	 * We have to be very careful here: changing a data block's
4646	 * journaling status dynamically is dangerous.  If we write a
4647	 * data block to the journal, change the status and then delete
4648	 * that block, we risk forgetting to revoke the old log record
4649	 * from the journal and so a subsequent replay can corrupt data.
4650	 * So, first we make sure that the journal is empty and that
4651	 * nobody is changing anything.
4652	 */
4653
4654	journal = EXT4_JOURNAL(inode);
4655	if (!journal)
4656		return 0;
4657	if (is_journal_aborted(journal))
4658		return -EROFS;
4659	/* We have to allocate physical blocks for delalloc blocks
4660	 * before flushing the journal, otherwise delalloc blocks cannot
4661	 * be allocated any more.  Worse, a truncate on delalloc blocks
4662	 * could trigger a BUG by flushing delalloc blocks in the journal.
4663	 * There are no delalloc blocks in non-journal data mode.
4664	 */
4665	if (val && test_opt(inode->i_sb, DELALLOC)) {
4666		err = ext4_alloc_da_blocks(inode);
4667		if (err < 0)
4668			return err;
4669	}
4670
4671	jbd2_journal_lock_updates(journal);
4672
4673	/*
4674	 * OK, there are no updates running now, and all cached data is
4675	 * synced to disk.  We are now in a completely consistent state
4676	 * which doesn't have anything in the journal, and we know that
4677	 * no filesystem updates are running, so it is safe to modify
4678	 * the inode's in-core data-journaling state flag now.
4679	 */
4680
4681	if (val)
4682		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4683	else {
4684		jbd2_journal_flush(journal);
4685		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4686	}
4687	ext4_set_aops(inode);
4688
4689	jbd2_journal_unlock_updates(journal);
4690
4691	/* Finally we can mark the inode as dirty. */
4692
4693	handle = ext4_journal_start(inode, 1);
4694	if (IS_ERR(handle))
4695		return PTR_ERR(handle);
4696
4697	err = ext4_mark_inode_dirty(handle, inode);
4698	ext4_handle_sync(handle);
4699	ext4_journal_stop(handle);
4700	ext4_std_error(inode->i_sb, err);
4701
4702	return err;
4703}
4704
4705static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4706{
4707	return !buffer_mapped(bh);
4708}
4709
4710int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4711{
4712	struct page *page = vmf->page;
4713	loff_t size;
4714	unsigned long len;
4715	int ret;
4716	struct file *file = vma->vm_file;
4717	struct inode *inode = file->f_path.dentry->d_inode;
4718	struct address_space *mapping = inode->i_mapping;
4719	handle_t *handle;
4720	get_block_t *get_block;
4721	int retries = 0;
4722
4723	/*
4724	 * This check is racy but catches the common case. We rely on
4725	 * __block_page_mkwrite() to do a reliable check.
4726	 */
4727	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
4728	/* Delalloc case is easy... */
4729	if (test_opt(inode->i_sb, DELALLOC) &&
4730	    !ext4_should_journal_data(inode) &&
4731	    !ext4_nonda_switch(inode->i_sb)) {
4732		do {
4733			ret = __block_page_mkwrite(vma, vmf,
4734						   ext4_da_get_block_prep);
4735		} while (ret == -ENOSPC &&
4736		       ext4_should_retry_alloc(inode->i_sb, &retries));
4737		goto out_ret;
4738	}
4739
4740	lock_page(page);
4741	size = i_size_read(inode);
4742	/* Page got truncated from under us? */
4743	if (page->mapping != mapping || page_offset(page) > size) {
4744		unlock_page(page);
4745		ret = VM_FAULT_NOPAGE;
4746		goto out;
4747	}
4748
4749	if (page->index == size >> PAGE_CACHE_SHIFT)
4750		len = size & ~PAGE_CACHE_MASK;
4751	else
4752		len = PAGE_CACHE_SIZE;
4753	/*
4754	 * Return if we have all the buffers mapped. This avoids the need to do
4755	 * journal_start/journal_stop which can block and take a long time
4756	 */
4757	if (page_has_buffers(page)) {
4758		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4759					ext4_bh_unmapped)) {
4760			/* Wait so that we don't change page under IO */
4761			wait_on_page_writeback(page);
4762			ret = VM_FAULT_LOCKED;
4763			goto out;
4764		}
4765	}
4766	unlock_page(page);
4767	/* OK, we need to fill the hole... */
4768	if (ext4_should_dioread_nolock(inode))
4769		get_block = ext4_get_block_write;
4770	else
4771		get_block = ext4_get_block;
4772retry_alloc:
4773	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4774	if (IS_ERR(handle)) {
4775		ret = VM_FAULT_SIGBUS;
4776		goto out;
4777	}
4778	ret = __block_page_mkwrite(vma, vmf, get_block);
4779	if (!ret && ext4_should_journal_data(inode)) {
4780		if (walk_page_buffers(handle, page_buffers(page), 0,
4781			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4782			unlock_page(page);
4783			ret = VM_FAULT_SIGBUS;
4784			ext4_journal_stop(handle);
4785			goto out;
4786		}
4787		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4788	}
4789	ext4_journal_stop(handle);
4790	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4791		goto retry_alloc;
4792out_ret:
4793	ret = block_page_mkwrite_return(ret);
4794out:
4795	return ret;
4796}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ext4/inode.c
   4 *
   5 * Copyright (C) 1992, 1993, 1994, 1995
   6 * Remy Card (card@masi.ibp.fr)
   7 * Laboratoire MASI - Institut Blaise Pascal
   8 * Universite Pierre et Marie Curie (Paris VI)
   9 *
  10 *  from
  11 *
  12 *  linux/fs/minix/inode.c
  13 *
  14 *  Copyright (C) 1991, 1992  Linus Torvalds
  15 *
  16 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  17 *	(jj@sunsite.ms.mff.cuni.cz)
  18 *
  19 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  20 */
  21
  22#include <linux/fs.h>
  23#include <linux/time.h>
  24#include <linux/highuid.h>
  25#include <linux/pagemap.h>
  26#include <linux/dax.h>
  27#include <linux/quotaops.h>
  28#include <linux/string.h>
  29#include <linux/buffer_head.h>
  30#include <linux/writeback.h>
  31#include <linux/pagevec.h>
  32#include <linux/mpage.h>
  33#include <linux/namei.h>
  34#include <linux/uio.h>
  35#include <linux/bio.h>
  36#include <linux/workqueue.h>
  37#include <linux/kernel.h>
  38#include <linux/printk.h>
  39#include <linux/slab.h>
  40#include <linux/bitops.h>
  41#include <linux/iomap.h>
  42#include <linux/iversion.h>
  43
  44#include "ext4_jbd2.h"
  45#include "xattr.h"
  46#include "acl.h"
  47#include "truncate.h"
  48
  49#include <trace/events/ext4.h>
  50
  51#define MPAGE_DA_EXTENT_TAIL 0x01
  52
  53static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  54			      struct ext4_inode_info *ei)
  55{
  56	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  57	__u32 csum;
  58	__u16 dummy_csum = 0;
  59	int offset = offsetof(struct ext4_inode, i_checksum_lo);
  60	unsigned int csum_size = sizeof(dummy_csum);
  61
  62	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
  63	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
  64	offset += csum_size;
  65	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  66			   EXT4_GOOD_OLD_INODE_SIZE - offset);
  67
  68	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  69		offset = offsetof(struct ext4_inode, i_checksum_hi);
  70		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
  71				   EXT4_GOOD_OLD_INODE_SIZE,
  72				   offset - EXT4_GOOD_OLD_INODE_SIZE);
  73		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  74			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
  75					   csum_size);
  76			offset += csum_size;
  77		}
  78		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  79				   EXT4_INODE_SIZE(inode->i_sb) - offset);
  80	}
  81
  82	return csum;
  83}
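/*
 * Added note: the function above checksums the raw inode in pieces,
 * substituting the zero dummy_csum for each 16-bit checksum word as it
 * goes, so the buffer being summed never has to be modified and
 * restored around the calculation.
 */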
  84
  85static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  86				  struct ext4_inode_info *ei)
  87{
  88	__u32 provided, calculated;
  89
  90	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  91	    cpu_to_le32(EXT4_OS_LINUX) ||
  92	    !ext4_has_metadata_csum(inode->i_sb))
  93		return 1;
  94
  95	provided = le16_to_cpu(raw->i_checksum_lo);
  96	calculated = ext4_inode_csum(inode, raw, ei);
  97	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  98	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  99		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
 100	else
 101		calculated &= 0xFFFF;
 102
 103	return provided == calculated;
 104}
 105
 106static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 107				struct ext4_inode_info *ei)
 108{
 109	__u32 csum;
 110
 111	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 112	    cpu_to_le32(EXT4_OS_LINUX) ||
 113	    !ext4_has_metadata_csum(inode->i_sb))
 114		return;
 115
 116	csum = ext4_inode_csum(inode, raw, ei);
 117	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 118	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 119	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 120		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 121}
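
/*
 * Illustrative sketch (not part of the ext4 sources): the verify/set pair
 * above splits one 32-bit checksum across two 16-bit on-disk fields
 * (i_checksum_lo/i_checksum_hi).  A standalone round-trip of that split,
 * ignoring the little-endian conversions for brevity:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t csum = 0xdeadbeef;
	uint16_t lo = csum & 0xFFFF;			/* i_checksum_lo */
	uint16_t hi = csum >> 16;			/* i_checksum_hi */
	uint32_t provided = lo | ((uint32_t)hi << 16);

	assert(provided == csum);
	return 0;
}
#endif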
 122
 123static inline int ext4_begin_ordered_truncate(struct inode *inode,
 124					      loff_t new_size)
 125{
 126	trace_ext4_begin_ordered_truncate(inode, new_size);
 127	/*
 128	 * If jinode is zero, then we never opened the file for
 129	 * writing, so there's no need to call
 130	 * jbd2_journal_begin_ordered_truncate() since there are no
 131	 * outstanding writes we need to flush.
 132	 */
 133	if (!EXT4_I(inode)->jinode)
 134		return 0;
 135	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 136						   EXT4_I(inode)->jinode,
 137						   new_size);
 138}
 139
 140static void ext4_invalidatepage(struct page *page, unsigned int offset,
 141				unsigned int length);
 142static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 143static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 144static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 145				  int pextents);
 146
 147/*
 148 * Test whether an inode is a fast symlink.
 149 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 150 */
 151int ext4_inode_is_fast_symlink(struct inode *inode)
 152{
 153	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
 154		int ea_blocks = EXT4_I(inode)->i_file_acl ?
 155				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
 156
 157		if (ext4_has_inline_data(inode))
 158			return 0;
 159
 160		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 161	}
 162	return S_ISLNK(inode->i_mode) && inode->i_size &&
 163	       (inode->i_size < EXT4_N_BLOCKS * 4);
 164}
 165
 166/*
 167 * Restart the transaction associated with *handle.  This does a commit,
 168 * so before we call here everything must be consistently dirtied against
 169 * this transaction.
 170 */
 171int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 172				 int nblocks)
 173{
 174	int ret;
 175
 176	/*
 177	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
 178	 * moment, get_block can be called only for blocks inside i_size since
 179	 * page cache has already been dropped and writes are blocked by
 180	 * i_mutex. So we can safely drop the i_data_sem here.
 181	 */
 182	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 183	jbd_debug(2, "restarting handle %p\n", handle);
 184	up_write(&EXT4_I(inode)->i_data_sem);
 185	ret = ext4_journal_restart(handle, nblocks);
 186	down_write(&EXT4_I(inode)->i_data_sem);
 187	ext4_discard_preallocations(inode);
 188
 189	return ret;
 190}
 191
 192/*
 193 * Called at the last iput() if i_nlink is zero.
 194 */
 195void ext4_evict_inode(struct inode *inode)
 196{
 197	handle_t *handle;
 198	int err;
 199	int extra_credits = 3;
 200	struct ext4_xattr_inode_array *ea_inode_array = NULL;
 201
 202	trace_ext4_evict_inode(inode);
 203
 204	if (inode->i_nlink) {
 205		/*
 206		 * When journalling data, dirty buffers are tracked only in the
 207		 * journal. So although mm thinks everything is clean and
 208		 * ready for reaping the inode might still have some pages to
 209		 * write in the running transaction or waiting to be
 210		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
 211		 * (via truncate_inode_pages()) to discard these buffers can
 212		 * cause data loss. Also even if we did not discard these
 213		 * buffers, we would have no way to find them after the inode
 214		 * is reaped and thus the user could see stale data if they try
 215		 * to read them before the transaction is checkpointed. So be
 216		 * careful and force everything to disk here... We use
 217		 * ei->i_datasync_tid to store the newest transaction
 218		 * containing inode's data.
 219		 *
 220		 * Note that directories do not have this problem because they
 221		 * don't use page cache.
 222		 */
 223		if (inode->i_ino != EXT4_JOURNAL_INO &&
 224		    ext4_should_journal_data(inode) &&
 225		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
 226		    inode->i_data.nrpages) {
 227			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 228			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 229
 230			jbd2_complete_transaction(journal, commit_tid);
 231			filemap_write_and_wait(&inode->i_data);
 232		}
 233		truncate_inode_pages_final(&inode->i_data);
 234
 235		goto no_delete;
 236	}
 237
 238	if (is_bad_inode(inode))
 239		goto no_delete;
 240	dquot_initialize(inode);
 241
 242	if (ext4_should_order_data(inode))
 243		ext4_begin_ordered_truncate(inode, 0);
 244	truncate_inode_pages_final(&inode->i_data);
 245
 246	/*
 247	 * Protect us against freezing - iput() caller didn't have to have any
 248	 * protection against it
 249	 */
 250	sb_start_intwrite(inode->i_sb);
 251
 252	if (!IS_NOQUOTA(inode))
 253		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
 254
 255	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
 256				 ext4_blocks_for_truncate(inode)+extra_credits);
 257	if (IS_ERR(handle)) {
 258		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 259		/*
 260		 * If we're going to skip the normal cleanup, we still need to
 261		 * make sure that the in-core orphan linked list is properly
 262		 * cleaned up.
 263		 */
 264		ext4_orphan_del(NULL, inode);
 265		sb_end_intwrite(inode->i_sb);
 266		goto no_delete;
 267	}
 268
 269	if (IS_SYNC(inode))
 270		ext4_handle_sync(handle);
 271
 272	/*
 273	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
 274	 * special handling of symlinks here because i_size is used to
 275	 * determine whether ext4_inode_info->i_data contains symlink data or
 276	 * block mappings. Setting i_size to 0 will remove its fast symlink
 277	 * status. Erase i_data so that it becomes a valid empty block map.
 278	 */
 279	if (ext4_inode_is_fast_symlink(inode))
 280		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
 281	inode->i_size = 0;
 282	err = ext4_mark_inode_dirty(handle, inode);
 283	if (err) {
 284		ext4_warning(inode->i_sb,
 285			     "couldn't mark inode dirty (err %d)", err);
 286		goto stop_handle;
 287	}
 288	if (inode->i_blocks) {
 289		err = ext4_truncate(inode);
 290		if (err) {
 291			ext4_error(inode->i_sb,
 292				   "couldn't truncate inode %lu (err %d)",
 293				   inode->i_ino, err);
 294			goto stop_handle;
 295		}
 296	}
 297
 298	/* Remove xattr references. */
 299	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
 300				      extra_credits);
 301	if (err) {
 302		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
 303stop_handle:
 304		ext4_journal_stop(handle);
 305		ext4_orphan_del(NULL, inode);
 306		sb_end_intwrite(inode->i_sb);
 307		ext4_xattr_inode_array_free(ea_inode_array);
 308		goto no_delete;
 309	}
 310
 311	/*
 312	 * Kill off the orphan record which ext4_truncate created.
 313	 * AKPM: I think this can be inside the above `if'.
 314	 * Note that ext4_orphan_del() has to be able to cope with the
 315	 * deletion of a non-existent orphan - this is because we don't
 316	 * know if ext4_truncate() actually created an orphan record.
 317	 * (Well, we could do this if we need to, but heck - it works)
 318	 */
 319	ext4_orphan_del(handle, inode);
 320	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();
 321
 322	/*
 323	 * One subtle ordering requirement: if anything has gone wrong
 324	 * (transaction abort, IO errors, whatever), then we can still
 325	 * do these next steps (the fs will already have been marked as
 326	 * having errors), but we can't free the inode if the mark_dirty
 327	 * fails.
 328	 */
 329	if (ext4_mark_inode_dirty(handle, inode))
 330		/* If that failed, just do the required in-core inode clear. */
 331		ext4_clear_inode(inode);
 332	else
 333		ext4_free_inode(handle, inode);
 334	ext4_journal_stop(handle);
 335	sb_end_intwrite(inode->i_sb);
 336	ext4_xattr_inode_array_free(ea_inode_array);
 337	return;
 338no_delete:
 339	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
 340}
 341
 342#ifdef CONFIG_QUOTA
 343qsize_t *ext4_get_reserved_space(struct inode *inode)
 344{
 345	return &EXT4_I(inode)->i_reserved_quota;
 346}
 347#endif
 348
 349/*
 350 * Called with i_data_sem down, which is important since we can call
 351 * ext4_discard_preallocations() from here.
 352 */
 353void ext4_da_update_reserve_space(struct inode *inode,
 354					int used, int quota_claim)
 355{
 356	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 357	struct ext4_inode_info *ei = EXT4_I(inode);
 358
 359	spin_lock(&ei->i_block_reservation_lock);
 360	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 361	if (unlikely(used > ei->i_reserved_data_blocks)) {
 362		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
 363			 "with only %d reserved data blocks",
 364			 __func__, inode->i_ino, used,
 365			 ei->i_reserved_data_blocks);
 366		WARN_ON(1);
 367		used = ei->i_reserved_data_blocks;
 368	}
 369
 370	/* Update per-inode reservations */
 371	ei->i_reserved_data_blocks -= used;
 372	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 373
 374	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 375
 376	/* Update quota subsystem for data blocks */
 377	if (quota_claim)
 378		dquot_claim_block(inode, EXT4_C2B(sbi, used));
 379	else {
 380		/*
 381		 * We did fallocate with an offset that is already delayed
 382		 * allocated. So on delayed allocated writeback we should
 383		 * not re-claim the quota for fallocated blocks.
 384		 */
 385		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 386	}
 387
 388	/*
 389	 * If we have done all the pending block allocations and if
 390	 * there aren't any writers on the inode, we can discard the
 391	 * inode's preallocations.
 392	 */
 393	if ((ei->i_reserved_data_blocks == 0) &&
 394	    !inode_is_open_for_write(inode))
 395		ext4_discard_preallocations(inode);
 396}
 397
 398static int __check_block_validity(struct inode *inode, const char *func,
 399				unsigned int line,
 400				struct ext4_map_blocks *map)
 401{
 402	if (ext4_has_feature_journal(inode->i_sb) &&
 403	    (inode->i_ino ==
 404	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
 405		return 0;
 406	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 407				   map->m_len)) {
 408		ext4_error_inode(inode, func, line, map->m_pblk,
 409				 "lblock %lu mapped to illegal pblock %llu "
 410				 "(length %d)", (unsigned long) map->m_lblk,
 411				 map->m_pblk, map->m_len);
 412		return -EFSCORRUPTED;
 413	}
 414	return 0;
 415}
 416
 417int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 418		       ext4_lblk_t len)
 419{
 420	int ret;
 421
 422	if (IS_ENCRYPTED(inode))
 423		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 424
 425	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
 426	if (ret > 0)
 427		ret = 0;
 428
 429	return ret;
 430}
 431
 432#define check_block_validity(inode, map)	\
 433	__check_block_validity((inode), __func__, __LINE__, (map))
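
/*
 * Illustrative sketch (not part of the ext4 sources): the macro above
 * forwards the call site's __func__ and __LINE__ into the checking
 * helper so that error reports point at the caller rather than at the
 * helper.  The same pattern standalone, with a hypothetical report()
 * helper:
 */
#if 0
#include <stdio.h>

static int __report(const char *func, unsigned int line, int err)
{
	if (err)
		fprintf(stderr, "%s:%u: error %d\n", func, line, err);
	return err;
}

#define report(err)	__report(__func__, __LINE__, (err))

int main(void)
{
	report(-5);	/* prints "main:<this line>: error -5" */
	return 0;
}
#endif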
 434
 435#ifdef ES_AGGRESSIVE_TEST
 436static void ext4_map_blocks_es_recheck(handle_t *handle,
 437				       struct inode *inode,
 438				       struct ext4_map_blocks *es_map,
 439				       struct ext4_map_blocks *map,
 440				       int flags)
 441{
 442	int retval;
 443
 444	map->m_flags = 0;
 445	/*
 446	 * There is a race window in which the result is not the same,
 447	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
 448	 * is that we look up a block mapping in the extent status tree
 449	 * without taking i_data_sem, so in the meantime the unwritten
 450	 * extent could have been converted.
 451	 */
 452	down_read(&EXT4_I(inode)->i_data_sem);
 453	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 454		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 455					     EXT4_GET_BLOCKS_KEEP_SIZE);
 456	} else {
 457		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 458					     EXT4_GET_BLOCKS_KEEP_SIZE);
 459	}
 460	up_read((&EXT4_I(inode)->i_data_sem));
 461
 462	/*
 463	 * We don't check m_len because the extent will be collapsed in the
 464	 * status tree, so the m_len values might not be equal.
 465	 */
 466	if (es_map->m_lblk != map->m_lblk ||
 467	    es_map->m_flags != map->m_flags ||
 468	    es_map->m_pblk != map->m_pblk) {
 469		printk("ES cache assertion failed for inode: %lu "
 470		       "es_cached ex [%d/%d/%llu/%x] != "
 471		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 472		       inode->i_ino, es_map->m_lblk, es_map->m_len,
 473		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
 474		       map->m_len, map->m_pblk, map->m_flags,
 475		       retval, flags);
 476	}
 477}
 478#endif /* ES_AGGRESSIVE_TEST */
 479
 480/*
 481 * The ext4_map_blocks() function tries to look up the requested blocks,
 482 * and returns if the blocks are already mapped.
 483 *
 484 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 485 * stores the allocated blocks in the result buffer head and marks it
 486 * mapped.
 487 *
 488 * If the file is extent based, it will call ext4_ext_map_blocks();
 489 * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapping
 490 * based files.
 491 *
 492 * On success, it returns the number of blocks being mapped or allocated.  If
 493 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
 494 * is marked as unwritten.  If create==1, it will mark @map as mapped.
 495 *
 496 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 497 * in that case, @map is returned as unmapped but we still fill map->m_len to
 498 * indicate the length of a hole starting at map->m_lblk.
 499 *
 500 * It returns the error in case of allocation failure.
 501 */
 502int ext4_map_blocks(handle_t *handle, struct inode *inode,
 503		    struct ext4_map_blocks *map, int flags)
 504{
 505	struct extent_status es;
 506	int retval;
 507	int ret = 0;
 508#ifdef ES_AGGRESSIVE_TEST
 509	struct ext4_map_blocks orig_map;
 510
 511	memcpy(&orig_map, map, sizeof(*map));
 512#endif
 513
 514	map->m_flags = 0;
 515	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u, "
 516		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
 517		  (unsigned long) map->m_lblk);
 518
 519	/*
 520	 * ext4_map_blocks returns an int, and m_len is an unsigned int
 521	 */
 522	if (unlikely(map->m_len > INT_MAX))
 523		map->m_len = INT_MAX;
 524
 525	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
 526	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
 527		return -EFSCORRUPTED;
 528
 529	/* Look up the extent status tree first */
 530	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 531		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 532			map->m_pblk = ext4_es_pblock(&es) +
 533					map->m_lblk - es.es_lblk;
 534			map->m_flags |= ext4_es_is_written(&es) ?
 535					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
 536			retval = es.es_len - (map->m_lblk - es.es_lblk);
 537			if (retval > map->m_len)
 538				retval = map->m_len;
 539			map->m_len = retval;
 540		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
 541			map->m_pblk = 0;
 542			retval = es.es_len - (map->m_lblk - es.es_lblk);
 543			if (retval > map->m_len)
 544				retval = map->m_len;
 545			map->m_len = retval;
 546			retval = 0;
 547		} else {
 548			BUG();
 549		}
 550#ifdef ES_AGGRESSIVE_TEST
 551		ext4_map_blocks_es_recheck(handle, inode, map,
 552					   &orig_map, flags);
 553#endif
 554		goto found;
 555	}
 556
 557	/*
 558	 * Try to see if we can get the block without requesting a new
 559	 * file system block.
 560	 */
 561	down_read(&EXT4_I(inode)->i_data_sem);
 562	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 563		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 564					     EXT4_GET_BLOCKS_KEEP_SIZE);
 565	} else {
 566		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 567					     EXT4_GET_BLOCKS_KEEP_SIZE);
 568	}
 569	if (retval > 0) {
 570		unsigned int status;
 571
 572		if (unlikely(retval != map->m_len)) {
 573			ext4_warning(inode->i_sb,
 574				     "ES len assertion failed for inode "
 575				     "%lu: retval %d != map->m_len %d",
 576				     inode->i_ino, retval, map->m_len);
 577			WARN_ON(1);
 578		}
 579
 580		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 581				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 582		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 583		    !(status & EXTENT_STATUS_WRITTEN) &&
 584		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 585				       map->m_lblk + map->m_len - 1))
 586			status |= EXTENT_STATUS_DELAYED;
 587		ret = ext4_es_insert_extent(inode, map->m_lblk,
 588					    map->m_len, map->m_pblk, status);
 589		if (ret < 0)
 590			retval = ret;
 591	}
 592	up_read((&EXT4_I(inode)->i_data_sem));
 593
 594found:
 595	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 596		ret = check_block_validity(inode, map);
 597		if (ret != 0)
 598			return ret;
 599	}
 600
 601	/* If it is only a block(s) lookup */
 602	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 603		return retval;
 604
 605	/*
 606	 * Return if the blocks have already been allocated.
 607	 *
 608	 * Note that if blocks have been preallocated,
 609	 * ext4_ext_get_block() returns with create = 0
 610	 * and the buffer head unmapped.
 611	 */
 612	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 613		/*
 614		 * If we need to convert extent to unwritten
 615		 * we continue and do the actual work in
 616		 * ext4_ext_map_blocks()
 617		 */
 618		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
 619			return retval;
 620
 621	/*
 622	 * Here we clear m_flags because after allocating a new extent,
 623	 * it will be set again.
 624	 */
 625	map->m_flags &= ~EXT4_MAP_FLAGS;
 626
 627	/*
 628	 * New block allocation and/or writing to an unwritten extent
 629	 * will possibly result in updating i_data, so we take
 630	 * the write lock of i_data_sem, and call get_block()
 631	 * with the create == 1 flag.
 632	 */
 633	down_write(&EXT4_I(inode)->i_data_sem);
 634
 635	/*
 636	 * We need to check for EXT4 here because migrate
 637	 * could have changed the inode type in between.
 638	 */
 639	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 640		retval = ext4_ext_map_blocks(handle, inode, map, flags);
 641	} else {
 642		retval = ext4_ind_map_blocks(handle, inode, map, flags);
 643
 644		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 645			/*
 646			 * We allocated new blocks which will result in
 647			 * i_data's format changing.  Force the migrate
 648			 * to fail by clearing migrate flags
 649			 */
 650			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 651		}
 652
 653		/*
 654		 * Update reserved blocks/metadata blocks after successful
 655		 * block allocation which had been deferred till now. We don't
 656		 * support fallocate for non-extent files, so we can update
 657		 * the reserved space here.
 658		 */
 659		if ((retval > 0) &&
 660			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 661			ext4_da_update_reserve_space(inode, retval, 1);
 662	}
 663
 664	if (retval > 0) {
 665		unsigned int status;
 666
 667		if (unlikely(retval != map->m_len)) {
 668			ext4_warning(inode->i_sb,
 669				     "ES len assertion failed for inode "
 670				     "%lu: retval %d != map->m_len %d",
 671				     inode->i_ino, retval, map->m_len);
 672			WARN_ON(1);
 673		}
 674
 675		/*
 676		 * We have to zeroout blocks before inserting them into extent
 677		 * status tree. Otherwise someone could look them up there and
 678		 * use them before they are really zeroed. We also have to
 679		 * unmap metadata before zeroing as otherwise writeback can
 680		 * overwrite zeros with stale data from block device.
 681		 */
 682		if (flags & EXT4_GET_BLOCKS_ZERO &&
 683		    map->m_flags & EXT4_MAP_MAPPED &&
 684		    map->m_flags & EXT4_MAP_NEW) {
 685			ret = ext4_issue_zeroout(inode, map->m_lblk,
 686						 map->m_pblk, map->m_len);
 687			if (ret) {
 688				retval = ret;
 689				goto out_sem;
 690			}
 691		}
 692
 693		/*
 694		 * If the extent has been zeroed out, we don't need to update
 695		 * extent status tree.
 696		 */
 697		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
 698		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 699			if (ext4_es_is_written(&es))
 700				goto out_sem;
 701		}
 702		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 703				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 704		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 705		    !(status & EXTENT_STATUS_WRITTEN) &&
 706		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 707				       map->m_lblk + map->m_len - 1))
 708			status |= EXTENT_STATUS_DELAYED;
 709		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 710					    map->m_pblk, status);
 711		if (ret < 0) {
 712			retval = ret;
 713			goto out_sem;
 714		}
 715	}
 716
 717out_sem:
 718	up_write((&EXT4_I(inode)->i_data_sem));
 719	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 720		ret = check_block_validity(inode, map);
 721		if (ret != 0)
 722			return ret;
 723
 724		/*
 725		 * Inodes with freshly allocated blocks where contents will be
 726		 * visible after transaction commit must be on transaction's
 727		 * ordered data list.
 728		 */
 729		if (map->m_flags & EXT4_MAP_NEW &&
 730		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
 731		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
 732		    !ext4_is_quota_file(inode) &&
 733		    ext4_should_order_data(inode)) {
 734			loff_t start_byte =
 735				(loff_t)map->m_lblk << inode->i_blkbits;
 736			loff_t length = (loff_t)map->m_len << inode->i_blkbits;
 737
 738			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
 739				ret = ext4_jbd2_inode_add_wait(handle, inode,
 740						start_byte, length);
 741			else
 742				ret = ext4_jbd2_inode_add_write(handle, inode,
 743						start_byte, length);
 744			if (ret)
 745				return ret;
 746		}
 747	}
 748	return retval;
 749}
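
/*
 * Illustrative sketch (not part of the ext4 sources): a hypothetical
 * caller walking a logical range with ext4_map_blocks(), relying only on
 * the return-value convention documented above (>0: that many blocks
 * mapped, 0: hole with m_len set to the hole length, <0: error).  The
 * walk_range() name is an assumption for the sketch.
 */
#if 0
static int walk_range(struct inode *inode, ext4_lblk_t lblk, unsigned int len)
{
	while (len) {
		struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
		int ret = ext4_map_blocks(NULL, inode, &map, 0);

		if (ret < 0)
			return ret;	/* lookup failed */
		if (map.m_len == 0)
			break;		/* defensive: avoid looping forever */
		/*
		 * ret > 0: map.m_len blocks are mapped at map.m_pblk;
		 * ret == 0: a hole of map.m_len blocks starts at lblk.
		 */
		lblk += map.m_len;
		len -= map.m_len;
	}
	return 0;
}
#endif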
 750
 751/*
 752 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 753 * we have to be careful as someone else may be manipulating b_state as well.
 754 */
 755static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
 756{
 757	unsigned long old_state;
 758	unsigned long new_state;
 759
 760	flags &= EXT4_MAP_FLAGS;
 761
 762	/* Dummy buffer_head? Set non-atomically. */
 763	if (!bh->b_page) {
 764		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
 765		return;
 766	}
 767	/*
 768	 * Someone else may be modifying b_state. Be careful! This is ugly but
 769	 * once we get rid of using bh as a container for mapping information
 770	 * to pass to / from get_block functions, this can go away.
 771	 */
 772	do {
 773		old_state = READ_ONCE(bh->b_state);
 774		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
 775	} while (unlikely(
 776		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
 777}
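
/*
 * Illustrative sketch (not part of the ext4 sources): ext4_update_bh_state()
 * above rewrites a few bits of a word that other CPUs may modify
 * concurrently by looping on compare-and-swap.  The same shape with C11
 * atomics in a standalone program:
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_MASK	0xfUL

static void update_flags(_Atomic unsigned long *state, unsigned long flags)
{
	unsigned long old_state = atomic_load(state);
	unsigned long new_state;

	flags &= FLAG_MASK;
	do {
		new_state = (old_state & ~FLAG_MASK) | flags;
		/* On failure old_state is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak(state, &old_state, new_state));
}

int main(void)
{
	_Atomic unsigned long state = 0xabcdef0UL;

	update_flags(&state, 0x5);
	printf("%lx\n", atomic_load(&state));	/* prints abcdef5 */
	return 0;
}
#endif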
 778
 779static int _ext4_get_block(struct inode *inode, sector_t iblock,
 780			   struct buffer_head *bh, int flags)
 781{
 782	struct ext4_map_blocks map;
 783	int ret = 0;
 784
 785	if (ext4_has_inline_data(inode))
 786		return -ERANGE;
 787
 788	map.m_lblk = iblock;
 789	map.m_len = bh->b_size >> inode->i_blkbits;
 790
 791	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
 792			      flags);
 793	if (ret > 0) {
 794		map_bh(bh, inode->i_sb, map.m_pblk);
 795		ext4_update_bh_state(bh, map.m_flags);
 796		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 797		ret = 0;
 798	} else if (ret == 0) {
 799		/* hole case, need to fill in bh->b_size */
 800		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 801	}
 802	return ret;
 803}
 804
 805int ext4_get_block(struct inode *inode, sector_t iblock,
 806		   struct buffer_head *bh, int create)
 807{
 808	return _ext4_get_block(inode, iblock, bh,
 809			       create ? EXT4_GET_BLOCKS_CREATE : 0);
 810}
 811
 812/*
 813 * Get block function used when preparing for buffered write if we require
 814 * creating an unwritten extent if blocks haven't been allocated.  The extent
 815 * will be converted to written after the IO is complete.
 816 */
 817int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 818			     struct buffer_head *bh_result, int create)
 819{
 820	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
 821		   inode->i_ino, create);
 822	return _ext4_get_block(inode, iblock, bh_result,
 823			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
 824}
 825
 826/* Maximum number of blocks we map for direct IO at once. */
 827#define DIO_MAX_BLOCKS 4096
 828
 829/*
 830 * Get blocks function for the cases that need to start a transaction -
 831 * generally different cases of direct IO and DAX IO. It also handles retries
 832 * in case of ENOSPC.
 833 */
 834static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
 835				struct buffer_head *bh_result, int flags)
 836{
 837	int dio_credits;
 838	handle_t *handle;
 839	int retries = 0;
 840	int ret;
 841
 842	/* Trim mapping request to maximum we can map at once for DIO */
 843	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
 844		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
 845	dio_credits = ext4_chunk_trans_blocks(inode,
 846				      bh_result->b_size >> inode->i_blkbits);
 847retry:
 848	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
 849	if (IS_ERR(handle))
 850		return PTR_ERR(handle);
 851
 852	ret = _ext4_get_block(inode, iblock, bh_result, flags);
 853	ext4_journal_stop(handle);
 854
 855	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 856		goto retry;
 857	return ret;
 858}
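
/*
 * Illustrative sketch (not part of the ext4 sources): ext4_get_block_trans()
 * above retries the whole start/map/stop cycle when the mapping fails
 * with ENOSPC, leaving the retry budget to ext4_should_retry_alloc().
 * A standalone model of that bounded-retry shape, with toy stand-ins for
 * both helpers:
 */
#if 0
#include <errno.h>
#include <stdio.h>

static int attempts;

/* Toy allocation step: fail twice with ENOSPC, then succeed. */
static int try_alloc(void)
{
	return ++attempts < 3 ? -ENOSPC : 0;
}

/* Toy ext4_should_retry_alloc(): allow a handful of retries. */
static int should_retry(int *retries)
{
	return (*retries)++ < 3;
}

int main(void)
{
	int retries = 0;
	int ret;
retry:
	ret = try_alloc();
	if (ret == -ENOSPC && should_retry(&retries))
		goto retry;
	printf("ret=%d after %d attempts\n", ret, attempts);
	return 0;
}
#endif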
 859
 860/* Get block function for DIO reads and writes to inodes without extents */
 861int ext4_dio_get_block(struct inode *inode, sector_t iblock,
 862		       struct buffer_head *bh, int create)
 863{
 864	/* We don't expect handle for direct IO */
 865	WARN_ON_ONCE(ext4_journal_current_handle());
 866
 867	if (!create)
 868		return _ext4_get_block(inode, iblock, bh, 0);
 869	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
 870}
 871
 872/*
 873 * Get block function for AIO DIO writes where we create an unwritten
 874 * extent if blocks are not allocated yet. The extent will be converted
 875 * to written after IO is complete.
 876 */
 877static int ext4_dio_get_block_unwritten_async(struct inode *inode,
 878		sector_t iblock, struct buffer_head *bh_result,	int create)
 879{
 880	int ret;
 881
 882	/* We don't expect handle for direct IO */
 883	WARN_ON_ONCE(ext4_journal_current_handle());
 884
 885	ret = ext4_get_block_trans(inode, iblock, bh_result,
 886				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 887
 888	/*
 889	 * When doing DIO using unwritten extents, we need io_end to convert
 890	 * unwritten extents to written on IO completion. We allocate io_end
 891	 * once we spot unwritten extent and store it in b_private. Generic
 892	 * DIO code keeps b_private set and furthermore passes the value to
 893	 * our completion callback in 'private' argument.
 894	 */
 895	if (!ret && buffer_unwritten(bh_result)) {
 896		if (!bh_result->b_private) {
 897			ext4_io_end_t *io_end;
 898
 899			io_end = ext4_init_io_end(inode, GFP_KERNEL);
 900			if (!io_end)
 901				return -ENOMEM;
 902			bh_result->b_private = io_end;
 903			ext4_set_io_unwritten_flag(inode, io_end);
 904		}
 905		set_buffer_defer_completion(bh_result);
 906	}
 907
 908	return ret;
 909}
 910
 911/*
 912 * Get block function for non-AIO DIO writes where we create an unwritten
 913 * extent if blocks are not allocated yet. The extent will be converted
 914 * to written after IO is complete by ext4_direct_IO_write().
 915 */
 916static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
 917		sector_t iblock, struct buffer_head *bh_result,	int create)
 918{
 919	int ret;
 920
 921	/* We don't expect handle for direct IO */
 922	WARN_ON_ONCE(ext4_journal_current_handle());
 923
 924	ret = ext4_get_block_trans(inode, iblock, bh_result,
 925				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 926
 927	/*
 928	 * Mark inode as having pending DIO writes to unwritten extents.
 929	 * ext4_direct_IO_write() checks this flag and converts extents to
 930	 * written.
 931	 */
 932	if (!ret && buffer_unwritten(bh_result))
 933		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 934
 935	return ret;
 936}
 937
 938static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
 939		   struct buffer_head *bh_result, int create)
 940{
 941	int ret;
 942
 943	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
 944		   inode->i_ino, create);
 945	/* We don't expect handle for direct IO */
 946	WARN_ON_ONCE(ext4_journal_current_handle());
 947
 948	ret = _ext4_get_block(inode, iblock, bh_result, 0);
 949	/*
 950	 * Blocks should have been preallocated! ext4_file_write_iter() checks
 951	 * that.
 952	 */
 953	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
 954
 955	return ret;
 956}
 957
 958
 959/*
 960 * `handle' can be NULL if create is zero
 961 */
 962struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 963				ext4_lblk_t block, int map_flags)
 964{
 965	struct ext4_map_blocks map;
 966	struct buffer_head *bh;
 967	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
 968	int err;
 969
 970	J_ASSERT(handle != NULL || create == 0);
 971
 972	map.m_lblk = block;
 973	map.m_len = 1;
 974	err = ext4_map_blocks(handle, inode, &map, map_flags);
 975
 976	if (err == 0)
 977		return create ? ERR_PTR(-ENOSPC) : NULL;
 978	if (err < 0)
 979		return ERR_PTR(err);
 980
 981	bh = sb_getblk(inode->i_sb, map.m_pblk);
 982	if (unlikely(!bh))
 983		return ERR_PTR(-ENOMEM);
 984	if (map.m_flags & EXT4_MAP_NEW) {
 985		J_ASSERT(create != 0);
 986		J_ASSERT(handle != NULL);
 987
 988		/*
 989		 * Now that we do not always journal data, we should
 990		 * keep in mind whether this should always journal the
 991		 * new buffer as metadata.  For now, regular file
 992		 * writes use ext4_get_block instead, so it's not a
 993		 * problem.
 994		 */
 995		lock_buffer(bh);
 996		BUFFER_TRACE(bh, "call get_create_access");
 997		err = ext4_journal_get_create_access(handle, bh);
 998		if (unlikely(err)) {
 999			unlock_buffer(bh);
1000			goto errout;
1001		}
1002		if (!buffer_uptodate(bh)) {
1003			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1004			set_buffer_uptodate(bh);
1005		}
1006		unlock_buffer(bh);
1007		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1008		err = ext4_handle_dirty_metadata(handle, inode, bh);
1009		if (unlikely(err))
1010			goto errout;
1011	} else
1012		BUFFER_TRACE(bh, "not a new buffer");
1013	return bh;
1014errout:
1015	brelse(bh);
1016	return ERR_PTR(err);
1017}
1018
1019struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1020			       ext4_lblk_t block, int map_flags)
1021{
1022	struct buffer_head *bh;
1023
1024	bh = ext4_getblk(handle, inode, block, map_flags);
1025	if (IS_ERR(bh))
1026		return bh;
1027	if (!bh || ext4_buffer_uptodate(bh))
1028		return bh;
1029	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
1030	wait_on_buffer(bh);
1031	if (buffer_uptodate(bh))
1032		return bh;
1033	put_bh(bh);
1034	return ERR_PTR(-EIO);
1035}
1036
1037/* Read a contiguous batch of blocks. */
1038int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
1039		     bool wait, struct buffer_head **bhs)
1040{
1041	int i, err;
1042
1043	for (i = 0; i < bh_count; i++) {
1044		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
1045		if (IS_ERR(bhs[i])) {
1046			err = PTR_ERR(bhs[i]);
1047			bh_count = i;
1048			goto out_brelse;
1049		}
1050	}
1051
1052	for (i = 0; i < bh_count; i++)
1053		/* Note that NULL bhs[i] is valid because of holes. */
1054		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
1055			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
1056				    &bhs[i]);
1057
1058	if (!wait)
1059		return 0;
1060
1061	for (i = 0; i < bh_count; i++)
1062		if (bhs[i])
1063			wait_on_buffer(bhs[i]);
1064
1065	for (i = 0; i < bh_count; i++) {
1066		if (bhs[i] && !buffer_uptodate(bhs[i])) {
1067			err = -EIO;
1068			goto out_brelse;
1069		}
1070	}
1071	return 0;
1072
1073out_brelse:
1074	for (i = 0; i < bh_count; i++) {
1075		brelse(bhs[i]);
1076		bhs[i] = NULL;
1077	}
1078	return err;
1079}
1080
1081int ext4_walk_page_buffers(handle_t *handle,
1082			   struct buffer_head *head,
1083			   unsigned from,
1084			   unsigned to,
1085			   int *partial,
1086			   int (*fn)(handle_t *handle,
1087				     struct buffer_head *bh))
1088{
1089	struct buffer_head *bh;
1090	unsigned block_start, block_end;
1091	unsigned blocksize = head->b_size;
1092	int err, ret = 0;
1093	struct buffer_head *next;
1094
1095	for (bh = head, block_start = 0;
1096	     ret == 0 && (bh != head || !block_start);
1097	     block_start = block_end, bh = next) {
1098		next = bh->b_this_page;
1099		block_end = block_start + blocksize;
1100		if (block_end <= from || block_start >= to) {
1101			if (partial && !buffer_uptodate(bh))
1102				*partial = 1;
1103			continue;
1104		}
1105		err = (*fn)(handle, bh);
1106		if (!ret)
1107			ret = err;
1108	}
1109	return ret;
1110}
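
/*
 * Illustrative sketch (not part of the ext4 sources): the buffer heads of
 * a page form a circular singly linked list, so the walk above starts at
 * head and stops once it comes back around (block_start != 0 tells the
 * return to head apart from the first visit).  The same circular walk on
 * a toy ring:
 */
#if 0
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a, *n;
	int first = 1;

	c.next = head;	/* close the ring */
	for (n = head; n != head || first; n = n->next, first = 0)
		printf("%d\n", n->val);	/* prints 1, 2, 3 */
	return 0;
}
#endif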
1111
1112/*
1113 * To preserve ordering, it is essential that the hole instantiation and
1114 * the data write be encapsulated in a single transaction.  We cannot
1115 * close off a transaction and start a new one between the ext4_get_block()
1116 * and the commit_write().  So doing the jbd2_journal_start at the start of
1117 * prepare_write() is the right place.
1118 *
1119 * Also, this function can nest inside ext4_writepage().  In that case, we
1120 * *know* that ext4_writepage() has generated enough buffer credits to do the
1121 * whole page.  So we won't block on the journal in that case, which is good,
1122 * because the caller may be PF_MEMALLOC.
1123 *
1124 * By accident, ext4 can be reentered when a transaction is open via
1125 * quota file writes.  If we were to commit the transaction while thus
1126 * reentered, there can be a deadlock - we would be holding a quota
1127 * lock, and the commit would never complete if another thread had a
1128 * transaction open and was blocking on the quota lock - a ranking
1129 * violation.
1130 *
1131 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1132 * will _not_ run commit under these circumstances because handle->h_ref
1133 * is elevated.  We'll still have enough credits for the tiny quotafile
1134 * write.
1135 */
1136int do_journal_get_write_access(handle_t *handle,
1137				struct buffer_head *bh)
1138{
1139	int dirty = buffer_dirty(bh);
1140	int ret;
1141
1142	if (!buffer_mapped(bh) || buffer_freed(bh))
1143		return 0;
1144	/*
1145	 * __block_write_begin() could have dirtied some buffers. Clean
1146	 * the dirty bit as jbd2_journal_get_write_access() could complain
1147	 * otherwise about fs integrity issues. Setting of the dirty bit
1148	 * by __block_write_begin() isn't a real problem here as we clear
1149	 * the bit before releasing a page lock and thus writeback cannot
1150	 * ever write the buffer.
1151	 */
1152	if (dirty)
1153		clear_buffer_dirty(bh);
1154	BUFFER_TRACE(bh, "get write access");
1155	ret = ext4_journal_get_write_access(handle, bh);
1156	if (!ret && dirty)
1157		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1158	return ret;
1159}
1160
1161#ifdef CONFIG_FS_ENCRYPTION
1162static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1163				  get_block_t *get_block)
1164{
1165	unsigned from = pos & (PAGE_SIZE - 1);
1166	unsigned to = from + len;
1167	struct inode *inode = page->mapping->host;
1168	unsigned block_start, block_end;
1169	sector_t block;
1170	int err = 0;
1171	unsigned blocksize = inode->i_sb->s_blocksize;
1172	unsigned bbits;
1173	struct buffer_head *bh, *head, *wait[2];
1174	int nr_wait = 0;
1175	int i;
1176
1177	BUG_ON(!PageLocked(page));
1178	BUG_ON(from > PAGE_SIZE);
1179	BUG_ON(to > PAGE_SIZE);
1180	BUG_ON(from > to);
1181
1182	if (!page_has_buffers(page))
1183		create_empty_buffers(page, blocksize, 0);
1184	head = page_buffers(page);
1185	bbits = ilog2(blocksize);
1186	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1187
1188	for (bh = head, block_start = 0; bh != head || !block_start;
1189	    block++, block_start = block_end, bh = bh->b_this_page) {
1190		block_end = block_start + blocksize;
1191		if (block_end <= from || block_start >= to) {
1192			if (PageUptodate(page)) {
1193				if (!buffer_uptodate(bh))
1194					set_buffer_uptodate(bh);
1195			}
1196			continue;
1197		}
1198		if (buffer_new(bh))
1199			clear_buffer_new(bh);
1200		if (!buffer_mapped(bh)) {
1201			WARN_ON(bh->b_size != blocksize);
1202			err = get_block(inode, block, bh, 1);
1203			if (err)
1204				break;
1205			if (buffer_new(bh)) {
1206				if (PageUptodate(page)) {
1207					clear_buffer_new(bh);
1208					set_buffer_uptodate(bh);
1209					mark_buffer_dirty(bh);
1210					continue;
1211				}
1212				if (block_end > to || block_start < from)
1213					zero_user_segments(page, to, block_end,
1214							   block_start, from);
1215				continue;
1216			}
1217		}
1218		if (PageUptodate(page)) {
1219			if (!buffer_uptodate(bh))
1220				set_buffer_uptodate(bh);
1221			continue;
1222		}
1223		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1224		    !buffer_unwritten(bh) &&
1225		    (block_start < from || block_end > to)) {
1226			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1227			wait[nr_wait++] = bh;
1228		}
1229	}
1230	/*
1231	 * If we issued read requests, let them complete.
1232	 */
1233	for (i = 0; i < nr_wait; i++) {
1234		wait_on_buffer(wait[i]);
1235		if (!buffer_uptodate(wait[i]))
1236			err = -EIO;
1237	}
1238	if (unlikely(err)) {
1239		page_zero_new_buffers(page, from, to);
1240	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
1241		for (i = 0; i < nr_wait; i++) {
1242			int err2;
1243
1244			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
1245								bh_offset(wait[i]));
1246			if (err2) {
1247				clear_buffer_uptodate(wait[i]);
1248				err = err2;
1249			}
1250		}
1251	}
1252
1253	return err;
1254}
1255#endif
1256
1257static int ext4_write_begin(struct file *file, struct address_space *mapping,
1258			    loff_t pos, unsigned len, unsigned flags,
1259			    struct page **pagep, void **fsdata)
1260{
1261	struct inode *inode = mapping->host;
1262	int ret, needed_blocks;
1263	handle_t *handle;
1264	int retries = 0;
1265	struct page *page;
1266	pgoff_t index;
1267	unsigned from, to;
1268
1269	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1270		return -EIO;
1271
1272	trace_ext4_write_begin(inode, pos, len, flags);
1273	/*
1274	 * Reserve one block more for addition to orphan list in case
1275	 * we allocate blocks but the write fails for some reason.
1276	 */
1277	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1278	index = pos >> PAGE_SHIFT;
1279	from = pos & (PAGE_SIZE - 1);
1280	to = from + len;
1281
1282	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1283		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1284						    flags, pagep);
1285		if (ret < 0)
1286			return ret;
1287		if (ret == 1)
1288			return 0;
1289	}
1290
1291	/*
1292	 * grab_cache_page_write_begin() can take a long time if the
1293	 * system is thrashing due to memory pressure, or if the page
1294	 * is being written back.  So grab it first before we start
1295	 * the transaction handle.  This also allows us to allocate
1296	 * the page (if needed) without using GFP_NOFS.
1297	 */
1298retry_grab:
1299	page = grab_cache_page_write_begin(mapping, index, flags);
1300	if (!page)
1301		return -ENOMEM;
1302	unlock_page(page);
1303
1304retry_journal:
1305	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1306	if (IS_ERR(handle)) {
1307		put_page(page);
1308		return PTR_ERR(handle);
1309	}
1310
1311	lock_page(page);
1312	if (page->mapping != mapping) {
1313		/* The page got truncated from under us */
1314		unlock_page(page);
1315		put_page(page);
1316		ext4_journal_stop(handle);
1317		goto retry_grab;
1318	}
1319	/* In case writeback began while the page was unlocked */
1320	wait_for_stable_page(page);
1321
1322#ifdef CONFIG_FS_ENCRYPTION
1323	if (ext4_should_dioread_nolock(inode))
1324		ret = ext4_block_write_begin(page, pos, len,
1325					     ext4_get_block_unwritten);
1326	else
1327		ret = ext4_block_write_begin(page, pos, len,
1328					     ext4_get_block);
1329#else
1330	if (ext4_should_dioread_nolock(inode))
1331		ret = __block_write_begin(page, pos, len,
1332					  ext4_get_block_unwritten);
1333	else
1334		ret = __block_write_begin(page, pos, len, ext4_get_block);
1335#endif
1336	if (!ret && ext4_should_journal_data(inode)) {
1337		ret = ext4_walk_page_buffers(handle, page_buffers(page),
1338					     from, to, NULL,
1339					     do_journal_get_write_access);
1340	}
1341
1342	if (ret) {
1343		bool extended = (pos + len > inode->i_size) &&
1344				!ext4_verity_in_progress(inode);
1345
1346		unlock_page(page);
1347		/*
1348		 * __block_write_begin may have instantiated a few blocks
1349		 * outside i_size.  Trim these off again. Don't need
1350		 * i_size_read because we hold i_mutex.
1351		 *
1352		 * Add inode to orphan list in case we crash before
1353		 * truncate finishes
1354		 */
1355		if (extended && ext4_can_truncate(inode))
1356			ext4_orphan_add(handle, inode);
1357
1358		ext4_journal_stop(handle);
1359		if (extended) {
1360			ext4_truncate_failed_write(inode);
1361			/*
1362			 * If truncate failed early the inode might
1363			 * still be on the orphan list; we need to
1364			 * make sure the inode is removed from the
1365			 * orphan list in that case.
1366			 */
1367			if (inode->i_nlink)
1368				ext4_orphan_del(NULL, inode);
1369		}
1370
1371		if (ret == -ENOSPC &&
1372		    ext4_should_retry_alloc(inode->i_sb, &retries))
1373			goto retry_journal;
1374		put_page(page);
1375		return ret;
1376	}
1377	*pagep = page;
1378	return ret;
1379}
1380
1381/* For write_end() in data=journal mode */
1382static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1383{
1384	int ret;
1385	if (!buffer_mapped(bh) || buffer_freed(bh))
1386		return 0;
1387	set_buffer_uptodate(bh);
1388	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1389	clear_buffer_meta(bh);
1390	clear_buffer_prio(bh);
1391	return ret;
1392}
1393
1394/*
1395 * We need to pick up the new inode size which generic_commit_write gave us.
1396 * `file' can be NULL - eg, when called from page_symlink().
1397 *
1398 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1399 * buffers are managed internally.
1400 */
1401static int ext4_write_end(struct file *file,
1402			  struct address_space *mapping,
1403			  loff_t pos, unsigned len, unsigned copied,
1404			  struct page *page, void *fsdata)
1405{
1406	handle_t *handle = ext4_journal_current_handle();
1407	struct inode *inode = mapping->host;
1408	loff_t old_size = inode->i_size;
1409	int ret = 0, ret2;
1410	int i_size_changed = 0;
1411	int inline_data = ext4_has_inline_data(inode);
1412	bool verity = ext4_verity_in_progress(inode);
1413
1414	trace_ext4_write_end(inode, pos, len, copied);
1415	if (inline_data) {
1416		ret = ext4_write_inline_data_end(inode, pos, len,
1417						 copied, page);
1418		if (ret < 0) {
1419			unlock_page(page);
1420			put_page(page);
1421			goto errout;
1422		}
1423		copied = ret;
1424	} else
1425		copied = block_write_end(file, mapping, pos,
1426					 len, copied, page, fsdata);
1427	/*
1428	 * It's important to update i_size while still holding the page lock:
1429	 * page writeout could otherwise come in and zero beyond i_size.
1430	 *
1431	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1432	 * blocks are being written past EOF, so skip the i_size update.
1433	 */
1434	if (!verity)
1435		i_size_changed = ext4_update_inode_size(inode, pos + copied);
1436	unlock_page(page);
1437	put_page(page);
1438
1439	if (old_size < pos && !verity)
1440		pagecache_isize_extended(inode, old_size, pos);
1441	/*
1442	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1443	 * makes the holding time of page lock longer. Second, it forces lock
1444	 * ordering of page lock and transaction start for journaling
1445	 * filesystems.
1446	 */
1447	if (i_size_changed || inline_data)
1448		ext4_mark_inode_dirty(handle, inode);
1449
1450	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1451		/* If we have allocated more blocks and copied
1452		 * less, we will have blocks allocated outside
1453		 * inode->i_size, so truncate them.
1454		 */
1455		ext4_orphan_add(handle, inode);
1456errout:
1457	ret2 = ext4_journal_stop(handle);
1458	if (!ret)
1459		ret = ret2;
1460
1461	if (pos + len > inode->i_size && !verity) {
1462		ext4_truncate_failed_write(inode);
1463		/*
1464		 * If truncate failed early the inode might still be
1465		 * on the orphan list; we need to make sure the inode
1466		 * is removed from the orphan list in that case.
1467		 */
1468		if (inode->i_nlink)
1469			ext4_orphan_del(NULL, inode);
1470	}
1471
1472	return ret ? ret : copied;
1473}
1474
1475/*
1476 * This is a private version of page_zero_new_buffers() which doesn't
1477 * set the buffer to be dirty, since in data=journalled mode we need
1478 * to call ext4_handle_dirty_metadata() instead.
1479 */
1480static void ext4_journalled_zero_new_buffers(handle_t *handle,
1481					    struct page *page,
1482					    unsigned from, unsigned to)
1483{
1484	unsigned int block_start = 0, block_end;
1485	struct buffer_head *head, *bh;
1486
1487	bh = head = page_buffers(page);
1488	do {
1489		block_end = block_start + bh->b_size;
1490		if (buffer_new(bh)) {
1491			if (block_end > from && block_start < to) {
1492				if (!PageUptodate(page)) {
1493					unsigned start, size;
1494
1495					start = max(from, block_start);
1496					size = min(to, block_end) - start;
1497
1498					zero_user(page, start, size);
1499					write_end_fn(handle, bh);
1500				}
1501				clear_buffer_new(bh);
1502			}
1503		}
1504		block_start = block_end;
1505		bh = bh->b_this_page;
1506	} while (bh != head);
1507}
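
/*
 * Illustrative sketch (not part of the ext4 sources): the zeroing above
 * clips each buffer's [block_start, block_end) range against the write's
 * [from, to) range with max()/min() and zeroes only the overlap.  That
 * interval-intersection step standalone:
 */
#if 0
#include <stdio.h>

#define max(a, b)	((a) > (b) ? (a) : (b))
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int from = 100, to = 600;	/* bytes covered by the write */
	unsigned int block_start = 512, block_end = 1024;	/* one buffer */
	unsigned int start = max(from, block_start);
	unsigned int size = min(to, block_end) - start;

	printf("zero %u bytes at offset %u\n", size, start);	/* 88 at 512 */
	return 0;
}
#endif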
1508
1509static int ext4_journalled_write_end(struct file *file,
1510				     struct address_space *mapping,
1511				     loff_t pos, unsigned len, unsigned copied,
1512				     struct page *page, void *fsdata)
1513{
1514	handle_t *handle = ext4_journal_current_handle();
1515	struct inode *inode = mapping->host;
1516	loff_t old_size = inode->i_size;
1517	int ret = 0, ret2;
1518	int partial = 0;
1519	unsigned from, to;
1520	int size_changed = 0;
1521	int inline_data = ext4_has_inline_data(inode);
1522	bool verity = ext4_verity_in_progress(inode);
1523
1524	trace_ext4_journalled_write_end(inode, pos, len, copied);
1525	from = pos & (PAGE_SIZE - 1);
1526	to = from + len;
1527
1528	BUG_ON(!ext4_handle_valid(handle));
1529
1530	if (inline_data) {
1531		ret = ext4_write_inline_data_end(inode, pos, len,
1532						 copied, page);
1533		if (ret < 0) {
1534			unlock_page(page);
1535			put_page(page);
1536			goto errout;
1537		}
1538		copied = ret;
1539	} else if (unlikely(copied < len) && !PageUptodate(page)) {
1540		copied = 0;
1541		ext4_journalled_zero_new_buffers(handle, page, from, to);
1542	} else {
1543		if (unlikely(copied < len))
1544			ext4_journalled_zero_new_buffers(handle, page,
1545							 from + copied, to);
1546		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1547					     from + copied, &partial,
1548					     write_end_fn);
1549		if (!partial)
1550			SetPageUptodate(page);
1551	}
1552	if (!verity)
1553		size_changed = ext4_update_inode_size(inode, pos + copied);
1554	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1555	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1556	unlock_page(page);
1557	put_page(page);
1558
1559	if (old_size < pos && !verity)
1560		pagecache_isize_extended(inode, old_size, pos);
1561
1562	if (size_changed || inline_data) {
1563		ret2 = ext4_mark_inode_dirty(handle, inode);
1564		if (!ret)
1565			ret = ret2;
1566	}
1567
1568	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1569		/* If we have allocated more blocks and copied
1570		 * less, we will have blocks allocated outside
1571		 * inode->i_size, so truncate them.
1572		 */
1573		ext4_orphan_add(handle, inode);
1574
1575errout:
1576	ret2 = ext4_journal_stop(handle);
1577	if (!ret)
1578		ret = ret2;
1579	if (pos + len > inode->i_size && !verity) {
1580		ext4_truncate_failed_write(inode);
1581		/*
1582		 * If truncate failed early the inode might still be
1583		 * on the orphan list; we need to make sure the inode
1584		 * is removed from the orphan list in that case.
1585		 */
1586		if (inode->i_nlink)
1587			ext4_orphan_del(NULL, inode);
1588	}
1589
1590	return ret ? ret : copied;
1591}
1592
1593/*
1594 * Reserve space for a single cluster
1595 */
1596static int ext4_da_reserve_space(struct inode *inode)
1597{
1598	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1599	struct ext4_inode_info *ei = EXT4_I(inode);
1600	int ret;
1601
1602	/*
1603	 * We will charge metadata quota at writeout time; this saves
1604	 * us from metadata over-estimation, though we may go over by
1605	 * a small amount in the end.  Here we just reserve for data.
1606	 */
1607	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1608	if (ret)
1609		return ret;
1610
1611	spin_lock(&ei->i_block_reservation_lock);
1612	if (ext4_claim_free_clusters(sbi, 1, 0)) {
1613		spin_unlock(&ei->i_block_reservation_lock);
1614		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1615		return -ENOSPC;
1616	}
1617	ei->i_reserved_data_blocks++;
1618	trace_ext4_da_reserve_space(inode);
1619	spin_unlock(&ei->i_block_reservation_lock);
1620
1621	return 0;       /* success */
1622}
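
/*
 * Illustrative sketch (not part of the ext4 sources): ext4_da_reserve_space()
 * above charges quota first, then claims a cluster under the reservation
 * spinlock, and rolls the quota charge back if the claim fails.  A
 * standalone model of that reserve-with-rollback shape, using a pthread
 * mutex in place of the spinlock:
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long quota_left = 8, clusters_left = 0, reserved;

static int reserve_one(void)
{
	if (quota_left <= 0)
		return -EDQUOT;
	quota_left--;			/* step 1: charge quota */

	pthread_mutex_lock(&lock);
	if (clusters_left <= 0) {	/* step 2: claim a cluster */
		pthread_mutex_unlock(&lock);
		quota_left++;		/* failed: roll back step 1 */
		return -ENOSPC;
	}
	clusters_left--;
	reserved++;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	int ret = reserve_one();

	/* No clusters left: fails with -ENOSPC, quota fully restored. */
	printf("ret=%d quota_left=%ld\n", ret, quota_left);
	return 0;
}
#endif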
1623
1624void ext4_da_release_space(struct inode *inode, int to_free)
1625{
1626	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1627	struct ext4_inode_info *ei = EXT4_I(inode);
1628
1629	if (!to_free)
1630		return;		/* Nothing to release, exit */
1631
1632	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1633
1634	trace_ext4_da_release_space(inode, to_free);
1635	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1636		/*
1637		 * If there aren't enough reserved blocks, then the
1638		 * counter is messed up somewhere.  Since this
1639		 * function is called from the invalidatepage path, it's
1640		 * harmless to return without any action.
1641		 */
1642		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1643			 "ino %lu, to_free %d with only %d reserved "
1644			 "data blocks", inode->i_ino, to_free,
1645			 ei->i_reserved_data_blocks);
1646		WARN_ON(1);
1647		to_free = ei->i_reserved_data_blocks;
1648	}
1649	ei->i_reserved_data_blocks -= to_free;
1650
1651	/* update fs dirty data blocks counter */
1652	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1653
1654	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1655
1656	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1657}
1658
1659/*
1660 * Delayed allocation stuff
1661 */
1662
1663struct mpage_da_data {
1664	struct inode *inode;
1665	struct writeback_control *wbc;
1666
1667	pgoff_t first_page;	/* The first page to write */
1668	pgoff_t next_page;	/* Current page to examine */
1669	pgoff_t last_page;	/* Last page to examine */
1670	/*
1671	 * Extent to map - this can be after first_page because that can be
1672	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1673	 * is delalloc or unwritten.
1674	 */
1675	struct ext4_map_blocks map;
1676	struct ext4_io_submit io_submit;	/* IO submission data */
1677	unsigned int do_map:1;
1678};
1679
1680static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1681				       bool invalidate)
1682{
1683	int nr_pages, i;
1684	pgoff_t index, end;
1685	struct pagevec pvec;
1686	struct inode *inode = mpd->inode;
1687	struct address_space *mapping = inode->i_mapping;
1688
1689	/* This is necessary when next_page == 0. */
1690	if (mpd->first_page >= mpd->next_page)
1691		return;
1692
1693	index = mpd->first_page;
1694	end   = mpd->next_page - 1;
1695	if (invalidate) {
1696		ext4_lblk_t start, last;
1697		start = index << (PAGE_SHIFT - inode->i_blkbits);
1698		last = end << (PAGE_SHIFT - inode->i_blkbits);
1699		ext4_es_remove_extent(inode, start, last - start + 1);
1700	}
1701
1702	pagevec_init(&pvec);
1703	while (index <= end) {
1704		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
1705		if (nr_pages == 0)
1706			break;
1707		for (i = 0; i < nr_pages; i++) {
1708			struct page *page = pvec.pages[i];
1709
1710			BUG_ON(!PageLocked(page));
1711			BUG_ON(PageWriteback(page));
1712			if (invalidate) {
1713				if (page_mapped(page))
1714					clear_page_dirty_for_io(page);
1715				block_invalidatepage(page, 0, PAGE_SIZE);
1716				ClearPageUptodate(page);
1717			}
1718			unlock_page(page);
1719		}
1720		pagevec_release(&pvec);
1721	}
1722}
1723
1724static void ext4_print_free_blocks(struct inode *inode)
1725{
1726	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1727	struct super_block *sb = inode->i_sb;
1728	struct ext4_inode_info *ei = EXT4_I(inode);
1729
1730	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1731	       EXT4_C2B(EXT4_SB(inode->i_sb),
1732			ext4_count_free_clusters(sb)));
1733	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1734	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1735	       (long long) EXT4_C2B(EXT4_SB(sb),
1736		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1737	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1738	       (long long) EXT4_C2B(EXT4_SB(sb),
1739		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1740	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1741	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1742		 ei->i_reserved_data_blocks);
1743	return;
1744}
1745
1746static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1747{
1748	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1749}
1750
1751/*
1752 * ext4_insert_delayed_block - adds a delayed block to the extents status
1753 *                             tree, incrementing the reserved cluster/block
1754 *                             count or making a pending reservation
1755 *                             where needed
1756 *
1757 * @inode - file containing the newly added block
1758 * @lblk - logical block to be added
1759 *
1760 * Returns 0 on success, negative error code on failure.
1761 */
1762static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1763{
1764	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1765	int ret;
1766	bool allocated = false;
1767
1768	/*
1769	 * If the cluster containing lblk is shared with a delayed,
1770	 * written, or unwritten extent in a bigalloc file system, it's
1771	 * already been accounted for and does not need to be reserved.
1772	 * A pending reservation must be made for the cluster if it's
1773	 * shared with a written or unwritten extent and doesn't already
1774	 * have one.  Written and unwritten extents can be purged from the
1775	 * extents status tree if the system is under memory pressure, so
1776	 * it's necessary to examine the extent tree if a search of the
1777	 * extents status tree doesn't get a match.
1778	 */
1779	if (sbi->s_cluster_ratio == 1) {
1780		ret = ext4_da_reserve_space(inode);
1781		if (ret != 0)   /* ENOSPC */
1782			goto errout;
1783	} else {   /* bigalloc */
1784		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1785			if (!ext4_es_scan_clu(inode,
1786					      &ext4_es_is_mapped, lblk)) {
1787				ret = ext4_clu_mapped(inode,
1788						      EXT4_B2C(sbi, lblk));
1789				if (ret < 0)
1790					goto errout;
1791				if (ret == 0) {
1792					ret = ext4_da_reserve_space(inode);
1793					if (ret != 0)   /* ENOSPC */
1794						goto errout;
1795				} else {
1796					allocated = true;
1797				}
1798			} else {
1799				allocated = true;
1800			}
1801		}
1802	}
1803
1804	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1805
1806errout:
1807	return ret;
1808}
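/*
 * Worked example (illustrative, assuming a bigalloc file system with
 * s_cluster_ratio == 16): blocks 0-15 share cluster 0.  If block 3 is
 * already recorded as delayed-only in the extents status tree, adding
 * block 7 skips reservation entirely - the cluster has been accounted
 * for.  If the cluster is instead backed by a written or unwritten
 * extent (found in the ES tree or, failing that, via ext4_clu_mapped()),
 * allocated is set to true so that ext4_es_insert_delayed_block()
 * records a pending reservation rather than charging a fresh cluster.
 */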
1809
1810/*
1811 * This function borrows code from the very beginning of
1812 * ext4_map_blocks, but assumes that the caller is in the delayed
1813 * write path. It looks up the requested blocks and sets the
1814 * buffer delay bit under the protection of i_data_sem.
1815 */
1816static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1817			      struct ext4_map_blocks *map,
1818			      struct buffer_head *bh)
1819{
1820	struct extent_status es;
1821	int retval;
1822	sector_t invalid_block = ~((sector_t) 0xffff);
1823#ifdef ES_AGGRESSIVE_TEST
1824	struct ext4_map_blocks orig_map;
1825
1826	memcpy(&orig_map, map, sizeof(*map));
1827#endif
1828
1829	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1830		invalid_block = ~0;
1831
1832	map->m_flags = 0;
1833	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u, "
1834		  "logical block %lu\n", inode->i_ino, map->m_len,
1835		  (unsigned long) map->m_lblk);
1836
1837	/* Lookup extent status tree firstly */
1838	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1839		if (ext4_es_is_hole(&es)) {
1840			retval = 0;
1841			down_read(&EXT4_I(inode)->i_data_sem);
1842			goto add_delayed;
1843		}
1844
1845		/*
1846		 * A delayed extent could have been allocated by fallocate,
1847		 * so we need to check it.
1848		 */
1849		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1850			map_bh(bh, inode->i_sb, invalid_block);
1851			set_buffer_new(bh);
1852			set_buffer_delay(bh);
1853			return 0;
1854		}
1855
1856		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1857		retval = es.es_len - (iblock - es.es_lblk);
1858		if (retval > map->m_len)
1859			retval = map->m_len;
1860		map->m_len = retval;
1861		if (ext4_es_is_written(&es))
1862			map->m_flags |= EXT4_MAP_MAPPED;
1863		else if (ext4_es_is_unwritten(&es))
1864			map->m_flags |= EXT4_MAP_UNWRITTEN;
1865		else
1866			BUG();
1867
1868#ifdef ES_AGGRESSIVE_TEST
1869		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1870#endif
1871		return retval;
1872	}
1873
1874	/*
1875	 * Try to see if we can get the block without requesting a new
1876	 * file system block.
1877	 */
1878	down_read(&EXT4_I(inode)->i_data_sem);
1879	if (ext4_has_inline_data(inode))
1880		retval = 0;
1881	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1882		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1883	else
1884		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1885
1886add_delayed:
1887	if (retval == 0) {
1888		int ret;
1889
1890		/*
1891		 * XXX: __block_prepare_write() unmaps the passed block;
1892		 * is that OK?
1893		 */
1894
1895		ret = ext4_insert_delayed_block(inode, map->m_lblk);
1896		if (ret != 0) {
1897			retval = ret;
1898			goto out_unlock;
1899		}
1900
1901		map_bh(bh, inode->i_sb, invalid_block);
1902		set_buffer_new(bh);
1903		set_buffer_delay(bh);
1904	} else if (retval > 0) {
1905		int ret;
1906		unsigned int status;
1907
1908		if (unlikely(retval != map->m_len)) {
1909			ext4_warning(inode->i_sb,
1910				     "ES len assertion failed for inode "
1911				     "%lu: retval %d != map->m_len %d",
1912				     inode->i_ino, retval, map->m_len);
1913			WARN_ON(1);
1914		}
1915
1916		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1917				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1918		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1919					    map->m_pblk, status);
1920		if (ret != 0)
1921			retval = ret;
1922	}
1923
1924out_unlock:
1925	up_read((&EXT4_I(inode)->i_data_sem));
1926
1927	return retval;
1928}
1929
1930/*
1931 * This is a special get_block_t callback which is used by
1932 * ext4_da_write_begin().  It will either return a mapped block or
1933 * reserve space for a single block.
1934 *
1935 * For a delayed buffer_head we have BH_Mapped, BH_New, and BH_Delay set.
1936 * We also have b_blocknr = -1 and b_bdev initialized properly.
1937 *
1938 * For an unwritten buffer_head we have BH_Mapped, BH_New, and BH_Unwritten
1939 * set. We also have b_blocknr = the physical block backing the unwritten
1940 * extent, and b_bdev initialized properly.
1941 */
1942int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1943			   struct buffer_head *bh, int create)
1944{
1945	struct ext4_map_blocks map;
1946	int ret = 0;
1947
1948	BUG_ON(create == 0);
1949	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1950
1951	map.m_lblk = iblock;
1952	map.m_len = 1;
1953
1954	/*
1955	 * First, we need to know whether the block is already allocated;
1956	 * preallocated blocks are unmapped but should be treated
1957	 * the same as allocated blocks.
1958	 */
1959	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1960	if (ret <= 0)
1961		return ret;
1962
1963	map_bh(bh, inode->i_sb, map.m_pblk);
1964	ext4_update_bh_state(bh, map.m_flags);
1965
1966	if (buffer_unwritten(bh)) {
1967		/* A delayed write to unwritten bh should be marked
1968		 * new and mapped.  Mapped ensures that we don't do
1969		 * get_block multiple times when we write to the same
1970		 * offset and new ensures that we do proper zero out
1971		 * for partial write.
1972		 */
1973		set_buffer_new(bh);
1974		set_buffer_mapped(bh);
1975	}
1976	return 0;
1977}
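/*
 * Illustrative usage (mirroring ext4_da_write_begin() further down in
 * this file): the helper above is handed to the generic write path as
 * the get_block callback, so block reservation happens lazily at
 * write_begin time:
 *
 *	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
 */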
1978
1979static int bget_one(handle_t *handle, struct buffer_head *bh)
1980{
1981	get_bh(bh);
1982	return 0;
1983}
1984
1985static int bput_one(handle_t *handle, struct buffer_head *bh)
1986{
1987	put_bh(bh);
1988	return 0;
1989}
1990
1991static int __ext4_journalled_writepage(struct page *page,
1992				       unsigned int len)
1993{
1994	struct address_space *mapping = page->mapping;
1995	struct inode *inode = mapping->host;
1996	struct buffer_head *page_bufs = NULL;
1997	handle_t *handle = NULL;
1998	int ret = 0, err = 0;
1999	int inline_data = ext4_has_inline_data(inode);
2000	struct buffer_head *inode_bh = NULL;
2001
2002	ClearPageChecked(page);
2003
2004	if (inline_data) {
2005		BUG_ON(page->index != 0);
2006		BUG_ON(len > ext4_get_max_inline_size(inode));
2007		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
2008		if (inode_bh == NULL)
2009			goto out;
2010	} else {
2011		page_bufs = page_buffers(page);
2012		if (!page_bufs) {
2013			BUG();
2014			goto out;
2015		}
2016		ext4_walk_page_buffers(handle, page_bufs, 0, len,
2017				       NULL, bget_one);
2018	}
2019	/*
2020	 * We need to release the page lock before we start the
2021	 * journal, so grab a reference so the page won't disappear
2022	 * out from under us.
2023	 */
2024	get_page(page);
2025	unlock_page(page);
2026
2027	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2028				    ext4_writepage_trans_blocks(inode));
2029	if (IS_ERR(handle)) {
2030		ret = PTR_ERR(handle);
2031		put_page(page);
2032		goto out_no_pagelock;
2033	}
2034	BUG_ON(!ext4_handle_valid(handle));
2035
2036	lock_page(page);
2037	put_page(page);
2038	if (page->mapping != mapping) {
2039		/* The page got truncated from under us */
2040		ext4_journal_stop(handle);
2041		ret = 0;
2042		goto out;
2043	}
2044
2045	if (inline_data) {
2046		ret = ext4_mark_inode_dirty(handle, inode);
2047	} else {
2048		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2049					     do_journal_get_write_access);
2050
2051		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2052					     write_end_fn);
2053	}
2054	if (ret == 0)
2055		ret = err;
2056	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2057	err = ext4_journal_stop(handle);
2058	if (!ret)
2059		ret = err;
2060
2061	if (!ext4_has_inline_data(inode))
2062		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
2063				       NULL, bput_one);
2064	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2065out:
2066	unlock_page(page);
2067out_no_pagelock:
2068	brelse(inode_bh);
2069	return ret;
2070}
2071
2072/*
2073 * Note that we don't need to start a transaction unless we're journaling data
2074 * because we should have holes filled from ext4_page_mkwrite(). We don't even
2075 * need to add the inode to the transaction's list in ordered mode because if
2076 * we are writing back data added by write(), the inode is already there and if
2077 * we are writing back data modified via mmap(), no one guarantees in which
2078 * transaction the data will hit the disk. In case we are journaling data, we
2079 * cannot start transaction directly because transaction start ranks above page
2080 * lock so we have to do some magic.
2081 *
2082 * This function can get called via...
2083 *   - ext4_writepages after taking page lock (have journal handle)
2084 *   - journal_submit_inode_data_buffers (no journal handle)
2085 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2086 *   - grab_page_cache when doing write_begin (have journal handle)
2087 *
2088 * We don't do any block allocation in this function. If we have page with
2089 * multiple blocks we need to write those buffer_heads that are mapped. This
2090 * is important for mmap-based writes. So if, with blocksize 1K, we do
2091 * truncate(f, 1024);
2092 * a = mmap(f, 0, 4096);
2093 * a[0] = 'a';
2094 * truncate(f, 4096);
2095 * we have in the page first buffer_head mapped via page_mkwrite call back
2096 * but the other buffer_heads would be unmapped but dirty (dirtied via
2097 * do_wp_page). So writepage should write the first block. If we modify
2098 * the mmap area beyond 1024 we will again get a page_fault and the
2099 * page_mkwrite callback will do the block allocation and mark the
2100 * buffer_heads mapped.
2101 *
2102 * We redirty the page if it has any buffer_heads that are either delayed
2103 * or unwritten.
2104 *
2105 * We can get called recursively as shown below.
2106 *
2107 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2108 *		ext4_writepage()
2109 *
2110 * But since we don't do any block allocation we should not deadlock.
2111 * The page also has its dirty flag cleared, so we don't get recursive page_lock.
2112 */
2113static int ext4_writepage(struct page *page,
2114			  struct writeback_control *wbc)
2115{
2116	int ret = 0;
2117	loff_t size;
2118	unsigned int len;
2119	struct buffer_head *page_bufs = NULL;
2120	struct inode *inode = page->mapping->host;
2121	struct ext4_io_submit io_submit;
2122	bool keep_towrite = false;
2123
2124	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2125		ext4_invalidatepage(page, 0, PAGE_SIZE);
2126		unlock_page(page);
2127		return -EIO;
2128	}
2129
2130	trace_ext4_writepage(page);
2131	size = i_size_read(inode);
2132	if (page->index == size >> PAGE_SHIFT &&
2133	    !ext4_verity_in_progress(inode))
2134		len = size & ~PAGE_MASK;
2135	else
2136		len = PAGE_SIZE;
2137
2138	page_bufs = page_buffers(page);
2139	/*
2140	 * We cannot do block allocation or other extent handling in this
2141	 * function. If there are buffers needing that, we have to redirty
2142	 * the page. But we may reach here when we do a journal commit via
2143	 * journal_submit_inode_data_buffers() and in that case we must write
2144	 * allocated buffers to achieve data=ordered mode guarantees.
2145	 *
2146	 * Also, if there is only one buffer per page (the fs block
2147	 * size == the page size), if one buffer needs block
2148	 * allocation or needs to modify the extent tree to clear the
2149	 * unwritten flag, we know that the page can't be written at
2150	 * all, so we might as well refuse the write immediately.
2151	 * Unfortunately if the block size != page size, we can't as
2152	 * easily detect this case using ext4_walk_page_buffers(), but
2153	 * for the extremely common case, this is an optimization that
2154	 * skips a useless round trip through ext4_bio_write_page().
2155	 */
2156	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2157				   ext4_bh_delay_or_unwritten)) {
2158		redirty_page_for_writepage(wbc, page);
2159		if ((current->flags & PF_MEMALLOC) ||
2160		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2161			/*
2162			 * For memory cleaning there's no point in writing only
2163			 * some buffers. So just bail out. Warn if we came here
2164			 * from direct reclaim.
2165			 */
2166			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2167							== PF_MEMALLOC);
2168			unlock_page(page);
2169			return 0;
2170		}
2171		keep_towrite = true;
2172	}
2173
2174	if (PageChecked(page) && ext4_should_journal_data(inode))
2175		/*
2176		 * It's mmapped pagecache.  Add buffers and journal it.  There
2177		 * doesn't seem much point in redirtying the page here.
2178		 */
2179		return __ext4_journalled_writepage(page, len);
2180
2181	ext4_io_submit_init(&io_submit, wbc);
2182	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2183	if (!io_submit.io_end) {
2184		redirty_page_for_writepage(wbc, page);
2185		unlock_page(page);
2186		return -ENOMEM;
2187	}
2188	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2189	ext4_io_submit(&io_submit);
2190	/* Drop io_end reference we got from init */
2191	ext4_put_io_end_defer(io_submit.io_end);
2192	return ret;
2193}
2194
2195static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2196{
2197	int len;
2198	loff_t size;
2199	int err;
2200
2201	BUG_ON(page->index != mpd->first_page);
2202	clear_page_dirty_for_io(page);
2203	/*
2204	 * We have to be very careful here!  Nothing protects writeback path
2205	 * against i_size changes and the page can be writeably mapped into
2206	 * page tables. So an application can be growing i_size and writing
2207	 * data through mmap while writeback runs. clear_page_dirty_for_io()
2208	 * write-protects our page in page tables and the page cannot get
2209	 * written to again until we release page lock. So only after
2210	 * clear_page_dirty_for_io() we are safe to sample i_size for
2211	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2212	 * on the barrier provided by TestClearPageDirty in
2213	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2214	 * after page tables are updated.
2215	 */
2216	size = i_size_read(mpd->inode);
2217	if (page->index == size >> PAGE_SHIFT &&
2218	    !ext4_verity_in_progress(mpd->inode))
2219		len = size & ~PAGE_MASK;
2220	else
2221		len = PAGE_SIZE;
2222	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2223	if (!err)
2224		mpd->wbc->nr_to_write--;
2225	mpd->first_page++;
2226
2227	return err;
2228}
2229
2230#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
2231
2232/*
2233 * mballoc gives us at most this number of blocks...
2234 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2235 * The rest of mballoc seems to handle chunks up to full group size.
2236 */
2237#define MAX_WRITEPAGES_EXTENT_LEN 2048
2238
2239/*
2240 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2241 *
2242 * @mpd - extent of blocks
2243 * @lblk - logical number of the block in the file
2244 * @bh - buffer head we want to add to the extent
2245 *
2246 * The function is used to collect contiguous blocks in the same state. If the
2247 * buffer doesn't require mapping for writeback and we haven't started the
2248 * extent of buffers to map yet, the function returns 'true' immediately - the
2249 * caller can write the buffer right away. Otherwise the function returns true
2250 * if the block has been added to the extent, false if the block couldn't be
2251 * added.
2252 */
2253static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2254				   struct buffer_head *bh)
2255{
2256	struct ext4_map_blocks *map = &mpd->map;
2257
2258	/* Buffer that doesn't need mapping for writeback? */
2259	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2260	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2261		/* So far no extent to map => we write the buffer right away */
2262		if (map->m_len == 0)
2263			return true;
2264		return false;
2265	}
2266
2267	/* First block in the extent? */
2268	if (map->m_len == 0) {
2269		/* We cannot map unless handle is started... */
2270		if (!mpd->do_map)
2271			return false;
2272		map->m_lblk = lblk;
2273		map->m_len = 1;
2274		map->m_flags = bh->b_state & BH_FLAGS;
2275		return true;
2276	}
2277
2278	/* Don't go larger than mballoc is willing to allocate */
2279	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2280		return false;
2281
2282	/* Can we merge the block to our big extent? */
2283	if (lblk == map->m_lblk + map->m_len &&
2284	    (bh->b_state & BH_FLAGS) == map->m_flags) {
2285		map->m_len++;
2286		return true;
2287	}
2288	return false;
2289}
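/*
 * Worked example (illustrative): with map->m_lblk == 100 and
 * map->m_len == 4, a delayed buffer at lblk 104 whose b_state & BH_FLAGS
 * matches map->m_flags extends the extent to m_len == 5.  A buffer at
 * lblk 106, or one with different BH_FLAGS, cannot be merged, and the
 * function returns false so the current extent gets mapped first.
 */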
2290
2291/*
2292 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2293 *
2294 * @mpd - extent of blocks for mapping
2295 * @head - the first buffer in the page
2296 * @bh - buffer we should start processing from
2297 * @lblk - logical number of the block in the file corresponding to @bh
2298 *
2299 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2300 * the page for IO if all buffers in this page were mapped and there's no
2301 * accumulated extent of buffers to map or add buffers in the page to the
2302 * extent of buffers to map. The function returns 1 if the caller can continue
2303 * by processing the next page, 0 if it should stop adding buffers to the
2304 * extent to map because we cannot extend it anymore. It can also return a value
2305 * < 0 in case of error during IO submission.
2306 */
2307static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2308				   struct buffer_head *head,
2309				   struct buffer_head *bh,
2310				   ext4_lblk_t lblk)
2311{
2312	struct inode *inode = mpd->inode;
2313	int err;
2314	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2315							>> inode->i_blkbits;
2316
2317	if (ext4_verity_in_progress(inode))
2318		blocks = EXT_MAX_BLOCKS;
2319
2320	do {
2321		BUG_ON(buffer_locked(bh));
2322
2323		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2324			/* Found extent to map? */
2325			if (mpd->map.m_len)
2326				return 0;
2327			/* Buffer needs mapping and handle is not started? */
2328			if (!mpd->do_map)
2329				return 0;
2330			/* Everything mapped so far and we hit EOF */
2331			break;
2332		}
2333	} while (lblk++, (bh = bh->b_this_page) != head);
2334	/* So far everything mapped? Submit the page for IO. */
2335	if (mpd->map.m_len == 0) {
2336		err = mpage_submit_page(mpd, head->b_page);
2337		if (err < 0)
2338			return err;
2339	}
2340	return lblk < blocks;
2341}
2342
2343/*
2344 * mpage_map_and_submit_buffers - update buffers corresponding to changed
2345 *				   extent and submit fully mapped pages for IO
2346 *
2347 * @mpd - description of extent to map, on return next extent to map
2348 *
2349 * Scan buffers corresponding to changed extent (we expect corresponding pages
2350 * to be already locked) and update buffer state according to new extent state.
2351 * We map delalloc buffers to their physical location, clear unwritten bits,
2352 * and mark buffers as uninit when we perform writes to unwritten extents
2353 * and do extent conversion after IO is finished. If the last page is not fully
2354 * mapped, we update @map to the next extent in the last page that needs
2355 * mapping. Otherwise we submit the page for IO.
2356 */
2357static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2358{
2359	struct pagevec pvec;
2360	int nr_pages, i;
2361	struct inode *inode = mpd->inode;
2362	struct buffer_head *head, *bh;
2363	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2364	pgoff_t start, end;
2365	ext4_lblk_t lblk;
2366	sector_t pblock;
2367	int err;
2368
2369	start = mpd->map.m_lblk >> bpp_bits;
2370	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2371	lblk = start << bpp_bits;
2372	pblock = mpd->map.m_pblk;
2373
2374	pagevec_init(&pvec);
2375	while (start <= end) {
2376		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2377						&start, end);
2378		if (nr_pages == 0)
2379			break;
2380		for (i = 0; i < nr_pages; i++) {
2381			struct page *page = pvec.pages[i];
2382
2383			bh = head = page_buffers(page);
2384			do {
2385				if (lblk < mpd->map.m_lblk)
2386					continue;
2387				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2388					/*
2389					 * Buffer after end of mapped extent.
2390					 * Find next buffer in the page to map.
2391					 */
2392					mpd->map.m_len = 0;
2393					mpd->map.m_flags = 0;
2394					/*
2395					 * FIXME: If dioread_nolock supports
2396					 * blocksize < pagesize, we need to make
2397					 * sure we add size mapped so far to
2398					 * io_end->size as the following call
2399					 * can submit the page for IO.
2400					 */
2401					err = mpage_process_page_bufs(mpd, head,
2402								      bh, lblk);
2403					pagevec_release(&pvec);
2404					if (err > 0)
2405						err = 0;
2406					return err;
2407				}
2408				if (buffer_delay(bh)) {
2409					clear_buffer_delay(bh);
2410					bh->b_blocknr = pblock++;
2411				}
2412				clear_buffer_unwritten(bh);
2413			} while (lblk++, (bh = bh->b_this_page) != head);
2414
2415			/*
2416			 * FIXME: This is going to break if dioread_nolock
2417			 * supports blocksize < pagesize as we will try to
2418			 * convert potentially unmapped parts of inode.
2419			 */
2420			mpd->io_submit.io_end->size += PAGE_SIZE;
2421			/* Page fully mapped - let IO run! */
2422			err = mpage_submit_page(mpd, page);
2423			if (err < 0) {
2424				pagevec_release(&pvec);
2425				return err;
2426			}
2427		}
2428		pagevec_release(&pvec);
2429	}
2430	/* Extent fully mapped and matches with page boundary. We are done. */
2431	mpd->map.m_len = 0;
2432	mpd->map.m_flags = 0;
2433	return 0;
2434}
2435
2436static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2437{
2438	struct inode *inode = mpd->inode;
2439	struct ext4_map_blocks *map = &mpd->map;
2440	int get_blocks_flags;
2441	int err, dioread_nolock;
2442
2443	trace_ext4_da_write_pages_extent(inode, map);
2444	/*
2445	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2446	 * to convert an unwritten extent to be initialized (in the case
2447	 * where we have written into one or more preallocated blocks).  It is
2448	 * possible that we're going to need more metadata blocks than
2449	 * previously reserved. However we must not fail because we're in
2450	 * writeback and there is nothing we can do about it so it might result
2451	 * in data loss.  So use reserved blocks to allocate metadata if
2452	 * possible.
2453	 *
2454	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2455	 * the blocks in question are delalloc blocks.  This indicates
2456	 * that the blocks and quotas have already been checked when
2457	 * the data was copied into the page cache.
2458	 */
2459	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2460			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
2461			   EXT4_GET_BLOCKS_IO_SUBMIT;
2462	dioread_nolock = ext4_should_dioread_nolock(inode);
2463	if (dioread_nolock)
2464		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2465	if (map->m_flags & (1 << BH_Delay))
2466		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2467
2468	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2469	if (err < 0)
2470		return err;
2471	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2472		if (!mpd->io_submit.io_end->handle &&
2473		    ext4_handle_valid(handle)) {
2474			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2475			handle->h_rsv_handle = NULL;
2476		}
2477		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2478	}
2479
2480	BUG_ON(map->m_len == 0);
2481	return 0;
2482}
2483
2484/*
2485 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2486 *				 mpd->len and submit pages underlying it for IO
2487 *
2488 * @handle - handle for journal operations
2489 * @mpd - extent to map
2490 * @give_up_on_write - we set this to true iff there is a fatal error and there
2491 *                     is no hope of writing the data. The caller should discard
2492 *                     dirty pages to avoid infinite loops.
2493 *
2494 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2495 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2496 * them to initialized or split the described range from larger unwritten
2497 * extent. Note that we need not map all the described range since allocation
2498 * can return fewer blocks or the range is covered by more unwritten extents. We
2499 * cannot map more because we are limited by reserved transaction credits. On
2500 * the other hand we always make sure that the last touched page is fully
2501 * mapped so that it can be written out (and thus forward progress is
2502 * guaranteed). After mapping we submit all mapped pages for IO.
2503 */
2504static int mpage_map_and_submit_extent(handle_t *handle,
2505				       struct mpage_da_data *mpd,
2506				       bool *give_up_on_write)
2507{
2508	struct inode *inode = mpd->inode;
2509	struct ext4_map_blocks *map = &mpd->map;
2510	int err;
2511	loff_t disksize;
2512	int progress = 0;
2513
2514	mpd->io_submit.io_end->offset =
2515				((loff_t)map->m_lblk) << inode->i_blkbits;
2516	do {
2517		err = mpage_map_one_extent(handle, mpd);
2518		if (err < 0) {
2519			struct super_block *sb = inode->i_sb;
2520
2521			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2522			    EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2523				goto invalidate_dirty_pages;
2524			/*
2525			 * Let the upper layers retry transient errors.
2526			 * In the case of ENOSPC, if ext4_count_free_clusters()
2527			 * is non-zero, a commit should free up blocks.
2528			 */
2529			if ((err == -ENOMEM) ||
2530			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2531				if (progress)
2532					goto update_disksize;
2533				return err;
2534			}
2535			ext4_msg(sb, KERN_CRIT,
2536				 "Delayed block allocation failed for "
2537				 "inode %lu at logical offset %llu with"
2538				 " max blocks %u with error %d",
2539				 inode->i_ino,
2540				 (unsigned long long)map->m_lblk,
2541				 (unsigned)map->m_len, -err);
2542			ext4_msg(sb, KERN_CRIT,
2543				 "This should not happen!! Data will "
2544				 "be lost\n");
2545			if (err == -ENOSPC)
2546				ext4_print_free_blocks(inode);
2547		invalidate_dirty_pages:
2548			*give_up_on_write = true;
2549			return err;
2550		}
2551		progress = 1;
2552		/*
2553		 * Update buffer state, submit mapped pages, and get us new
2554		 * extent to map
2555		 */
2556		err = mpage_map_and_submit_buffers(mpd);
2557		if (err < 0)
2558			goto update_disksize;
2559	} while (map->m_len);
2560
2561update_disksize:
2562	/*
2563	 * Update on-disk size after IO is submitted.  Races with
2564	 * truncate are avoided by checking i_size under i_data_sem.
2565	 */
2566	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2567	if (disksize > EXT4_I(inode)->i_disksize) {
2568		int err2;
2569		loff_t i_size;
2570
2571		down_write(&EXT4_I(inode)->i_data_sem);
2572		i_size = i_size_read(inode);
2573		if (disksize > i_size)
2574			disksize = i_size;
2575		if (disksize > EXT4_I(inode)->i_disksize)
2576			EXT4_I(inode)->i_disksize = disksize;
2577		up_write(&EXT4_I(inode)->i_data_sem);
2578		err2 = ext4_mark_inode_dirty(handle, inode);
2579		if (err2)
2580			ext4_error(inode->i_sb,
2581				   "Failed to mark inode %lu dirty",
2582				   inode->i_ino);
2583		if (!err)
2584			err = err2;
2585	}
2586	return err;
2587}
2588
2589/*
2590 * Calculate the total number of credits to reserve for one writepages
2591 * iteration. This is called from ext4_writepages(). We map an extent of
2592 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2593 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2594 * bpp - 1 blocks in bpp different extents.
2595 */
2596static int ext4_da_writepages_trans_blocks(struct inode *inode)
2597{
2598	int bpp = ext4_journal_blocks_per_page(inode);
2599
2600	return ext4_meta_trans_blocks(inode,
2601				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2602}
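/*
 * Worked example (illustrative): with 4K pages and a 1K block size,
 * bpp == 4, so we ask ext4_meta_trans_blocks() for enough credits to map
 * MAX_WRITEPAGES_EXTENT_LEN + 4 - 1 == 2051 blocks in up to 4 separate
 * extents - a full extent plus whatever is needed to finish the last,
 * partially mapped page.
 */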
2603
2604/*
2605 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2606 * 				 and underlying extent to map
2607 *
2608 * @mpd - where to look for pages
2609 *
2610 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2611 * IO immediately. When we find a page which isn't mapped we start accumulating
2612 * an extent of buffers underlying these pages that needs mapping (formed by
2613 * either delayed or unwritten buffers). We also lock the pages containing
2614 * these buffers. The extent found is returned in @mpd structure (starting at
2615 * mpd->lblk with length mpd->len blocks).
2616 *
2617 * Note that this function can attach bios to one io_end structure which are
2618 * neither logically nor physically contiguous. Although it may seem like an
2619 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2620 * case as we need to track IO to all buffers underlying a page in one io_end.
2621 */
2622static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2623{
2624	struct address_space *mapping = mpd->inode->i_mapping;
2625	struct pagevec pvec;
2626	unsigned int nr_pages;
2627	long left = mpd->wbc->nr_to_write;
2628	pgoff_t index = mpd->first_page;
2629	pgoff_t end = mpd->last_page;
2630	xa_mark_t tag;
2631	int i, err = 0;
2632	int blkbits = mpd->inode->i_blkbits;
2633	ext4_lblk_t lblk;
2634	struct buffer_head *head;
2635
2636	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2637		tag = PAGECACHE_TAG_TOWRITE;
2638	else
2639		tag = PAGECACHE_TAG_DIRTY;
2640
2641	pagevec_init(&pvec);
2642	mpd->map.m_len = 0;
2643	mpd->next_page = index;
2644	while (index <= end) {
2645		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2646				tag);
2647		if (nr_pages == 0)
2648			goto out;
2649
2650		for (i = 0; i < nr_pages; i++) {
2651			struct page *page = pvec.pages[i];
2652
2653			/*
2654			 * Accumulated enough dirty pages? This doesn't apply
2655			 * to WB_SYNC_ALL mode. For integrity sync we have to
2656			 * keep going because someone may be concurrently
2657			 * dirtying pages, and we might have synced a lot of
2658			 * newly appeared dirty pages, but have not synced all
2659			 * of the old dirty pages.
2660			 */
2661			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2662				goto out;
2663
2664			/* If we can't merge this page, we are done. */
2665			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2666				goto out;
2667
2668			lock_page(page);
2669			/*
2670			 * If the page is no longer dirty, or its mapping no
2671			 * longer corresponds to the inode we are writing (which
2672			 * means it has been truncated or invalidated), or the
2673			 * page is already under writeback and we are not doing
2674			 * a data integrity writeback, skip the page.
2675			 */
2676			if (!PageDirty(page) ||
2677			    (PageWriteback(page) &&
2678			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2679			    unlikely(page->mapping != mapping)) {
2680				unlock_page(page);
2681				continue;
2682			}
2683
2684			wait_on_page_writeback(page);
2685			BUG_ON(PageWriteback(page));
2686
2687			if (mpd->map.m_len == 0)
2688				mpd->first_page = page->index;
2689			mpd->next_page = page->index + 1;
2690			/* Add all dirty buffers to mpd */
2691			lblk = ((ext4_lblk_t)page->index) <<
2692				(PAGE_SHIFT - blkbits);
2693			head = page_buffers(page);
2694			err = mpage_process_page_bufs(mpd, head, head, lblk);
2695			if (err <= 0)
2696				goto out;
2697			err = 0;
2698			left--;
2699		}
2700		pagevec_release(&pvec);
2701		cond_resched();
2702	}
2703	return 0;
2704out:
2705	pagevec_release(&pvec);
2706	return err;
2707}
2708
2709static int ext4_writepages(struct address_space *mapping,
2710			   struct writeback_control *wbc)
2711{
2712	pgoff_t	writeback_index = 0;
2713	long nr_to_write = wbc->nr_to_write;
2714	int range_whole = 0;
2715	int cycled = 1;
2716	handle_t *handle = NULL;
2717	struct mpage_da_data mpd;
2718	struct inode *inode = mapping->host;
2719	int needed_blocks, rsv_blocks = 0, ret = 0;
2720	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2721	bool done;
2722	struct blk_plug plug;
2723	bool give_up_on_write = false;
2724
2725	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2726		return -EIO;
2727
2728	percpu_down_read(&sbi->s_journal_flag_rwsem);
2729	trace_ext4_writepages(inode, wbc);
2730
2731	/*
2732	 * No pages to write? This is mainly a kludge to avoid starting
2733	 * a transaction for special inodes like journal inode on last iput()
2734	 * because that could violate lock ordering on umount.
2735	 */
2736	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2737		goto out_writepages;
2738
2739	if (ext4_should_journal_data(inode)) {
2740		ret = generic_writepages(mapping, wbc);
2741		goto out_writepages;
2742	}
2743
2744	/*
2745	 * If the filesystem has aborted, it is read-only, so return
2746	 * right away instead of dumping stack traces later on that
2747	 * will obscure the real source of the problem.  We test
2748	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
2749	 * the latter could be true if the filesystem is mounted
2750	 * read-only, and in that case, ext4_writepages should
2751	 * *never* be called, so if that ever happens, we would want
2752	 * the stack trace.
2753	 */
2754	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2755		     sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2756		ret = -EROFS;
2757		goto out_writepages;
2758	}
2759
2760	/*
2761	 * If we have inline data and arrive here, it means that
2762	 * we will soon create the block for the 1st page, so
2763	 * we'd better clear the inline data here.
2764	 */
2765	if (ext4_has_inline_data(inode)) {
2766		/* Just inode will be modified... */
2767		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2768		if (IS_ERR(handle)) {
2769			ret = PTR_ERR(handle);
2770			goto out_writepages;
2771		}
2772		BUG_ON(ext4_test_inode_state(inode,
2773				EXT4_STATE_MAY_INLINE_DATA));
2774		ext4_destroy_inline_data(handle, inode);
2775		ext4_journal_stop(handle);
2776	}
2777
2778	if (ext4_should_dioread_nolock(inode)) {
2779		/*
2780		 * We may need to convert up to one extent per block in
2781		 * the page and we may dirty the inode.
2782		 */
2783		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2784						PAGE_SIZE >> inode->i_blkbits);
2785	}
2786
2787	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2788		range_whole = 1;
2789
2790	if (wbc->range_cyclic) {
2791		writeback_index = mapping->writeback_index;
2792		if (writeback_index)
2793			cycled = 0;
2794		mpd.first_page = writeback_index;
2795		mpd.last_page = -1;
2796	} else {
2797		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2798		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2799	}
2800
2801	mpd.inode = inode;
2802	mpd.wbc = wbc;
2803	ext4_io_submit_init(&mpd.io_submit, wbc);
2804retry:
2805	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2806		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2807	done = false;
2808	blk_start_plug(&plug);
2809
2810	/*
2811	 * First writeback pages that don't need mapping - we can avoid
2812	 * starting a transaction unnecessarily and also avoid being blocked
2813	 * in the block layer on device congestion while having transaction
2814	 * started.
2815	 */
2816	mpd.do_map = 0;
2817	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2818	if (!mpd.io_submit.io_end) {
2819		ret = -ENOMEM;
2820		goto unplug;
2821	}
2822	ret = mpage_prepare_extent_to_map(&mpd);
2823	/* Unlock pages we didn't use */
2824	mpage_release_unused_pages(&mpd, false);
2825	/* Submit prepared bio */
2826	ext4_io_submit(&mpd.io_submit);
2827	ext4_put_io_end_defer(mpd.io_submit.io_end);
2828	mpd.io_submit.io_end = NULL;
2829	if (ret < 0)
2830		goto unplug;
2831
2832	while (!done && mpd.first_page <= mpd.last_page) {
2833		/* For each extent of pages we use new io_end */
2834		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2835		if (!mpd.io_submit.io_end) {
2836			ret = -ENOMEM;
2837			break;
2838		}
2839
2840		/*
2841		 * We have two constraints: We find one extent to map and we
2842		 * must always write out the whole page (makes a difference when
2843		 * blocksize < pagesize) so that we don't block on IO when we
2844		 * try to write out the rest of the page. Journalled mode is
2845		 * not supported by delalloc.
2846		 */
2847		BUG_ON(ext4_should_journal_data(inode));
2848		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2849
2850		/* start a new transaction */
2851		handle = ext4_journal_start_with_reserve(inode,
2852				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2853		if (IS_ERR(handle)) {
2854			ret = PTR_ERR(handle);
2855			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2856			       "%ld pages, ino %lu; err %d", __func__,
2857				wbc->nr_to_write, inode->i_ino, ret);
2858			/* Release allocated io_end */
2859			ext4_put_io_end(mpd.io_submit.io_end);
2860			mpd.io_submit.io_end = NULL;
2861			break;
2862		}
2863		mpd.do_map = 1;
2864
2865		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2866		ret = mpage_prepare_extent_to_map(&mpd);
2867		if (!ret) {
2868			if (mpd.map.m_len)
2869				ret = mpage_map_and_submit_extent(handle, &mpd,
2870					&give_up_on_write);
2871			else {
2872				/*
2873				 * We scanned the whole range (or exhausted
2874				 * nr_to_write), submitted what was mapped and
2875				 * didn't find anything needing mapping. We are
2876				 * done.
2877				 */
2878				done = true;
2879			}
2880		}
2881		/*
2882		 * Caution: If the handle is synchronous,
2883		 * ext4_journal_stop() can wait for transaction commit
2884		 * to finish which may depend on writeback of pages to
2885		 * complete or on page lock to be released.  In that
2886		 * case, we have to wait until after we have
2887		 * submitted all the IO, released page locks we hold,
2888		 * and dropped io_end reference (for extent conversion
2889		 * to be able to complete) before stopping the handle.
2890		 */
2891		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2892			ext4_journal_stop(handle);
2893			handle = NULL;
2894			mpd.do_map = 0;
2895		}
2896		/* Unlock pages we didn't use */
2897		mpage_release_unused_pages(&mpd, give_up_on_write);
2898		/* Submit prepared bio */
2899		ext4_io_submit(&mpd.io_submit);
2900
2901		/*
2902		 * Drop our io_end reference we got from init. We have
2903		 * to be careful and use deferred io_end finishing if
2904		 * we are still holding the transaction as we can
2905		 * release the last reference to io_end which may end
2906		 * up doing unwritten extent conversion.
2907		 */
2908		if (handle) {
2909			ext4_put_io_end_defer(mpd.io_submit.io_end);
2910			ext4_journal_stop(handle);
2911		} else
2912			ext4_put_io_end(mpd.io_submit.io_end);
2913		mpd.io_submit.io_end = NULL;
2914
2915		if (ret == -ENOSPC && sbi->s_journal) {
2916			/*
2917			 * Commit the transaction which would
2918			 * free blocks released in the transaction
2919			 * and try again
2920			 */
2921			jbd2_journal_force_commit_nested(sbi->s_journal);
2922			ret = 0;
2923			continue;
2924		}
2925		/* Fatal error - ENOMEM, EIO... */
2926		if (ret)
2927			break;
2928	}
2929unplug:
2930	blk_finish_plug(&plug);
2931	if (!ret && !cycled && wbc->nr_to_write > 0) {
2932		cycled = 1;
2933		mpd.last_page = writeback_index - 1;
2934		mpd.first_page = 0;
2935		goto retry;
2936	}
2937
2938	/* Update index */
2939	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2940		/*
2941		 * Set the writeback_index so that range_cyclic
2942		 * mode will write it back later
2943		 */
2944		mapping->writeback_index = mpd.first_page;
2945
2946out_writepages:
2947	trace_ext4_writepages_result(inode, wbc, ret,
2948				     nr_to_write - wbc->nr_to_write);
2949	percpu_up_read(&sbi->s_journal_flag_rwsem);
2950	return ret;
2951}
2952
2953static int ext4_dax_writepages(struct address_space *mapping,
2954			       struct writeback_control *wbc)
2955{
2956	int ret;
2957	long nr_to_write = wbc->nr_to_write;
2958	struct inode *inode = mapping->host;
2959	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2960
2961	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2962		return -EIO;
2963
2964	percpu_down_read(&sbi->s_journal_flag_rwsem);
2965	trace_ext4_writepages(inode, wbc);
2966
2967	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
2968	trace_ext4_writepages_result(inode, wbc, ret,
2969				     nr_to_write - wbc->nr_to_write);
2970	percpu_up_read(&sbi->s_journal_flag_rwsem);
2971	return ret;
2972}
2973
2974static int ext4_nonda_switch(struct super_block *sb)
2975{
2976	s64 free_clusters, dirty_clusters;
2977	struct ext4_sb_info *sbi = EXT4_SB(sb);
2978
2979	/*
2980	 * Switch to non-delalloc mode if we are running low
2981	 * on free blocks. The free block accounting via percpu
2982	 * counters can get slightly wrong with percpu_counter_batch getting
2983	 * accumulated on each CPU without updating global counters.
2984	 * Delalloc needs accurate free block accounting, so switch
2985	 * to non-delalloc mode when we are near the error range.
2986	 */
2987	free_clusters =
2988		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2989	dirty_clusters =
2990		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2991	/*
2992	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2993	 */
2994	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2995		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2996
2997	if (2 * free_clusters < 3 * dirty_clusters ||
2998	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2999		/*
3000		 * free block count is less than 150% of dirty blocks
3001		 * or free blocks is less than watermark
3002		 */
3003		return 1;
3004	}
3005	return 0;
3006}
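/*
 * Worked example (illustrative): with free_clusters == 100 and
 * dirty_clusters == 70, writeback is kicked because 100 < 2 * 70, and
 * since 2 * 100 < 3 * 70 (200 < 210) - free space below 150% of the
 * dirty total - the function returns 1 and callers fall back to
 * nondelalloc mode.
 */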
3007
3008/* We always reserve for an inode update; the superblock could be there too */
3009static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
3010{
3011	if (likely(ext4_has_feature_large_file(inode->i_sb)))
3012		return 1;
3013
3014	if (pos + len <= 0x7fffffffULL)
3015		return 1;
3016
3017	/* We might need to update the superblock to set LARGE_FILE */
3018	return 2;
3019}
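/*
 * Worked example (illustrative): on a file system without the
 * large_file feature, a write whose end crosses the 0x7fffffff (~2 GiB)
 * boundary returns 2 - one credit for the inode and one for the
 * superblock update that sets LARGE_FILE; otherwise a single credit
 * for the inode suffices.
 */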
3020
3021static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3022			       loff_t pos, unsigned len, unsigned flags,
3023			       struct page **pagep, void **fsdata)
3024{
3025	int ret, retries = 0;
3026	struct page *page;
3027	pgoff_t index;
3028	struct inode *inode = mapping->host;
3029	handle_t *handle;
3030
3031	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3032		return -EIO;
3033
3034	index = pos >> PAGE_SHIFT;
3035
3036	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
3037	    ext4_verity_in_progress(inode)) {
3038		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3039		return ext4_write_begin(file, mapping, pos,
3040					len, flags, pagep, fsdata);
3041	}
3042	*fsdata = (void *)0;
3043	trace_ext4_da_write_begin(inode, pos, len, flags);
3044
3045	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3046		ret = ext4_da_write_inline_data_begin(mapping, inode,
3047						      pos, len, flags,
3048						      pagep, fsdata);
3049		if (ret < 0)
3050			return ret;
3051		if (ret == 1)
3052			return 0;
3053	}
3054
3055	/*
3056	 * grab_cache_page_write_begin() can take a long time if the
3057	 * system is thrashing due to memory pressure, or if the page
3058	 * is being written back.  So grab it first before we start
3059	 * the transaction handle.  This also allows us to allocate
3060	 * the page (if needed) without using GFP_NOFS.
3061	 */
3062retry_grab:
3063	page = grab_cache_page_write_begin(mapping, index, flags);
3064	if (!page)
3065		return -ENOMEM;
3066	unlock_page(page);
3067
3068	/*
3069	 * With delayed allocation, we don't log the i_disksize update
3070	 * if there is delayed block allocation. But we still need
3071	 * to journal the i_disksize update if the write to the end
3072	 * of the file goes through an already mapped buffer.
3073	 */
3074retry_journal:
3075	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3076				ext4_da_write_credits(inode, pos, len));
3077	if (IS_ERR(handle)) {
3078		put_page(page);
3079		return PTR_ERR(handle);
3080	}
3081
3082	lock_page(page);
3083	if (page->mapping != mapping) {
3084		/* The page got truncated from under us */
3085		unlock_page(page);
3086		put_page(page);
3087		ext4_journal_stop(handle);
3088		goto retry_grab;
3089	}
3090	/* In case writeback began while the page was unlocked */
3091	wait_for_stable_page(page);
3092
3093#ifdef CONFIG_FS_ENCRYPTION
3094	ret = ext4_block_write_begin(page, pos, len,
3095				     ext4_da_get_block_prep);
3096#else
3097	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3098#endif
3099	if (ret < 0) {
3100		unlock_page(page);
3101		ext4_journal_stop(handle);
3102		/*
3103		 * block_write_begin may have instantiated a few blocks
3104		 * outside i_size.  Trim these off again. Don't need
3105		 * i_size_read because we hold i_mutex.
3106		 */
3107		if (pos + len > inode->i_size)
3108			ext4_truncate_failed_write(inode);
3109
3110		if (ret == -ENOSPC &&
3111		    ext4_should_retry_alloc(inode->i_sb, &retries))
3112			goto retry_journal;
3113
3114		put_page(page);
3115		return ret;
3116	}
3117
3118	*pagep = page;
3119	return ret;
3120}
3121
3122/*
3123 * Check if we should update i_disksize
3124 * when write to the end of file but not require block allocation
3125 */
3126static int ext4_da_should_update_i_disksize(struct page *page,
3127					    unsigned long offset)
3128{
3129	struct buffer_head *bh;
3130	struct inode *inode = page->mapping->host;
3131	unsigned int idx;
3132	int i;
3133
3134	bh = page_buffers(page);
3135	idx = offset >> inode->i_blkbits;
3136
3137	for (i = 0; i < idx; i++)
3138		bh = bh->b_this_page;
3139
3140	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3141		return 0;
3142	return 1;
3143}
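/*
 * Worked example (illustrative): with a 1K block size, offset 3000
 * gives idx == 2, so we walk to the third buffer_head in the page.
 * Only if that buffer is mapped and neither delayed nor unwritten can
 * i_disksize be updated without needing block allocation.
 */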
3144
3145static int ext4_da_write_end(struct file *file,
3146			     struct address_space *mapping,
3147			     loff_t pos, unsigned len, unsigned copied,
3148			     struct page *page, void *fsdata)
3149{
3150	struct inode *inode = mapping->host;
3151	int ret = 0, ret2;
3152	handle_t *handle = ext4_journal_current_handle();
3153	loff_t new_i_size;
3154	unsigned long start, end;
3155	int write_mode = (int)(unsigned long)fsdata;
3156
3157	if (write_mode == FALL_BACK_TO_NONDELALLOC)
3158		return ext4_write_end(file, mapping, pos,
3159				      len, copied, page, fsdata);
3160
3161	trace_ext4_da_write_end(inode, pos, len, copied);
3162	start = pos & (PAGE_SIZE - 1);
3163	end = start + copied - 1;
3164
3165	/*
3166	 * generic_write_end() will run mark_inode_dirty() if i_size
3167	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
3168	 * into that.
3169	 */
3170	new_i_size = pos + copied;
3171	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3172		if (ext4_has_inline_data(inode) ||
3173		    ext4_da_should_update_i_disksize(page, end)) {
3174			ext4_update_i_disksize(inode, new_i_size);
3175			/* We need to mark inode dirty even if
3176			 * new_i_size is less than inode->i_size
3177			 * but greater than i_disksize (hint: delalloc).
3178			 */
3179			ext4_mark_inode_dirty(handle, inode);
3180		}
3181	}
3182
3183	if (write_mode != CONVERT_INLINE_DATA &&
3184	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3185	    ext4_has_inline_data(inode))
3186		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3187						     page);
3188	else
3189		ret2 = generic_write_end(file, mapping, pos, len, copied,
3190							page, fsdata);
3191
3192	copied = ret2;
3193	if (ret2 < 0)
3194		ret = ret2;
3195	ret2 = ext4_journal_stop(handle);
3196	if (!ret)
3197		ret = ret2;
3198
3199	return ret ? ret : copied;
3200}
3201
3202/*
3203 * Force all delayed allocation blocks to be allocated for a given inode.
3204 */
3205int ext4_alloc_da_blocks(struct inode *inode)
3206{
3207	trace_ext4_alloc_da_blocks(inode);
3208
3209	if (!EXT4_I(inode)->i_reserved_data_blocks)
3210		return 0;
3211
3212	/*
3213	 * We do something simple for now.  The filemap_flush() will
3214	 * also start triggering a write of the data blocks, which is
3215	 * not strictly speaking necessary (and for users of
3216	 * laptop_mode, not even desirable).  However, to do otherwise
3217	 * would require replicating code paths in:
3218	 *
3219	 * ext4_writepages() ->
3220	 *    write_cache_pages() ---> (via passed in callback function)
3221	 *        __mpage_da_writepage() -->
3222	 *           mpage_add_bh_to_extent()
3223	 *           mpage_da_map_blocks()
3224	 *
3225	 * The problem is that write_cache_pages(), located in
3226	 * mm/page-writeback.c, marks pages clean in preparation for
3227	 * doing I/O, which is not desirable if we're not planning on
3228	 * doing I/O at all.
3229	 *
3230	 * We could call write_cache_pages(), and then redirty all of
3231	 * the pages by calling redirty_page_for_writepage() but that
3232	 * would be ugly in the extreme.  So instead we would need to
3233	 * replicate parts of the code in the above functions,
3234	 * simplifying them because we wouldn't actually intend to
3235	 * write out the pages, but rather only collect contiguous
3236	 * logical block extents, call the multi-block allocator, and
3237	 * then update the buffer heads with the block allocations.
3238	 *
3239	 * For now, though, we'll cheat by calling filemap_flush(),
3240	 * which will map the blocks, and start the I/O, but not
3241	 * actually wait for the I/O to complete.
3242	 */
3243	return filemap_flush(inode->i_mapping);
3244}
3245
3246/*
3247 * bmap() is special.  It gets used by applications such as lilo and by
3248 * the swapper to find the on-disk block of a specific piece of data.
3249 *
3250 * Naturally, this is dangerous if the block concerned is still in the
3251 * journal.  If somebody makes a swapfile on an ext4 data-journaling
3252 * filesystem and enables swap, then they may get a nasty shock when the
3253 * data getting swapped to that swapfile suddenly gets overwritten by
3254 * the original zeros written out previously to the journal and
3255 * awaiting writeback in the kernel's buffer cache.
3256 *
3257 * So, if we see any bmap calls here on a modified, data-journaled file,
3258 * take extra steps to flush any blocks which might be in the cache.
3259 */
3260static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3261{
3262	struct inode *inode = mapping->host;
3263	journal_t *journal;
3264	int err;
3265
3266	/*
3267	 * We can get here for an inline file via the FIBMAP ioctl
3268	 */
3269	if (ext4_has_inline_data(inode))
3270		return 0;
3271
3272	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3273			test_opt(inode->i_sb, DELALLOC)) {
3274		/*
3275		 * With delalloc we want to sync the file
3276		 * so that we can make sure we allocate
3277		 * blocks for the file.
3278		 */
3279		filemap_write_and_wait(mapping);
3280	}
3281
3282	if (EXT4_JOURNAL(inode) &&
3283	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3284		/*
3285		 * This is a REALLY heavyweight approach, but the use of
3286		 * bmap on dirty files is expected to be extremely rare:
3287		 * only if we run lilo or swapon on a freshly made file
3288		 * do we expect this to happen.
3289		 *
3290		 * (bmap requires CAP_SYS_RAWIO so this does not
3291		 * represent an unprivileged user DOS attack --- we'd be
3292		 * in trouble if mortal users could trigger this path at
3293		 * will.)
3294		 *
3295		 * NB. EXT4_STATE_JDATA is not set on files other than
3296		 * regular files.  If somebody wants to bmap a directory
3297		 * or symlink and gets confused because the buffer
3298		 * hasn't yet been flushed to disk, they deserve
3299		 * everything they get.
3300		 */
3301
3302		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3303		journal = EXT4_JOURNAL(inode);
3304		jbd2_journal_lock_updates(journal);
3305		err = jbd2_journal_flush(journal);
3306		jbd2_journal_unlock_updates(journal);
3307
3308		if (err)
3309			return 0;
3310	}
3311
3312	return generic_block_bmap(mapping, block, ext4_get_block);
3313}
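/*
 * Illustrative sketch (not part of the original source): this is the
 * path behind the FIBMAP ioctl, which a CAP_SYS_RAWIO caller issues
 * from userspace roughly as:
 *
 *	int blk = 0;                    - logical block number on entry
 *	ioctl(fd, FIBMAP, &blk);        - physical block number on return
 */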
3314
3315static int ext4_readpage(struct file *file, struct page *page)
3316{
3317	int ret = -EAGAIN;
3318	struct inode *inode = page->mapping->host;
3319
3320	trace_ext4_readpage(page);
3321
3322	if (ext4_has_inline_data(inode))
3323		ret = ext4_readpage_inline(inode, page);
3324
3325	if (ret == -EAGAIN)
3326		return ext4_mpage_readpages(page->mapping, NULL, page, 1,
3327						false);
3328
3329	return ret;
3330}
3331
3332static int
3333ext4_readpages(struct file *file, struct address_space *mapping,
3334		struct list_head *pages, unsigned nr_pages)
3335{
3336	struct inode *inode = mapping->host;
3337
3338	/* If the file has inline data, no need to do readpages. */
3339	if (ext4_has_inline_data(inode))
3340		return 0;
3341
3342	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
3343}
3344
3345static void ext4_invalidatepage(struct page *page, unsigned int offset,
3346				unsigned int length)
3347{
3348	trace_ext4_invalidatepage(page, offset, length);
3349
3350	/* No journalling happens on data buffers when this function is used */
3351	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3352
3353	block_invalidatepage(page, offset, length);
3354}
3355
3356static int __ext4_journalled_invalidatepage(struct page *page,
3357					    unsigned int offset,
3358					    unsigned int length)
3359{
3360	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3361
3362	trace_ext4_journalled_invalidatepage(page, offset, length);
3363
3364	/*
3365	 * If it's a full truncate we just forget about the pending dirtying
3366	 */
3367	if (offset == 0 && length == PAGE_SIZE)
3368		ClearPageChecked(page);
3369
3370	return jbd2_journal_invalidatepage(journal, page, offset, length);
3371}
3372
3373/* Wrapper for aops... */
3374static void ext4_journalled_invalidatepage(struct page *page,
3375					   unsigned int offset,
3376					   unsigned int length)
3377{
3378	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3379}
3380
3381static int ext4_releasepage(struct page *page, gfp_t wait)
3382{
3383	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3384
3385	trace_ext4_releasepage(page);
3386
3387	/* Page has dirty journalled data -> cannot release */
3388	if (PageChecked(page))
3389		return 0;
3390	if (journal)
3391		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3392	else
3393		return try_to_free_buffers(page);
3394}
3395
3396static bool ext4_inode_datasync_dirty(struct inode *inode)
3397{
3398	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3399
3400	if (journal)
3401		return !jbd2_transaction_committed(journal,
3402					EXT4_I(inode)->i_datasync_tid);
3403	/* Any metadata buffers to write? */
3404	if (!list_empty(&inode->i_mapping->private_list))
3405		return true;
3406	return inode->i_state & I_DIRTY_DATASYNC;
3407}
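/*
 * Illustrative note (not part of the original source): with a journal
 * present, the inode counts as datasync-dirty until the transaction
 * recorded in i_datasync_tid commits.  ext4_iomap_begin() below relies
 * on this to decide whether to report IOMAP_F_DIRTY, which matters for
 * DAX synchronous writes.
 */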
3408
3409static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3410			    unsigned flags, struct iomap *iomap)
3411{
3412	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3413	unsigned int blkbits = inode->i_blkbits;
3414	unsigned long first_block, last_block;
3415	struct ext4_map_blocks map;
3416	bool delalloc = false;
3417	int ret;
3418
3419	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3420		return -EINVAL;
3421	first_block = offset >> blkbits;
3422	last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
3423			   EXT4_MAX_LOGICAL_BLOCK);
3424
3425	if (flags & IOMAP_REPORT) {
3426		if (ext4_has_inline_data(inode)) {
3427			ret = ext4_inline_data_iomap(inode, iomap);
3428			if (ret != -EAGAIN) {
3429				if (ret == 0 && offset >= iomap->length)
3430					ret = -ENOENT;
3431				return ret;
3432			}
3433		}
3434	} else {
3435		if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3436			return -ERANGE;
3437	}
3438
3439	map.m_lblk = first_block;
3440	map.m_len = last_block - first_block + 1;
3441
3442	if (flags & IOMAP_REPORT) {
3443		ret = ext4_map_blocks(NULL, inode, &map, 0);
3444		if (ret < 0)
3445			return ret;
3446
3447		if (ret == 0) {
3448			ext4_lblk_t end = map.m_lblk + map.m_len - 1;
3449			struct extent_status es;
3450
3451			ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3452						  map.m_lblk, end, &es);
3453
3454			if (!es.es_len || es.es_lblk > end) {
3455				/* entire range is a hole */
3456			} else if (es.es_lblk > map.m_lblk) {
3457				/* range starts with a hole */
3458				map.m_len = es.es_lblk - map.m_lblk;
3459			} else {
3460				ext4_lblk_t offs = 0;
3461
3462				if (es.es_lblk < map.m_lblk)
3463					offs = map.m_lblk - es.es_lblk;
3464				map.m_lblk = es.es_lblk + offs;
3465				map.m_len = es.es_len - offs;
3466				delalloc = true;
3467			}
3468		}
3469	} else if (flags & IOMAP_WRITE) {
3470		int dio_credits;
3471		handle_t *handle;
3472		int retries = 0;
3473
3474		/* Trim mapping request to maximum we can map at once for DIO */
3475		if (map.m_len > DIO_MAX_BLOCKS)
3476			map.m_len = DIO_MAX_BLOCKS;
3477		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
3478retry:
3479		/*
3480		 * Either we allocate blocks and then we don't get unwritten
3481		 * extent so we have reserved enough credits, or the blocks
3482		 * are already allocated and unwritten and in that case
3483		 * extent conversion fits in the credits as well.
3484		 */
3485		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
3486					    dio_credits);
3487		if (IS_ERR(handle))
3488			return PTR_ERR(handle);
3489
3490		ret = ext4_map_blocks(handle, inode, &map,
3491				      EXT4_GET_BLOCKS_CREATE_ZERO);
3492		if (ret < 0) {
3493			ext4_journal_stop(handle);
3494			if (ret == -ENOSPC &&
3495			    ext4_should_retry_alloc(inode->i_sb, &retries))
3496				goto retry;
3497			return ret;
3498		}
3499
3500		/*
3501		 * If we added blocks beyond i_size, we need to make sure they
3502		 * will get truncated if we crash before updating i_size in
3503		 * ext4_iomap_end(). For faults we don't need to do that (and
3504		 * even cannot because for orphan list operations inode_lock is
3505		 * required) - if we happen to instantiate block beyond i_size,
3506		 * it is because we race with truncate which has already added
3507		 * the inode to the orphan list.
3508		 */
3509		if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
3510		    (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
3511			int err;
3512
3513			err = ext4_orphan_add(handle, inode);
3514			if (err < 0) {
3515				ext4_journal_stop(handle);
3516				return err;
3517			}
3518		}
3519		ext4_journal_stop(handle);
3520	} else {
3521		ret = ext4_map_blocks(NULL, inode, &map, 0);
3522		if (ret < 0)
3523			return ret;
3524	}
3525
3526	iomap->flags = 0;
3527	if (ext4_inode_datasync_dirty(inode))
3528		iomap->flags |= IOMAP_F_DIRTY;
3529	iomap->bdev = inode->i_sb->s_bdev;
3530	iomap->dax_dev = sbi->s_daxdev;
3531	iomap->offset = (u64)first_block << blkbits;
3532	iomap->length = (u64)map.m_len << blkbits;
3533
3534	if (ret == 0) {
3535		iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
3536		iomap->addr = IOMAP_NULL_ADDR;
3537	} else {
3538		if (map.m_flags & EXT4_MAP_MAPPED) {
3539			iomap->type = IOMAP_MAPPED;
3540		} else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
3541			iomap->type = IOMAP_UNWRITTEN;
3542		} else {
3543			WARN_ON_ONCE(1);
3544			return -EIO;
3545		}
3546		iomap->addr = (u64)map.m_pblk << blkbits;
3547	}
3548
3549	if (map.m_flags & EXT4_MAP_NEW)
3550		iomap->flags |= IOMAP_F_NEW;
3551
3552	return 0;
3553}
3554
3555static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3556			  ssize_t written, unsigned flags, struct iomap *iomap)
3557{
3558	int ret = 0;
3559	handle_t *handle;
3560	int blkbits = inode->i_blkbits;
3561	bool truncate = false;
3562
3563	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
3564		return 0;
3565
3566	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3567	if (IS_ERR(handle)) {
3568		ret = PTR_ERR(handle);
3569		goto orphan_del;
3570	}
3571	if (ext4_update_inode_size(inode, offset + written))
3572		ext4_mark_inode_dirty(handle, inode);
3573	/*
3574	 * We may need to truncate allocated but not written blocks beyond EOF.
3575	 */
3576	if (iomap->offset + iomap->length >
3577	    ALIGN(inode->i_size, 1 << blkbits)) {
3578		ext4_lblk_t written_blk, end_blk;
3579
3580		written_blk = (offset + written) >> blkbits;
3581		end_blk = (offset + length) >> blkbits;
3582		if (written_blk < end_blk && ext4_can_truncate(inode))
3583			truncate = true;
3584	}
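	/*
	 * Illustrative example (assumed values): with 4k blocks, a write of
	 * length 8192 at offset 0 of which only 4096 was written leaves
	 * i_size == 4096; the iomap end (8192) is past ALIGN(4096, 4096),
	 * written_blk == 1 < end_blk == 2, so the unused block is truncated.
	 */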
3585	/*
3586	 * Remove inode from orphan list if we were extending an inode and
3587	 * everything went fine.
3588	 */
3589	if (!truncate && inode->i_nlink &&
3590	    !list_empty(&EXT4_I(inode)->i_orphan))
3591		ext4_orphan_del(handle, inode);
3592	ext4_journal_stop(handle);
3593	if (truncate) {
3594		ext4_truncate_failed_write(inode);
3595orphan_del:
3596		/*
3597		 * If truncate failed early the inode might still be on the
3598		 * orphan list; we need to make sure the inode is removed from
3599		 * the orphan list in that case.
3600		 */
3601		if (inode->i_nlink)
3602			ext4_orphan_del(NULL, inode);
3603	}
3604	return ret;
3605}
3606
3607const struct iomap_ops ext4_iomap_ops = {
3608	.iomap_begin		= ext4_iomap_begin,
3609	.iomap_end		= ext4_iomap_end,
3610};
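/*
 * Usage note: these ops are handed to the generic iomap helpers; for
 * example, the DAX branch of ext4_block_zero_page_range() below calls
 * iomap_zero_range(inode, from, length, NULL, &ext4_iomap_ops).
 */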
3611
3612static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3613			    ssize_t size, void *private)
3614{
3615	ext4_io_end_t *io_end = private;
3616
3617	/* if not async direct IO just return */
3618	if (!io_end)
3619		return 0;
3620
3621	ext_debug("ext4_end_io_dio(): io_end 0x%p "
3622		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3623		  io_end, io_end->inode->i_ino, iocb, offset, size);
3624
3625	/*
3626	 * Error during AIO DIO. We cannot convert unwritten extents as the
3627	 * data was not written. Just clear the unwritten flag and drop io_end.
3628	 */
3629	if (size <= 0) {
3630		ext4_clear_io_unwritten_flag(io_end);
3631		size = 0;
3632	}
3633	io_end->offset = offset;
3634	io_end->size = size;
3635	ext4_put_io_end(io_end);
3636
3637	return 0;
3638}
3639
3640/*
3641 * Handling of direct IO writes.
3642 *
3643 * For ext4 extent files, ext4 will do direct-io writes even to holes,
3644 * preallocated extents, and writes that extend the file; there is no
3645 * need to fall back to buffered IO.
3646 *
3647 * For holes, we allocate those blocks and mark them as unwritten.
3648 * If those blocks were preallocated, we make sure they are split, but
3649 * still keep the range to write as unwritten.
3650 *
3651 * The unwritten extents will be converted to written when DIO is completed.
3652 * For async direct IO, since the IO may still be pending when we return, we
3653 * set up an end_io callback function, which will do the conversion
3654 * when the async direct IO is completed.
3655 *
3656 * If the O_DIRECT write will extend the file then add this inode to the
3657 * orphan list.  So recovery will truncate it back to the original size
3658 * if the machine crashes during the write.
3659 *
3660 */
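/*
 * Illustrative userspace trigger (not part of this file): O_DIRECT writes
 * reach this path via something like
 *
 *	int fd = open("file", O_WRONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	pwrite(fd, buf, 4096, 0);
 *
 * (O_DIRECT requires suitably aligned buffers.)  A write that extends the
 * file takes the orphan-list path described above.
 */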
3661static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3662{
3663	struct file *file = iocb->ki_filp;
3664	struct inode *inode = file->f_mapping->host;
3665	struct ext4_inode_info *ei = EXT4_I(inode);
3666	ssize_t ret;
3667	loff_t offset = iocb->ki_pos;
3668	size_t count = iov_iter_count(iter);
3669	int overwrite = 0;
3670	get_block_t *get_block_func = NULL;
3671	int dio_flags = 0;
3672	loff_t final_size = offset + count;
3673	int orphan = 0;
3674	handle_t *handle;
3675
3676	if (final_size > inode->i_size || final_size > ei->i_disksize) {
3677		/* Credits for sb + inode write */
3678		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3679		if (IS_ERR(handle)) {
3680			ret = PTR_ERR(handle);
3681			goto out;
3682		}
3683		ret = ext4_orphan_add(handle, inode);
3684		if (ret) {
3685			ext4_journal_stop(handle);
3686			goto out;
3687		}
3688		orphan = 1;
3689		ext4_update_i_disksize(inode, inode->i_size);
3690		ext4_journal_stop(handle);
3691	}
3692
3693	BUG_ON(iocb->private == NULL);
3694
3695	/*
3696	 * Make all waiters for direct IO properly wait also for extent
3697	 * conversion. This also disallows race between truncate() and
3698	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3699	 */
3700	inode_dio_begin(inode);
3701
3702	/* If we do an overwrite DIO, i_mutex locking can be released */
3703	overwrite = *((int *)iocb->private);
3704
3705	if (overwrite)
3706		inode_unlock(inode);
3707
3708	/*
3709	 * For extent mapped files we can do direct writes to holes and
3710	 * fallocated ranges.
3711	 *
3712	 * Allocated blocks to fill a hole are marked as unwritten to prevent
3713	 * a parallel buffered read from exposing stale data before DIO
3714	 * completes the data IO.
3715	 *
3716	 * As for previously fallocated extents, ext4's get_block will simply
3717	 * mark the buffer mapped but still keep the extents unwritten.
3718	 *
3719	 * For the non-AIO case, we convert those unwritten extents to written
3720	 * after returning from blockdev_direct_IO. That saves us from
3721	 * allocating an io_end structure and the overhead of offloading the
3722	 * extent conversion to a workqueue.
3723	 *
3724	 * For async DIO, the conversion is deferred until the IO completes;
3725	 * the ext4 end_io callback takes care of the conversion work.  Here,
3726	 * for the async case, we allocate an io_end structure to hook to the iocb.
3727	 */
3728	iocb->private = NULL;
3729	if (overwrite)
3730		get_block_func = ext4_dio_get_block_overwrite;
3731	else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3732		   round_down(offset, i_blocksize(inode)) >= inode->i_size) {
3733		get_block_func = ext4_dio_get_block;
3734		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3735	} else if (is_sync_kiocb(iocb)) {
3736		get_block_func = ext4_dio_get_block_unwritten_sync;
3737		dio_flags = DIO_LOCKING;
3738	} else {
3739		get_block_func = ext4_dio_get_block_unwritten_async;
3740		dio_flags = DIO_LOCKING;
3741	}
3742	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3743				   get_block_func, ext4_end_io_dio, NULL,
3744				   dio_flags);
3745
3746	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3747						EXT4_STATE_DIO_UNWRITTEN)) {
3748		int err;
3749		/*
3750		 * For the non-AIO case, since the IO is already
3751		 * completed, we can do the conversion right here.
3752		 */
3753		err = ext4_convert_unwritten_extents(NULL, inode,
3754						     offset, ret);
3755		if (err < 0)
3756			ret = err;
3757		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3758	}
3759
3760	inode_dio_end(inode);
3761	/* take i_mutex locking again if we do an overwrite DIO */
3762	if (overwrite)
3763		inode_lock(inode);
3764
3765	if (ret < 0 && final_size > inode->i_size)
3766		ext4_truncate_failed_write(inode);
3767
3768	/* Handle extending of i_size after direct IO write */
3769	if (orphan) {
3770		int err;
3771
3772		/* Credits for sb + inode write */
3773		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3774		if (IS_ERR(handle)) {
3775			/*
3776			 * We wrote the data but cannot extend
3777			 * i_size. Bail out. In the async IO case, we do
3778			 * not return an error here because we have
3779			 * already submitted the corresponding
3780			 * bio. Returning an error here would make the caller
3781			 * think that this IO is done and failed,
3782			 * resulting in a race with the bio's completion
3783			 * handler.
3784			 */
3785			if (!ret)
3786				ret = PTR_ERR(handle);
3787			if (inode->i_nlink)
3788				ext4_orphan_del(NULL, inode);
3789
3790			goto out;
3791		}
3792		if (inode->i_nlink)
3793			ext4_orphan_del(handle, inode);
3794		if (ret > 0) {
3795			loff_t end = offset + ret;
3796			if (end > inode->i_size || end > ei->i_disksize) {
3797				ext4_update_i_disksize(inode, end);
3798				if (end > inode->i_size)
3799					i_size_write(inode, end);
3800				/*
3801				 * We're going to return a positive `ret'
3802				 * here due to non-zero-length I/O, so there's
3803				 * no way of reporting error returns from
3804				 * ext4_mark_inode_dirty() to userspace.  So
3805				 * ignore it.
3806				 */
3807				ext4_mark_inode_dirty(handle, inode);
3808			}
3809		}
3810		err = ext4_journal_stop(handle);
3811		if (ret == 0)
3812			ret = err;
3813	}
3814out:
3815	return ret;
3816}
3817
3818static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3819{
3820	struct address_space *mapping = iocb->ki_filp->f_mapping;
3821	struct inode *inode = mapping->host;
3822	size_t count = iov_iter_count(iter);
3823	ssize_t ret;
3824
3825	/*
3826	 * Shared inode_lock is enough for us - it protects against concurrent
3827	 * writes & truncates and since we take care of writing back page cache,
3828	 * we are protected against page writeback as well.
3829	 */
3830	inode_lock_shared(inode);
3831	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
3832					   iocb->ki_pos + count - 1);
3833	if (ret)
3834		goto out_unlock;
3835	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3836				   iter, ext4_dio_get_block, NULL, NULL, 0);
3837out_unlock:
3838	inode_unlock_shared(inode);
3839	return ret;
3840}
3841
3842static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3843{
3844	struct file *file = iocb->ki_filp;
3845	struct inode *inode = file->f_mapping->host;
3846	size_t count = iov_iter_count(iter);
3847	loff_t offset = iocb->ki_pos;
3848	ssize_t ret;
3849
3850#ifdef CONFIG_FS_ENCRYPTION
3851	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
3852		return 0;
3853#endif
3854	if (fsverity_active(inode))
3855		return 0;
3856
3857	/*
3858	 * If we are doing data journalling we don't support O_DIRECT
3859	 */
3860	if (ext4_should_journal_data(inode))
3861		return 0;
3862
3863	/* Let buffer I/O handle the inline data case. */
3864	if (ext4_has_inline_data(inode))
3865		return 0;
3866
3867	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
3868	if (iov_iter_rw(iter) == READ)
3869		ret = ext4_direct_IO_read(iocb, iter);
3870	else
3871		ret = ext4_direct_IO_write(iocb, iter);
3872	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
3873	return ret;
3874}
3875
3876/*
3877 * Pages can be marked dirty completely asynchronously from ext4's journalling
3878 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3879 * much here because ->set_page_dirty is called under VFS locks.  The page is
3880 * not necessarily locked.
3881 *
3882 * We cannot just dirty the page and leave attached buffers clean, because the
3883 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3884 * or jbddirty because all the journalling code will explode.
3885 *
3886 * So what we do is to mark the page "pending dirty" and next time writepage
3887 * is called, propagate that into the buffers appropriately.
3888 */
3889static int ext4_journalled_set_page_dirty(struct page *page)
3890{
3891	SetPageChecked(page);
3892	return __set_page_dirty_nobuffers(page);
3893}
3894
3895static int ext4_set_page_dirty(struct page *page)
3896{
3897	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3898	WARN_ON_ONCE(!page_has_buffers(page));
3899	return __set_page_dirty_buffers(page);
3900}
3901
3902static const struct address_space_operations ext4_aops = {
3903	.readpage		= ext4_readpage,
3904	.readpages		= ext4_readpages,
3905	.writepage		= ext4_writepage,
3906	.writepages		= ext4_writepages,
3907	.write_begin		= ext4_write_begin,
3908	.write_end		= ext4_write_end,
3909	.set_page_dirty		= ext4_set_page_dirty,
3910	.bmap			= ext4_bmap,
3911	.invalidatepage		= ext4_invalidatepage,
3912	.releasepage		= ext4_releasepage,
3913	.direct_IO		= ext4_direct_IO,
3914	.migratepage		= buffer_migrate_page,
3915	.is_partially_uptodate  = block_is_partially_uptodate,
3916	.error_remove_page	= generic_error_remove_page,
3917};
3918
3919static const struct address_space_operations ext4_journalled_aops = {
3920	.readpage		= ext4_readpage,
3921	.readpages		= ext4_readpages,
3922	.writepage		= ext4_writepage,
3923	.writepages		= ext4_writepages,
3924	.write_begin		= ext4_write_begin,
3925	.write_end		= ext4_journalled_write_end,
3926	.set_page_dirty		= ext4_journalled_set_page_dirty,
3927	.bmap			= ext4_bmap,
3928	.invalidatepage		= ext4_journalled_invalidatepage,
3929	.releasepage		= ext4_releasepage,
3930	.direct_IO		= ext4_direct_IO,
3931	.is_partially_uptodate  = block_is_partially_uptodate,
3932	.error_remove_page	= generic_error_remove_page,
3933};
3934
3935static const struct address_space_operations ext4_da_aops = {
3936	.readpage		= ext4_readpage,
3937	.readpages		= ext4_readpages,
3938	.writepage		= ext4_writepage,
3939	.writepages		= ext4_writepages,
3940	.write_begin		= ext4_da_write_begin,
3941	.write_end		= ext4_da_write_end,
3942	.set_page_dirty		= ext4_set_page_dirty,
3943	.bmap			= ext4_bmap,
3944	.invalidatepage		= ext4_invalidatepage,
3945	.releasepage		= ext4_releasepage,
3946	.direct_IO		= ext4_direct_IO,
3947	.migratepage		= buffer_migrate_page,
3948	.is_partially_uptodate  = block_is_partially_uptodate,
3949	.error_remove_page	= generic_error_remove_page,
3950};
3951
3952static const struct address_space_operations ext4_dax_aops = {
3953	.writepages		= ext4_dax_writepages,
3954	.direct_IO		= noop_direct_IO,
3955	.set_page_dirty		= noop_set_page_dirty,
3956	.bmap			= ext4_bmap,
3957	.invalidatepage		= noop_invalidatepage,
3958};
3959
3960void ext4_set_aops(struct inode *inode)
3961{
3962	switch (ext4_inode_journal_mode(inode)) {
3963	case EXT4_INODE_ORDERED_DATA_MODE:
3964	case EXT4_INODE_WRITEBACK_DATA_MODE:
3965		break;
3966	case EXT4_INODE_JOURNAL_DATA_MODE:
3967		inode->i_mapping->a_ops = &ext4_journalled_aops;
3968		return;
3969	default:
3970		BUG();
3971	}
3972	if (IS_DAX(inode))
3973		inode->i_mapping->a_ops = &ext4_dax_aops;
3974	else if (test_opt(inode->i_sb, DELALLOC))
3975		inode->i_mapping->a_ops = &ext4_da_aops;
3976	else
3977		inode->i_mapping->a_ops = &ext4_aops;
3978}
3979
3980static int __ext4_block_zero_page_range(handle_t *handle,
3981		struct address_space *mapping, loff_t from, loff_t length)
3982{
3983	ext4_fsblk_t index = from >> PAGE_SHIFT;
3984	unsigned offset = from & (PAGE_SIZE-1);
3985	unsigned blocksize, pos;
3986	ext4_lblk_t iblock;
3987	struct inode *inode = mapping->host;
3988	struct buffer_head *bh;
3989	struct page *page;
3990	int err = 0;
3991
3992	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3993				   mapping_gfp_constraint(mapping, ~__GFP_FS));
3994	if (!page)
3995		return -ENOMEM;
3996
3997	blocksize = inode->i_sb->s_blocksize;
3998
3999	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
4000
4001	if (!page_has_buffers(page))
4002		create_empty_buffers(page, blocksize, 0);
4003
4004	/* Find the buffer that contains "offset" */
4005	bh = page_buffers(page);
4006	pos = blocksize;
4007	while (offset >= pos) {
4008		bh = bh->b_this_page;
4009		iblock++;
4010		pos += blocksize;
4011	}
4012	if (buffer_freed(bh)) {
4013		BUFFER_TRACE(bh, "freed: skip");
4014		goto unlock;
4015	}
4016	if (!buffer_mapped(bh)) {
4017		BUFFER_TRACE(bh, "unmapped");
4018		ext4_get_block(inode, iblock, bh, 0);
4019		/* unmapped? It's a hole - nothing to do */
4020		if (!buffer_mapped(bh)) {
4021			BUFFER_TRACE(bh, "still unmapped");
4022			goto unlock;
4023		}
4024	}
4025
4026	/* Ok, it's mapped. Make sure it's up-to-date */
4027	if (PageUptodate(page))
4028		set_buffer_uptodate(bh);
4029
4030	if (!buffer_uptodate(bh)) {
4031		err = -EIO;
4032		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
4033		wait_on_buffer(bh);
4034		/* Uhhuh. Read error. Complain and punt. */
4035		if (!buffer_uptodate(bh))
4036			goto unlock;
4037		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
4038			/* We expect the key to be set. */
4039			BUG_ON(!fscrypt_has_encryption_key(inode));
4040			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
4041					page, blocksize, bh_offset(bh)));
4042		}
4043	}
4044	if (ext4_should_journal_data(inode)) {
4045		BUFFER_TRACE(bh, "get write access");
4046		err = ext4_journal_get_write_access(handle, bh);
4047		if (err)
4048			goto unlock;
4049	}
4050	zero_user(page, offset, length);
4051	BUFFER_TRACE(bh, "zeroed end of block");
4052
4053	if (ext4_should_journal_data(inode)) {
4054		err = ext4_handle_dirty_metadata(handle, inode, bh);
4055	} else {
4056		err = 0;
4057		mark_buffer_dirty(bh);
4058		if (ext4_should_order_data(inode))
4059			err = ext4_jbd2_inode_add_write(handle, inode, from,
4060					length);
4061	}
4062
4063unlock:
4064	unlock_page(page);
4065	put_page(page);
4066	return err;
4067}
4068
4069/*
4070 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
4071 * starting from file offset 'from'.  The range to be zeroed must
4072 * be contained within one block.  If the specified range exceeds
4073 * the end of the block it will be shortened to the end of the block
4074 * that corresponds to 'from'.
4075 */
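/*
 * Worked example (illustrative): with a 4k block size, from == 5000 gives
 * an in-block offset of 5000 & 4095 == 904, so max == 3192 and a requested
 * length of 4096 is shortened to 3192.
 */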
4076static int ext4_block_zero_page_range(handle_t *handle,
4077		struct address_space *mapping, loff_t from, loff_t length)
4078{
4079	struct inode *inode = mapping->host;
4080	unsigned offset = from & (PAGE_SIZE-1);
4081	unsigned blocksize = inode->i_sb->s_blocksize;
4082	unsigned max = blocksize - (offset & (blocksize - 1));
4083
4084	/*
4085	 * correct length if it does not fall between
4086	 * 'from' and the end of the block
4087	 */
4088	if (length > max || length < 0)
4089		length = max;
4090
4091	if (IS_DAX(inode)) {
4092		return iomap_zero_range(inode, from, length, NULL,
4093					&ext4_iomap_ops);
4094	}
4095	return __ext4_block_zero_page_range(handle, mapping, from, length);
4096}
4097
4098/*
4099 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
4100 * up to the end of the block which corresponds to `from'.
4101 * This is required during truncate. We need to physically zero the tail end
4102 * of that block so it doesn't yield old data if the file is later grown.
4103 */
4104static int ext4_block_truncate_page(handle_t *handle,
4105		struct address_space *mapping, loff_t from)
4106{
4107	unsigned offset = from & (PAGE_SIZE-1);
4108	unsigned length;
4109	unsigned blocksize;
4110	struct inode *inode = mapping->host;
4111
4112	/* If we are processing an encrypted inode during orphan list handling */
4113	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
4114		return 0;
4115
4116	blocksize = inode->i_sb->s_blocksize;
4117	length = blocksize - (offset & (blocksize - 1));
4118
4119	return ext4_block_zero_page_range(handle, mapping, from, length);
4120}
4121
4122int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
4123			     loff_t lstart, loff_t length)
4124{
4125	struct super_block *sb = inode->i_sb;
4126	struct address_space *mapping = inode->i_mapping;
4127	unsigned partial_start, partial_end;
4128	ext4_fsblk_t start, end;
4129	loff_t byte_end = (lstart + length - 1);
4130	int err = 0;
4131
4132	partial_start = lstart & (sb->s_blocksize - 1);
4133	partial_end = byte_end & (sb->s_blocksize - 1);
4134
4135	start = lstart >> sb->s_blocksize_bits;
4136	end = byte_end >> sb->s_blocksize_bits;
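	/*
	 * Worked example (illustrative): with 4k blocks, lstart == 1000 and
	 * length == 9000 give byte_end == 9999, partial_start == 1000,
	 * partial_end == 1807, start == 0 and end == 2: we zero bytes
	 * 1000..4095 at the head and 8192..9999 at the tail below.
	 */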
4137
4138	/* Handle partial zero within the single block */
4139	if (start == end &&
4140	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
4141		err = ext4_block_zero_page_range(handle, mapping,
4142						 lstart, length);
4143		return err;
4144	}
4145	/* Handle partial zero out on the start of the range */
4146	if (partial_start) {
4147		err = ext4_block_zero_page_range(handle, mapping,
4148						 lstart, sb->s_blocksize);
4149		if (err)
4150			return err;
4151	}
4152	/* Handle partial zero out on the end of the range */
4153	if (partial_end != sb->s_blocksize - 1)
4154		err = ext4_block_zero_page_range(handle, mapping,
4155						 byte_end - partial_end,
4156						 partial_end + 1);
4157	return err;
4158}
4159
4160int ext4_can_truncate(struct inode *inode)
4161{
4162	if (S_ISREG(inode->i_mode))
4163		return 1;
4164	if (S_ISDIR(inode->i_mode))
4165		return 1;
4166	if (S_ISLNK(inode->i_mode))
4167		return !ext4_inode_is_fast_symlink(inode);
4168	return 0;
4169}
4170
4171/*
4172 * We have to make sure i_disksize gets properly updated before we truncate
4173 * page cache due to hole punching or zero range. Otherwise i_disksize update
4174 * can get lost as it may have been postponed to submission of writeback but
4175 * that will never happen after we truncate page cache.
4176 */
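/*
 * Illustrative case (assumed numbers): with i_size == 10000, punching
 * 4096..16383 covers EOF, so a postponed i_disksize update must be
 * committed here before the page cache is truncated.
 */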
4177int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4178				      loff_t len)
4179{
4180	handle_t *handle;
4181	loff_t size = i_size_read(inode);
4182
4183	WARN_ON(!inode_is_locked(inode));
4184	if (offset > size || offset + len < size)
4185		return 0;
4186
4187	if (EXT4_I(inode)->i_disksize >= size)
4188		return 0;
4189
4190	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4191	if (IS_ERR(handle))
4192		return PTR_ERR(handle);
4193	ext4_update_i_disksize(inode, size);
4194	ext4_mark_inode_dirty(handle, inode);
4195	ext4_journal_stop(handle);
4196
4197	return 0;
4198}
4199
4200static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4201{
4202	up_write(&ei->i_mmap_sem);
4203	schedule();
4204	down_write(&ei->i_mmap_sem);
4205}
4206
4207int ext4_break_layouts(struct inode *inode)
4208{
4209	struct ext4_inode_info *ei = EXT4_I(inode);
4210	struct page *page;
4211	int error;
4212
4213	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4214		return -EINVAL;
4215
4216	do {
4217		page = dax_layout_busy_page(inode->i_mapping);
4218		if (!page)
4219			return 0;
4220
4221		error = ___wait_var_event(&page->_refcount,
4222				atomic_read(&page->_refcount) == 1,
4223				TASK_INTERRUPTIBLE, 0, 0,
4224				ext4_wait_dax_page(ei));
4225	} while (error == 0);
4226
4227	return error;
4228}
4229
4230/*
4231 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4232 * associated with the given offset and length
4233 *
4234 * @inode:  File inode
4235 * @offset: The offset where the hole will begin
4236 * @len:    The length of the hole
4237 *
4238 * Returns: 0 on success or negative on failure
4239 */
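/*
 * Illustrative userspace trigger (not part of this file): hole punching
 * is reached via fallocate(2), e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 *
 * which arrives here with offset == 4096 and length == 8192.
 */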
4240
4241int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4242{
4243	struct super_block *sb = inode->i_sb;
4244	ext4_lblk_t first_block, stop_block;
4245	struct address_space *mapping = inode->i_mapping;
4246	loff_t first_block_offset, last_block_offset;
4247	handle_t *handle;
4248	unsigned int credits;
4249	int ret = 0;
4250
4251	if (!S_ISREG(inode->i_mode))
4252		return -EOPNOTSUPP;
4253
4254	trace_ext4_punch_hole(inode, offset, length, 0);
4255
4256	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4257	if (ext4_has_inline_data(inode)) {
4258		down_write(&EXT4_I(inode)->i_mmap_sem);
4259		ret = ext4_convert_inline_data(inode);
4260		up_write(&EXT4_I(inode)->i_mmap_sem);
4261		if (ret)
4262			return ret;
4263	}
4264
4265	/*
4266	 * Write out all dirty pages to avoid race conditions,
4267	 * then release them.
4268	 */
4269	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4270		ret = filemap_write_and_wait_range(mapping, offset,
4271						   offset + length - 1);
4272		if (ret)
4273			return ret;
4274	}
4275
4276	inode_lock(inode);
4277
4278	/* No need to punch hole beyond i_size */
4279	if (offset >= inode->i_size)
4280		goto out_mutex;
4281
4282	/*
4283	 * If the hole extends beyond i_size, set the hole
4284	 * to end after the page that contains i_size
4285	 */
4286	if (offset + length > inode->i_size) {
4287		length = inode->i_size +
4288		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4289		   offset;
4290	}
4291
4292	if (offset & (sb->s_blocksize - 1) ||
4293	    (offset + length) & (sb->s_blocksize - 1)) {
4294		/*
4295		 * Attach jinode to inode for jbd2 if we do any zeroing of
4296		 * partial block
4297		 */
4298		ret = ext4_inode_attach_jinode(inode);
4299		if (ret < 0)
4300			goto out_mutex;
4301
4302	}
4303
4304	/* Wait for all existing dio workers; newcomers will block on i_mutex */
4305	inode_dio_wait(inode);
4306
4307	/*
4308	 * Prevent page faults from reinstantiating pages we have released from
4309	 * page cache.
4310	 */
4311	down_write(&EXT4_I(inode)->i_mmap_sem);
4312
4313	ret = ext4_break_layouts(inode);
4314	if (ret)
4315		goto out_dio;
4316
4317	first_block_offset = round_up(offset, sb->s_blocksize);
4318	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4319
4320	/* Now release the pages and zero the block-aligned part of pages */
4321	if (last_block_offset > first_block_offset) {
4322		ret = ext4_update_disksize_before_punch(inode, offset, length);
4323		if (ret)
4324			goto out_dio;
4325		truncate_pagecache_range(inode, first_block_offset,
4326					 last_block_offset);
4327	}
4328
4329	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4330		credits = ext4_writepage_trans_blocks(inode);
4331	else
4332		credits = ext4_blocks_for_truncate(inode);
4333	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4334	if (IS_ERR(handle)) {
4335		ret = PTR_ERR(handle);
4336		ext4_std_error(sb, ret);
4337		goto out_dio;
4338	}
4339
4340	ret = ext4_zero_partial_blocks(handle, inode, offset,
4341				       length);
4342	if (ret)
4343		goto out_stop;
4344
4345	first_block = (offset + sb->s_blocksize - 1) >>
4346		EXT4_BLOCK_SIZE_BITS(sb);
4347	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4348
4349	/* If there are blocks to remove, do it */
4350	if (stop_block > first_block) {
4351
4352		down_write(&EXT4_I(inode)->i_data_sem);
4353		ext4_discard_preallocations(inode);
4354
4355		ret = ext4_es_remove_extent(inode, first_block,
4356					    stop_block - first_block);
4357		if (ret) {
4358			up_write(&EXT4_I(inode)->i_data_sem);
4359			goto out_stop;
4360		}
4361
4362		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4363			ret = ext4_ext_remove_space(inode, first_block,
4364						    stop_block - 1);
4365		else
4366			ret = ext4_ind_remove_space(handle, inode, first_block,
4367						    stop_block);
4368
4369		up_write(&EXT4_I(inode)->i_data_sem);
4370	}
4371	if (IS_SYNC(inode))
4372		ext4_handle_sync(handle);
4373
4374	inode->i_mtime = inode->i_ctime = current_time(inode);
4375	ext4_mark_inode_dirty(handle, inode);
4376	if (ret >= 0)
4377		ext4_update_inode_fsync_trans(handle, inode, 1);
4378out_stop:
4379	ext4_journal_stop(handle);
4380out_dio:
4381	up_write(&EXT4_I(inode)->i_mmap_sem);
4382out_mutex:
4383	inode_unlock(inode);
4384	return ret;
4385}
4386
4387int ext4_inode_attach_jinode(struct inode *inode)
4388{
4389	struct ext4_inode_info *ei = EXT4_I(inode);
4390	struct jbd2_inode *jinode;
4391
4392	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4393		return 0;
4394
4395	jinode = jbd2_alloc_inode(GFP_KERNEL);
4396	spin_lock(&inode->i_lock);
4397	if (!ei->jinode) {
4398		if (!jinode) {
4399			spin_unlock(&inode->i_lock);
4400			return -ENOMEM;
4401		}
4402		ei->jinode = jinode;
4403		jbd2_journal_init_jbd_inode(ei->jinode, inode);
4404		jinode = NULL;
4405	}
4406	spin_unlock(&inode->i_lock);
4407	if (unlikely(jinode != NULL))
4408		jbd2_free_inode(jinode);
4409	return 0;
4410}
4411
4412/*
4413 * ext4_truncate()
4414 *
4415 * We block out ext4_get_block() block instantiations across the entire
4416 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4417 * simultaneously on behalf of the same inode.
4418 *
4419 * As we work through the truncate and commit bits of it to the journal there
4420 * is one core, guiding principle: the file's tree must always be consistent on
4421 * disk.  We must be able to restart the truncate after a crash.
4422 *
4423 * The file's tree may be transiently inconsistent in memory (although it
4424 * probably isn't), but whenever we close off and commit a journal transaction,
4425 * the contents of (the filesystem + the journal) must be consistent and
4426 * restartable.  It's pretty simple, really: bottom up, right to left (although
4427 * left-to-right works OK too).
4428 *
4429 * Note that at recovery time, journal replay occurs *before* the restart of
4430 * truncate against the orphan inode list.
4431 *
4432 * The committed inode has the new, desired i_size (which is the same as
4433 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4434 * that this inode's truncate did not complete and it will again call
4435 * ext4_truncate() to have another go.  So there will be instantiated blocks
4436 * to the right of the truncation point in a crashed ext4 filesystem.  But
4437 * that's fine - as long as they are linked from the inode, the post-crash
4438 * ext4_truncate() run will find them and release them.
4439 */
4440int ext4_truncate(struct inode *inode)
4441{
4442	struct ext4_inode_info *ei = EXT4_I(inode);
4443	unsigned int credits;
4444	int err = 0;
4445	handle_t *handle;
4446	struct address_space *mapping = inode->i_mapping;
4447
4448	/*
4449	 * There is a possibility that we're either freeing the inode
4450	 * or it's a completely new inode. In those cases we might not
4451	 * have i_mutex locked because it's not necessary.
4452	 */
4453	if (!(inode->i_state & (I_NEW|I_FREEING)))
4454		WARN_ON(!inode_is_locked(inode));
4455	trace_ext4_truncate_enter(inode);
4456
4457	if (!ext4_can_truncate(inode))
4458		return 0;
4459
4460	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4461
4462	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4463		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4464
4465	if (ext4_has_inline_data(inode)) {
4466		int has_inline = 1;
4467
4468		err = ext4_inline_data_truncate(inode, &has_inline);
4469		if (err)
4470			return err;
4471		if (has_inline)
4472			return 0;
4473	}
4474
4475	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4476	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4477		if (ext4_inode_attach_jinode(inode) < 0)
4478			return 0;
4479	}
4480
4481	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4482		credits = ext4_writepage_trans_blocks(inode);
4483	else
4484		credits = ext4_blocks_for_truncate(inode);
4485
4486	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4487	if (IS_ERR(handle))
4488		return PTR_ERR(handle);
4489
4490	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4491		ext4_block_truncate_page(handle, mapping, inode->i_size);
4492
4493	/*
4494	 * We add the inode to the orphan list, so that if this
4495	 * truncate spans multiple transactions, and we crash, we will
4496	 * resume the truncate when the filesystem recovers.  It also
4497	 * marks the inode dirty, to catch the new size.
4498	 *
4499	 * Implication: the file must always be in a sane, consistent
4500	 * truncatable state while each transaction commits.
4501	 */
4502	err = ext4_orphan_add(handle, inode);
4503	if (err)
4504		goto out_stop;
4505
4506	down_write(&EXT4_I(inode)->i_data_sem);
4507
4508	ext4_discard_preallocations(inode);
4509
4510	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4511		err = ext4_ext_truncate(handle, inode);
4512	else
4513		ext4_ind_truncate(handle, inode);
4514
4515	up_write(&ei->i_data_sem);
4516	if (err)
4517		goto out_stop;
4518
4519	if (IS_SYNC(inode))
4520		ext4_handle_sync(handle);
4521
4522out_stop:
4523	/*
4524	 * If this was a simple ftruncate() and the file will remain alive,
4525	 * then we need to clear up the orphan record which we created above.
4526	 * However, if this was a real unlink then we were called by
4527	 * ext4_evict_inode(), and we allow that function to clean up the
4528	 * orphan info for us.
4529	 */
4530	if (inode->i_nlink)
4531		ext4_orphan_del(handle, inode);
4532
4533	inode->i_mtime = inode->i_ctime = current_time(inode);
4534	ext4_mark_inode_dirty(handle, inode);
4535	ext4_journal_stop(handle);
4536
4537	trace_ext4_truncate_exit(inode);
4538	return err;
4539}
4540
4541/*
4542 * ext4_get_inode_loc returns with an extra refcount against the inode's
4543 * underlying buffer_head on success. If 'in_mem' is true, we have all
4544 * data in memory that is needed to recreate the on-disk version of this
4545 * inode.
4546 */
4547static int __ext4_get_inode_loc(struct inode *inode,
4548				struct ext4_iloc *iloc, int in_mem)
4549{
4550	struct ext4_group_desc	*gdp;
4551	struct buffer_head	*bh;
4552	struct super_block	*sb = inode->i_sb;
4553	ext4_fsblk_t		block;
4554	struct blk_plug		plug;
4555	int			inodes_per_block, inode_offset;
4556
4557	iloc->bh = NULL;
4558	if (inode->i_ino < EXT4_ROOT_INO ||
4559	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4560		return -EFSCORRUPTED;
4561
4562	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4563	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4564	if (!gdp)
4565		return -EIO;
4566
4567	/*
4568	 * Figure out the offset within the block group inode table
4569	 */
4570	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4571	inode_offset = ((inode->i_ino - 1) %
4572			EXT4_INODES_PER_GROUP(sb));
4573	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4574	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
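	/*
	 * Worked example (illustrative): with 8192 inodes per group,
	 * 256-byte inodes and 4k blocks (16 inodes per block), ino 8195
	 * lands in group 1 with inode_offset == 2, i.e. the first inode
	 * table block of that group, at byte offset 2 * 256 == 512.
	 */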
4575
4576	bh = sb_getblk(sb, block);
4577	if (unlikely(!bh))
4578		return -ENOMEM;
4579	if (!buffer_uptodate(bh)) {
4580		lock_buffer(bh);
4581
4582		/*
4583		 * If the buffer has the write error flag, we have failed
4584		 * to write out another inode in the same block.  In this
4585		 * case, we don't have to read the block because we may
4586		 * read the old inode data successfully.
4587		 */
4588		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4589			set_buffer_uptodate(bh);
4590
4591		if (buffer_uptodate(bh)) {
4592			/* someone brought it uptodate while we waited */
4593			unlock_buffer(bh);
4594			goto has_buffer;
4595		}
4596
4597		/*
4598		 * If we have all information of the inode in memory and this
4599		 * is the only valid inode in the block, we need not read the
4600		 * block.
4601		 */
4602		if (in_mem) {
4603			struct buffer_head *bitmap_bh;
4604			int i, start;
4605
4606			start = inode_offset & ~(inodes_per_block - 1);
4607
4608			/* Is the inode bitmap in cache? */
4609			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4610			if (unlikely(!bitmap_bh))
4611				goto make_io;
4612
4613			/*
4614			 * If the inode bitmap isn't in cache then the
4615			 * optimisation may end up performing two reads instead
4616			 * of one, so skip it.
4617			 */
4618			if (!buffer_uptodate(bitmap_bh)) {
4619				brelse(bitmap_bh);
4620				goto make_io;
4621			}
4622			for (i = start; i < start + inodes_per_block; i++) {
4623				if (i == inode_offset)
4624					continue;
4625				if (ext4_test_bit(i, bitmap_bh->b_data))
4626					break;
4627			}
4628			brelse(bitmap_bh);
4629			if (i == start + inodes_per_block) {
4630				/* all other inodes are free, so skip I/O */
4631				memset(bh->b_data, 0, bh->b_size);
4632				set_buffer_uptodate(bh);
4633				unlock_buffer(bh);
4634				goto has_buffer;
4635			}
4636		}
4637
4638make_io:
4639		/*
4640		 * If we need to do any I/O, try to pre-readahead extra
4641		 * blocks from the inode table.
4642		 */
4643		blk_start_plug(&plug);
4644		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4645			ext4_fsblk_t b, end, table;
4646			unsigned num;
4647			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4648
4649			table = ext4_inode_table(sb, gdp);
4650			/* s_inode_readahead_blks is always a power of 2 */
4651			b = block & ~((ext4_fsblk_t) ra_blks - 1);
4652			if (table > b)
4653				b = table;
4654			end = b + ra_blks;
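		/*
		 * E.g. (illustrative) with ra_blks == 32 and block == 100:
		 * b == 96 (rounded down, but never before the table start)
		 * and end == 128, so itable blocks 96..128 are read ahead,
		 * subject to the end-of-table clamp below.
		 */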
4655			num = EXT4_INODES_PER_GROUP(sb);
4656			if (ext4_has_group_desc_csum(sb))
4657				num -= ext4_itable_unused_count(sb, gdp);
4658			table += num / inodes_per_block;
4659			if (end > table)
4660				end = table;
4661			while (b <= end)
4662				sb_breadahead(sb, b++);
4663		}
4664
4665		/*
4666		 * There are other valid inodes in the buffer, this inode
4667		 * has in-inode xattrs, or we don't have this inode in memory.
4668		 * Read the block from disk.
4669		 */
4670		trace_ext4_load_inode(inode);
4671		get_bh(bh);
4672		bh->b_end_io = end_buffer_read_sync;
4673		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
4674		blk_finish_plug(&plug);
4675		wait_on_buffer(bh);
4676		if (!buffer_uptodate(bh)) {
4677			EXT4_ERROR_INODE_BLOCK(inode, block,
4678					       "unable to read itable block");
4679			brelse(bh);
4680			return -EIO;
4681		}
4682	}
4683has_buffer:
4684	iloc->bh = bh;
4685	return 0;
4686}
4687
4688int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4689{
4690	/* We have all inode data except xattrs in memory here. */
4691	return __ext4_get_inode_loc(inode, iloc,
4692		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4693}
4694
4695static bool ext4_should_use_dax(struct inode *inode)
4696{
4697	if (!test_opt(inode->i_sb, DAX))
4698		return false;
4699	if (!S_ISREG(inode->i_mode))
4700		return false;
4701	if (ext4_should_journal_data(inode))
4702		return false;
4703	if (ext4_has_inline_data(inode))
4704		return false;
4705	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4706		return false;
4707	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4708		return false;
4709	return true;
4710}
4711
4712void ext4_set_inode_flags(struct inode *inode)
4713{
4714	unsigned int flags = EXT4_I(inode)->i_flags;
4715	unsigned int new_fl = 0;
4716
4717	if (flags & EXT4_SYNC_FL)
4718		new_fl |= S_SYNC;
4719	if (flags & EXT4_APPEND_FL)
4720		new_fl |= S_APPEND;
4721	if (flags & EXT4_IMMUTABLE_FL)
4722		new_fl |= S_IMMUTABLE;
4723	if (flags & EXT4_NOATIME_FL)
4724		new_fl |= S_NOATIME;
4725	if (flags & EXT4_DIRSYNC_FL)
4726		new_fl |= S_DIRSYNC;
4727	if (ext4_should_use_dax(inode))
4728		new_fl |= S_DAX;
4729	if (flags & EXT4_ENCRYPT_FL)
4730		new_fl |= S_ENCRYPTED;
4731	if (flags & EXT4_CASEFOLD_FL)
4732		new_fl |= S_CASEFOLD;
4733	if (flags & EXT4_VERITY_FL)
4734		new_fl |= S_VERITY;
4735	inode_set_flags(inode, new_fl,
4736			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4737			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4738}
4739
4740static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4741				  struct ext4_inode_info *ei)
4742{
4743	blkcnt_t i_blocks;
4744	struct inode *inode = &(ei->vfs_inode);
4745	struct super_block *sb = inode->i_sb;
4746
4747	if (ext4_has_feature_huge_file(sb)) {
4748		/* we are using combined 48 bit field */
4749		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4750					le32_to_cpu(raw_inode->i_blocks_lo);
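		/*
		 * E.g. (illustrative): i_blocks_high == 1 and i_blocks_lo == 0
		 * give i_blocks == 1 << 32, counted in 512-byte units unless
		 * the HUGE_FILE inode flag is set, in which case the value
		 * counts fs blocks and is converted below.
		 */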
4751		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4752			/* i_blocks is in units of the file system block size */
4753			return i_blocks << (inode->i_blkbits - 9);
4754		} else {
4755			return i_blocks;
4756		}
4757	} else {
4758		return le32_to_cpu(raw_inode->i_blocks_lo);
4759	}
4760}
4761
4762static inline int ext4_iget_extra_inode(struct inode *inode,
4763					 struct ext4_inode *raw_inode,
4764					 struct ext4_inode_info *ei)
4765{
4766	__le32 *magic = (void *)raw_inode +
4767			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4768
4769	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
4770	    EXT4_INODE_SIZE(inode->i_sb) &&
4771	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4772		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4773		return ext4_find_inline_data_nolock(inode);
4774	} else
4775		EXT4_I(inode)->i_inline_off = 0;
4776	return 0;
4777}
4778
4779int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4780{
4781	if (!ext4_has_feature_project(inode->i_sb))
4782		return -EOPNOTSUPP;
4783	*projid = EXT4_I(inode)->i_projid;
4784	return 0;
4785}
4786
4787/*
4788 * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of
4789 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
4790 * set.
4791 */
4792static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4793{
4794	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4795		inode_set_iversion_raw(inode, val);
4796	else
4797		inode_set_iversion_queried(inode, val);
4798}
4799static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4800{
4801	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4802		return inode_peek_iversion_raw(inode);
4803	else
4804		return inode_peek_iversion(inode);
4805}
4806
4807struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4808			  ext4_iget_flags flags, const char *function,
4809			  unsigned int line)
4810{
4811	struct ext4_iloc iloc;
4812	struct ext4_inode *raw_inode;
4813	struct ext4_inode_info *ei;
4814	struct inode *inode;
4815	journal_t *journal = EXT4_SB(sb)->s_journal;
4816	long ret;
4817	loff_t size;
4818	int block;
4819	uid_t i_uid;
4820	gid_t i_gid;
4821	projid_t i_projid;
4822
4823	if ((!(flags & EXT4_IGET_SPECIAL) &&
4824	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4825	    (ino < EXT4_ROOT_INO) ||
4826	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4827		if (flags & EXT4_IGET_HANDLE)
4828			return ERR_PTR(-ESTALE);
4829		__ext4_error(sb, function, line,
4830			     "inode #%lu: comm %s: iget: illegal inode #",
4831			     ino, current->comm);
4832		return ERR_PTR(-EFSCORRUPTED);
4833	}
4834
4835	inode = iget_locked(sb, ino);
4836	if (!inode)
4837		return ERR_PTR(-ENOMEM);
4838	if (!(inode->i_state & I_NEW))
4839		return inode;
4840
4841	ei = EXT4_I(inode);
4842	iloc.bh = NULL;
4843
4844	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4845	if (ret < 0)
4846		goto bad_inode;
4847	raw_inode = ext4_raw_inode(&iloc);
4848
4849	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4850		ext4_error_inode(inode, function, line, 0,
4851				 "iget: root inode unallocated");
4852		ret = -EFSCORRUPTED;
4853		goto bad_inode;
4854	}
4855
4856	if ((flags & EXT4_IGET_HANDLE) &&
4857	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4858		ret = -ESTALE;
4859		goto bad_inode;
4860	}
4861
4862	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4863		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4864		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4865			EXT4_INODE_SIZE(inode->i_sb) ||
4866		    (ei->i_extra_isize & 3)) {
4867			ext4_error_inode(inode, function, line, 0,
4868					 "iget: bad extra_isize %u "
4869					 "(inode size %u)",
4870					 ei->i_extra_isize,
4871					 EXT4_INODE_SIZE(inode->i_sb));
4872			ret = -EFSCORRUPTED;
4873			goto bad_inode;
4874		}
4875	} else
4876		ei->i_extra_isize = 0;
4877
4878	/* Precompute checksum seed for inode metadata */
4879	if (ext4_has_metadata_csum(sb)) {
4880		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4881		__u32 csum;
4882		__le32 inum = cpu_to_le32(inode->i_ino);
4883		__le32 gen = raw_inode->i_generation;
4884		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4885				   sizeof(inum));
4886		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4887					      sizeof(gen));
4888	}
4889
4890	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4891		ext4_error_inode(inode, function, line, 0,
4892				 "iget: checksum invalid");
4893		ret = -EFSBADCRC;
4894		goto bad_inode;
4895	}
4896
4897	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4898	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4899	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4900	if (ext4_has_feature_project(sb) &&
4901	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4902	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4903		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4904	else
4905		i_projid = EXT4_DEF_PROJID;
4906
4907	if (!(test_opt(inode->i_sb, NO_UID32))) {
4908		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4909		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4910	}
4911	i_uid_write(inode, i_uid);
4912	i_gid_write(inode, i_gid);
4913	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4914	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4915
4916	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4917	ei->i_inline_off = 0;
4918	ei->i_dir_start_lookup = 0;
4919	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4920	/* We now have enough fields to check if the inode was active or not.
4921	 * This is needed because nfsd might try to access dead inodes;
4922	 * the test is the same one that e2fsck uses.
4923	 * NeilBrown 1999oct15
4924	 */
4925	if (inode->i_nlink == 0) {
4926		if ((inode->i_mode == 0 ||
4927		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4928		    ino != EXT4_BOOT_LOADER_INO) {
4929			/* this inode is deleted */
4930			ret = -ESTALE;
4931			goto bad_inode;
4932		}
4933		/* The only unlinked inodes we let through here have
4934		 * valid i_mode and are being read by the orphan
4935		 * recovery code: that's fine, we're about to complete
4936		 * the process of deleting those.
4937		 * OR it is the EXT4_BOOT_LOADER_INO which is
4938		 * not initialized on a new filesystem. */
4939	}
4940	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4941	ext4_set_inode_flags(inode);
4942	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4943	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4944	if (ext4_has_feature_64bit(sb))
4945		ei->i_file_acl |=
4946			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4947	inode->i_size = ext4_isize(sb, raw_inode);
4948	if ((size = i_size_read(inode)) < 0) {
4949		ext4_error_inode(inode, function, line, 0,
4950				 "iget: bad i_size value: %lld", size);
4951		ret = -EFSCORRUPTED;
4952		goto bad_inode;
4953	}
4954	ei->i_disksize = inode->i_size;
4955#ifdef CONFIG_QUOTA
4956	ei->i_reserved_quota = 0;
4957#endif
4958	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4959	ei->i_block_group = iloc.block_group;
4960	ei->i_last_alloc_group = ~0;
4961	/*
4962	 * NOTE! The in-memory inode i_data array is in little-endian order
4963	 * even on big-endian machines: we do NOT byteswap the block numbers!
4964	 */
4965	for (block = 0; block < EXT4_N_BLOCKS; block++)
4966		ei->i_data[block] = raw_inode->i_block[block];
4967	INIT_LIST_HEAD(&ei->i_orphan);
4968
4969	/*
4970	 * Set transaction id's of transactions that have to be committed
4971	 * to finish f[data]sync. We set them to currently running transaction
4972	 * as we cannot be sure that the inode or some of its metadata isn't
4973	 * part of the transaction - the inode could have been reclaimed and
4974	 * now it is reread from disk.
4975	 */
4976	if (journal) {
4977		transaction_t *transaction;
4978		tid_t tid;
4979
4980		read_lock(&journal->j_state_lock);
4981		if (journal->j_running_transaction)
4982			transaction = journal->j_running_transaction;
4983		else
4984			transaction = journal->j_committing_transaction;
4985		if (transaction)
4986			tid = transaction->t_tid;
4987		else
4988			tid = journal->j_commit_sequence;
4989		read_unlock(&journal->j_state_lock);
4990		ei->i_sync_tid = tid;
4991		ei->i_datasync_tid = tid;
4992	}
4993
4994	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4995		if (ei->i_extra_isize == 0) {
4996			/* The extra space is currently unused. Use it. */
4997			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4998			ei->i_extra_isize = sizeof(struct ext4_inode) -
4999					    EXT4_GOOD_OLD_INODE_SIZE;
5000		} else {
5001			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
5002			if (ret)
5003				goto bad_inode;
5004		}
5005	}
5006
5007	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
5008	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
5009	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
5010	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
5011
5012	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5013		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
5014
5015		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5016			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5017				ivers |=
5018		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
5019		}
5020		ext4_inode_set_iversion_queried(inode, ivers);
5021	}
5022
5023	ret = 0;
5024	if (ei->i_file_acl &&
5025	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
5026		ext4_error_inode(inode, function, line, 0,
5027				 "iget: bad extended attribute block %llu",
5028				 ei->i_file_acl);
5029		ret = -EFSCORRUPTED;
5030		goto bad_inode;
5031	} else if (!ext4_has_inline_data(inode)) {
5032		/* validate the block references in the inode */
5033		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5034		   (S_ISLNK(inode->i_mode) &&
5035		    !ext4_inode_is_fast_symlink(inode))) {
5036			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5037				ret = ext4_ext_check_inode(inode);
5038			else
5039				ret = ext4_ind_check_inode(inode);
5040		}
5041	}
5042	if (ret)
5043		goto bad_inode;
5044
5045	if (S_ISREG(inode->i_mode)) {
5046		inode->i_op = &ext4_file_inode_operations;
5047		inode->i_fop = &ext4_file_operations;
5048		ext4_set_aops(inode);
5049	} else if (S_ISDIR(inode->i_mode)) {
5050		inode->i_op = &ext4_dir_inode_operations;
5051		inode->i_fop = &ext4_dir_operations;
5052	} else if (S_ISLNK(inode->i_mode)) {
5053		/* VFS does not allow setting these so must be corruption */
5054		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5055			ext4_error_inode(inode, function, line, 0,
5056					 "iget: immutable or append flags "
5057					 "not allowed on symlinks");
5058			ret = -EFSCORRUPTED;
5059			goto bad_inode;
5060		}
5061		if (IS_ENCRYPTED(inode)) {
5062			inode->i_op = &ext4_encrypted_symlink_inode_operations;
5063			ext4_set_aops(inode);
5064		} else if (ext4_inode_is_fast_symlink(inode)) {
5065			inode->i_link = (char *)ei->i_data;
5066			inode->i_op = &ext4_fast_symlink_inode_operations;
5067			nd_terminate_link(ei->i_data, inode->i_size,
5068				sizeof(ei->i_data) - 1);
5069		} else {
5070			inode->i_op = &ext4_symlink_inode_operations;
5071			ext4_set_aops(inode);
5072		}
5073		inode_nohighmem(inode);
5074	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5075	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5076		inode->i_op = &ext4_special_inode_operations;
5077		if (raw_inode->i_block[0])
5078			init_special_inode(inode, inode->i_mode,
5079			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5080		else
5081			init_special_inode(inode, inode->i_mode,
5082			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5083	} else if (ino == EXT4_BOOT_LOADER_INO) {
5084		make_bad_inode(inode);
5085	} else {
5086		ret = -EFSCORRUPTED;
5087		ext4_error_inode(inode, function, line, 0,
5088				 "iget: bogus i_mode (%o)", inode->i_mode);
5089		goto bad_inode;
5090	}
5091	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
5092		ext4_error_inode(inode, function, line, 0,
5093				 "casefold flag without casefold feature");
5094	brelse(iloc.bh);
5095
5096	unlock_new_inode(inode);
5097	return inode;
5098
5099bad_inode:
5100	brelse(iloc.bh);
5101	iget_failed(inode);
5102	return ERR_PTR(ret);
5103}
5104
5105static int ext4_inode_blocks_set(handle_t *handle,
5106				struct ext4_inode *raw_inode,
5107				struct ext4_inode_info *ei)
5108{
5109	struct inode *inode = &(ei->vfs_inode);
5110	u64 i_blocks = inode->i_blocks;
5111	struct super_block *sb = inode->i_sb;
5112
5113	if (i_blocks <= ~0U) {
5114		/*
5115		 * i_blocks can be represented in a 32 bit variable
5116		 * as multiple of 512 bytes
5117		 */
5118		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5119		raw_inode->i_blocks_high = 0;
5120		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5121		return 0;
5122	}
5123	if (!ext4_has_feature_huge_file(sb))
5124		return -EFBIG;
5125
5126	if (i_blocks <= 0xffffffffffffULL) {
5127		/*
5128		 * i_blocks can be represented in a 48 bit variable
5129		 * as multiple of 512 bytes
5130		 */
5131		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5132		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5133		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5134	} else {
5135		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5136		/* i_blocks is stored in units of the file system block size */
5137		i_blocks = i_blocks >> (inode->i_blkbits - 9);
5138		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5139		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5140	}
5141	return 0;
5142}
5143
5144struct other_inode {
5145	unsigned long		orig_ino;
5146	struct ext4_inode	*raw_inode;
5147};
5148
5149static int other_inode_match(struct inode * inode, unsigned long ino,
5150			     void *data)
5151{
5152	struct other_inode *oi = (struct other_inode *) data;
5153
5154	if ((inode->i_ino != ino) ||
5155	    (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5156			       I_DIRTY_INODE)) ||
5157	    ((inode->i_state & I_DIRTY_TIME) == 0))
5158		return 0;
5159	spin_lock(&inode->i_lock);
5160	if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5161				I_DIRTY_INODE)) == 0) &&
5162	    (inode->i_state & I_DIRTY_TIME)) {
5163		struct ext4_inode_info	*ei = EXT4_I(inode);
5164
5165		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
5166		spin_unlock(&inode->i_lock);
5167
5168		spin_lock(&ei->i_raw_lock);
5169		EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
5170		EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
5171		EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
5172		ext4_inode_csum_set(inode, oi->raw_inode, ei);
5173		spin_unlock(&ei->i_raw_lock);
5174		trace_ext4_other_inode_update_time(inode, oi->orig_ino);
5175		return -1;
5176	}
5177	spin_unlock(&inode->i_lock);
5178	return -1;
5179}
5180
5181/*
5182 * Opportunistically update the other time fields for other inodes in
5183 * the same inode table block.
5184 */
5185static void ext4_update_other_inodes_time(struct super_block *sb,
5186					  unsigned long orig_ino, char *buf)
5187{
5188	struct other_inode oi;
5189	unsigned long ino;
5190	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5191	int inode_size = EXT4_INODE_SIZE(sb);
5192
5193	oi.orig_ino = orig_ino;
5194	/*
5195	 * Calculate the first inode in the inode table block.  Inode
5196	 * numbers are one-based.  That is, the first inode in a block
5197	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5198	 */
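	/*
	 * Worked example (illustrative): with 16 inodes per block,
	 * orig_ino = 35 gives ino = ((35 - 1) & ~15) + 1 = 33, so the
	 * loop below walks inodes 33..48 and skips 35 itself.
	 */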
5199	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5200	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5201		if (ino == orig_ino)
5202			continue;
5203		oi.raw_inode = (struct ext4_inode *) buf;
5204		(void) find_inode_nowait(sb, ino, other_inode_match, &oi);
5205	}
5206}
5207
5208/*
5209 * Post the struct inode info into an on-disk inode location in the
5210 * buffer-cache.  This gobbles the caller's reference to the
5211 * buffer_head in the inode location struct.
5212 *
5213 * The caller must have write access to iloc->bh.
5214 */
5215static int ext4_do_update_inode(handle_t *handle,
5216				struct inode *inode,
5217				struct ext4_iloc *iloc)
5218{
5219	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5220	struct ext4_inode_info *ei = EXT4_I(inode);
5221	struct buffer_head *bh = iloc->bh;
5222	struct super_block *sb = inode->i_sb;
5223	int err = 0, rc, block;
5224	int need_datasync = 0, set_large_file = 0;
5225	uid_t i_uid;
5226	gid_t i_gid;
5227	projid_t i_projid;
5228
5229	spin_lock(&ei->i_raw_lock);
5230
5231	/* For fields not tracked in the in-memory inode,
5232	 * initialise them to zero for new inodes. */
5233	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5234		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5235
5236	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5237	i_uid = i_uid_read(inode);
5238	i_gid = i_gid_read(inode);
5239	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
5240	if (!(test_opt(inode->i_sb, NO_UID32))) {
5241		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
5242		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
5243/*
5244 * Fix up interoperability with old kernels. Otherwise, old inodes get
5245 * re-used with the upper 16 bits of the uid/gid intact
5246 */
5247		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
5248			raw_inode->i_uid_high = 0;
5249			raw_inode->i_gid_high = 0;
5250		} else {
5251			raw_inode->i_uid_high =
5252				cpu_to_le16(high_16_bits(i_uid));
5253			raw_inode->i_gid_high =
5254				cpu_to_le16(high_16_bits(i_gid));
5255		}
5256	} else {
5257		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
5258		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
5259		raw_inode->i_uid_high = 0;
5260		raw_inode->i_gid_high = 0;
5261	}
5262	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5263
5264	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5265	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5266	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5267	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5268
5269	err = ext4_inode_blocks_set(handle, raw_inode, ei);
5270	if (err) {
5271		spin_unlock(&ei->i_raw_lock);
5272		goto out_brelse;
5273	}
5274	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5275	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
5276	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
5277		raw_inode->i_file_acl_high =
5278			cpu_to_le16(ei->i_file_acl >> 32);
5279	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5280	if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
5281		ext4_isize_set(raw_inode, ei->i_disksize);
5282		need_datasync = 1;
5283	}
5284	if (ei->i_disksize > 0x7fffffffULL) {
5285		if (!ext4_has_feature_large_file(sb) ||
5286		    EXT4_SB(sb)->s_es->s_rev_level ==
5287		    cpu_to_le32(EXT4_GOOD_OLD_REV))
5288			set_large_file = 1;
5289	}
5290	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5291	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5292		if (old_valid_dev(inode->i_rdev)) {
5293			raw_inode->i_block[0] =
5294				cpu_to_le32(old_encode_dev(inode->i_rdev));
5295			raw_inode->i_block[1] = 0;
5296		} else {
5297			raw_inode->i_block[0] = 0;
5298			raw_inode->i_block[1] =
5299				cpu_to_le32(new_encode_dev(inode->i_rdev));
5300			raw_inode->i_block[2] = 0;
5301		}
5302	} else if (!ext4_has_inline_data(inode)) {
5303		for (block = 0; block < EXT4_N_BLOCKS; block++)
5304			raw_inode->i_block[block] = ei->i_data[block];
5305	}
5306
5307	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5308		u64 ivers = ext4_inode_peek_iversion(inode);
5309
5310		raw_inode->i_disk_version = cpu_to_le32(ivers);
5311		if (ei->i_extra_isize) {
5312			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5313				raw_inode->i_version_hi =
5314					cpu_to_le32(ivers >> 32);
5315			raw_inode->i_extra_isize =
5316				cpu_to_le16(ei->i_extra_isize);
5317		}
5318	}
5319
5320	BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
5321	       i_projid != EXT4_DEF_PROJID);
5322
5323	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5324	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5325		raw_inode->i_projid = cpu_to_le32(i_projid);
5326
5327	ext4_inode_csum_set(inode, raw_inode, ei);
5328	spin_unlock(&ei->i_raw_lock);
5329	if (inode->i_sb->s_flags & SB_LAZYTIME)
5330		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5331					      bh->b_data);
5332
5333	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5334	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5335	if (!err)
5336		err = rc;
5337	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5338	if (set_large_file) {
5339		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5340		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
5341		if (err)
5342			goto out_brelse;
5343		ext4_set_feature_large_file(sb);
5344		ext4_handle_sync(handle);
5345		err = ext4_handle_dirty_super(handle, sb);
5346	}
5347	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5348out_brelse:
5349	brelse(bh);
5350	ext4_std_error(inode->i_sb, err);
5351	return err;
5352}
5353
5354/*
5355 * ext4_write_inode()
5356 *
5357 * We are called from a few places:
5358 *
5359 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5360 *   Here, there will be no transaction running. We wait for any running
5361 *   transaction to commit.
5362 *
5363 * - Within flush work (sys_sync(), kupdate and such).
5364 *   We wait on commit, if told to.
5365 *
5366 * - Within iput_final() -> write_inode_now()
5367 *   We wait on commit, if told to.
5368 *
5369 * In all cases it is actually safe for us to return without doing anything,
5370 * because the inode has been copied into a raw inode buffer in
5371 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5372 * writeback.
5373 *
5374 * Note that we are absolutely dependent upon all inode dirtiers doing the
5375 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5376 * which we are interested.
5377 *
5378 * It would be a bug for them to not do this.  The code:
5379 *
5380 *	mark_inode_dirty(inode)
5381 *	stuff();
5382 *	inode->i_size = expr;
5383 *
5384 * is in error because write_inode() could occur while `stuff()' is running,
5385 * and the new i_size will be lost.  Plus the inode will no longer be on the
5386 * superblock's dirty inode list.
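 *
 * The correct ordering, for contrast (illustrative):
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);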
5387 */
5388int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5389{
5390	int err;
5391
5392	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
5393	    sb_rdonly(inode->i_sb))
5394		return 0;
5395
5396	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5397		return -EIO;
5398
5399	if (EXT4_SB(inode->i_sb)->s_journal) {
5400		if (ext4_journal_current_handle()) {
5401			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5402			dump_stack();
5403			return -EIO;
5404		}
5405
5406		/*
5407		 * No need to force transaction in WB_SYNC_NONE mode. Also
5408		 * ext4_sync_fs() will force the commit after everything is
5409		 * written.
5410		 */
5411		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5412			return 0;
5413
5414		err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
5415						EXT4_I(inode)->i_sync_tid);
5416	} else {
5417		struct ext4_iloc iloc;
5418
5419		err = __ext4_get_inode_loc(inode, &iloc, 0);
5420		if (err)
5421			return err;
5422		/*
5423		 * sync(2) will flush the whole buffer cache. No need to do
5424		 * it here separately for each inode.
5425		 */
5426		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5427			sync_dirty_buffer(iloc.bh);
5428		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5429			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
5430					 "IO error syncing inode");
5431			err = -EIO;
5432		}
5433		brelse(iloc.bh);
5434	}
5435	return err;
5436}
5437
5438/*
5439 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
5440 * buffers that are attached to a page straddling i_size and are undergoing
5441 * commit. In that case we have to wait for commit to finish and try again.
5442 */
5443static void ext4_wait_for_tail_page_commit(struct inode *inode)
5444{
5445	struct page *page;
5446	unsigned offset;
5447	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5448	tid_t commit_tid = 0;
5449	int ret;
5450
5451	offset = inode->i_size & (PAGE_SIZE - 1);
5452	/*
5453	 * All buffers in the last page remain valid? Then there's nothing to
5454	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
5455	 * blocksize case.
5456	 */
5457	if (offset > PAGE_SIZE - i_blocksize(inode))
5458		return;
5459	while (1) {
5460		page = find_lock_page(inode->i_mapping,
5461				      inode->i_size >> PAGE_SHIFT);
5462		if (!page)
5463			return;
5464		ret = __ext4_journalled_invalidatepage(page, offset,
5465						PAGE_SIZE - offset);
5466		unlock_page(page);
5467		put_page(page);
5468		if (ret != -EBUSY)
5469			return;
5470		commit_tid = 0;
5471		read_lock(&journal->j_state_lock);
5472		if (journal->j_committing_transaction)
5473			commit_tid = journal->j_committing_transaction->t_tid;
5474		read_unlock(&journal->j_state_lock);
5475		if (commit_tid)
5476			jbd2_log_wait_commit(journal, commit_tid);
5477	}
5478}
5479
5480/*
5481 * ext4_setattr()
5482 *
5483 * Called from notify_change.
5484 *
5485 * We want to trap VFS attempts to truncate the file as soon as
5486 * possible.  In particular, we want to make sure that when the VFS
5487 * shrinks i_size, we put the inode on the orphan list and modify
5488 * i_disksize immediately, so that during the subsequent flushing of
5489 * dirty pages and freeing of disk blocks, we can guarantee that any
5490 * commit will leave the blocks being flushed in an unused state on
5491 * disk.  (On recovery, the inode will get truncated and the blocks will
5492 * be freed, so we have a strong guarantee that no future commit will
5493 * leave these blocks visible to the user.)
5494 *
5495 * Another thing we have to assure is that if we are in ordered mode
5496 * and the inode is still attached to the committing transaction, we
5497 * must start writeout of all the dirty pages which are being truncated.
5498 * This way we are sure that all the data written in the previous
5499 * transaction are already on disk (truncate waits for pages under
5500 * writeback).
5501 *
5502 * Called with inode->i_mutex down.
5503 */
5504int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5505{
5506	struct inode *inode = d_inode(dentry);
5507	int error, rc = 0;
5508	int orphan = 0;
5509	const unsigned int ia_valid = attr->ia_valid;
5510
5511	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5512		return -EIO;
5513
5514	if (unlikely(IS_IMMUTABLE(inode)))
5515		return -EPERM;
5516
5517	if (unlikely(IS_APPEND(inode) &&
5518		     (ia_valid & (ATTR_MODE | ATTR_UID |
5519				  ATTR_GID | ATTR_TIMES_SET))))
5520		return -EPERM;
5521
5522	error = setattr_prepare(dentry, attr);
5523	if (error)
5524		return error;
5525
5526	error = fscrypt_prepare_setattr(dentry, attr);
5527	if (error)
5528		return error;
5529
5530	error = fsverity_prepare_setattr(dentry, attr);
5531	if (error)
5532		return error;
5533
5534	if (is_quota_modification(inode, attr)) {
5535		error = dquot_initialize(inode);
5536		if (error)
5537			return error;
5538	}
5539	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5540	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5541		handle_t *handle;
5542
5543		/* (user+group)*(old+new) structure, inode write (sb,
5544		 * inode block, ? - but truncate inode update has it) */
5545		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5546			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5547			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5548		if (IS_ERR(handle)) {
5549			error = PTR_ERR(handle);
5550			goto err_out;
5551		}
5552
5553		/* dquot_transfer() calls back ext4_get_inode_usage() which
5554		 * counts xattr inode references.
5555		 */
5556		down_read(&EXT4_I(inode)->xattr_sem);
5557		error = dquot_transfer(inode, attr);
5558		up_read(&EXT4_I(inode)->xattr_sem);
5559
5560		if (error) {
5561			ext4_journal_stop(handle);
5562			return error;
5563		}
5564		/* Update corresponding info in inode so that everything is in
5565		 * one transaction */
5566		if (attr->ia_valid & ATTR_UID)
5567			inode->i_uid = attr->ia_uid;
5568		if (attr->ia_valid & ATTR_GID)
5569			inode->i_gid = attr->ia_gid;
5570		error = ext4_mark_inode_dirty(handle, inode);
5571		ext4_journal_stop(handle);
5572	}
5573
5574	if (attr->ia_valid & ATTR_SIZE) {
5575		handle_t *handle;
5576		loff_t oldsize = inode->i_size;
5577		int shrink = (attr->ia_size < inode->i_size);
5578
5579		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5580			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5581
5582			if (attr->ia_size > sbi->s_bitmap_maxbytes)
5583				return -EFBIG;
5584		}
5585		if (!S_ISREG(inode->i_mode))
5586			return -EINVAL;
5587
5588		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5589			inode_inc_iversion(inode);
5590
5591		if (shrink) {
5592			if (ext4_should_order_data(inode)) {
5593				error = ext4_begin_ordered_truncate(inode,
5594							    attr->ia_size);
5595				if (error)
5596					goto err_out;
5597			}
5598			/*
5599			 * Blocks are going to be removed from the inode. Wait
5600			 * for dio in flight.
5601			 */
5602			inode_dio_wait(inode);
5603		}
5604
5605		down_write(&EXT4_I(inode)->i_mmap_sem);
5606
5607		rc = ext4_break_layouts(inode);
5608		if (rc) {
5609			up_write(&EXT4_I(inode)->i_mmap_sem);
5610			return rc;
5611		}
5612
5613		if (attr->ia_size != inode->i_size) {
5614			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5615			if (IS_ERR(handle)) {
5616				error = PTR_ERR(handle);
5617				goto out_mmap_sem;
5618			}
5619			if (ext4_handle_valid(handle) && shrink) {
5620				error = ext4_orphan_add(handle, inode);
5621				orphan = 1;
5622			}
5623			/*
5624			 * Update c/mtime on truncate up, ext4_truncate() will
5625			 * update c/mtime in shrink case below
5626			 */
5627			if (!shrink) {
5628				inode->i_mtime = current_time(inode);
5629				inode->i_ctime = inode->i_mtime;
5630			}
5631			down_write(&EXT4_I(inode)->i_data_sem);
5632			EXT4_I(inode)->i_disksize = attr->ia_size;
5633			rc = ext4_mark_inode_dirty(handle, inode);
5634			if (!error)
5635				error = rc;
5636			/*
5637			 * We have to update i_size under i_data_sem together
5638			 * with i_disksize to avoid races with writeback code
5639			 * running ext4_wb_update_i_disksize().
5640			 */
5641			if (!error)
5642				i_size_write(inode, attr->ia_size);
5643			up_write(&EXT4_I(inode)->i_data_sem);
5644			ext4_journal_stop(handle);
5645			if (error)
5646				goto out_mmap_sem;
5647			if (!shrink) {
5648				pagecache_isize_extended(inode, oldsize,
5649							 inode->i_size);
5650			} else if (ext4_should_journal_data(inode)) {
5651				ext4_wait_for_tail_page_commit(inode);
5652			}
5653		}
5654
5655		/*
5656		 * Truncate pagecache after we've waited for commit
5657		 * in data=journal mode to make pages freeable.
5658		 */
5659		truncate_pagecache(inode, inode->i_size);
5660		/*
5661		 * Call ext4_truncate() even if i_size didn't change to
5662		 * truncate possible preallocated blocks.
5663		 */
5664		if (attr->ia_size <= oldsize) {
5665			rc = ext4_truncate(inode);
5666			if (rc)
5667				error = rc;
5668		}
5669out_mmap_sem:
5670		up_write(&EXT4_I(inode)->i_mmap_sem);
5671	}
5672
5673	if (!error) {
5674		setattr_copy(inode, attr);
5675		mark_inode_dirty(inode);
5676	}
5677
5678	/*
5679	 * If the call to ext4_truncate failed to get a transaction handle at
5680	 * all, we need to clean up the in-core orphan list manually.
5681	 */
5682	if (orphan && inode->i_nlink)
5683		ext4_orphan_del(NULL, inode);
5684
5685	if (!error && (ia_valid & ATTR_MODE))
5686		rc = posix_acl_chmod(inode, inode->i_mode);
5687
5688err_out:
5689	ext4_std_error(inode->i_sb, error);
5690	if (!error)
5691		error = rc;
5692	return error;
5693}
5694
5695int ext4_getattr(const struct path *path, struct kstat *stat,
5696		 u32 request_mask, unsigned int query_flags)
5697{
5698	struct inode *inode = d_inode(path->dentry);
5699	struct ext4_inode *raw_inode;
5700	struct ext4_inode_info *ei = EXT4_I(inode);
5701	unsigned int flags;
5702
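	/*
	 * Note: raw_inode is never dereferenced here;
	 * EXT4_FITS_IN_INODE() only uses it for offsetof/sizeof
	 * arithmetic, so leaving it uninitialized is safe.
	 */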
5703	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5704		stat->result_mask |= STATX_BTIME;
5705		stat->btime.tv_sec = ei->i_crtime.tv_sec;
5706		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5707	}
5708
5709	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5710	if (flags & EXT4_APPEND_FL)
5711		stat->attributes |= STATX_ATTR_APPEND;
5712	if (flags & EXT4_COMPR_FL)
5713		stat->attributes |= STATX_ATTR_COMPRESSED;
5714	if (flags & EXT4_ENCRYPT_FL)
5715		stat->attributes |= STATX_ATTR_ENCRYPTED;
5716	if (flags & EXT4_IMMUTABLE_FL)
5717		stat->attributes |= STATX_ATTR_IMMUTABLE;
5718	if (flags & EXT4_NODUMP_FL)
5719		stat->attributes |= STATX_ATTR_NODUMP;
5720
5721	stat->attributes_mask |= (STATX_ATTR_APPEND |
5722				  STATX_ATTR_COMPRESSED |
5723				  STATX_ATTR_ENCRYPTED |
5724				  STATX_ATTR_IMMUTABLE |
5725				  STATX_ATTR_NODUMP);
5726
5727	generic_fillattr(inode, stat);
5728	return 0;
5729}
5730
5731int ext4_file_getattr(const struct path *path, struct kstat *stat,
5732		      u32 request_mask, unsigned int query_flags)
5733{
5734	struct inode *inode = d_inode(path->dentry);
5735	u64 delalloc_blocks;
5736
5737	ext4_getattr(path, stat, request_mask, query_flags);
5738
5739	/*
5740	 * If there is inline data in the inode, the inode will normally not
5741	 * have data blocks allocated (it may have an external xattr block).
5742	 * Report at least one sector for such files, so tools like tar, rsync,
5743	 * and others don't incorrectly think the file is completely sparse.
5744	 */
5745	if (unlikely(ext4_has_inline_data(inode)))
5746		stat->blocks += (stat->size + 511) >> 9;
5747
5748	/*
5749	 * We can't update i_blocks if the block allocation is delayed;
5750	 * otherwise, in case of a system crash before the real block
5751	 * allocation is done, we would have i_blocks inconsistent with
5752	 * the on-disk file blocks.
5753	 * We always keep i_blocks updated together with the real
5754	 * allocation.  But to avoid confusing userspace, stat
5755	 * returns blocks that include the delayed allocation blocks
5756	 * for this file.
5757	 */
5758	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5759				   EXT4_I(inode)->i_reserved_data_blocks);
5760	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5761	return 0;
5762}
5763
5764static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5765				   int pextents)
5766{
5767	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5768		return ext4_ind_trans_blocks(inode, lblocks);
5769	return ext4_ext_index_trans_blocks(inode, pextents);
5770}
5771
5772/*
5773 * Account for index blocks, block group bitmaps and block group
5774 * descriptor blocks if we modify data blocks and index blocks.  In the
5775 * worst case, the index blocks spread over different block groups.
5776 *
5777 * If the data blocks are discontiguous, they may spread over different
5778 * block groups too.  Even if they are contiguous, with flexbg they
5779 * could still cross a block group boundary.
5780 *
5781 * Also account for superblock, inode, quota and xattr blocks.
5782 */
5783static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5784				  int pextents)
5785{
5786	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5787	int gdpblocks;
5788	int idxblocks;
5789	int ret = 0;
5790
5791	/*
5792	 * How many index blocks do we need to touch to map @lblocks logical
5793	 * blocks to @pextents physical extents?
5794	 */
5795	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5796
5797	ret = idxblocks;
5798
5799	/*
5800	 * Now let's see how many group bitmaps and group descriptor blocks
5801	 * we need to account for
5802	 */
5803	groups = idxblocks + pextents;
5804	gdpblocks = groups;
5805	if (groups > ngroups)
5806		groups = ngroups;
5807	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5808		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5809
5810	/* bitmaps and block group descriptor blocks */
5811	ret += groups + gdpblocks;
5812
5813	/* Blocks for super block, inode, quota and xattr blocks */
5814	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5815
5816	return ret;
5817}
5818
5819/*
5820 * Calculate the total number of credits to reserve to fit
5821 * the modification of a single page into a single transaction,
5822 * which may include multiple chunks of block allocations.
5823 *
5824 * This could be called via ext4_write_begin().
5825 *
5826 * We need to consider the worst case, when
5827 * we allocate one new block per extent.
5828 */
5829int ext4_writepage_trans_blocks(struct inode *inode)
5830{
5831	int bpp = ext4_journal_blocks_per_page(inode);
5832	int ret;
5833
5834	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5835
5836	/* Account for data blocks for journalled mode */
5837	if (ext4_should_journal_data(inode))
5838		ret += bpp;
5839	return ret;
5840}
5841
5842/*
5843 * Calculate the journal credits for a chunk of data modification.
5844 *
5845 * This is called from DIO, fallocate or whoever else calls
5846 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5847 *
5848 * Journal buffers for data blocks are not included here, as DIO
5849 * and fallocate do not need to journal data buffers.
5850 */
5851int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5852{
5853	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5854}
5855
5856/*
5857 * The caller must have previously called ext4_reserve_inode_write().
5858 * Given this, we know that the caller already has write access to iloc->bh.
5859 */
5860int ext4_mark_iloc_dirty(handle_t *handle,
5861			 struct inode *inode, struct ext4_iloc *iloc)
5862{
5863	int err = 0;
5864
5865	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5866		put_bh(iloc->bh);
5867		return -EIO;
5868	}
5869	if (IS_I_VERSION(inode))
5870		inode_inc_iversion(inode);
5871
5872	/* ext4_do_update_inode() consumes one bh->b_count */
5873	get_bh(iloc->bh);
5874
5875	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5876	err = ext4_do_update_inode(handle, inode, iloc);
5877	put_bh(iloc->bh);
5878	return err;
5879}
5880
5881/*
5882 * On success, we end up with an outstanding reference count against
5883 * iloc->bh.  This _must_ be cleaned up later.
5884 */
5885
5886int
5887ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5888			 struct ext4_iloc *iloc)
5889{
5890	int err;
5891
5892	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5893		return -EIO;
5894
5895	err = ext4_get_inode_loc(inode, iloc);
5896	if (!err) {
5897		BUFFER_TRACE(iloc->bh, "get_write_access");
5898		err = ext4_journal_get_write_access(handle, iloc->bh);
5899		if (err) {
5900			brelse(iloc->bh);
5901			iloc->bh = NULL;
5902		}
5903	}
5904	ext4_std_error(inode->i_sb, err);
5905	return err;
5906}
5907
5908static int __ext4_expand_extra_isize(struct inode *inode,
5909				     unsigned int new_extra_isize,
5910				     struct ext4_iloc *iloc,
5911				     handle_t *handle, int *no_expand)
5912{
5913	struct ext4_inode *raw_inode;
5914	struct ext4_xattr_ibody_header *header;
5915	int error;
5916
5917	raw_inode = ext4_raw_inode(iloc);
5918
5919	header = IHDR(inode, raw_inode);
5920
5921	/* No extended attributes present */
5922	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5923	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5924		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5925		       EXT4_I(inode)->i_extra_isize, 0,
5926		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
5927		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5928		return 0;
5929	}
5930
5931	/* try to expand with EAs present */
5932	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5933					   raw_inode, handle);
5934	if (error) {
5935		/*
5936		 * Inode size expansion failed; don't try again
5937		 */
5938		*no_expand = 1;
5939	}
5940
5941	return error;
5942}
5943
5944/*
5945 * Expand an inode by new_extra_isize bytes.
5946 * Returns 0 on success or negative error number on failure.
5947 */
5948static int ext4_try_to_expand_extra_isize(struct inode *inode,
5949					  unsigned int new_extra_isize,
5950					  struct ext4_iloc iloc,
5951					  handle_t *handle)
5952{
5953	int no_expand;
5954	int error;
5955
5956	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5957		return -EOVERFLOW;
5958
5959	/*
5960	 * In nojournal mode, we can immediately attempt to expand
5961	 * the inode.  When journaled, we first need to obtain extra
5962	 * buffer credits since we may write into the EA block
5963	 * with this same handle. If journal_extend fails, then it will
5964	 * only result in a minor loss of functionality for that inode.
5965	 * If this is felt to be critical, then e2fsck should be run to
5966	 * force a large enough s_min_extra_isize.
5967	 */
5968	if (ext4_handle_valid(handle) &&
5969	    jbd2_journal_extend(handle,
5970				EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
5971		return -ENOSPC;
5972
5973	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5974		return -EBUSY;
5975
5976	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5977					  handle, &no_expand);
5978	ext4_write_unlock_xattr(inode, &no_expand);
5979
5980	return error;
5981}
5982
5983int ext4_expand_extra_isize(struct inode *inode,
5984			    unsigned int new_extra_isize,
5985			    struct ext4_iloc *iloc)
5986{
5987	handle_t *handle;
5988	int no_expand;
5989	int error, rc;
5990
5991	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5992		brelse(iloc->bh);
5993		return -EOVERFLOW;
5994	}
5995
5996	handle = ext4_journal_start(inode, EXT4_HT_INODE,
5997				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5998	if (IS_ERR(handle)) {
5999		error = PTR_ERR(handle);
6000		brelse(iloc->bh);
6001		return error;
6002	}
6003
6004	ext4_write_lock_xattr(inode, &no_expand);
6005
6006	BUFFER_TRACE(iloc->bh, "get_write_access");
6007	error = ext4_journal_get_write_access(handle, iloc->bh);
6008	if (error) {
6009		brelse(iloc->bh);
6010		goto out_stop;
6011	}
6012
6013	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
6014					  handle, &no_expand);
6015
6016	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
6017	if (!error)
6018		error = rc;
6019
6020	ext4_write_unlock_xattr(inode, &no_expand);
6021out_stop:
6022	ext4_journal_stop(handle);
6023	return error;
6024}
6025
6026/*
6027 * What we do here is to mark the in-core inode as clean with respect to inode
6028 * dirtiness (it may still be data-dirty).
6029 * This means that the in-core inode may be reaped by prune_icache
6030 * without having to perform any I/O.  This is a very good thing,
6031 * because *any* task may call prune_icache - even ones which
6032 * have a transaction open against a different journal.
6033 *
6034 * Is this cheating?  Not really.  Sure, we haven't written the
6035 * inode out, but prune_icache isn't a user-visible syncing function.
6036 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
6037 * we start and wait on commits.
6038 */
6039int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
6040{
6041	struct ext4_iloc iloc;
6042	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6043	int err;
6044
6045	might_sleep();
6046	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6047	err = ext4_reserve_inode_write(handle, inode, &iloc);
6048	if (err)
6049		return err;
6050
6051	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6052		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6053					       iloc, handle);
6054
6055	return ext4_mark_iloc_dirty(handle, inode, &iloc);
6056}
6057
6058/*
6059 * ext4_dirty_inode() is called from __mark_inode_dirty()
6060 *
6061 * We're really interested in the case where a file is being extended.
6062 * i_size has been changed by generic_commit_write() and we thus need
6063 * to include the updated inode in the current transaction.
6064 *
6065 * Also, dquot_alloc_block() will always dirty the inode when blocks
6066 * are allocated to the file.
6067 *
6068 * If the inode is marked synchronous, we don't honour that here - doing
6069 * so would cause a commit on atime updates, which we don't bother doing.
6070 * We handle synchronous inodes at the highest possible level.
6071 *
6072 * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
6073 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
6074 * to copy into the on-disk inode structure are the timestamp fields.
6075 */
6076void ext4_dirty_inode(struct inode *inode, int flags)
6077{
6078	handle_t *handle;
6079
6080	if (flags == I_DIRTY_TIME)
6081		return;
6082	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6083	if (IS_ERR(handle))
6084		goto out;
6085
6086	ext4_mark_inode_dirty(handle, inode);
6087
6088	ext4_journal_stop(handle);
6089out:
6090	return;
6091}
6092
6093int ext4_change_inode_journal_flag(struct inode *inode, int val)
6094{
6095	journal_t *journal;
6096	handle_t *handle;
6097	int err;
6098	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6099
6100	/*
6101	 * We have to be very careful here: changing a data block's
6102	 * journaling status dynamically is dangerous.  If we write a
6103	 * data block to the journal, change the status and then delete
6104	 * that block, we risk forgetting to revoke the old log record
6105	 * from the journal and so a subsequent replay can corrupt data.
6106	 * So, first we make sure that the journal is empty and that
6107	 * nobody is changing anything.
6108	 */
6109
6110	journal = EXT4_JOURNAL(inode);
6111	if (!journal)
6112		return 0;
6113	if (is_journal_aborted(journal))
6114		return -EROFS;
6115
6116	/* Wait for all existing dio workers */
6117	inode_dio_wait(inode);
6118
6119	/*
6120	 * Before flushing the journal and switching the inode's aops, we have
6121	 * to flush all dirty data the inode has. There can be outstanding
6122	 * delayed allocations, there can be unwritten extents created by
6123	 * fallocate or buffered writes in dioread_nolock mode covered by
6124	 * dirty data which can be converted only after flushing the dirty
6125	 * data (and journalled aops don't know how to handle these cases).
6126	 */
6127	if (val) {
6128		down_write(&EXT4_I(inode)->i_mmap_sem);
6129		err = filemap_write_and_wait(inode->i_mapping);
6130		if (err < 0) {
6131			up_write(&EXT4_I(inode)->i_mmap_sem);
6132			return err;
6133		}
6134	}
6135
6136	percpu_down_write(&sbi->s_journal_flag_rwsem);
6137	jbd2_journal_lock_updates(journal);
6138
6139	/*
6140	 * OK, there are no updates running now, and all cached data is
6141	 * synced to disk.  We are now in a completely consistent state
6142	 * which doesn't have anything in the journal, and we know that
6143	 * no filesystem updates are running, so it is safe to modify
6144	 * the inode's in-core data-journaling state flag now.
6145	 */
6146
6147	if (val)
6148		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6149	else {
6150		err = jbd2_journal_flush(journal);
6151		if (err < 0) {
6152			jbd2_journal_unlock_updates(journal);
6153			percpu_up_write(&sbi->s_journal_flag_rwsem);
6154			return err;
6155		}
6156		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6157	}
6158	ext4_set_aops(inode);
6159
6160	jbd2_journal_unlock_updates(journal);
6161	percpu_up_write(&sbi->s_journal_flag_rwsem);
6162
6163	if (val)
6164		up_write(&EXT4_I(inode)->i_mmap_sem);
6165
6166	/* Finally we can mark the inode as dirty. */
6167
6168	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6169	if (IS_ERR(handle))
6170		return PTR_ERR(handle);
6171
6172	err = ext4_mark_inode_dirty(handle, inode);
6173	ext4_handle_sync(handle);
6174	ext4_journal_stop(handle);
6175	ext4_std_error(inode->i_sb, err);
6176
6177	return err;
6178}
6179
6180static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
6181{
6182	return !buffer_mapped(bh);
6183}
6184
6185vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6186{
6187	struct vm_area_struct *vma = vmf->vma;
6188	struct page *page = vmf->page;
6189	loff_t size;
6190	unsigned long len;
6191	int err;
6192	vm_fault_t ret;
6193	struct file *file = vma->vm_file;
6194	struct inode *inode = file_inode(file);
6195	struct address_space *mapping = inode->i_mapping;
6196	handle_t *handle;
6197	get_block_t *get_block;
6198	int retries = 0;
6199
6200	if (unlikely(IS_IMMUTABLE(inode)))
6201		return VM_FAULT_SIGBUS;
6202
6203	sb_start_pagefault(inode->i_sb);
6204	file_update_time(vma->vm_file);
6205
6206	down_read(&EXT4_I(inode)->i_mmap_sem);
6207
6208	err = ext4_convert_inline_data(inode);
6209	if (err)
6210		goto out_ret;
6211
6212	/* Delalloc case is easy... */
6213	if (test_opt(inode->i_sb, DELALLOC) &&
6214	    !ext4_should_journal_data(inode) &&
6215	    !ext4_nonda_switch(inode->i_sb)) {
6216		do {
6217			err = block_page_mkwrite(vma, vmf,
6218						   ext4_da_get_block_prep);
6219		} while (err == -ENOSPC &&
6220		       ext4_should_retry_alloc(inode->i_sb, &retries));
6221		goto out_ret;
6222	}
6223
6224	lock_page(page);
6225	size = i_size_read(inode);
6226	/* Page got truncated from under us? */
6227	if (page->mapping != mapping || page_offset(page) > size) {
6228		unlock_page(page);
6229		ret = VM_FAULT_NOPAGE;
6230		goto out;
6231	}
6232
6233	if (page->index == size >> PAGE_SHIFT)
6234		len = size & ~PAGE_MASK;
6235	else
6236		len = PAGE_SIZE;
6237	/*
6238	 * Return if we have all the buffers mapped. This avoids the need to do
6239	 * journal_start/journal_stop which can block and take a long time.
6240	 */
6241	if (page_has_buffers(page)) {
6242		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6243					    0, len, NULL,
6244					    ext4_bh_unmapped)) {
6245			/* Wait so that we don't change page under IO */
6246			wait_for_stable_page(page);
6247			ret = VM_FAULT_LOCKED;
6248			goto out;
6249		}
6250	}
6251	unlock_page(page);
6252	/* OK, we need to fill the hole... */
6253	if (ext4_should_dioread_nolock(inode))
6254		get_block = ext4_get_block_unwritten;
6255	else
6256		get_block = ext4_get_block;
6257retry_alloc:
6258	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6259				    ext4_writepage_trans_blocks(inode));
6260	if (IS_ERR(handle)) {
6261		ret = VM_FAULT_SIGBUS;
6262		goto out;
6263	}
6264	err = block_page_mkwrite(vma, vmf, get_block);
6265	if (!err && ext4_should_journal_data(inode)) {
6266		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
6267			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
6268			unlock_page(page);
6269			ret = VM_FAULT_SIGBUS;
6270			ext4_journal_stop(handle);
6271			goto out;
6272		}
6273		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6274	}
6275	ext4_journal_stop(handle);
6276	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6277		goto retry_alloc;
6278out_ret:
6279	ret = block_page_mkwrite_return(err);
6280out:
6281	up_read(&EXT4_I(inode)->i_mmap_sem);
6282	sb_end_pagefault(inode->i_sb);
6283	return ret;
6284}
6285
6286vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
6287{
6288	struct inode *inode = file_inode(vmf->vma->vm_file);
6289	vm_fault_t ret;
6290
6291	down_read(&EXT4_I(inode)->i_mmap_sem);
6292	ret = filemap_fault(vmf);
6293	up_read(&EXT4_I(inode)->i_mmap_sem);
6294
6295	return ret;
6296}