   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ext4/inode.c
   4 *
   5 * Copyright (C) 1992, 1993, 1994, 1995
   6 * Remy Card (card@masi.ibp.fr)
   7 * Laboratoire MASI - Institut Blaise Pascal
   8 * Universite Pierre et Marie Curie (Paris VI)
   9 *
  10 *  from
  11 *
  12 *  linux/fs/minix/inode.c
  13 *
  14 *  Copyright (C) 1991, 1992  Linus Torvalds
  15 *
  16 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  17 *	(jj@sunsite.ms.mff.cuni.cz)
  18 *
  19 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  20 */
  21
  22#include <linux/fs.h>
  23#include <linux/time.h>
  24#include <linux/highuid.h>
  25#include <linux/pagemap.h>
  26#include <linux/dax.h>
  27#include <linux/quotaops.h>
  28#include <linux/string.h>
  29#include <linux/buffer_head.h>
  30#include <linux/writeback.h>
  31#include <linux/pagevec.h>
  32#include <linux/mpage.h>
  33#include <linux/namei.h>
  34#include <linux/uio.h>
  35#include <linux/bio.h>
  36#include <linux/workqueue.h>
  37#include <linux/kernel.h>
  38#include <linux/printk.h>
  39#include <linux/slab.h>
  40#include <linux/bitops.h>
  41#include <linux/iomap.h>
  42#include <linux/iversion.h>
  43
  44#include "ext4_jbd2.h"
  45#include "xattr.h"
  46#include "acl.h"
  47#include "truncate.h"
  48
  49#include <trace/events/ext4.h>
  50
  51#define MPAGE_DA_EXTENT_TAIL 0x01
  52
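/*
 * Added note (not in the original file): ext4_inode_csum() computes the
 * metadata checksum over the raw on-disk inode.  The stored checksum
 * fields must not influence the result, so a zero dummy value is fed to
 * the hash in place of i_checksum_lo (and of i_checksum_hi, when the
 * inode is large enough to carry it), and the surrounding bytes are
 * hashed as-is.
 */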
  53static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  54			      struct ext4_inode_info *ei)
  55{
  56	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  57	__u32 csum;
  58	__u16 dummy_csum = 0;
  59	int offset = offsetof(struct ext4_inode, i_checksum_lo);
  60	unsigned int csum_size = sizeof(dummy_csum);
  61
  62	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
  63	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
  64	offset += csum_size;
  65	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  66			   EXT4_GOOD_OLD_INODE_SIZE - offset);
  67
  68	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  69		offset = offsetof(struct ext4_inode, i_checksum_hi);
  70		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
  71				   EXT4_GOOD_OLD_INODE_SIZE,
  72				   offset - EXT4_GOOD_OLD_INODE_SIZE);
  73		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  74			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
  75					   csum_size);
  76			offset += csum_size;
  77		}
  78		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  79				   EXT4_INODE_SIZE(inode->i_sb) - offset);
  80	}
  81
  82	return csum;
  83}
  84
  85static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  86				  struct ext4_inode_info *ei)
  87{
  88	__u32 provided, calculated;
  89
  90	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  91	    cpu_to_le32(EXT4_OS_LINUX) ||
  92	    !ext4_has_metadata_csum(inode->i_sb))
  93		return 1;
  94
  95	provided = le16_to_cpu(raw->i_checksum_lo);
  96	calculated = ext4_inode_csum(inode, raw, ei);
  97	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  98	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  99		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
 100	else
 101		calculated &= 0xFFFF;
 102
 103	return provided == calculated;
 104}
 105
 106static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 107				struct ext4_inode_info *ei)
 108{
 109	__u32 csum;
 110
 111	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 112	    cpu_to_le32(EXT4_OS_LINUX) ||
 113	    !ext4_has_metadata_csum(inode->i_sb))
 114		return;
 115
 116	csum = ext4_inode_csum(inode, raw, ei);
 117	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 118	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 119	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 120		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 121}
 122
 123static inline int ext4_begin_ordered_truncate(struct inode *inode,
 124					      loff_t new_size)
 125{
 126	trace_ext4_begin_ordered_truncate(inode, new_size);
 127	/*
 128	 * If jinode is zero, then we never opened the file for
 129	 * writing, so there's no need to call
  130	 * jbd2_journal_begin_ordered_truncate() since there are no
  131	 * outstanding writes we need to flush.
 132	 */
 133	if (!EXT4_I(inode)->jinode)
 134		return 0;
 135	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 136						   EXT4_I(inode)->jinode,
 137						   new_size);
 138}
 139
 140static void ext4_invalidatepage(struct page *page, unsigned int offset,
 141				unsigned int length);
 142static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 143static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 144static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 145				  int pextents);
 146
 147/*
 148 * Test whether an inode is a fast symlink.
 149 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 150 */
 151int ext4_inode_is_fast_symlink(struct inode *inode)
 152{
 153	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
 154		int ea_blocks = EXT4_I(inode)->i_file_acl ?
 155				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
 156
 157		if (ext4_has_inline_data(inode))
 158			return 0;
 159
 160		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 161	}
 162	return S_ISLNK(inode->i_mode) && inode->i_size &&
 163	       (inode->i_size < EXT4_N_BLOCKS * 4);
 164}
 165
 166/*
 167 * Restart the transaction associated with *handle.  This does a commit,
 168 * so before we call here everything must be consistently dirtied against
 169 * this transaction.
 170 */
 171int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 172				 int nblocks)
 173{
 174	int ret;
 175
 176	/*
 177	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
 178	 * moment, get_block can be called only for blocks inside i_size since
 179	 * page cache has been already dropped and writes are blocked by
 180	 * i_mutex. So we can safely drop the i_data_sem here.
 181	 */
 182	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 183	jbd_debug(2, "restarting handle %p\n", handle);
 184	up_write(&EXT4_I(inode)->i_data_sem);
 185	ret = ext4_journal_restart(handle, nblocks);
 186	down_write(&EXT4_I(inode)->i_data_sem);
 187	ext4_discard_preallocations(inode);
 188
 189	return ret;
 190}
 191
 192/*
 193 * Called at the last iput() if i_nlink is zero.
 194 */
 195void ext4_evict_inode(struct inode *inode)
 196{
 197	handle_t *handle;
 198	int err;
 199	int extra_credits = 3;
 200	struct ext4_xattr_inode_array *ea_inode_array = NULL;
 201
 202	trace_ext4_evict_inode(inode);
 203
 204	if (inode->i_nlink) {
 205		/*
 206		 * When journalling data dirty buffers are tracked only in the
  207		 * journal. So although mm thinks everything is clean and
  208		 * ready for reaping, the inode might still have some pages to
  209		 * write in the running transaction or waiting to be
  210		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
  211		 * (via truncate_inode_pages()) to discard these buffers can
  212		 * cause data loss. Also, even if we did not discard these
  213		 * buffers, we would have no way to find them after the inode
  214		 * is reaped, and thus the user could see stale data if they
  215		 * try to read them before the transaction is checkpointed. So be
 216		 * careful and force everything to disk here... We use
 217		 * ei->i_datasync_tid to store the newest transaction
 218		 * containing inode's data.
 219		 *
 220		 * Note that directories do not have this problem because they
 221		 * don't use page cache.
 222		 */
 223		if (inode->i_ino != EXT4_JOURNAL_INO &&
 224		    ext4_should_journal_data(inode) &&
 225		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
 226		    inode->i_data.nrpages) {
 227			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 228			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 229
 230			jbd2_complete_transaction(journal, commit_tid);
 231			filemap_write_and_wait(&inode->i_data);
 232		}
 233		truncate_inode_pages_final(&inode->i_data);
 234
 235		goto no_delete;
 236	}
 237
 238	if (is_bad_inode(inode))
 239		goto no_delete;
 240	dquot_initialize(inode);
 241
 242	if (ext4_should_order_data(inode))
 243		ext4_begin_ordered_truncate(inode, 0);
 244	truncate_inode_pages_final(&inode->i_data);
 245
 246	/*
  247	 * Protect us against freezing - the iput() caller didn't have to have
  248	 * any protection against it.
  249	 */
 250	sb_start_intwrite(inode->i_sb);
 251
 252	if (!IS_NOQUOTA(inode))
 253		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
 254
 255	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
 256				 ext4_blocks_for_truncate(inode)+extra_credits);
 257	if (IS_ERR(handle)) {
 258		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 259		/*
 260		 * If we're going to skip the normal cleanup, we still need to
 261		 * make sure that the in-core orphan linked list is properly
 262		 * cleaned up.
 263		 */
 264		ext4_orphan_del(NULL, inode);
 265		sb_end_intwrite(inode->i_sb);
 266		goto no_delete;
 267	}
 268
 269	if (IS_SYNC(inode))
 270		ext4_handle_sync(handle);
 271
 272	/*
 273	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
 274	 * special handling of symlinks here because i_size is used to
 275	 * determine whether ext4_inode_info->i_data contains symlink data or
 276	 * block mappings. Setting i_size to 0 will remove its fast symlink
 277	 * status. Erase i_data so that it becomes a valid empty block map.
 278	 */
 279	if (ext4_inode_is_fast_symlink(inode))
 280		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
 281	inode->i_size = 0;
 282	err = ext4_mark_inode_dirty(handle, inode);
 283	if (err) {
 284		ext4_warning(inode->i_sb,
 285			     "couldn't mark inode dirty (err %d)", err);
 286		goto stop_handle;
 287	}
 288	if (inode->i_blocks) {
 289		err = ext4_truncate(inode);
 290		if (err) {
 291			ext4_error(inode->i_sb,
 292				   "couldn't truncate inode %lu (err %d)",
 293				   inode->i_ino, err);
 294			goto stop_handle;
 295		}
 296	}
 297
 298	/* Remove xattr references. */
 299	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
 300				      extra_credits);
 301	if (err) {
 302		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
 303stop_handle:
 304		ext4_journal_stop(handle);
 305		ext4_orphan_del(NULL, inode);
 306		sb_end_intwrite(inode->i_sb);
 307		ext4_xattr_inode_array_free(ea_inode_array);
 308		goto no_delete;
 309	}
 310
 311	/*
 312	 * Kill off the orphan record which ext4_truncate created.
 313	 * AKPM: I think this can be inside the above `if'.
 314	 * Note that ext4_orphan_del() has to be able to cope with the
 315	 * deletion of a non-existent orphan - this is because we don't
 316	 * know if ext4_truncate() actually created an orphan record.
 317	 * (Well, we could do this if we need to, but heck - it works)
 318	 */
 319	ext4_orphan_del(handle, inode);
 320	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();
 321
 322	/*
 323	 * One subtle ordering requirement: if anything has gone wrong
 324	 * (transaction abort, IO errors, whatever), then we can still
 325	 * do these next steps (the fs will already have been marked as
 326	 * having errors), but we can't free the inode if the mark_dirty
 327	 * fails.
 328	 */
 329	if (ext4_mark_inode_dirty(handle, inode))
 330		/* If that failed, just do the required in-core inode clear. */
 331		ext4_clear_inode(inode);
 332	else
 333		ext4_free_inode(handle, inode);
 334	ext4_journal_stop(handle);
 335	sb_end_intwrite(inode->i_sb);
 336	ext4_xattr_inode_array_free(ea_inode_array);
 337	return;
 338no_delete:
 339	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
 340}
 341
 342#ifdef CONFIG_QUOTA
 343qsize_t *ext4_get_reserved_space(struct inode *inode)
 344{
 345	return &EXT4_I(inode)->i_reserved_quota;
 346}
 347#endif
 348
 349/*
 350 * Called with i_data_sem down, which is important since we can call
 351 * ext4_discard_preallocations() from here.
 352 */
 353void ext4_da_update_reserve_space(struct inode *inode,
 354					int used, int quota_claim)
 355{
 356	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 357	struct ext4_inode_info *ei = EXT4_I(inode);
 358
 359	spin_lock(&ei->i_block_reservation_lock);
 360	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 361	if (unlikely(used > ei->i_reserved_data_blocks)) {
 362		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
 363			 "with only %d reserved data blocks",
 364			 __func__, inode->i_ino, used,
 365			 ei->i_reserved_data_blocks);
 366		WARN_ON(1);
 367		used = ei->i_reserved_data_blocks;
 368	}
 369
 370	/* Update per-inode reservations */
 371	ei->i_reserved_data_blocks -= used;
 372	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 373
 374	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 375
 376	/* Update quota subsystem for data blocks */
 377	if (quota_claim)
 378		dquot_claim_block(inode, EXT4_C2B(sbi, used));
 379	else {
 380		/*
  381		 * We did fallocate at an offset that was already delayed
  382		 * allocated. So on delayed-allocation writeback we should
  383		 * not re-claim the quota for fallocated blocks.
 384		 */
 385		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 386	}
 387
 388	/*
 389	 * If we have done all the pending block allocations and if
 390	 * there aren't any writers on the inode, we can discard the
 391	 * inode's preallocations.
 392	 */
 393	if ((ei->i_reserved_data_blocks == 0) &&
 394	    !inode_is_open_for_write(inode))
 395		ext4_discard_preallocations(inode);
 396}
 397
 398static int __check_block_validity(struct inode *inode, const char *func,
 399				unsigned int line,
 400				struct ext4_map_blocks *map)
 401{
 402	if (ext4_has_feature_journal(inode->i_sb) &&
 403	    (inode->i_ino ==
 404	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
 405		return 0;
 406	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 407				   map->m_len)) {
 408		ext4_error_inode(inode, func, line, map->m_pblk,
 409				 "lblock %lu mapped to illegal pblock %llu "
 410				 "(length %d)", (unsigned long) map->m_lblk,
 411				 map->m_pblk, map->m_len);
 412		return -EFSCORRUPTED;
 413	}
 414	return 0;
 415}
 416
 417int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 418		       ext4_lblk_t len)
 419{
 420	int ret;
 421
 422	if (IS_ENCRYPTED(inode))
 423		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 424
 425	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
 426	if (ret > 0)
 427		ret = 0;
 428
 429	return ret;
 430}
 431
 432#define check_block_validity(inode, map)	\
 433	__check_block_validity((inode), __func__, __LINE__, (map))
 434
 435#ifdef ES_AGGRESSIVE_TEST
 436static void ext4_map_blocks_es_recheck(handle_t *handle,
 437				       struct inode *inode,
 438				       struct ext4_map_blocks *es_map,
 439				       struct ext4_map_blocks *map,
 440				       int flags)
 441{
 442	int retval;
 443
 444	map->m_flags = 0;
 445	/*
  446	 * There is a race window in which the results may not be the same,
  447	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
  448	 * is that we look up a block mapping in the extent status tree
  449	 * without taking i_data_sem.  So in the meantime the unwritten
  450	 * extent could have been converted.
 451	 */
 452	down_read(&EXT4_I(inode)->i_data_sem);
 453	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 454		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 455					     EXT4_GET_BLOCKS_KEEP_SIZE);
 456	} else {
 457		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 458					     EXT4_GET_BLOCKS_KEEP_SIZE);
 459	}
 460	up_read((&EXT4_I(inode)->i_data_sem));
 461
 462	/*
  463	 * We don't check m_len because the extent will be collapsed in the
  464	 * status tree.  So the m_len values might not be equal.
 465	 */
 466	if (es_map->m_lblk != map->m_lblk ||
 467	    es_map->m_flags != map->m_flags ||
 468	    es_map->m_pblk != map->m_pblk) {
 469		printk("ES cache assertion failed for inode: %lu "
 470		       "es_cached ex [%d/%d/%llu/%x] != "
 471		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 472		       inode->i_ino, es_map->m_lblk, es_map->m_len,
 473		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
 474		       map->m_len, map->m_pblk, map->m_flags,
 475		       retval, flags);
 476	}
 477}
 478#endif /* ES_AGGRESSIVE_TEST */
 479
  480/*
  481 * The ext4_map_blocks() function tries to look up the requested blocks,
  482 * and returns if the blocks are already mapped.
  483 *
  484 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
  485 * stores the allocated blocks in the result buffer head, and marks it
  486 * mapped.
  487 *
  488 * If the file is extent based, it will call ext4_ext_map_blocks();
  489 * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapped
  490 * files.
  491 *
  492 * On success, it returns the number of blocks being mapped or allocated.  If
  493 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
  494 * @map is marked as unwritten.  If create == 1, it will mark @map as mapped.
  495 *
  496 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
  497 * that case, @map is returned as unmapped but we still fill in map->m_len to
  498 * indicate the length of the hole starting at map->m_lblk.
  499 *
  500 * It returns the error in case of allocation failure.
  501 */
 502int ext4_map_blocks(handle_t *handle, struct inode *inode,
 503		    struct ext4_map_blocks *map, int flags)
 504{
 505	struct extent_status es;
 506	int retval;
 507	int ret = 0;
 508#ifdef ES_AGGRESSIVE_TEST
 509	struct ext4_map_blocks orig_map;
 510
 511	memcpy(&orig_map, map, sizeof(*map));
 512#endif
 513
 514	map->m_flags = 0;
 515	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
 516		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
 517		  (unsigned long) map->m_lblk);
 518
 519	/*
 520	 * ext4_map_blocks returns an int, and m_len is an unsigned int
 521	 */
 522	if (unlikely(map->m_len > INT_MAX))
 523		map->m_len = INT_MAX;
 524
  525	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
 526	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
 527		return -EFSCORRUPTED;
 528
  529	/* Look up the extent status tree first */
 530	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 531		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 532			map->m_pblk = ext4_es_pblock(&es) +
 533					map->m_lblk - es.es_lblk;
 534			map->m_flags |= ext4_es_is_written(&es) ?
 535					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
 536			retval = es.es_len - (map->m_lblk - es.es_lblk);
 537			if (retval > map->m_len)
 538				retval = map->m_len;
 539			map->m_len = retval;
 540		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
 541			map->m_pblk = 0;
 542			retval = es.es_len - (map->m_lblk - es.es_lblk);
 543			if (retval > map->m_len)
 544				retval = map->m_len;
 545			map->m_len = retval;
 546			retval = 0;
 547		} else {
 548			BUG();
 549		}
 550#ifdef ES_AGGRESSIVE_TEST
 551		ext4_map_blocks_es_recheck(handle, inode, map,
 552					   &orig_map, flags);
 553#endif
 554		goto found;
 555	}
 556
 557	/*
 558	 * Try to see if we can get the block without requesting a new
 559	 * file system block.
 560	 */
 561	down_read(&EXT4_I(inode)->i_data_sem);
 562	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 563		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 564					     EXT4_GET_BLOCKS_KEEP_SIZE);
 565	} else {
 566		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 567					     EXT4_GET_BLOCKS_KEEP_SIZE);
 568	}
 569	if (retval > 0) {
 570		unsigned int status;
 571
 572		if (unlikely(retval != map->m_len)) {
 573			ext4_warning(inode->i_sb,
 574				     "ES len assertion failed for inode "
 575				     "%lu: retval %d != map->m_len %d",
 576				     inode->i_ino, retval, map->m_len);
 577			WARN_ON(1);
 578		}
 579
 580		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 581				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 582		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 583		    !(status & EXTENT_STATUS_WRITTEN) &&
 584		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 585				       map->m_lblk + map->m_len - 1))
 586			status |= EXTENT_STATUS_DELAYED;
 587		ret = ext4_es_insert_extent(inode, map->m_lblk,
 588					    map->m_len, map->m_pblk, status);
 589		if (ret < 0)
 590			retval = ret;
 591	}
 592	up_read((&EXT4_I(inode)->i_data_sem));
 593
 594found:
 595	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 596		ret = check_block_validity(inode, map);
 597		if (ret != 0)
 598			return ret;
 599	}
 600
  601	/* If it is only a block(s) lookup */
 602	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 603		return retval;
 604
 605	/*
  606	 * Returns if the blocks have already been allocated.
  607	 *
  608	 * Note that if blocks have been preallocated,
  609	 * ext4_ext_get_block() returns with create = 0
  610	 * and the buffer head unmapped.
 611	 */
 612	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 613		/*
 614		 * If we need to convert extent to unwritten
 615		 * we continue and do the actual work in
 616		 * ext4_ext_map_blocks()
 617		 */
 618		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
 619			return retval;
 620
 621	/*
  622	 * Here we clear m_flags because after allocating a new extent,
 623	 * it will be set again.
 624	 */
 625	map->m_flags &= ~EXT4_MAP_FLAGS;
 626
 627	/*
  628	 * New block allocation and/or writing to an unwritten extent
  629	 * will possibly result in updating i_data, so we take
  630	 * the write lock of i_data_sem, and call get_block()
  631	 * with the create == 1 flag.
 632	 */
 633	down_write(&EXT4_I(inode)->i_data_sem);
 634
 635	/*
  636	 * We need to check the extents flag again here because a migration
  637	 * could have changed the inode type in between.
 638	 */
 639	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 640		retval = ext4_ext_map_blocks(handle, inode, map, flags);
 641	} else {
 642		retval = ext4_ind_map_blocks(handle, inode, map, flags);
 643
 644		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 645			/*
 646			 * We allocated new blocks which will result in
 647			 * i_data's format changing.  Force the migrate
 648			 * to fail by clearing migrate flags
 649			 */
 650			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 651		}
 652
 653		/*
 654		 * Update reserved blocks/metadata blocks after successful
  655	 * block allocation which had been deferred till now. We don't
  656	 * support fallocate for non-extent files. So we can update
  657	 * the reserved space here.
 658		 */
 659		if ((retval > 0) &&
 660			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 661			ext4_da_update_reserve_space(inode, retval, 1);
 662	}
 663
 664	if (retval > 0) {
 665		unsigned int status;
 666
 667		if (unlikely(retval != map->m_len)) {
 668			ext4_warning(inode->i_sb,
 669				     "ES len assertion failed for inode "
 670				     "%lu: retval %d != map->m_len %d",
 671				     inode->i_ino, retval, map->m_len);
 672			WARN_ON(1);
 673		}
 674
 675		/*
 676		 * We have to zeroout blocks before inserting them into extent
 677		 * status tree. Otherwise someone could look them up there and
 678		 * use them before they are really zeroed. We also have to
 679		 * unmap metadata before zeroing as otherwise writeback can
 680		 * overwrite zeros with stale data from block device.
 681		 */
 682		if (flags & EXT4_GET_BLOCKS_ZERO &&
 683		    map->m_flags & EXT4_MAP_MAPPED &&
 684		    map->m_flags & EXT4_MAP_NEW) {
 685			ret = ext4_issue_zeroout(inode, map->m_lblk,
 686						 map->m_pblk, map->m_len);
 687			if (ret) {
 688				retval = ret;
 689				goto out_sem;
 690			}
 691		}
 692
 693		/*
 694		 * If the extent has been zeroed out, we don't need to update
 695		 * extent status tree.
 696		 */
 697		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
 698		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 699			if (ext4_es_is_written(&es))
 700				goto out_sem;
 701		}
 702		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 703				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 704		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 705		    !(status & EXTENT_STATUS_WRITTEN) &&
 706		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 707				       map->m_lblk + map->m_len - 1))
 708			status |= EXTENT_STATUS_DELAYED;
 709		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 710					    map->m_pblk, status);
 711		if (ret < 0) {
 712			retval = ret;
 713			goto out_sem;
 714		}
 715	}
 716
 717out_sem:
 718	up_write((&EXT4_I(inode)->i_data_sem));
 719	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 720		ret = check_block_validity(inode, map);
 721		if (ret != 0)
 722			return ret;
 723
 724		/*
 725		 * Inodes with freshly allocated blocks where contents will be
 726		 * visible after transaction commit must be on transaction's
 727		 * ordered data list.
 728		 */
 729		if (map->m_flags & EXT4_MAP_NEW &&
 730		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
 731		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
 732		    !ext4_is_quota_file(inode) &&
 733		    ext4_should_order_data(inode)) {
 734			loff_t start_byte =
 735				(loff_t)map->m_lblk << inode->i_blkbits;
 736			loff_t length = (loff_t)map->m_len << inode->i_blkbits;
 737
 738			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
 739				ret = ext4_jbd2_inode_add_wait(handle, inode,
 740						start_byte, length);
 741			else
 742				ret = ext4_jbd2_inode_add_write(handle, inode,
 743						start_byte, length);
 744			if (ret)
 745				return ret;
 746		}
 747	}
 748	return retval;
 749}
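/*
 * Illustrative sketch (not part of the original file): a minimal
 * lookup-only use of ext4_map_blocks() as documented above.  With
 * flags == 0 no allocation happens, so no handle is needed (compare
 * ext4_getblk() below, where handle may be NULL if create is zero).
 * A return > 0 means @map describes a mapped or unwritten extent; 0
 * means a hole whose length is left in map.m_len.  The helper name
 * is hypothetical.
 */
static inline int ext4_example_lookup_block(struct inode *inode,
					    ext4_lblk_t lblk,
					    ext4_fsblk_t *pblk)
{
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;
	map.m_len = 1;
	map.m_flags = 0;

	/* flags == 0: pure lookup, never allocates blocks */
	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED)) {
		*pblk = map.m_pblk;	/* physical block backing lblk */
		return 0;
	}
	return ret < 0 ? ret : -ENOENT;	/* error, or lblk is a hole */
}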
 750
 751/*
 752 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 753 * we have to be careful as someone else may be manipulating b_state as well.
 754 */
 755static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
 756{
 757	unsigned long old_state;
 758	unsigned long new_state;
 759
 760	flags &= EXT4_MAP_FLAGS;
 761
 762	/* Dummy buffer_head? Set non-atomically. */
 763	if (!bh->b_page) {
 764		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
 765		return;
 766	}
 767	/*
 768	 * Someone else may be modifying b_state. Be careful! This is ugly but
 769	 * once we get rid of using bh as a container for mapping information
 770	 * to pass to / from get_block functions, this can go away.
 771	 */
 772	do {
 773		old_state = READ_ONCE(bh->b_state);
 774		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
 775	} while (unlikely(
 776		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
 777}
 778
 779static int _ext4_get_block(struct inode *inode, sector_t iblock,
 780			   struct buffer_head *bh, int flags)
 781{
 782	struct ext4_map_blocks map;
 783	int ret = 0;
 784
 785	if (ext4_has_inline_data(inode))
 786		return -ERANGE;
 787
 788	map.m_lblk = iblock;
 789	map.m_len = bh->b_size >> inode->i_blkbits;
 790
 791	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
 792			      flags);
 793	if (ret > 0) {
 794		map_bh(bh, inode->i_sb, map.m_pblk);
 795		ext4_update_bh_state(bh, map.m_flags);
 796		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 797		ret = 0;
 798	} else if (ret == 0) {
 799		/* hole case, need to fill in bh->b_size */
 800		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 801	}
 802	return ret;
 803}
 804
 805int ext4_get_block(struct inode *inode, sector_t iblock,
 806		   struct buffer_head *bh, int create)
 807{
 808	return _ext4_get_block(inode, iblock, bh,
 809			       create ? EXT4_GET_BLOCKS_CREATE : 0);
 810}
 811
 812/*
  813 * Get block function used when preparing for a buffered write if we require
  814 * creating an unwritten extent when blocks haven't been allocated.  The extent
 815 * will be converted to written after the IO is complete.
 816 */
 817int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 818			     struct buffer_head *bh_result, int create)
 819{
 820	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
 821		   inode->i_ino, create);
 822	return _ext4_get_block(inode, iblock, bh_result,
 823			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
 824}
 825
 826/* Maximum number of blocks we map for direct IO at once. */
 827#define DIO_MAX_BLOCKS 4096
 828
 829/*
 830 * Get blocks function for the cases that need to start a transaction -
  831 * generally different cases of direct IO and DAX IO. It also handles retries
 832 * in case of ENOSPC.
 833 */
 834static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
 835				struct buffer_head *bh_result, int flags)
 836{
 837	int dio_credits;
 838	handle_t *handle;
 839	int retries = 0;
 840	int ret;
 841
 842	/* Trim mapping request to maximum we can map at once for DIO */
 843	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
 844		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
 845	dio_credits = ext4_chunk_trans_blocks(inode,
 846				      bh_result->b_size >> inode->i_blkbits);
 847retry:
 848	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
 849	if (IS_ERR(handle))
 850		return PTR_ERR(handle);
 851
 852	ret = _ext4_get_block(inode, iblock, bh_result, flags);
 853	ext4_journal_stop(handle);
 854
 855	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 856		goto retry;
 857	return ret;
 858}
 859
 860/* Get block function for DIO reads and writes to inodes without extents */
 861int ext4_dio_get_block(struct inode *inode, sector_t iblock,
 862		       struct buffer_head *bh, int create)
 863{
 864	/* We don't expect handle for direct IO */
 865	WARN_ON_ONCE(ext4_journal_current_handle());
 866
 867	if (!create)
 868		return _ext4_get_block(inode, iblock, bh, 0);
 869	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
 870}
 871
 872/*
  873 * Get block function for AIO DIO writes that creates an unwritten extent if
 874 * blocks are not allocated yet. The extent will be converted to written
 875 * after IO is complete.
 876 */
 877static int ext4_dio_get_block_unwritten_async(struct inode *inode,
 878		sector_t iblock, struct buffer_head *bh_result,	int create)
 879{
 880	int ret;
 881
 882	/* We don't expect handle for direct IO */
 883	WARN_ON_ONCE(ext4_journal_current_handle());
 884
 885	ret = ext4_get_block_trans(inode, iblock, bh_result,
 886				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 887
 888	/*
 889	 * When doing DIO using unwritten extents, we need io_end to convert
 890	 * unwritten extents to written on IO completion. We allocate io_end
  891 * once we spot an unwritten extent and store it in b_private. Generic
 892	 * DIO code keeps b_private set and furthermore passes the value to
 893	 * our completion callback in 'private' argument.
 894	 */
 895	if (!ret && buffer_unwritten(bh_result)) {
 896		if (!bh_result->b_private) {
 897			ext4_io_end_t *io_end;
 898
 899			io_end = ext4_init_io_end(inode, GFP_KERNEL);
 900			if (!io_end)
 901				return -ENOMEM;
 902			bh_result->b_private = io_end;
 903			ext4_set_io_unwritten_flag(inode, io_end);
 904		}
 905		set_buffer_defer_completion(bh_result);
 906	}
 907
 908	return ret;
 909}
 910
 911/*
  912 * Get block function for non-AIO DIO writes that creates an unwritten extent if
 913 * blocks are not allocated yet. The extent will be converted to written
 914 * after IO is complete by ext4_direct_IO_write().
 915 */
 916static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
 917		sector_t iblock, struct buffer_head *bh_result,	int create)
 918{
 919	int ret;
 920
 921	/* We don't expect handle for direct IO */
 922	WARN_ON_ONCE(ext4_journal_current_handle());
 923
 924	ret = ext4_get_block_trans(inode, iblock, bh_result,
 925				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 926
 927	/*
 928	 * Mark inode as having pending DIO writes to unwritten extents.
 929	 * ext4_direct_IO_write() checks this flag and converts extents to
 930	 * written.
 931	 */
 932	if (!ret && buffer_unwritten(bh_result))
 933		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 934
 935	return ret;
 936}
 937
 938static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
 939		   struct buffer_head *bh_result, int create)
 940{
 941	int ret;
 942
 943	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
 944		   inode->i_ino, create);
 945	/* We don't expect handle for direct IO */
 946	WARN_ON_ONCE(ext4_journal_current_handle());
 947
 948	ret = _ext4_get_block(inode, iblock, bh_result, 0);
 949	/*
 950	 * Blocks should have been preallocated! ext4_file_write_iter() checks
 951	 * that.
 952	 */
 953	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
 954
 955	return ret;
 956}
 957
 958
 959/*
 960 * `handle' can be NULL if create is zero
 961 */
 962struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 963				ext4_lblk_t block, int map_flags)
 964{
 965	struct ext4_map_blocks map;
 966	struct buffer_head *bh;
 967	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
 968	int err;
 969
 970	J_ASSERT(handle != NULL || create == 0);
 971
 972	map.m_lblk = block;
 973	map.m_len = 1;
 974	err = ext4_map_blocks(handle, inode, &map, map_flags);
 975
 976	if (err == 0)
 977		return create ? ERR_PTR(-ENOSPC) : NULL;
 978	if (err < 0)
 979		return ERR_PTR(err);
 980
 981	bh = sb_getblk(inode->i_sb, map.m_pblk);
 982	if (unlikely(!bh))
 983		return ERR_PTR(-ENOMEM);
 984	if (map.m_flags & EXT4_MAP_NEW) {
 985		J_ASSERT(create != 0);
 986		J_ASSERT(handle != NULL);
 987
 988		/*
 989		 * Now that we do not always journal data, we should
 990		 * keep in mind whether this should always journal the
 991		 * new buffer as metadata.  For now, regular file
 992		 * writes use ext4_get_block instead, so it's not a
 993		 * problem.
 994		 */
 995		lock_buffer(bh);
 996		BUFFER_TRACE(bh, "call get_create_access");
 997		err = ext4_journal_get_create_access(handle, bh);
 998		if (unlikely(err)) {
 999			unlock_buffer(bh);
1000			goto errout;
1001		}
1002		if (!buffer_uptodate(bh)) {
1003			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1004			set_buffer_uptodate(bh);
1005		}
1006		unlock_buffer(bh);
1007		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1008		err = ext4_handle_dirty_metadata(handle, inode, bh);
1009		if (unlikely(err))
1010			goto errout;
1011	} else
1012		BUFFER_TRACE(bh, "not a new buffer");
1013	return bh;
1014errout:
1015	brelse(bh);
1016	return ERR_PTR(err);
1017}
1018
1019struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1020			       ext4_lblk_t block, int map_flags)
1021{
1022	struct buffer_head *bh;
1023
1024	bh = ext4_getblk(handle, inode, block, map_flags);
1025	if (IS_ERR(bh))
1026		return bh;
1027	if (!bh || ext4_buffer_uptodate(bh))
1028		return bh;
1029	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
1030	wait_on_buffer(bh);
1031	if (buffer_uptodate(bh))
1032		return bh;
1033	put_bh(bh);
1034	return ERR_PTR(-EIO);
1035}
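/*
 * Illustrative sketch (not part of the original file): reading one
 * block with ext4_bread().  As noted above ext4_getblk(), the handle
 * may be NULL as long as no block creation is requested; a NULL
 * return with no error means the logical block is a hole.  The
 * helper name is hypothetical.
 */
static inline struct buffer_head *
ext4_example_read_block(struct inode *inode, ext4_lblk_t block)
{
	struct buffer_head *bh;

	/* map_flags == 0: no EXT4_GET_BLOCKS_CREATE, so NULL handle is fine */
	bh = ext4_bread(NULL, inode, block, 0);
	if (IS_ERR(bh))
		return bh;	/* mapping or I/O error */
	if (!bh)
		return NULL;	/* hole: nothing allocated here */
	/* bh is uptodate; the caller must brelse(bh) when done */
	return bh;
}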
1036
1037/* Read a contiguous batch of blocks. */
1038int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
1039		     bool wait, struct buffer_head **bhs)
1040{
1041	int i, err;
1042
1043	for (i = 0; i < bh_count; i++) {
1044		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
1045		if (IS_ERR(bhs[i])) {
1046			err = PTR_ERR(bhs[i]);
1047			bh_count = i;
1048			goto out_brelse;
1049		}
1050	}
1051
1052	for (i = 0; i < bh_count; i++)
1053		/* Note that NULL bhs[i] is valid because of holes. */
1054		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
1055			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
1056				    &bhs[i]);
1057
1058	if (!wait)
1059		return 0;
1060
1061	for (i = 0; i < bh_count; i++)
1062		if (bhs[i])
1063			wait_on_buffer(bhs[i]);
1064
1065	for (i = 0; i < bh_count; i++) {
1066		if (bhs[i] && !buffer_uptodate(bhs[i])) {
1067			err = -EIO;
1068			goto out_brelse;
1069		}
1070	}
1071	return 0;
1072
1073out_brelse:
1074	for (i = 0; i < bh_count; i++) {
1075		brelse(bhs[i]);
1076		bhs[i] = NULL;
1077	}
1078	return err;
1079}
1080
1081int ext4_walk_page_buffers(handle_t *handle,
1082			   struct buffer_head *head,
1083			   unsigned from,
1084			   unsigned to,
1085			   int *partial,
1086			   int (*fn)(handle_t *handle,
1087				     struct buffer_head *bh))
1088{
1089	struct buffer_head *bh;
1090	unsigned block_start, block_end;
1091	unsigned blocksize = head->b_size;
1092	int err, ret = 0;
1093	struct buffer_head *next;
1094
1095	for (bh = head, block_start = 0;
1096	     ret == 0 && (bh != head || !block_start);
1097	     block_start = block_end, bh = next) {
1098		next = bh->b_this_page;
1099		block_end = block_start + blocksize;
1100		if (block_end <= from || block_start >= to) {
1101			if (partial && !buffer_uptodate(bh))
1102				*partial = 1;
1103			continue;
1104		}
1105		err = (*fn)(handle, bh);
1106		if (!ret)
1107			ret = err;
1108	}
1109	return ret;
1110}
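/*
 * Illustrative sketch (not part of the original file): how a caller
 * typically drives ext4_walk_page_buffers().  The walker applies @fn
 * to every buffer overlapping [from, to) and reports, via @partial,
 * whether any buffer outside that range is not uptodate - the same
 * pattern used by ext4_journalled_write_end() below.  The wrapper
 * name is hypothetical.
 */
static inline int ext4_example_walk(handle_t *handle, struct page *page,
				    unsigned from, unsigned to,
				    int (*fn)(handle_t *, struct buffer_head *))
{
	int partial = 0;
	int ret;

	/* page must be locked and have buffers attached */
	ret = ext4_walk_page_buffers(handle, page_buffers(page),
				     from, to, &partial, fn);
	/* if every buffer on the page is uptodate, mark the page so */
	if (!ret && !partial)
		SetPageUptodate(page);
	return ret;
}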
1111
1112/*
1113 * To preserve ordering, it is essential that the hole instantiation and
1114 * the data write be encapsulated in a single transaction.  We cannot
1115 * close off a transaction and start a new one between the ext4_get_block()
1116 * and the commit_write().  So doing the jbd2_journal_start at the start of
1117 * prepare_write() is the right place.
1118 *
1119 * Also, this function can nest inside ext4_writepage().  In that case, we
1120 * *know* that ext4_writepage() has generated enough buffer credits to do the
1121 * whole page.  So we won't block on the journal in that case, which is good,
1122 * because the caller may be PF_MEMALLOC.
1123 *
1124 * By accident, ext4 can be reentered when a transaction is open via
1125 * quota file writes.  If we were to commit the transaction while thus
1126 * reentered, there can be a deadlock - we would be holding a quota
1127 * lock, and the commit would never complete if another thread had a
1128 * transaction open and was blocking on the quota lock - a ranking
1129 * violation.
1130 *
1131 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1132 * will _not_ run commit under these circumstances because handle->h_ref
1133 * is elevated.  We'll still have enough credits for the tiny quotafile
1134 * write.
1135 */
1136int do_journal_get_write_access(handle_t *handle,
1137				struct buffer_head *bh)
1138{
1139	int dirty = buffer_dirty(bh);
1140	int ret;
1141
1142	if (!buffer_mapped(bh) || buffer_freed(bh))
1143		return 0;
1144	/*
1145	 * __block_write_begin() could have dirtied some buffers. Clean
1146	 * the dirty bit as jbd2_journal_get_write_access() could complain
1147	 * otherwise about fs integrity issues. Setting of the dirty bit
1148	 * by __block_write_begin() isn't a real problem here as we clear
1149	 * the bit before releasing a page lock and thus writeback cannot
1150	 * ever write the buffer.
1151	 */
1152	if (dirty)
1153		clear_buffer_dirty(bh);
1154	BUFFER_TRACE(bh, "get write access");
1155	ret = ext4_journal_get_write_access(handle, bh);
1156	if (!ret && dirty)
1157		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1158	return ret;
1159}
1160
1161#ifdef CONFIG_FS_ENCRYPTION
1162static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1163				  get_block_t *get_block)
1164{
1165	unsigned from = pos & (PAGE_SIZE - 1);
1166	unsigned to = from + len;
1167	struct inode *inode = page->mapping->host;
1168	unsigned block_start, block_end;
1169	sector_t block;
1170	int err = 0;
1171	unsigned blocksize = inode->i_sb->s_blocksize;
1172	unsigned bbits;
1173	struct buffer_head *bh, *head, *wait[2];
1174	int nr_wait = 0;
1175	int i;
1176
1177	BUG_ON(!PageLocked(page));
1178	BUG_ON(from > PAGE_SIZE);
1179	BUG_ON(to > PAGE_SIZE);
1180	BUG_ON(from > to);
1181
1182	if (!page_has_buffers(page))
1183		create_empty_buffers(page, blocksize, 0);
1184	head = page_buffers(page);
1185	bbits = ilog2(blocksize);
1186	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1187
1188	for (bh = head, block_start = 0; bh != head || !block_start;
1189	    block++, block_start = block_end, bh = bh->b_this_page) {
1190		block_end = block_start + blocksize;
1191		if (block_end <= from || block_start >= to) {
1192			if (PageUptodate(page)) {
1193				if (!buffer_uptodate(bh))
1194					set_buffer_uptodate(bh);
1195			}
1196			continue;
1197		}
1198		if (buffer_new(bh))
1199			clear_buffer_new(bh);
1200		if (!buffer_mapped(bh)) {
1201			WARN_ON(bh->b_size != blocksize);
1202			err = get_block(inode, block, bh, 1);
1203			if (err)
1204				break;
1205			if (buffer_new(bh)) {
1206				if (PageUptodate(page)) {
1207					clear_buffer_new(bh);
1208					set_buffer_uptodate(bh);
1209					mark_buffer_dirty(bh);
1210					continue;
1211				}
1212				if (block_end > to || block_start < from)
1213					zero_user_segments(page, to, block_end,
1214							   block_start, from);
1215				continue;
1216			}
1217		}
1218		if (PageUptodate(page)) {
1219			if (!buffer_uptodate(bh))
1220				set_buffer_uptodate(bh);
1221			continue;
1222		}
1223		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1224		    !buffer_unwritten(bh) &&
1225		    (block_start < from || block_end > to)) {
1226			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1227			wait[nr_wait++] = bh;
1228		}
1229	}
1230	/*
1231	 * If we issued read requests, let them complete.
1232	 */
1233	for (i = 0; i < nr_wait; i++) {
1234		wait_on_buffer(wait[i]);
1235		if (!buffer_uptodate(wait[i]))
1236			err = -EIO;
1237	}
1238	if (unlikely(err)) {
1239		page_zero_new_buffers(page, from, to);
1240	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
1241		for (i = 0; i < nr_wait; i++) {
1242			int err2;
1243
1244			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
1245								bh_offset(wait[i]));
1246			if (err2) {
1247				clear_buffer_uptodate(wait[i]);
1248				err = err2;
1249			}
1250		}
1251	}
1252
1253	return err;
1254}
1255#endif
1256
1257static int ext4_write_begin(struct file *file, struct address_space *mapping,
1258			    loff_t pos, unsigned len, unsigned flags,
1259			    struct page **pagep, void **fsdata)
1260{
1261	struct inode *inode = mapping->host;
1262	int ret, needed_blocks;
1263	handle_t *handle;
1264	int retries = 0;
1265	struct page *page;
1266	pgoff_t index;
1267	unsigned from, to;
1268
1269	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1270		return -EIO;
1271
1272	trace_ext4_write_begin(inode, pos, len, flags);
1273	/*
 1274	 * Reserve one block more for addition to the orphan list in case
 1275	 * we allocate blocks but the write fails for some reason.
1276	 */
1277	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1278	index = pos >> PAGE_SHIFT;
1279	from = pos & (PAGE_SIZE - 1);
1280	to = from + len;
1281
1282	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1283		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1284						    flags, pagep);
1285		if (ret < 0)
1286			return ret;
1287		if (ret == 1)
1288			return 0;
1289	}
1290
1291	/*
1292	 * grab_cache_page_write_begin() can take a long time if the
1293	 * system is thrashing due to memory pressure, or if the page
1294	 * is being written back.  So grab it first before we start
1295	 * the transaction handle.  This also allows us to allocate
1296	 * the page (if needed) without using GFP_NOFS.
1297	 */
1298retry_grab:
1299	page = grab_cache_page_write_begin(mapping, index, flags);
1300	if (!page)
1301		return -ENOMEM;
1302	unlock_page(page);
1303
1304retry_journal:
1305	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1306	if (IS_ERR(handle)) {
1307		put_page(page);
1308		return PTR_ERR(handle);
1309	}
1310
1311	lock_page(page);
1312	if (page->mapping != mapping) {
1313		/* The page got truncated from under us */
1314		unlock_page(page);
1315		put_page(page);
1316		ext4_journal_stop(handle);
1317		goto retry_grab;
1318	}
1319	/* In case writeback began while the page was unlocked */
1320	wait_for_stable_page(page);
1321
1322#ifdef CONFIG_FS_ENCRYPTION
1323	if (ext4_should_dioread_nolock(inode))
1324		ret = ext4_block_write_begin(page, pos, len,
1325					     ext4_get_block_unwritten);
1326	else
1327		ret = ext4_block_write_begin(page, pos, len,
1328					     ext4_get_block);
1329#else
1330	if (ext4_should_dioread_nolock(inode))
1331		ret = __block_write_begin(page, pos, len,
1332					  ext4_get_block_unwritten);
1333	else
1334		ret = __block_write_begin(page, pos, len, ext4_get_block);
1335#endif
1336	if (!ret && ext4_should_journal_data(inode)) {
1337		ret = ext4_walk_page_buffers(handle, page_buffers(page),
1338					     from, to, NULL,
1339					     do_journal_get_write_access);
1340	}
1341
1342	if (ret) {
1343		bool extended = (pos + len > inode->i_size) &&
1344				!ext4_verity_in_progress(inode);
1345
1346		unlock_page(page);
1347		/*
1348		 * __block_write_begin may have instantiated a few blocks
1349		 * outside i_size.  Trim these off again. Don't need
1350		 * i_size_read because we hold i_mutex.
1351		 *
1352		 * Add inode to orphan list in case we crash before
1353		 * truncate finishes
1354		 */
1355		if (extended && ext4_can_truncate(inode))
1356			ext4_orphan_add(handle, inode);
1357
1358		ext4_journal_stop(handle);
1359		if (extended) {
1360			ext4_truncate_failed_write(inode);
1361			/*
1362			 * If truncate failed early the inode might
1363			 * still be on the orphan list; we need to
1364			 * make sure the inode is removed from the
1365			 * orphan list in that case.
1366			 */
1367			if (inode->i_nlink)
1368				ext4_orphan_del(NULL, inode);
1369		}
1370
1371		if (ret == -ENOSPC &&
1372		    ext4_should_retry_alloc(inode->i_sb, &retries))
1373			goto retry_journal;
1374		put_page(page);
1375		return ret;
1376	}
1377	*pagep = page;
1378	return ret;
1379}
1380
1381/* For write_end() in data=journal mode */
1382static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1383{
1384	int ret;
1385	if (!buffer_mapped(bh) || buffer_freed(bh))
1386		return 0;
1387	set_buffer_uptodate(bh);
1388	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1389	clear_buffer_meta(bh);
1390	clear_buffer_prio(bh);
1391	return ret;
1392}
1393
1394/*
 1395 * We need to pick up the new inode size which generic_commit_write gave us.
 1396 * `file' can be NULL - eg, when called from page_symlink().
 1397 *
 1398 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 1399 * buffers are managed internally.
1400 */
1401static int ext4_write_end(struct file *file,
1402			  struct address_space *mapping,
1403			  loff_t pos, unsigned len, unsigned copied,
1404			  struct page *page, void *fsdata)
1405{
1406	handle_t *handle = ext4_journal_current_handle();
1407	struct inode *inode = mapping->host;
1408	loff_t old_size = inode->i_size;
1409	int ret = 0, ret2;
1410	int i_size_changed = 0;
1411	int inline_data = ext4_has_inline_data(inode);
1412	bool verity = ext4_verity_in_progress(inode);
1413
1414	trace_ext4_write_end(inode, pos, len, copied);
1415	if (inline_data) {
1416		ret = ext4_write_inline_data_end(inode, pos, len,
1417						 copied, page);
1418		if (ret < 0) {
1419			unlock_page(page);
1420			put_page(page);
1421			goto errout;
1422		}
1423		copied = ret;
1424	} else
1425		copied = block_write_end(file, mapping, pos,
1426					 len, copied, page, fsdata);
1427	/*
1428	 * it's important to update i_size while still holding page lock:
1429	 * page writeout could otherwise come in and zero beyond i_size.
1430	 *
1431	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1432	 * blocks are being written past EOF, so skip the i_size update.
1433	 */
1434	if (!verity)
1435		i_size_changed = ext4_update_inode_size(inode, pos + copied);
1436	unlock_page(page);
1437	put_page(page);
1438
1439	if (old_size < pos && !verity)
1440		pagecache_isize_extended(inode, old_size, pos);
1441	/*
1442	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1443	 * makes the holding time of page lock longer. Second, it forces lock
1444	 * ordering of page lock and transaction start for journaling
1445	 * filesystems.
1446	 */
1447	if (i_size_changed || inline_data)
1448		ext4_mark_inode_dirty(handle, inode);
1449
1450	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
 1451		/* If we have allocated more blocks and copied
 1452		 * less, we will have blocks allocated outside
 1453		 * inode->i_size. So truncate them.
 1454		 */
1455		ext4_orphan_add(handle, inode);
1456errout:
1457	ret2 = ext4_journal_stop(handle);
1458	if (!ret)
1459		ret = ret2;
1460
1461	if (pos + len > inode->i_size && !verity) {
1462		ext4_truncate_failed_write(inode);
1463		/*
1464		 * If truncate failed early the inode might still be
1465		 * on the orphan list; we need to make sure the inode
1466		 * is removed from the orphan list in that case.
1467		 */
1468		if (inode->i_nlink)
1469			ext4_orphan_del(NULL, inode);
1470	}
1471
1472	return ret ? ret : copied;
1473}
1474
1475/*
1476 * This is a private version of page_zero_new_buffers() which doesn't
1477 * set the buffer to be dirty, since in data=journalled mode we need
1478 * to call ext4_handle_dirty_metadata() instead.
1479 */
1480static void ext4_journalled_zero_new_buffers(handle_t *handle,
1481					    struct page *page,
1482					    unsigned from, unsigned to)
1483{
1484	unsigned int block_start = 0, block_end;
1485	struct buffer_head *head, *bh;
1486
1487	bh = head = page_buffers(page);
1488	do {
1489		block_end = block_start + bh->b_size;
1490		if (buffer_new(bh)) {
1491			if (block_end > from && block_start < to) {
1492				if (!PageUptodate(page)) {
1493					unsigned start, size;
1494
1495					start = max(from, block_start);
1496					size = min(to, block_end) - start;
1497
1498					zero_user(page, start, size);
1499					write_end_fn(handle, bh);
1500				}
1501				clear_buffer_new(bh);
1502			}
1503		}
1504		block_start = block_end;
1505		bh = bh->b_this_page;
1506	} while (bh != head);
1507}
1508
1509static int ext4_journalled_write_end(struct file *file,
1510				     struct address_space *mapping,
1511				     loff_t pos, unsigned len, unsigned copied,
1512				     struct page *page, void *fsdata)
1513{
1514	handle_t *handle = ext4_journal_current_handle();
1515	struct inode *inode = mapping->host;
1516	loff_t old_size = inode->i_size;
1517	int ret = 0, ret2;
1518	int partial = 0;
1519	unsigned from, to;
1520	int size_changed = 0;
1521	int inline_data = ext4_has_inline_data(inode);
1522	bool verity = ext4_verity_in_progress(inode);
1523
1524	trace_ext4_journalled_write_end(inode, pos, len, copied);
1525	from = pos & (PAGE_SIZE - 1);
1526	to = from + len;
1527
1528	BUG_ON(!ext4_handle_valid(handle));
1529
1530	if (inline_data) {
1531		ret = ext4_write_inline_data_end(inode, pos, len,
1532						 copied, page);
1533		if (ret < 0) {
1534			unlock_page(page);
1535			put_page(page);
1536			goto errout;
1537		}
1538		copied = ret;
1539	} else if (unlikely(copied < len) && !PageUptodate(page)) {
1540		copied = 0;
1541		ext4_journalled_zero_new_buffers(handle, page, from, to);
1542	} else {
1543		if (unlikely(copied < len))
1544			ext4_journalled_zero_new_buffers(handle, page,
1545							 from + copied, to);
1546		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1547					     from + copied, &partial,
1548					     write_end_fn);
1549		if (!partial)
1550			SetPageUptodate(page);
1551	}
1552	if (!verity)
1553		size_changed = ext4_update_inode_size(inode, pos + copied);
1554	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1555	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1556	unlock_page(page);
1557	put_page(page);
1558
1559	if (old_size < pos && !verity)
1560		pagecache_isize_extended(inode, old_size, pos);
1561
1562	if (size_changed || inline_data) {
1563		ret2 = ext4_mark_inode_dirty(handle, inode);
1564		if (!ret)
1565			ret = ret2;
1566	}
1567
1568	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
 1569		/* If we have allocated more blocks and copied
 1570		 * less, we will have blocks allocated outside
 1571		 * inode->i_size. So truncate them.
 1572		 */
1573		ext4_orphan_add(handle, inode);
1574
1575errout:
1576	ret2 = ext4_journal_stop(handle);
1577	if (!ret)
1578		ret = ret2;
1579	if (pos + len > inode->i_size && !verity) {
1580		ext4_truncate_failed_write(inode);
1581		/*
1582		 * If truncate failed early the inode might still be
1583		 * on the orphan list; we need to make sure the inode
1584		 * is removed from the orphan list in that case.
1585		 */
1586		if (inode->i_nlink)
1587			ext4_orphan_del(NULL, inode);
1588	}
1589
1590	return ret ? ret : copied;
1591}
1592
1593/*
1594 * Reserve space for a single cluster
1595 */
1596static int ext4_da_reserve_space(struct inode *inode)
1597{
1598	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1599	struct ext4_inode_info *ei = EXT4_I(inode);
1600	int ret;
1601
1602	/*
1603	 * We will charge metadata quota at writeout time; this saves
1604	 * us from metadata over-estimation, though we may go over by
1605	 * a small amount in the end.  Here we just reserve for data.
1606	 */
1607	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1608	if (ret)
1609		return ret;
1610
1611	spin_lock(&ei->i_block_reservation_lock);
1612	if (ext4_claim_free_clusters(sbi, 1, 0)) {
1613		spin_unlock(&ei->i_block_reservation_lock);
1614		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1615		return -ENOSPC;
1616	}
1617	ei->i_reserved_data_blocks++;
1618	trace_ext4_da_reserve_space(inode);
1619	spin_unlock(&ei->i_block_reservation_lock);
1620
1621	return 0;       /* success */
1622}
1623
1624void ext4_da_release_space(struct inode *inode, int to_free)
1625{
1626	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1627	struct ext4_inode_info *ei = EXT4_I(inode);
1628
1629	if (!to_free)
1630		return;		/* Nothing to release, exit */
1631
1632	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1633
1634	trace_ext4_da_release_space(inode, to_free);
1635	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1636		/*
 1637		 * If there aren't enough reserved blocks, then the
 1638		 * counter is messed up somewhere.  Since this
 1639		 * function is called from invalidatepage, it's
 1640		 * harmless to return without any action.
1641		 */
1642		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1643			 "ino %lu, to_free %d with only %d reserved "
1644			 "data blocks", inode->i_ino, to_free,
1645			 ei->i_reserved_data_blocks);
1646		WARN_ON(1);
1647		to_free = ei->i_reserved_data_blocks;
1648	}
1649	ei->i_reserved_data_blocks -= to_free;
1650
1651	/* update fs dirty data blocks counter */
1652	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1653
1654	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1655
1656	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1657}
1658
1659/*
1660 * Delayed allocation stuff
1661 */
1662
1663struct mpage_da_data {
1664	struct inode *inode;
1665	struct writeback_control *wbc;
1666
1667	pgoff_t first_page;	/* The first page to write */
1668	pgoff_t next_page;	/* Current page to examine */
1669	pgoff_t last_page;	/* Last page to examine */
1670	/*
1671	 * Extent to map - this can be after first_page because that can be
1672	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1673	 * is delalloc or unwritten.
1674	 */
1675	struct ext4_map_blocks map;
1676	struct ext4_io_submit io_submit;	/* IO submission data */
1677	unsigned int do_map:1;
1678};
1679
1680static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1681				       bool invalidate)
1682{
1683	int nr_pages, i;
1684	pgoff_t index, end;
1685	struct pagevec pvec;
1686	struct inode *inode = mpd->inode;
1687	struct address_space *mapping = inode->i_mapping;
1688
1689	/* This is necessary when next_page == 0. */
1690	if (mpd->first_page >= mpd->next_page)
1691		return;
1692
1693	index = mpd->first_page;
1694	end   = mpd->next_page - 1;
1695	if (invalidate) {
1696		ext4_lblk_t start, last;
1697		start = index << (PAGE_SHIFT - inode->i_blkbits);
1698		last = end << (PAGE_SHIFT - inode->i_blkbits);
1699		ext4_es_remove_extent(inode, start, last - start + 1);
1700	}
1701
1702	pagevec_init(&pvec);
1703	while (index <= end) {
1704		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
1705		if (nr_pages == 0)
1706			break;
1707		for (i = 0; i < nr_pages; i++) {
1708			struct page *page = pvec.pages[i];
1709
1710			BUG_ON(!PageLocked(page));
1711			BUG_ON(PageWriteback(page));
1712			if (invalidate) {
1713				if (page_mapped(page))
1714					clear_page_dirty_for_io(page);
1715				block_invalidatepage(page, 0, PAGE_SIZE);
1716				ClearPageUptodate(page);
1717			}
1718			unlock_page(page);
1719		}
1720		pagevec_release(&pvec);
1721	}
1722}
1723
1724static void ext4_print_free_blocks(struct inode *inode)
1725{
1726	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1727	struct super_block *sb = inode->i_sb;
1728	struct ext4_inode_info *ei = EXT4_I(inode);
1729
1730	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1731	       EXT4_C2B(EXT4_SB(inode->i_sb),
1732			ext4_count_free_clusters(sb)));
1733	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1734	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1735	       (long long) EXT4_C2B(EXT4_SB(sb),
1736		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1737	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1738	       (long long) EXT4_C2B(EXT4_SB(sb),
1739		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1740	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1741	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1742		 ei->i_reserved_data_blocks);
1743	return;
1744}
1745
1746static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1747{
1748	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1749}
1750
1751/*
1752 * ext4_insert_delayed_block - adds a delayed block to the extents status
1753 *                             tree, incrementing the reserved cluster/block
1754 *                             count or making a pending reservation
1755 *                             where needed
1756 *
1757 * @inode - file containing the newly added block
1758 * @lblk - logical block to be added
1759 *
1760 * Returns 0 on success, negative error code on failure.
1761 */
1762static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1763{
1764	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1765	int ret;
1766	bool allocated = false;
1767
1768	/*
1769	 * If the cluster containing lblk is shared with a delayed,
1770	 * written, or unwritten extent in a bigalloc file system, it's
1771	 * already been accounted for and does not need to be reserved.
1772	 * A pending reservation must be made for the cluster if it's
1773	 * shared with a written or unwritten extent and doesn't already
1774	 * have one.  Written and unwritten extents can be purged from the
1775	 * extents status tree if the system is under memory pressure, so
1776	 * it's necessary to examine the extent tree if a search of the
1777	 * extents status tree doesn't get a match.
1778	 */
1779	if (sbi->s_cluster_ratio == 1) {
1780		ret = ext4_da_reserve_space(inode);
1781		if (ret != 0)   /* ENOSPC */
1782			goto errout;
1783	} else {   /* bigalloc */
1784		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1785			if (!ext4_es_scan_clu(inode,
1786					      &ext4_es_is_mapped, lblk)) {
1787				ret = ext4_clu_mapped(inode,
1788						      EXT4_B2C(sbi, lblk));
1789				if (ret < 0)
1790					goto errout;
1791				if (ret == 0) {
1792					ret = ext4_da_reserve_space(inode);
1793					if (ret != 0)   /* ENOSPC */
1794						goto errout;
1795				} else {
1796					allocated = true;
1797				}
1798			} else {
1799				allocated = true;
1800			}
1801		}
1802	}
1803
1804	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1805
1806errout:
1807	return ret;
1808}
1809
1810/*
1811 * This function grabs code from the very beginning of
1812 * ext4_map_blocks, but assumes that the caller is from delayed write
1813 * time. This function looks up the requested blocks and sets the
1814 * buffer delay bit under the protection of i_data_sem.
1815 */
1816static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1817			      struct ext4_map_blocks *map,
1818			      struct buffer_head *bh)
1819{
1820	struct extent_status es;
1821	int retval;
1822	sector_t invalid_block = ~((sector_t) 0xffff);
1823#ifdef ES_AGGRESSIVE_TEST
1824	struct ext4_map_blocks orig_map;
1825
1826	memcpy(&orig_map, map, sizeof(*map));
1827#endif
1828
1829	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1830		invalid_block = ~0;
1831
1832	map->m_flags = 0;
1833	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1834		  "logical block %lu\n", inode->i_ino, map->m_len,
1835		  (unsigned long) map->m_lblk);
1836
1837	/* Lookup extent status tree firstly */
1838	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1839		if (ext4_es_is_hole(&es)) {
1840			retval = 0;
1841			down_read(&EXT4_I(inode)->i_data_sem);
1842			goto add_delayed;
1843		}
1844
1845		/*
1846		 * Delayed extent could be allocated by fallocate.
1847		 * So we need to check it.
1848		 */
1849		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1850			map_bh(bh, inode->i_sb, invalid_block);
1851			set_buffer_new(bh);
1852			set_buffer_delay(bh);
1853			return 0;
1854		}
1855
1856		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1857		retval = es.es_len - (iblock - es.es_lblk);
1858		if (retval > map->m_len)
1859			retval = map->m_len;
1860		map->m_len = retval;
1861		if (ext4_es_is_written(&es))
1862			map->m_flags |= EXT4_MAP_MAPPED;
1863		else if (ext4_es_is_unwritten(&es))
1864			map->m_flags |= EXT4_MAP_UNWRITTEN;
1865		else
1866			BUG();
1867
1868#ifdef ES_AGGRESSIVE_TEST
1869		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1870#endif
1871		return retval;
1872	}
1873
1874	/*
1875	 * Try to see if we can get the block without requesting a new
1876	 * file system block.
1877	 */
1878	down_read(&EXT4_I(inode)->i_data_sem);
1879	if (ext4_has_inline_data(inode))
1880		retval = 0;
1881	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1882		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1883	else
1884		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1885
1886add_delayed:
1887	if (retval == 0) {
1888		int ret;
1889
1890		/*
1891		 * XXX: __block_prepare_write() unmaps passed block,
1892		 * is it OK?
1893		 */
1894
1895		ret = ext4_insert_delayed_block(inode, map->m_lblk);
1896		if (ret != 0) {
1897			retval = ret;
1898			goto out_unlock;
1899		}
1900
1901		map_bh(bh, inode->i_sb, invalid_block);
1902		set_buffer_new(bh);
1903		set_buffer_delay(bh);
1904	} else if (retval > 0) {
1905		int ret;
1906		unsigned int status;
1907
1908		if (unlikely(retval != map->m_len)) {
1909			ext4_warning(inode->i_sb,
1910				     "ES len assertion failed for inode "
1911				     "%lu: retval %d != map->m_len %d",
1912				     inode->i_ino, retval, map->m_len);
1913			WARN_ON(1);
1914		}
1915
1916		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1917				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1918		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1919					    map->m_pblk, status);
1920		if (ret != 0)
1921			retval = ret;
1922	}
1923
1924out_unlock:
1925	up_read((&EXT4_I(inode)->i_data_sem));
1926
1927	return retval;
1928}
1929
1930/*
1931 * This is a special get_block_t callback which is used by
1932 * ext4_da_write_begin().  It will either return mapped block or
1933 * reserve space for a single block.
1934 *
1935 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1936 * We also have b_blocknr = -1 and b_bdev initialized properly
1937 *
1938 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1939 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1940 * initialized properly.
1941 */
1942int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1943			   struct buffer_head *bh, int create)
1944{
1945	struct ext4_map_blocks map;
1946	int ret = 0;
1947
1948	BUG_ON(create == 0);
1949	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1950
1951	map.m_lblk = iblock;
1952	map.m_len = 1;
1953
1954	/*
1955	 * First, we need to know whether the block is already allocated;
1956	 * preallocated blocks are unmapped but should be treated
1957	 * the same as allocated blocks.
1958	 */
1959	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1960	if (ret <= 0)
1961		return ret;
1962
1963	map_bh(bh, inode->i_sb, map.m_pblk);
1964	ext4_update_bh_state(bh, map.m_flags);
1965
1966	if (buffer_unwritten(bh)) {
1967		/* A delayed write to unwritten bh should be marked
1968		 * new and mapped.  Mapped ensures that we don't do
1969		 * get_block multiple times when we write to the same
1970		 * offset, and new ensures that we do a proper zero-out
1971		 * for partial writes.
1972		 */
1973		set_buffer_new(bh);
1974		set_buffer_mapped(bh);
1975	}
1976	return 0;
1977}
1978
1979static int bget_one(handle_t *handle, struct buffer_head *bh)
1980{
1981	get_bh(bh);
1982	return 0;
1983}
1984
1985static int bput_one(handle_t *handle, struct buffer_head *bh)
1986{
1987	put_bh(bh);
1988	return 0;
1989}
1990
1991static int __ext4_journalled_writepage(struct page *page,
1992				       unsigned int len)
1993{
1994	struct address_space *mapping = page->mapping;
1995	struct inode *inode = mapping->host;
1996	struct buffer_head *page_bufs = NULL;
1997	handle_t *handle = NULL;
1998	int ret = 0, err = 0;
1999	int inline_data = ext4_has_inline_data(inode);
2000	struct buffer_head *inode_bh = NULL;
2001
2002	ClearPageChecked(page);
2003
2004	if (inline_data) {
2005		BUG_ON(page->index != 0);
2006		BUG_ON(len > ext4_get_max_inline_size(inode));
2007		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
2008		if (inode_bh == NULL)
2009			goto out;
2010	} else {
2011		page_bufs = page_buffers(page);
2012		if (!page_bufs) {
2013			BUG();
2014			goto out;
2015		}
2016		ext4_walk_page_buffers(handle, page_bufs, 0, len,
2017				       NULL, bget_one);
2018	}
2019	/*
2020	 * We need to release the page lock before we start the
2021	 * journal, so grab a reference so the page won't disappear
2022	 * out from under us.
2023	 */
2024	get_page(page);
2025	unlock_page(page);
2026
2027	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2028				    ext4_writepage_trans_blocks(inode));
2029	if (IS_ERR(handle)) {
2030		ret = PTR_ERR(handle);
2031		put_page(page);
2032		goto out_no_pagelock;
2033	}
2034	BUG_ON(!ext4_handle_valid(handle));
2035
2036	lock_page(page);
2037	put_page(page);
2038	if (page->mapping != mapping) {
2039		/* The page got truncated from under us */
2040		ext4_journal_stop(handle);
2041		ret = 0;
2042		goto out;
2043	}
2044
2045	if (inline_data) {
2046		ret = ext4_mark_inode_dirty(handle, inode);
2047	} else {
2048		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2049					     do_journal_get_write_access);
2050
2051		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2052					     write_end_fn);
2053	}
2054	if (ret == 0)
2055		ret = err;
2056	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2057	err = ext4_journal_stop(handle);
2058	if (!ret)
2059		ret = err;
2060
2061	if (!ext4_has_inline_data(inode))
2062		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
2063				       NULL, bput_one);
2064	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2065out:
2066	unlock_page(page);
2067out_no_pagelock:
2068	brelse(inode_bh);
2069	return ret;
2070}
2071
2072/*
2073 * Note that we don't need to start a transaction unless we're journaling data
2074 * because we should have holes filled from ext4_page_mkwrite(). We don't even
2075 * need to add the inode to the transaction's list in ordered mode because if
2076 * we are writing back data added by write(), the inode is already there and if
2077 * we are writing back data modified via mmap(), no one guarantees in which
2078 * transaction the data will hit the disk. In case we are journaling data, we
2079 * cannot start transaction directly because transaction start ranks above page
2080 * lock so we have to do some magic.
2081 *
2082 * This function can get called via...
2083 *   - ext4_writepages after taking page lock (have journal handle)
2084 *   - journal_submit_inode_data_buffers (no journal handle)
2085 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2086 *   - grab_page_cache when doing write_begin (have journal handle)
2087 *
2088 * We don't do any block allocation in this function. If we have page with
2089 * multiple blocks we need to write those buffer_heads that are mapped. This
2090 * is important for mmap-based writes. So if, with blocksize 1K, we do
2091 * truncate(f, 1024);
2092 * a = mmap(f, 0, 4096);
2093 * a[0] = 'a';
2094 * truncate(f, 4096);
2095 * then we have in the page the first buffer_head mapped via the page_mkwrite
2096 * callback, while the other buffer_heads are unmapped but dirty (dirtied
2097 * via do_wp_page). So writepage should write the first block. If we modify
2098 * the mmap area beyond 1024 we will again get a page_fault and the
2099 * page_mkwrite callback will do the block allocation and mark the
2100 * buffer_heads mapped.
2101 *
2102 * We redirty the page if we have any buffer_heads that are either delayed or
2103 * unwritten in the page.
2104 *
2105 * We can get recursively called as shown below.
2106 *
2107 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2108 *		ext4_writepage()
2109 *
2110 * But since we don't do any block allocation we should not deadlock.
2111 * The page also has its dirty flag cleared, so we don't get a recursive page_lock.
2112 */
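/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * original file) of the scenario above, assuming "f" sits on an ext4
 * filesystem with a 1K block size:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("f", O_RDWR | O_CREAT, 0644);
 *		char *a;
 *
 *		ftruncate(fd, 1024);	// one 1K block backs the first page
 *		a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *		a[0] = 'a';		// write fault: page_mkwrite maps block
 *					// 0, do_wp_page dirties the whole page
 *		ftruncate(fd, 4096);	// blocks 1-3 now inside i_size:
 *					// dirty but unmapped
 *		return 0;
 *	}
 *
 * Writeback of that page must write the one mapped block; the remaining
 * dirty-but-unmapped buffers wait for a later page fault to be allocated.
 */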
2113static int ext4_writepage(struct page *page,
2114			  struct writeback_control *wbc)
2115{
2116	int ret = 0;
2117	loff_t size;
2118	unsigned int len;
2119	struct buffer_head *page_bufs = NULL;
2120	struct inode *inode = page->mapping->host;
2121	struct ext4_io_submit io_submit;
2122	bool keep_towrite = false;
2123
2124	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2125		ext4_invalidatepage(page, 0, PAGE_SIZE);
2126		unlock_page(page);
2127		return -EIO;
2128	}
2129
2130	trace_ext4_writepage(page);
2131	size = i_size_read(inode);
2132	if (page->index == size >> PAGE_SHIFT &&
2133	    !ext4_verity_in_progress(inode))
2134		len = size & ~PAGE_MASK;
2135	else
2136		len = PAGE_SIZE;
2137
2138	page_bufs = page_buffers(page);
2139	/*
2140	 * We cannot do block allocation or other extent handling in this
2141	 * function. If there are buffers needing that, we have to redirty
2142	 * the page. But we may reach here when we do a journal commit via
2143	 * journal_submit_inode_data_buffers() and in that case we must write
2144	 * allocated buffers to achieve data=ordered mode guarantees.
2145	 *
2146	 * Also, if there is only one buffer per page (the fs block
2147	 * size == the page size), if one buffer needs block
2148	 * allocation or needs to modify the extent tree to clear the
2149	 * unwritten flag, we know that the page can't be written at
2150	 * all, so we might as well refuse the write immediately.
2151	 * Unfortunately if the block size != page size, we can't as
2152	 * easily detect this case using ext4_walk_page_buffers(), but
2153	 * for the extremely common case, this is an optimization that
2154	 * skips a useless round trip through ext4_bio_write_page().
2155	 */
2156	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2157				   ext4_bh_delay_or_unwritten)) {
2158		redirty_page_for_writepage(wbc, page);
2159		if ((current->flags & PF_MEMALLOC) ||
2160		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2161			/*
2162			 * For memory cleaning there's no point in writing only
2163			 * some buffers. So just bail out. Warn if we came here
2164			 * from direct reclaim.
2165			 */
2166			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2167							== PF_MEMALLOC);
2168			unlock_page(page);
2169			return 0;
2170		}
2171		keep_towrite = true;
2172	}
2173
2174	if (PageChecked(page) && ext4_should_journal_data(inode))
2175		/*
2176		 * It's mmapped pagecache.  Add buffers and journal it.  There
2177		 * doesn't seem much point in redirtying the page here.
2178		 */
2179		return __ext4_journalled_writepage(page, len);
2180
2181	ext4_io_submit_init(&io_submit, wbc);
2182	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2183	if (!io_submit.io_end) {
2184		redirty_page_for_writepage(wbc, page);
2185		unlock_page(page);
2186		return -ENOMEM;
2187	}
2188	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2189	ext4_io_submit(&io_submit);
2190	/* Drop io_end reference we got from init */
2191	ext4_put_io_end_defer(io_submit.io_end);
2192	return ret;
2193}
2194
2195static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2196{
2197	int len;
2198	loff_t size;
2199	int err;
2200
2201	BUG_ON(page->index != mpd->first_page);
2202	clear_page_dirty_for_io(page);
2203	/*
2204	 * We have to be very careful here!  Nothing protects writeback path
2205	 * against i_size changes and the page can be writeably mapped into
2206	 * page tables. So an application can be growing i_size and writing
2207	 * data through mmap while writeback runs. clear_page_dirty_for_io()
2208	 * write-protects our page in page tables and the page cannot get
2209	 * written to again until we release page lock. So only after
2210	 * clear_page_dirty_for_io() we are safe to sample i_size for
2211	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2212	 * on the barrier provided by TestClearPageDirty in
2213	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2214	 * after page tables are updated.
2215	 */
2216	size = i_size_read(mpd->inode);
2217	if (page->index == size >> PAGE_SHIFT &&
2218	    !ext4_verity_in_progress(mpd->inode))
2219		len = size & ~PAGE_MASK;
2220	else
2221		len = PAGE_SIZE;
2222	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2223	if (!err)
2224		mpd->wbc->nr_to_write--;
2225	mpd->first_page++;
2226
2227	return err;
2228}
2229
2230#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
2231
2232/*
2233 * mballoc gives us at most this number of blocks...
2234 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2235 * The rest of mballoc seems to handle chunks up to full group size.
2236 */
2237#define MAX_WRITEPAGES_EXTENT_LEN 2048
2238
2239/*
2240 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2241 *
2242 * @mpd - extent of blocks
2243 * @lblk - logical number of the block in the file
2244 * @bh - buffer head we want to add to the extent
2245 *
2246 * The function is used to collect contiguous blocks in the same state. If the
2247 * buffer doesn't require mapping for writeback and we haven't started the
2248 * extent of buffers to map yet, the function returns 'true' immediately - the
2249 * caller can write the buffer right away. Otherwise the function returns true
2250 * if the block has been added to the extent, false if the block couldn't be
2251 * added.
2252 */
2253static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2254				   struct buffer_head *bh)
2255{
2256	struct ext4_map_blocks *map = &mpd->map;
2257
2258	/* Buffer that doesn't need mapping for writeback? */
2259	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2260	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2261		/* So far no extent to map => we write the buffer right away */
2262		if (map->m_len == 0)
2263			return true;
2264		return false;
2265	}
2266
2267	/* First block in the extent? */
2268	if (map->m_len == 0) {
2269		/* We cannot map unless handle is started... */
2270		if (!mpd->do_map)
2271			return false;
2272		map->m_lblk = lblk;
2273		map->m_len = 1;
2274		map->m_flags = bh->b_state & BH_FLAGS;
2275		return true;
2276	}
2277
2278	/* Don't go larger than mballoc is willing to allocate */
2279	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2280		return false;
2281
2282	/* Can we merge the block to our big extent? */
2283	if (lblk == map->m_lblk + map->m_len &&
2284	    (bh->b_state & BH_FLAGS) == map->m_flags) {
2285		map->m_len++;
2286		return true;
2287	}
2288	return false;
2289}
2290
2291/*
2292 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2293 *
2294 * @mpd - extent of blocks for mapping
2295 * @head - the first buffer in the page
2296 * @bh - buffer we should start processing from
2297 * @lblk - logical number of the block in the file corresponding to @bh
2298 *
2299 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2300 * the page for IO if all buffers in this page were mapped and there's no
2301 * accumulated extent of buffers to map or add buffers in the page to the
2302 * extent of buffers to map. The function returns 1 if the caller can continue
2303 * by processing the next page, 0 if it should stop adding buffers to the
2304 * extent to map because we cannot extend it anymore. It can also return a
2305 * value < 0 in case of an error during IO submission.
2306 */
2307static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2308				   struct buffer_head *head,
2309				   struct buffer_head *bh,
2310				   ext4_lblk_t lblk)
2311{
2312	struct inode *inode = mpd->inode;
2313	int err;
2314	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2315							>> inode->i_blkbits;
2316
2317	if (ext4_verity_in_progress(inode))
2318		blocks = EXT_MAX_BLOCKS;
2319
2320	do {
2321		BUG_ON(buffer_locked(bh));
2322
2323		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2324			/* Found extent to map? */
2325			if (mpd->map.m_len)
2326				return 0;
2327			/* Buffer needs mapping and handle is not started? */
2328			if (!mpd->do_map)
2329				return 0;
2330			/* Everything mapped so far and we hit EOF */
2331			break;
2332		}
2333	} while (lblk++, (bh = bh->b_this_page) != head);
2334	/* So far everything mapped? Submit the page for IO. */
2335	if (mpd->map.m_len == 0) {
2336		err = mpage_submit_page(mpd, head->b_page);
2337		if (err < 0)
2338			return err;
2339	}
2340	return lblk < blocks;
2341}
2342
2343/*
2344 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2345 *		       submit fully mapped pages for IO
2346 *
2347 * @mpd - description of extent to map, on return next extent to map
2348 *
2349 * Scan buffers corresponding to changed extent (we expect corresponding pages
2350 * to be already locked) and update buffer state according to new extent state.
2351 * We map delalloc buffers to their physical location, clear unwritten bits,
2352 * and mark buffers as uninit when we perform writes to unwritten extents
2353 * and do extent conversion after IO is finished. If the last page is not fully
2354 * mapped, we update @map to the next extent in the last page that needs
2355 * mapping. Otherwise we submit the page for IO.
2356 */
2357static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2358{
2359	struct pagevec pvec;
2360	int nr_pages, i;
2361	struct inode *inode = mpd->inode;
2362	struct buffer_head *head, *bh;
2363	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2364	pgoff_t start, end;
2365	ext4_lblk_t lblk;
2366	sector_t pblock;
2367	int err;
2368
2369	start = mpd->map.m_lblk >> bpp_bits;
2370	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2371	lblk = start << bpp_bits;
2372	pblock = mpd->map.m_pblk;
2373
2374	pagevec_init(&pvec);
2375	while (start <= end) {
2376		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2377						&start, end);
2378		if (nr_pages == 0)
2379			break;
2380		for (i = 0; i < nr_pages; i++) {
2381			struct page *page = pvec.pages[i];
2382
2383			bh = head = page_buffers(page);
2384			do {
2385				if (lblk < mpd->map.m_lblk)
2386					continue;
2387				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2388					/*
2389					 * Buffer after end of mapped extent.
2390					 * Find next buffer in the page to map.
2391					 */
2392					mpd->map.m_len = 0;
2393					mpd->map.m_flags = 0;
2394					/*
2395					 * FIXME: If dioread_nolock supports
2396					 * blocksize < pagesize, we need to make
2397					 * sure we add size mapped so far to
2398					 * io_end->size as the following call
2399					 * can submit the page for IO.
2400					 */
2401					err = mpage_process_page_bufs(mpd, head,
2402								      bh, lblk);
2403					pagevec_release(&pvec);
2404					if (err > 0)
2405						err = 0;
2406					return err;
2407				}
2408				if (buffer_delay(bh)) {
2409					clear_buffer_delay(bh);
2410					bh->b_blocknr = pblock++;
2411				}
2412				clear_buffer_unwritten(bh);
2413			} while (lblk++, (bh = bh->b_this_page) != head);
2414
2415			/*
2416			 * FIXME: This is going to break if dioread_nolock
2417			 * supports blocksize < pagesize as we will try to
2418			 * convert potentially unmapped parts of inode.
2419			 */
2420			mpd->io_submit.io_end->size += PAGE_SIZE;
2421			/* Page fully mapped - let IO run! */
2422			err = mpage_submit_page(mpd, page);
2423			if (err < 0) {
2424				pagevec_release(&pvec);
2425				return err;
2426			}
2427		}
2428		pagevec_release(&pvec);
2429	}
2430	/* Extent fully mapped and matches with page boundary. We are done. */
2431	mpd->map.m_len = 0;
2432	mpd->map.m_flags = 0;
2433	return 0;
2434}
2435
2436static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2437{
2438	struct inode *inode = mpd->inode;
2439	struct ext4_map_blocks *map = &mpd->map;
2440	int get_blocks_flags;
2441	int err, dioread_nolock;
2442
2443	trace_ext4_da_write_pages_extent(inode, map);
2444	/*
2445	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2446	 * to convert an unwritten extent to be initialized (in the case
2447	 * where we have written into one or more preallocated blocks).  It is
2448	 * possible that we're going to need more metadata blocks than
2449	 * previously reserved. However we must not fail because we're in
2450	 * writeback and there is nothing we can do about it so it might result
2451	 * in data loss.  So use reserved blocks to allocate metadata if
2452	 * possible.
2453	 *
2454	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2455	 * the blocks in question are delalloc blocks.  This indicates
2456	 * that the blocks and quotas have already been checked when
2457	 * the data was copied into the page cache.
2458	 */
2459	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2460			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
2461			   EXT4_GET_BLOCKS_IO_SUBMIT;
2462	dioread_nolock = ext4_should_dioread_nolock(inode);
2463	if (dioread_nolock)
2464		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2465	if (map->m_flags & (1 << BH_Delay))
2466		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2467
2468	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2469	if (err < 0)
2470		return err;
2471	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2472		if (!mpd->io_submit.io_end->handle &&
2473		    ext4_handle_valid(handle)) {
2474			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2475			handle->h_rsv_handle = NULL;
2476		}
2477		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2478	}
2479
2480	BUG_ON(map->m_len == 0);
2481	return 0;
2482}
2483
2484/*
2485 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2486 *				 mpd->len and submit pages underlying it for IO
2487 *
2488 * @handle - handle for journal operations
2489 * @mpd - extent to map
2490 * @give_up_on_write - we set this to true iff there is a fatal error and there
2491 *                     is no hope of writing the data. The caller should discard
2492 *                     dirty pages to avoid infinite loops.
2493 *
2494 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2495 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2496 * them to initialized or split the described range from larger unwritten
2497 * extent. Note that we need not map all the described range since allocation
2498 * can return fewer blocks or the range is covered by more unwritten extents. We
2499 * cannot map more because we are limited by reserved transaction credits. On
2500 * the other hand we always make sure that the last touched page is fully
2501 * mapped so that it can be written out (and thus forward progress is
2502 * guaranteed). After mapping we submit all mapped pages for IO.
2503 */
2504static int mpage_map_and_submit_extent(handle_t *handle,
2505				       struct mpage_da_data *mpd,
2506				       bool *give_up_on_write)
2507{
2508	struct inode *inode = mpd->inode;
2509	struct ext4_map_blocks *map = &mpd->map;
2510	int err;
2511	loff_t disksize;
2512	int progress = 0;
2513
2514	mpd->io_submit.io_end->offset =
2515				((loff_t)map->m_lblk) << inode->i_blkbits;
2516	do {
2517		err = mpage_map_one_extent(handle, mpd);
2518		if (err < 0) {
2519			struct super_block *sb = inode->i_sb;
2520
2521			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2522			    EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2523				goto invalidate_dirty_pages;
2524			/*
2525			 * Let the upper layers retry transient errors.
2526			 * In the case of ENOSPC, if ext4_count_free_blocks()
2527			 * is non-zero, a commit should free up blocks.
2528			 */
2529			if ((err == -ENOMEM) ||
2530			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2531				if (progress)
2532					goto update_disksize;
2533				return err;
2534			}
2535			ext4_msg(sb, KERN_CRIT,
2536				 "Delayed block allocation failed for "
2537				 "inode %lu at logical offset %llu with"
2538				 " max blocks %u with error %d",
2539				 inode->i_ino,
2540				 (unsigned long long)map->m_lblk,
2541				 (unsigned)map->m_len, -err);
2542			ext4_msg(sb, KERN_CRIT,
2543				 "This should not happen!! Data will "
2544				 "be lost\n");
2545			if (err == -ENOSPC)
2546				ext4_print_free_blocks(inode);
2547		invalidate_dirty_pages:
2548			*give_up_on_write = true;
2549			return err;
2550		}
2551		progress = 1;
2552		/*
2553		 * Update buffer state, submit mapped pages, and get us new
2554		 * extent to map
2555		 */
2556		err = mpage_map_and_submit_buffers(mpd);
2557		if (err < 0)
2558			goto update_disksize;
2559	} while (map->m_len);
2560
2561update_disksize:
2562	/*
2563	 * Update on-disk size after IO is submitted.  Races with
2564	 * truncate are avoided by checking i_size under i_data_sem.
2565	 */
2566	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2567	if (disksize > EXT4_I(inode)->i_disksize) {
2568		int err2;
2569		loff_t i_size;
2570
2571		down_write(&EXT4_I(inode)->i_data_sem);
2572		i_size = i_size_read(inode);
2573		if (disksize > i_size)
2574			disksize = i_size;
2575		if (disksize > EXT4_I(inode)->i_disksize)
2576			EXT4_I(inode)->i_disksize = disksize;
2577		up_write(&EXT4_I(inode)->i_data_sem);
2578		err2 = ext4_mark_inode_dirty(handle, inode);
2579		if (err2)
2580			ext4_error(inode->i_sb,
2581				   "Failed to mark inode %lu dirty",
2582				   inode->i_ino);
2583		if (!err)
2584			err = err2;
2585	}
2586	return err;
2587}
2588
2589/*
2590 * Calculate the total number of credits to reserve for one writepages
2591 * iteration. This is called from ext4_writepages(). We map an extent of
2592 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2593 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2594 * bpp - 1 blocks in bpp different extents.
2595 */
2596static int ext4_da_writepages_trans_blocks(struct inode *inode)
2597{
2598	int bpp = ext4_journal_blocks_per_page(inode);
2599
2600	return ext4_meta_trans_blocks(inode,
2601				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2602}
2603
2604/*
2605 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2606 * 				 and underlying extent to map
2607 *
2608 * @mpd - where to look for pages
2609 *
2610 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2611 * IO immediately. When we find a page which isn't mapped we start accumulating
2612 * an extent of buffers underlying these pages that needs mapping (formed by
2613 * either delayed or unwritten buffers). We also lock the pages containing
2614 * these buffers. The extent found is returned in @mpd structure (starting at
2615 * mpd->lblk with length mpd->len blocks).
2616 *
2617 * Note that this function can attach bios to one io_end structure which are
2618 * neither logically nor physically contiguous. Although it may seem like an
2619 * unnecessary complication, it is actually inevitable in the blocksize < pagesize
2620 * case as we need to track IO to all buffers underlying a page in one io_end.
2621 */
2622static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2623{
2624	struct address_space *mapping = mpd->inode->i_mapping;
2625	struct pagevec pvec;
2626	unsigned int nr_pages;
2627	long left = mpd->wbc->nr_to_write;
2628	pgoff_t index = mpd->first_page;
2629	pgoff_t end = mpd->last_page;
2630	xa_mark_t tag;
2631	int i, err = 0;
2632	int blkbits = mpd->inode->i_blkbits;
2633	ext4_lblk_t lblk;
2634	struct buffer_head *head;
2635
2636	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2637		tag = PAGECACHE_TAG_TOWRITE;
2638	else
2639		tag = PAGECACHE_TAG_DIRTY;
2640
2641	pagevec_init(&pvec);
2642	mpd->map.m_len = 0;
2643	mpd->next_page = index;
2644	while (index <= end) {
2645		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2646				tag);
2647		if (nr_pages == 0)
2648			goto out;
2649
2650		for (i = 0; i < nr_pages; i++) {
2651			struct page *page = pvec.pages[i];
2652
2653			/*
2654			 * Accumulated enough dirty pages? This doesn't apply
2655			 * to WB_SYNC_ALL mode. For integrity sync we have to
2656			 * keep going because someone may be concurrently
2657			 * dirtying pages, and we might have synced a lot of
2658			 * newly appeared dirty pages, but have not synced all
2659			 * of the old dirty pages.
2660			 */
2661			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2662				goto out;
2663
2664			/* If we can't merge this page, we are done. */
2665			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2666				goto out;
2667
2668			lock_page(page);
2669			/*
2670			 * If the page is no longer dirty, or its mapping no
2671			 * longer corresponds to inode we are writing (which
2672			 * means it has been truncated or invalidated), or the
2673			 * page is already under writeback and we are not doing
2674			 * a data integrity writeback, skip the page
2675			 */
2676			if (!PageDirty(page) ||
2677			    (PageWriteback(page) &&
2678			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2679			    unlikely(page->mapping != mapping)) {
2680				unlock_page(page);
2681				continue;
2682			}
2683
2684			wait_on_page_writeback(page);
2685			BUG_ON(PageWriteback(page));
2686
2687			if (mpd->map.m_len == 0)
2688				mpd->first_page = page->index;
2689			mpd->next_page = page->index + 1;
2690			/* Add all dirty buffers to mpd */
2691			lblk = ((ext4_lblk_t)page->index) <<
2692				(PAGE_SHIFT - blkbits);
2693			head = page_buffers(page);
2694			err = mpage_process_page_bufs(mpd, head, head, lblk);
2695			if (err <= 0)
2696				goto out;
2697			err = 0;
2698			left--;
2699		}
2700		pagevec_release(&pvec);
2701		cond_resched();
2702	}
2703	return 0;
2704out:
2705	pagevec_release(&pvec);
2706	return err;
2707}
2708
2709static int ext4_writepages(struct address_space *mapping,
2710			   struct writeback_control *wbc)
2711{
2712	pgoff_t	writeback_index = 0;
2713	long nr_to_write = wbc->nr_to_write;
2714	int range_whole = 0;
2715	int cycled = 1;
2716	handle_t *handle = NULL;
2717	struct mpage_da_data mpd;
2718	struct inode *inode = mapping->host;
2719	int needed_blocks, rsv_blocks = 0, ret = 0;
2720	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2721	bool done;
2722	struct blk_plug plug;
2723	bool give_up_on_write = false;
2724
2725	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2726		return -EIO;
2727
2728	percpu_down_read(&sbi->s_journal_flag_rwsem);
2729	trace_ext4_writepages(inode, wbc);
2730
2731	/*
2732	 * No pages to write? This is mainly a kludge to avoid starting
2733	 * a transaction for special inodes like journal inode on last iput()
2734	 * because that could violate lock ordering on umount
2735	 */
2736	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2737		goto out_writepages;
2738
2739	if (ext4_should_journal_data(inode)) {
2740		ret = generic_writepages(mapping, wbc);
2741		goto out_writepages;
2742	}
2743
2744	/*
2745	 * If the filesystem has aborted, it is read-only, so return
2746	 * right away instead of dumping stack traces later on that
2747	 * will obscure the real source of the problem.  We test
2748	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
2749	 * the latter could be true if the filesystem is mounted
2750	 * read-only, and in that case, ext4_writepages should
2751	 * *never* be called, so if that ever happens, we would want
2752	 * the stack trace.
2753	 */
2754	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2755		     sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2756		ret = -EROFS;
2757		goto out_writepages;
2758	}
2759
2760	/*
2761	 * If we have inline data and arrive here, it means that
2762	 * we will soon create the block for the 1st page, so
2763	 * we'd better clear the inline data here.
2764	 */
2765	if (ext4_has_inline_data(inode)) {
2766		/* Just inode will be modified... */
2767		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2768		if (IS_ERR(handle)) {
2769			ret = PTR_ERR(handle);
2770			goto out_writepages;
2771		}
2772		BUG_ON(ext4_test_inode_state(inode,
2773				EXT4_STATE_MAY_INLINE_DATA));
2774		ext4_destroy_inline_data(handle, inode);
2775		ext4_journal_stop(handle);
2776	}
2777
2778	if (ext4_should_dioread_nolock(inode)) {
2779		/*
2780		 * We may need to convert up to one extent per block in
2781		 * the page and we may dirty the inode.
2782		 */
2783		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2784						PAGE_SIZE >> inode->i_blkbits);
2785	}
2786
2787	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2788		range_whole = 1;
2789
2790	if (wbc->range_cyclic) {
2791		writeback_index = mapping->writeback_index;
2792		if (writeback_index)
2793			cycled = 0;
2794		mpd.first_page = writeback_index;
2795		mpd.last_page = -1;
2796	} else {
2797		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2798		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2799	}
2800
2801	mpd.inode = inode;
2802	mpd.wbc = wbc;
2803	ext4_io_submit_init(&mpd.io_submit, wbc);
2804retry:
2805	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2806		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2807	done = false;
2808	blk_start_plug(&plug);
2809
2810	/*
2811	 * First writeback pages that don't need mapping - we can avoid
2812	 * starting a transaction unnecessarily and also avoid being blocked
2813	 * in the block layer on device congestion while having a transaction
2814	 * started.
2815	 */
2816	mpd.do_map = 0;
2817	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2818	if (!mpd.io_submit.io_end) {
2819		ret = -ENOMEM;
2820		goto unplug;
2821	}
2822	ret = mpage_prepare_extent_to_map(&mpd);
2823	/* Unlock pages we didn't use */
2824	mpage_release_unused_pages(&mpd, false);
2825	/* Submit prepared bio */
2826	ext4_io_submit(&mpd.io_submit);
2827	ext4_put_io_end_defer(mpd.io_submit.io_end);
2828	mpd.io_submit.io_end = NULL;
2829	if (ret < 0)
2830		goto unplug;
2831
2832	while (!done && mpd.first_page <= mpd.last_page) {
2833		/* For each extent of pages we use new io_end */
2834		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2835		if (!mpd.io_submit.io_end) {
2836			ret = -ENOMEM;
2837			break;
2838		}
2839
2840		/*
2841		 * We have two constraints: We find one extent to map and we
2842		 * must always write out whole page (makes a difference when
2843		 * blocksize < pagesize) so that we don't block on IO when we
2844		 * try to write out the rest of the page. Journalled mode is
2845		 * not supported by delalloc.
2846		 */
2847		BUG_ON(ext4_should_journal_data(inode));
2848		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2849
2850		/* start a new transaction */
2851		handle = ext4_journal_start_with_reserve(inode,
2852				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2853		if (IS_ERR(handle)) {
2854			ret = PTR_ERR(handle);
2855			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2856			       "%ld pages, ino %lu; err %d", __func__,
2857				wbc->nr_to_write, inode->i_ino, ret);
2858			/* Release allocated io_end */
2859			ext4_put_io_end(mpd.io_submit.io_end);
2860			mpd.io_submit.io_end = NULL;
2861			break;
2862		}
2863		mpd.do_map = 1;
2864
2865		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2866		ret = mpage_prepare_extent_to_map(&mpd);
2867		if (!ret) {
2868			if (mpd.map.m_len)
2869				ret = mpage_map_and_submit_extent(handle, &mpd,
2870					&give_up_on_write);
2871			else {
2872				/*
2873				 * We scanned the whole range (or exhausted
2874				 * nr_to_write), submitted what was mapped and
2875				 * didn't find anything needing mapping. We are
2876				 * done.
2877				 */
2878				done = true;
2879			}
2880		}
2881		/*
2882		 * Caution: If the handle is synchronous,
2883		 * ext4_journal_stop() can wait for transaction commit
2884		 * to finish which may depend on writeback of pages to
2885		 * complete or on page lock to be released.  In that
2886		 * case, we have to wait until after we have
2887		 * submitted all the IO, released page locks we hold,
2888		 * and dropped io_end reference (for extent conversion
2889		 * to be able to complete) before stopping the handle.
2890		 */
2891		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2892			ext4_journal_stop(handle);
2893			handle = NULL;
2894			mpd.do_map = 0;
2895		}
2896		/* Unlock pages we didn't use */
2897		mpage_release_unused_pages(&mpd, give_up_on_write);
2898		/* Submit prepared bio */
2899		ext4_io_submit(&mpd.io_submit);
2900
2901		/*
2902		 * Drop our io_end reference we got from init. We have
2903		 * to be careful and use deferred io_end finishing if
2904		 * we are still holding the transaction as we can
2905		 * release the last reference to io_end which may end
2906		 * up doing unwritten extent conversion.
2907		 */
2908		if (handle) {
2909			ext4_put_io_end_defer(mpd.io_submit.io_end);
2910			ext4_journal_stop(handle);
2911		} else
2912			ext4_put_io_end(mpd.io_submit.io_end);
2913		mpd.io_submit.io_end = NULL;
2914
2915		if (ret == -ENOSPC && sbi->s_journal) {
2916			/*
2917			 * Commit the transaction which would
2918			 * free blocks released in the transaction
2919			 * and try again
2920			 */
2921			jbd2_journal_force_commit_nested(sbi->s_journal);
2922			ret = 0;
2923			continue;
2924		}
2925		/* Fatal error - ENOMEM, EIO... */
2926		if (ret)
2927			break;
2928	}
2929unplug:
2930	blk_finish_plug(&plug);
2931	if (!ret && !cycled && wbc->nr_to_write > 0) {
2932		cycled = 1;
2933		mpd.last_page = writeback_index - 1;
2934		mpd.first_page = 0;
2935		goto retry;
2936	}
2937
2938	/* Update index */
2939	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2940		/*
2941		 * Set the writeback_index so that range_cyclic
2942		 * mode will write it back later
2943		 */
2944		mapping->writeback_index = mpd.first_page;
2945
2946out_writepages:
2947	trace_ext4_writepages_result(inode, wbc, ret,
2948				     nr_to_write - wbc->nr_to_write);
2949	percpu_up_read(&sbi->s_journal_flag_rwsem);
2950	return ret;
2951}
2952
2953static int ext4_dax_writepages(struct address_space *mapping,
2954			       struct writeback_control *wbc)
2955{
2956	int ret;
2957	long nr_to_write = wbc->nr_to_write;
2958	struct inode *inode = mapping->host;
2959	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2960
2961	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2962		return -EIO;
2963
2964	percpu_down_read(&sbi->s_journal_flag_rwsem);
2965	trace_ext4_writepages(inode, wbc);
2966
2967	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
2968	trace_ext4_writepages_result(inode, wbc, ret,
2969				     nr_to_write - wbc->nr_to_write);
2970	percpu_up_read(&sbi->s_journal_flag_rwsem);
2971	return ret;
2972}
2973
2974static int ext4_nonda_switch(struct super_block *sb)
2975{
2976	s64 free_clusters, dirty_clusters;
2977	struct ext4_sb_info *sbi = EXT4_SB(sb);
2978
2979	/*
2980	 * Switch to non-delalloc mode if we are running low
2981	 * on free blocks. The free block accounting via percpu
2982	 * counters can get slightly wrong with percpu_counter_batch getting
2983	 * accumulated on each CPU without updating global counters.
2984	 * Delalloc needs accurate free block accounting, so switch
2985	 * to non-delalloc when we are near the error range.
2986	 */
2987	free_clusters =
2988		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2989	dirty_clusters =
2990		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2991	/*
2992	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2993	 */
2994	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2995		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2996
2997	if (2 * free_clusters < 3 * dirty_clusters ||
2998	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2999		/*
3000		 * free block count is less than 150% of dirty blocks
3001		 * or free blocks are less than the watermark
3002		 */
3003		return 1;
3004	}
3005	return 0;
3006}
3007
3008/* We always reserve for an inode update; the superblock could be there too */
3009static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
3010{
3011	if (likely(ext4_has_feature_large_file(inode->i_sb)))
3012		return 1;
3013
3014	if (pos + len <= 0x7fffffffULL)
3015		return 1;
3016
3017	/* We might need to update the superblock to set LARGE_FILE */
3018	return 2;
3019}
3020
3021static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3022			       loff_t pos, unsigned len, unsigned flags,
3023			       struct page **pagep, void **fsdata)
3024{
3025	int ret, retries = 0;
3026	struct page *page;
3027	pgoff_t index;
3028	struct inode *inode = mapping->host;
3029	handle_t *handle;
3030
3031	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3032		return -EIO;
3033
3034	index = pos >> PAGE_SHIFT;
3035
3036	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
3037	    ext4_verity_in_progress(inode)) {
3038		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3039		return ext4_write_begin(file, mapping, pos,
3040					len, flags, pagep, fsdata);
3041	}
3042	*fsdata = (void *)0;
3043	trace_ext4_da_write_begin(inode, pos, len, flags);
3044
3045	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3046		ret = ext4_da_write_inline_data_begin(mapping, inode,
3047						      pos, len, flags,
3048						      pagep, fsdata);
3049		if (ret < 0)
3050			return ret;
3051		if (ret == 1)
3052			return 0;
3053	}
3054
3055	/*
3056	 * grab_cache_page_write_begin() can take a long time if the
3057	 * system is thrashing due to memory pressure, or if the page
3058	 * is being written back.  So grab it first before we start
3059	 * the transaction handle.  This also allows us to allocate
3060	 * the page (if needed) without using GFP_NOFS.
3061	 */
3062retry_grab:
3063	page = grab_cache_page_write_begin(mapping, index, flags);
3064	if (!page)
3065		return -ENOMEM;
3066	unlock_page(page);
3067
3068	/*
3069	 * With delayed allocation, we don't log the i_disksize update
3070	 * if there is delayed block allocation. But we still need
3071	 * to journal the i_disksize update if the write to the end
3072	 * of the file lands in an already mapped buffer.
3073	 */
3074retry_journal:
3075	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3076				ext4_da_write_credits(inode, pos, len));
3077	if (IS_ERR(handle)) {
3078		put_page(page);
3079		return PTR_ERR(handle);
3080	}
3081
3082	lock_page(page);
3083	if (page->mapping != mapping) {
3084		/* The page got truncated from under us */
3085		unlock_page(page);
3086		put_page(page);
3087		ext4_journal_stop(handle);
3088		goto retry_grab;
3089	}
3090	/* In case writeback began while the page was unlocked */
3091	wait_for_stable_page(page);
3092
3093#ifdef CONFIG_FS_ENCRYPTION
3094	ret = ext4_block_write_begin(page, pos, len,
3095				     ext4_da_get_block_prep);
3096#else
3097	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3098#endif
3099	if (ret < 0) {
3100		unlock_page(page);
3101		ext4_journal_stop(handle);
3102		/*
3103		 * block_write_begin may have instantiated a few blocks
3104		 * outside i_size.  Trim these off again. Don't need
3105		 * i_size_read because we hold i_mutex.
3106		 */
3107		if (pos + len > inode->i_size)
3108			ext4_truncate_failed_write(inode);
3109
3110		if (ret == -ENOSPC &&
3111		    ext4_should_retry_alloc(inode->i_sb, &retries))
3112			goto retry_journal;
3113
3114		put_page(page);
3115		return ret;
3116	}
3117
3118	*pagep = page;
3119	return ret;
3120}
3121
3122/*
3123 * Check if we should update i_disksize
3124 * when writing to the end of the file without requiring block allocation
3125 */
3126static int ext4_da_should_update_i_disksize(struct page *page,
3127					    unsigned long offset)
3128{
3129	struct buffer_head *bh;
3130	struct inode *inode = page->mapping->host;
3131	unsigned int idx;
3132	int i;
3133
3134	bh = page_buffers(page);
3135	idx = offset >> inode->i_blkbits;
3136
3137	for (i = 0; i < idx; i++)
3138		bh = bh->b_this_page;
3139
3140	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3141		return 0;
3142	return 1;
3143}
3144
3145static int ext4_da_write_end(struct file *file,
3146			     struct address_space *mapping,
3147			     loff_t pos, unsigned len, unsigned copied,
3148			     struct page *page, void *fsdata)
3149{
3150	struct inode *inode = mapping->host;
3151	int ret = 0, ret2;
3152	handle_t *handle = ext4_journal_current_handle();
3153	loff_t new_i_size;
3154	unsigned long start, end;
3155	int write_mode = (int)(unsigned long)fsdata;
3156
3157	if (write_mode == FALL_BACK_TO_NONDELALLOC)
3158		return ext4_write_end(file, mapping, pos,
3159				      len, copied, page, fsdata);
3160
3161	trace_ext4_da_write_end(inode, pos, len, copied);
3162	start = pos & (PAGE_SIZE - 1);
3163	end = start + copied - 1;
3164
3165	/*
3166	 * generic_write_end() will run mark_inode_dirty() if i_size
3167	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
3168	 * into that.
3169	 */
3170	new_i_size = pos + copied;
3171	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3172		if (ext4_has_inline_data(inode) ||
3173		    ext4_da_should_update_i_disksize(page, end)) {
3174			ext4_update_i_disksize(inode, new_i_size);
3175			/* We need to mark the inode dirty even if
3176			 * new_i_size is less than inode->i_size
3177			 * but greater than i_disksize (hint: delalloc).
3178			 */
3179			ext4_mark_inode_dirty(handle, inode);
3180		}
3181	}
3182
3183	if (write_mode != CONVERT_INLINE_DATA &&
3184	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3185	    ext4_has_inline_data(inode))
3186		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3187						     page);
3188	else
3189		ret2 = generic_write_end(file, mapping, pos, len, copied,
3190							page, fsdata);
3191
3192	copied = ret2;
3193	if (ret2 < 0)
3194		ret = ret2;
3195	ret2 = ext4_journal_stop(handle);
3196	if (!ret)
3197		ret = ret2;
3198
3199	return ret ? ret : copied;
3200}
3201
3202/*
3203 * Force all delayed allocation blocks to be allocated for a given inode.
3204 */
3205int ext4_alloc_da_blocks(struct inode *inode)
3206{
3207	trace_ext4_alloc_da_blocks(inode);
3208
3209	if (!EXT4_I(inode)->i_reserved_data_blocks)
3210		return 0;
3211
3212	/*
3213	 * We do something simple for now.  The filemap_flush() will
3214	 * also start triggering a write of the data blocks, which is
3215	 * not strictly speaking necessary (and for users of
3216	 * laptop_mode, not even desirable).  However, to do otherwise
3217	 * would require replicating code paths in:
3218	 *
3219	 * ext4_writepages() ->
3220	 *    write_cache_pages() ---> (via passed in callback function)
3221	 *        __mpage_da_writepage() -->
3222	 *           mpage_add_bh_to_extent()
3223	 *           mpage_da_map_blocks()
3224	 *
3225	 * The problem is that write_cache_pages(), located in
3226	 * mm/page-writeback.c, marks pages clean in preparation for
3227	 * doing I/O, which is not desirable if we're not planning on
3228	 * doing I/O at all.
3229	 *
3230	 * We could call write_cache_pages(), and then redirty all of
3231	 * the pages by calling redirty_page_for_writepage() but that
3232	 * would be ugly in the extreme.  So instead we would need to
3233	 * replicate parts of the code in the above functions,
3234	 * simplifying them because we wouldn't actually intend to
3235	 * write out the pages, but rather only collect contiguous
3236	 * logical block extents, call the multi-block allocator, and
3237	 * then update the buffer heads with the block allocations.
3238	 *
3239	 * For now, though, we'll cheat by calling filemap_flush(),
3240	 * which will map the blocks, and start the I/O, but not
3241	 * actually wait for the I/O to complete.
3242	 */
3243	return filemap_flush(inode->i_mapping);
3244}
3245
3246/*
3247 * bmap() is special.  It gets used by applications such as lilo and by
3248 * the swapper to find the on-disk block of a specific piece of data.
3249 *
3250 * Naturally, this is dangerous if the block concerned is still in the
3251 * journal.  If somebody makes a swapfile on an ext4 data-journaling
3252 * filesystem and enables swap, then they may get a nasty shock when the
3253 * data getting swapped to that swapfile suddenly gets overwritten by
3254 * the original zeros written out previously to the journal and
3255 * awaiting writeback in the kernel's buffer cache.
3256 *
3257 * So, if we see any bmap calls here on a modified, data-journaled file,
3258 * take extra steps to flush any blocks which might be in the cache.
3259 */
3260static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3261{
3262	struct inode *inode = mapping->host;
3263	journal_t *journal;
3264	int err;
3265
3266	/*
3267	 * We can get here for an inline file via the FIBMAP ioctl
3268	 */
3269	if (ext4_has_inline_data(inode))
3270		return 0;
3271
3272	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3273			test_opt(inode->i_sb, DELALLOC)) {
3274		/*
3275		 * With delalloc we want to sync the file
3276		 * so that we can make sure we allocate
3277		 * blocks for the file.
3278		 */
3279		filemap_write_and_wait(mapping);
3280	}
3281
3282	if (EXT4_JOURNAL(inode) &&
3283	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3284		/*
3285		 * This is a REALLY heavyweight approach, but the use of
3286		 * bmap on dirty files is expected to be extremely rare:
3287		 * only if we run lilo or swapon on a freshly made file
3288		 * do we expect this to happen.
3289		 *
3290		 * (bmap requires CAP_SYS_RAWIO so this does not
3291		 * represent an unprivileged user DOS attack --- we'd be
3292		 * in trouble if mortal users could trigger this path at
3293		 * will.)
3294		 *
3295		 * NB. EXT4_STATE_JDATA is not set on files other than
3296		 * regular files.  If somebody wants to bmap a directory
3297		 * or symlink and gets confused because the buffer
3298		 * hasn't yet been flushed to disk, they deserve
3299		 * everything they get.
3300		 */
3301
3302		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3303		journal = EXT4_JOURNAL(inode);
3304		jbd2_journal_lock_updates(journal);
3305		err = jbd2_journal_flush(journal);
3306		jbd2_journal_unlock_updates(journal);
3307
3308		if (err)
3309			return 0;
3310	}
3311
3312	return generic_block_bmap(mapping, block, ext4_get_block);
3313}
3314
3315static int ext4_readpage(struct file *file, struct page *page)
3316{
3317	int ret = -EAGAIN;
3318	struct inode *inode = page->mapping->host;
3319
3320	trace_ext4_readpage(page);
3321
3322	if (ext4_has_inline_data(inode))
3323		ret = ext4_readpage_inline(inode, page);
3324
3325	if (ret == -EAGAIN)
3326		return ext4_mpage_readpages(page->mapping, NULL, page, 1,
3327						false);
3328
3329	return ret;
3330}
3331
3332static int
3333ext4_readpages(struct file *file, struct address_space *mapping,
3334		struct list_head *pages, unsigned nr_pages)
3335{
3336	struct inode *inode = mapping->host;
3337
3338	/* If the file has inline data, no need to do readpages. */
3339	if (ext4_has_inline_data(inode))
3340		return 0;
3341
3342	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
3343}
3344
3345static void ext4_invalidatepage(struct page *page, unsigned int offset,
3346				unsigned int length)
3347{
3348	trace_ext4_invalidatepage(page, offset, length);
3349
3350	/* No journalling happens on data buffers when this function is used */
3351	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3352
3353	block_invalidatepage(page, offset, length);
3354}
3355
3356static int __ext4_journalled_invalidatepage(struct page *page,
3357					    unsigned int offset,
3358					    unsigned int length)
3359{
3360	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3361
3362	trace_ext4_journalled_invalidatepage(page, offset, length);
3363
3364	/*
3365	 * If it's a full truncate we just forget about the pending dirtying
3366	 */
3367	if (offset == 0 && length == PAGE_SIZE)
3368		ClearPageChecked(page);
3369
3370	return jbd2_journal_invalidatepage(journal, page, offset, length);
3371}
3372
3373/* Wrapper for aops... */
3374static void ext4_journalled_invalidatepage(struct page *page,
3375					   unsigned int offset,
3376					   unsigned int length)
3377{
3378	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3379}
3380
3381static int ext4_releasepage(struct page *page, gfp_t wait)
3382{
3383	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3384
3385	trace_ext4_releasepage(page);
3386
3387	/* Page has dirty journalled data -> cannot release */
3388	if (PageChecked(page))
3389		return 0;
3390	if (journal)
3391		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3392	else
3393		return try_to_free_buffers(page);
3394}
3395
3396static bool ext4_inode_datasync_dirty(struct inode *inode)
3397{
3398	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3399
3400	if (journal)
3401		return !jbd2_transaction_committed(journal,
3402					EXT4_I(inode)->i_datasync_tid);
3403	/* Any metadata buffers to write? */
3404	if (!list_empty(&inode->i_mapping->private_list))
3405		return true;
3406	return inode->i_state & I_DIRTY_DATASYNC;
3407}
3408
3409static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3410			    unsigned flags, struct iomap *iomap)
3411{
3412	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3413	unsigned int blkbits = inode->i_blkbits;
3414	unsigned long first_block, last_block;
3415	struct ext4_map_blocks map;
3416	bool delalloc = false;
3417	int ret;
3418
3419	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3420		return -EINVAL;
3421	first_block = offset >> blkbits;
3422	last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
3423			   EXT4_MAX_LOGICAL_BLOCK);
3424
3425	if (flags & IOMAP_REPORT) {
3426		if (ext4_has_inline_data(inode)) {
3427			ret = ext4_inline_data_iomap(inode, iomap);
3428			if (ret != -EAGAIN) {
3429				if (ret == 0 && offset >= iomap->length)
3430					ret = -ENOENT;
3431				return ret;
3432			}
3433		}
3434	} else {
3435		if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3436			return -ERANGE;
3437	}
3438
3439	map.m_lblk = first_block;
3440	map.m_len = last_block - first_block + 1;
3441
3442	if (flags & IOMAP_REPORT) {
3443		ret = ext4_map_blocks(NULL, inode, &map, 0);
3444		if (ret < 0)
3445			return ret;
3446
3447		if (ret == 0) {
3448			ext4_lblk_t end = map.m_lblk + map.m_len - 1;
3449			struct extent_status es;
3450
3451			ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3452						  map.m_lblk, end, &es);
3453
3454			if (!es.es_len || es.es_lblk > end) {
3455				/* entire range is a hole */
3456			} else if (es.es_lblk > map.m_lblk) {
3457				/* range starts with a hole */
3458				map.m_len = es.es_lblk - map.m_lblk;
3459			} else {
3460				ext4_lblk_t offs = 0;
3461
3462				if (es.es_lblk < map.m_lblk)
3463					offs = map.m_lblk - es.es_lblk;
3464				map.m_lblk = es.es_lblk + offs;
3465				map.m_len = es.es_len - offs;
3466				delalloc = true;
3467			}
3468		}
3469	} else if (flags & IOMAP_WRITE) {
3470		int dio_credits;
3471		handle_t *handle;
3472		int retries = 0;
3473
3474		/* Trim mapping request to maximum we can map at once for DIO */
3475		if (map.m_len > DIO_MAX_BLOCKS)
3476			map.m_len = DIO_MAX_BLOCKS;
3477		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
3478retry:
3479		/*
3480		 * Either we allocate blocks and then we don't get an unwritten
3481		 * extent so we have reserved enough credits, or the blocks
3482		 * are already allocated and unwritten and in that case
3483		 * extent conversion fits in the credits as well.
3484		 */
3485		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
3486					    dio_credits);
3487		if (IS_ERR(handle))
3488			return PTR_ERR(handle);
3489
3490		ret = ext4_map_blocks(handle, inode, &map,
3491				      EXT4_GET_BLOCKS_CREATE_ZERO);
3492		if (ret < 0) {
3493			ext4_journal_stop(handle);
3494			if (ret == -ENOSPC &&
3495			    ext4_should_retry_alloc(inode->i_sb, &retries))
3496				goto retry;
3497			return ret;
3498		}
3499
3500		/*
3501		 * If we added blocks beyond i_size, we need to make sure they
3502		 * will get truncated if we crash before updating i_size in
3503		 * ext4_iomap_end(). For faults we don't need to do that (and
3504		 * even cannot because for orphan list operations inode_lock is
3505		 * required) - if we happen to instantiate a block beyond i_size,
3506		 * it is because we race with truncate which has already added
3507		 * the inode to the orphan list.
3508		 */
3509		if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
3510		    (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
3511			int err;
3512
3513			err = ext4_orphan_add(handle, inode);
3514			if (err < 0) {
3515				ext4_journal_stop(handle);
3516				return err;
3517			}
3518		}
3519		ext4_journal_stop(handle);
3520	} else {
3521		ret = ext4_map_blocks(NULL, inode, &map, 0);
3522		if (ret < 0)
3523			return ret;
3524	}
3525
3526	iomap->flags = 0;
3527	if (ext4_inode_datasync_dirty(inode))
3528		iomap->flags |= IOMAP_F_DIRTY;
3529	iomap->bdev = inode->i_sb->s_bdev;
3530	iomap->dax_dev = sbi->s_daxdev;
3531	iomap->offset = (u64)first_block << blkbits;
3532	iomap->length = (u64)map.m_len << blkbits;
3533
3534	if (ret == 0) {
3535		iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
3536		iomap->addr = IOMAP_NULL_ADDR;
3537	} else {
3538		if (map.m_flags & EXT4_MAP_MAPPED) {
3539			iomap->type = IOMAP_MAPPED;
3540		} else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
3541			iomap->type = IOMAP_UNWRITTEN;
3542		} else {
3543			WARN_ON_ONCE(1);
3544			return -EIO;
3545		}
3546		iomap->addr = (u64)map.m_pblk << blkbits;
3547	}
3548
3549	if (map.m_flags & EXT4_MAP_NEW)
3550		iomap->flags |= IOMAP_F_NEW;
3551
3552	return 0;
3553}
3554
3555static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3556			  ssize_t written, unsigned flags, struct iomap *iomap)
3557{
3558	int ret = 0;
3559	handle_t *handle;
3560	int blkbits = inode->i_blkbits;
3561	bool truncate = false;
3562
3563	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
3564		return 0;
3565
3566	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3567	if (IS_ERR(handle)) {
3568		ret = PTR_ERR(handle);
3569		goto orphan_del;
3570	}
3571	if (ext4_update_inode_size(inode, offset + written))
3572		ext4_mark_inode_dirty(handle, inode);
3573	/*
3574	 * We may need to truncate allocated but not written blocks beyond EOF.
3575	 */
3576	if (iomap->offset + iomap->length >
3577	    ALIGN(inode->i_size, 1 << blkbits)) {
3578		ext4_lblk_t written_blk, end_blk;
3579
3580		written_blk = (offset + written) >> blkbits;
3581		end_blk = (offset + length) >> blkbits;
3582		if (written_blk < end_blk && ext4_can_truncate(inode))
3583			truncate = true;
3584	}
3585	/*
3586	 * Remove inode from orphan list if we were extending an inode and
3587	 * everything went fine.
3588	 */
3589	if (!truncate && inode->i_nlink &&
3590	    !list_empty(&EXT4_I(inode)->i_orphan))
3591		ext4_orphan_del(handle, inode);
3592	ext4_journal_stop(handle);
3593	if (truncate) {
3594		ext4_truncate_failed_write(inode);
3595orphan_del:
3596		/*
3597		 * If truncate failed early the inode might still be on the
3598		 * orphan list; we need to make sure the inode is removed from
3599		 * the orphan list in that case.
3600		 */
3601		if (inode->i_nlink)
3602			ext4_orphan_del(NULL, inode);
3603	}
3604	return ret;
3605}
3606
3607const struct iomap_ops ext4_iomap_ops = {
3608	.iomap_begin		= ext4_iomap_begin,
3609	.iomap_end		= ext4_iomap_end,
3610};
3611
3612static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3613			    ssize_t size, void *private)
3614{
3615	ext4_io_end_t *io_end = private;
3616
3617	/* if not async direct IO just return */
3618	if (!io_end)
3619		return 0;
3620
3621	ext_debug("ext4_end_io_dio(): io_end 0x%p "
3622		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3623		  io_end, io_end->inode->i_ino, iocb, offset, size);
3624
3625	/*
3626	 * Error during AIO DIO. We cannot convert unwritten extents as the
3627	 * data was not written. Just clear the unwritten flag and drop io_end.
3628	 */
3629	if (size <= 0) {
3630		ext4_clear_io_unwritten_flag(io_end);
3631		size = 0;
3632	}
3633	io_end->offset = offset;
3634	io_end->size = size;
3635	ext4_put_io_end(io_end);
3636
3637	return 0;
3638}
3639
3640/*
3641 * Handling of direct IO writes.
3642 *
3643 * For ext4 extent files, ext4 will do direct-io writes even to holes,
3644 * preallocated extents, and writes that extend the file; there is no
3645 * need to fall back to buffered IO.
3646 *
3647 * For holes, we fallocate those blocks and mark them as unwritten.
3648 * If those blocks were preallocated, we make sure they are split, but
3649 * still keep the range to write as unwritten.
3650 *
3651 * The unwritten extents will be converted to written when DIO is completed.
3652 * For async direct IO, since the IO may still be pending when we return, we
3653 * set up an end_io callback function, which will do the conversion
3654 * when the async direct IO completes.
3655 *
3656 * If the O_DIRECT write will extend the file then add this inode to the
3657 * orphan list, so that recovery will truncate it back to the original size
3658 * if the machine crashes during the write.
3659 *
3660 */
3661static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3662{
3663	struct file *file = iocb->ki_filp;
3664	struct inode *inode = file->f_mapping->host;
3665	struct ext4_inode_info *ei = EXT4_I(inode);
3666	ssize_t ret;
3667	loff_t offset = iocb->ki_pos;
3668	size_t count = iov_iter_count(iter);
3669	int overwrite = 0;
3670	get_block_t *get_block_func = NULL;
3671	int dio_flags = 0;
3672	loff_t final_size = offset + count;
3673	int orphan = 0;
3674	handle_t *handle;
3675
3676	if (final_size > inode->i_size || final_size > ei->i_disksize) {
3677		/* Credits for sb + inode write */
3678		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3679		if (IS_ERR(handle)) {
3680			ret = PTR_ERR(handle);
3681			goto out;
3682		}
3683		ret = ext4_orphan_add(handle, inode);
3684		if (ret) {
3685			ext4_journal_stop(handle);
3686			goto out;
3687		}
3688		orphan = 1;
3689		ext4_update_i_disksize(inode, inode->i_size);
3690		ext4_journal_stop(handle);
3691	}
3692
3693	BUG_ON(iocb->private == NULL);
3694
3695	/*
3696	 * Make all waiters for direct IO properly wait also for extent
3697	 * conversion. This also disallows race between truncate() and
3698	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3699	 */
3700	inode_dio_begin(inode);
3701
3702	/* If we do an overwrite dio, i_mutex locking can be released */
3703	overwrite = *((int *)iocb->private);
3704
3705	if (overwrite)
3706		inode_unlock(inode);
3707
3708	/*
3709	 * For extent mapped files we could direct write to holes and fallocate.
3710	 *
3711	 * Allocated blocks to fill the hole are marked as unwritten to prevent
3712	 * parallel buffered read to expose the stale data before DIO complete
3713	 * the data IO.
3714	 *
3715	 * As to previously fallocated extents, ext4 get_block will just simply
3716	 * mark the buffer mapped but still keep the extents unwritten.
3717	 *
3718	 * For the non-AIO case, we will convert those unwritten extents to written
3719	 * after returning from blockdev_direct_IO. That way we save ourselves from
3720	 * allocating an io_end structure and also the overhead of offloading
3721	 * the extent conversion to a workqueue.
3722	 *
3723	 * For async DIO, the conversion needs to be deferred when the
3724	 * IO is completed. The ext4 end_io callback function will be
3725	 * called to take care of the conversion work.  Here for async
3726	 * case, we allocate an io_end structure to hook to the iocb.
3727	 */
3728	iocb->private = NULL;
3729	if (overwrite)
3730		get_block_func = ext4_dio_get_block_overwrite;
3731	else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3732		   round_down(offset, i_blocksize(inode)) >= inode->i_size) {
3733		get_block_func = ext4_dio_get_block;
3734		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3735	} else if (is_sync_kiocb(iocb)) {
3736		get_block_func = ext4_dio_get_block_unwritten_sync;
3737		dio_flags = DIO_LOCKING;
3738	} else {
3739		get_block_func = ext4_dio_get_block_unwritten_async;
3740		dio_flags = DIO_LOCKING;
3741	}
3742	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3743				   get_block_func, ext4_end_io_dio, NULL,
3744				   dio_flags);
3745
3746	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3747						EXT4_STATE_DIO_UNWRITTEN)) {
3748		int err;
3749		/*
3750		 * for the non-AIO case, since the IO is already
3751		 * completed, we can do the conversion right here
3752		 */
3753		err = ext4_convert_unwritten_extents(NULL, inode,
3754						     offset, ret);
3755		if (err < 0)
3756			ret = err;
3757		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3758	}
3759
3760	inode_dio_end(inode);
3761	/* take i_mutex locking again if we do an overwrite dio */
3762	if (overwrite)
3763		inode_lock(inode);
3764
3765	if (ret < 0 && final_size > inode->i_size)
3766		ext4_truncate_failed_write(inode);
3767
3768	/* Handle extending of i_size after direct IO write */
3769	if (orphan) {
3770		int err;
3771
3772		/* Credits for sb + inode write */
3773		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3774		if (IS_ERR(handle)) {
3775			/*
3776			 * We wrote the data but cannot extend
3777			 * i_size. Bail out. In the async IO case, we do
3778			 * not return an error here because we have
3779			 * already submitted the corresponding
3780			 * bio. Returning an error here makes the caller
3781			 * think that this IO is done and failed,
3782			 * resulting in a race with the bio's completion
3783			 * handler.
3784			 */
3785			if (!ret)
3786				ret = PTR_ERR(handle);
3787			if (inode->i_nlink)
3788				ext4_orphan_del(NULL, inode);
3789
3790			goto out;
3791		}
3792		if (inode->i_nlink)
3793			ext4_orphan_del(handle, inode);
3794		if (ret > 0) {
3795			loff_t end = offset + ret;
3796			if (end > inode->i_size || end > ei->i_disksize) {
3797				ext4_update_i_disksize(inode, end);
3798				if (end > inode->i_size)
3799					i_size_write(inode, end);
3800				/*
3801				 * We're going to return a positive `ret'
3802				 * here due to non-zero-length I/O, so there's
3803				 * no way of reporting error returns from
3804				 * ext4_mark_inode_dirty() to userspace.  So
3805				 * ignore it.
3806				 */
3807				ext4_mark_inode_dirty(handle, inode);
3808			}
3809		}
3810		err = ext4_journal_stop(handle);
3811		if (ret == 0)
3812			ret = err;
3813	}
3814out:
3815	return ret;
3816}
3817
3818static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3819{
3820	struct address_space *mapping = iocb->ki_filp->f_mapping;
3821	struct inode *inode = mapping->host;
3822	size_t count = iov_iter_count(iter);
3823	ssize_t ret;
3824
3825	/*
3826	 * Shared inode_lock is enough for us - it protects against concurrent
3827	 * writes & truncates and since we take care of writing back page cache,
3828	 * we are protected against page writeback as well.
3829	 */
3830	inode_lock_shared(inode);
3831	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
3832					   iocb->ki_pos + count - 1);
3833	if (ret)
3834		goto out_unlock;
3835	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3836				   iter, ext4_dio_get_block, NULL, NULL, 0);
3837out_unlock:
3838	inode_unlock_shared(inode);
3839	return ret;
3840}
3841
3842static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3843{
3844	struct file *file = iocb->ki_filp;
3845	struct inode *inode = file->f_mapping->host;
3846	size_t count = iov_iter_count(iter);
3847	loff_t offset = iocb->ki_pos;
3848	ssize_t ret;
3849
3850#ifdef CONFIG_FS_ENCRYPTION
3851	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
3852		return 0;
3853#endif
3854	if (fsverity_active(inode))
3855		return 0;
3856
3857	/*
3858	 * If we are doing data journalling we don't support O_DIRECT
3859	 */
3860	if (ext4_should_journal_data(inode))
3861		return 0;
3862
3863	/* Let buffer I/O handle the inline data case. */
3864	if (ext4_has_inline_data(inode))
3865		return 0;
3866
3867	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
3868	if (iov_iter_rw(iter) == READ)
3869		ret = ext4_direct_IO_read(iocb, iter);
3870	else
3871		ret = ext4_direct_IO_write(iocb, iter);
3872	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
3873	return ret;
3874}
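/*
 * Illustrative userspace sketch (added commentary, not part of the
 * original source): this path is entered for I/O on an O_DIRECT file
 * descriptor, which requires buffer, offset and length to be aligned
 * (typically to the logical block size; 4096 is assumed below):
 *
 *	void *buf;
 *	int fd = open("/mnt/file", O_WRONLY | O_DIRECT);
 *	posix_memalign(&buf, 4096, 4096);
 *	memset(buf, 0xab, 4096);
 *	pwrite(fd, buf, 4096, 0);	// ends up in ext4_direct_IO_write()
 */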
3875
3876/*
3877 * Pages can be marked dirty completely asynchronously from ext4's journalling
3878 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3879 * much here because ->set_page_dirty is called under VFS locks.  The page is
3880 * not necessarily locked.
3881 *
3882 * We cannot just dirty the page and leave attached buffers clean, because the
3883 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3884 * or jbddirty because all the journalling code will explode.
3885 *
3886 * So what we do is to mark the page "pending dirty" and next time writepage
3887 * is called, propagate that into the buffers appropriately.
3888 */
3889static int ext4_journalled_set_page_dirty(struct page *page)
3890{
3891	SetPageChecked(page);
3892	return __set_page_dirty_nobuffers(page);
3893}
3894
3895static int ext4_set_page_dirty(struct page *page)
3896{
3897	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3898	WARN_ON_ONCE(!page_has_buffers(page));
3899	return __set_page_dirty_buffers(page);
3900}
3901
3902static const struct address_space_operations ext4_aops = {
3903	.readpage		= ext4_readpage,
3904	.readpages		= ext4_readpages,
3905	.writepage		= ext4_writepage,
3906	.writepages		= ext4_writepages,
3907	.write_begin		= ext4_write_begin,
3908	.write_end		= ext4_write_end,
3909	.set_page_dirty		= ext4_set_page_dirty,
3910	.bmap			= ext4_bmap,
3911	.invalidatepage		= ext4_invalidatepage,
3912	.releasepage		= ext4_releasepage,
3913	.direct_IO		= ext4_direct_IO,
3914	.migratepage		= buffer_migrate_page,
3915	.is_partially_uptodate  = block_is_partially_uptodate,
3916	.error_remove_page	= generic_error_remove_page,
3917};
3918
3919static const struct address_space_operations ext4_journalled_aops = {
3920	.readpage		= ext4_readpage,
3921	.readpages		= ext4_readpages,
3922	.writepage		= ext4_writepage,
3923	.writepages		= ext4_writepages,
3924	.write_begin		= ext4_write_begin,
3925	.write_end		= ext4_journalled_write_end,
3926	.set_page_dirty		= ext4_journalled_set_page_dirty,
3927	.bmap			= ext4_bmap,
3928	.invalidatepage		= ext4_journalled_invalidatepage,
3929	.releasepage		= ext4_releasepage,
3930	.direct_IO		= ext4_direct_IO,
3931	.is_partially_uptodate  = block_is_partially_uptodate,
3932	.error_remove_page	= generic_error_remove_page,
3933};
3934
3935static const struct address_space_operations ext4_da_aops = {
3936	.readpage		= ext4_readpage,
3937	.readpages		= ext4_readpages,
3938	.writepage		= ext4_writepage,
3939	.writepages		= ext4_writepages,
3940	.write_begin		= ext4_da_write_begin,
3941	.write_end		= ext4_da_write_end,
3942	.set_page_dirty		= ext4_set_page_dirty,
3943	.bmap			= ext4_bmap,
3944	.invalidatepage		= ext4_invalidatepage,
3945	.releasepage		= ext4_releasepage,
3946	.direct_IO		= ext4_direct_IO,
3947	.migratepage		= buffer_migrate_page,
3948	.is_partially_uptodate  = block_is_partially_uptodate,
3949	.error_remove_page	= generic_error_remove_page,
3950};
3951
3952static const struct address_space_operations ext4_dax_aops = {
3953	.writepages		= ext4_dax_writepages,
3954	.direct_IO		= noop_direct_IO,
3955	.set_page_dirty		= noop_set_page_dirty,
3956	.bmap			= ext4_bmap,
3957	.invalidatepage		= noop_invalidatepage,
3958};
3959
3960void ext4_set_aops(struct inode *inode)
3961{
3962	switch (ext4_inode_journal_mode(inode)) {
3963	case EXT4_INODE_ORDERED_DATA_MODE:
3964	case EXT4_INODE_WRITEBACK_DATA_MODE:
3965		break;
3966	case EXT4_INODE_JOURNAL_DATA_MODE:
3967		inode->i_mapping->a_ops = &ext4_journalled_aops;
3968		return;
3969	default:
3970		BUG();
3971	}
3972	if (IS_DAX(inode))
3973		inode->i_mapping->a_ops = &ext4_dax_aops;
3974	else if (test_opt(inode->i_sb, DELALLOC))
3975		inode->i_mapping->a_ops = &ext4_da_aops;
3976	else
3977		inode->i_mapping->a_ops = &ext4_aops;
3978}
3979
3980static int __ext4_block_zero_page_range(handle_t *handle,
3981		struct address_space *mapping, loff_t from, loff_t length)
3982{
3983	ext4_fsblk_t index = from >> PAGE_SHIFT;
3984	unsigned offset = from & (PAGE_SIZE-1);
3985	unsigned blocksize, pos;
3986	ext4_lblk_t iblock;
3987	struct inode *inode = mapping->host;
3988	struct buffer_head *bh;
3989	struct page *page;
3990	int err = 0;
3991
3992	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3993				   mapping_gfp_constraint(mapping, ~__GFP_FS));
3994	if (!page)
3995		return -ENOMEM;
3996
3997	blocksize = inode->i_sb->s_blocksize;
3998
3999	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
4000
4001	if (!page_has_buffers(page))
4002		create_empty_buffers(page, blocksize, 0);
4003
4004	/* Find the buffer that contains "offset" */
4005	bh = page_buffers(page);
4006	pos = blocksize;
4007	while (offset >= pos) {
4008		bh = bh->b_this_page;
4009		iblock++;
4010		pos += blocksize;
4011	}
4012	if (buffer_freed(bh)) {
4013		BUFFER_TRACE(bh, "freed: skip");
4014		goto unlock;
4015	}
4016	if (!buffer_mapped(bh)) {
4017		BUFFER_TRACE(bh, "unmapped");
4018		ext4_get_block(inode, iblock, bh, 0);
4019		/* unmapped? It's a hole - nothing to do */
4020		if (!buffer_mapped(bh)) {
4021			BUFFER_TRACE(bh, "still unmapped");
4022			goto unlock;
4023		}
4024	}
4025
4026	/* Ok, it's mapped. Make sure it's up-to-date */
4027	if (PageUptodate(page))
4028		set_buffer_uptodate(bh);
4029
4030	if (!buffer_uptodate(bh)) {
4031		err = -EIO;
4032		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
4033		wait_on_buffer(bh);
4034		/* Uhhuh. Read error. Complain and punt. */
4035		if (!buffer_uptodate(bh))
4036			goto unlock;
4037		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
4038			/* We expect the key to be set. */
4039			BUG_ON(!fscrypt_has_encryption_key(inode));
4040			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
4041					page, blocksize, bh_offset(bh)));
4042		}
4043	}
4044	if (ext4_should_journal_data(inode)) {
4045		BUFFER_TRACE(bh, "get write access");
4046		err = ext4_journal_get_write_access(handle, bh);
4047		if (err)
4048			goto unlock;
4049	}
4050	zero_user(page, offset, length);
4051	BUFFER_TRACE(bh, "zeroed end of block");
4052
4053	if (ext4_should_journal_data(inode)) {
4054		err = ext4_handle_dirty_metadata(handle, inode, bh);
4055	} else {
4056		err = 0;
4057		mark_buffer_dirty(bh);
4058		if (ext4_should_order_data(inode))
4059			err = ext4_jbd2_inode_add_write(handle, inode, from,
4060					length);
4061	}
4062
4063unlock:
4064	unlock_page(page);
4065	put_page(page);
4066	return err;
4067}
4068
4069/*
4070 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
4071 * starting from file offset 'from'.  The range to be zeroed must
4072 * be contained within one block.  If the specified range exceeds
4073 * the end of the block, it will be shortened to the end of the block
4074 * that corresponds to 'from'.
4075 */
4076static int ext4_block_zero_page_range(handle_t *handle,
4077		struct address_space *mapping, loff_t from, loff_t length)
4078{
4079	struct inode *inode = mapping->host;
4080	unsigned offset = from & (PAGE_SIZE-1);
4081	unsigned blocksize = inode->i_sb->s_blocksize;
4082	unsigned max = blocksize - (offset & (blocksize - 1));
4083
4084	/*
4085	 * Correct the length if it does not fall between
4086	 * 'from' and the end of the block.
4087	 */
4088	if (length > max || length < 0)
4089		length = max;
4090
4091	if (IS_DAX(inode)) {
4092		return iomap_zero_range(inode, from, length, NULL,
4093					&ext4_iomap_ops);
4094	}
4095	return __ext4_block_zero_page_range(handle, mapping, from, length);
4096}
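/*
 * Worked example (added for illustration): with a 4096-byte block size
 * and from = 5000, offset = 5000 & 4095 = 904 and max = 4096 - 904 =
 * 3192.  A request to zero 4000 bytes at offset 5000 is therefore
 * trimmed to 3192 bytes so that it stays within the block containing
 * 'from'.
 */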
4097
4098/*
4099 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
4100 * up to the end of the block which corresponds to `from'.
4101 * This is required during truncate. We need to physically zero the tail end
4102 * of that block so it doesn't yield old data if the file is later grown.
4103 */
4104static int ext4_block_truncate_page(handle_t *handle,
4105		struct address_space *mapping, loff_t from)
4106{
4107	unsigned offset = from & (PAGE_SIZE-1);
4108	unsigned length;
4109	unsigned blocksize;
4110	struct inode *inode = mapping->host;
4111
4112	/* If we are processing an encrypted inode during orphan list handling */
4113	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
4114		return 0;
4115
4116	blocksize = inode->i_sb->s_blocksize;
4117	length = blocksize - (offset & (blocksize - 1));
4118
4119	return ext4_block_zero_page_range(handle, mapping, from, length);
4120}
4121
4122int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
4123			     loff_t lstart, loff_t length)
4124{
4125	struct super_block *sb = inode->i_sb;
4126	struct address_space *mapping = inode->i_mapping;
4127	unsigned partial_start, partial_end;
4128	ext4_fsblk_t start, end;
4129	loff_t byte_end = (lstart + length - 1);
4130	int err = 0;
4131
4132	partial_start = lstart & (sb->s_blocksize - 1);
4133	partial_end = byte_end & (sb->s_blocksize - 1);
4134
4135	start = lstart >> sb->s_blocksize_bits;
4136	end = byte_end >> sb->s_blocksize_bits;
4137
4138	/* Handle partial zero within the single block */
4139	if (start == end &&
4140	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
4141		err = ext4_block_zero_page_range(handle, mapping,
4142						 lstart, length);
4143		return err;
4144	}
4145	/* Handle partial zero out on the start of the range */
4146	if (partial_start) {
4147		err = ext4_block_zero_page_range(handle, mapping,
4148						 lstart, sb->s_blocksize);
4149		if (err)
4150			return err;
4151	}
4152	/* Handle partial zero out on the end of the range */
4153	if (partial_end != sb->s_blocksize - 1)
4154		err = ext4_block_zero_page_range(handle, mapping,
4155						 byte_end - partial_end,
4156						 partial_end + 1);
4157	return err;
4158}
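/*
 * Worked example (added for illustration): with a 4096-byte block
 * size, lstart = 1000 and length = 8000 give byte_end = 8999, so
 * start = 0, end = 2, partial_start = 1000 and partial_end = 807.
 * The range spans several blocks, so bytes 1000..4095 of the first
 * block and bytes 8192..8999 of the last block are zeroed; the fully
 * covered middle block is left for the caller to release.
 */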
4159
4160int ext4_can_truncate(struct inode *inode)
4161{
4162	if (S_ISREG(inode->i_mode))
4163		return 1;
4164	if (S_ISDIR(inode->i_mode))
4165		return 1;
4166	if (S_ISLNK(inode->i_mode))
4167		return !ext4_inode_is_fast_symlink(inode);
4168	return 0;
4169}
4170
4171/*
4172 * We have to make sure i_disksize gets properly updated before we truncate
4173 * page cache due to hole punching or zero range. Otherwise i_disksize update
4174 * can get lost as it may have been postponed to submission of writeback but
4175 * that will never happen after we truncate page cache.
4176 */
4177int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4178				      loff_t len)
4179{
4180	handle_t *handle;
4181	loff_t size = i_size_read(inode);
4182
4183	WARN_ON(!inode_is_locked(inode));
4184	if (offset > size || offset + len < size)
4185		return 0;
4186
4187	if (EXT4_I(inode)->i_disksize >= size)
4188		return 0;
4189
4190	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4191	if (IS_ERR(handle))
4192		return PTR_ERR(handle);
4193	ext4_update_i_disksize(inode, size);
4194	ext4_mark_inode_dirty(handle, inode);
4195	ext4_journal_stop(handle);
4196
4197	return 0;
4198}
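/*
 * Hypothetical scenario (added for illustration): a delalloc write
 * extends i_size to 10MB while i_disksize is still 5MB because
 * writeback has not run yet.  Punching a hole at offset = 4MB,
 * len = 8MB covers i_size and will discard the dirty pages whose
 * writeback would have advanced i_disksize, so we push i_disksize
 * to 10MB here first.
 */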
4199
4200static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4201{
4202	up_write(&ei->i_mmap_sem);
4203	schedule();
4204	down_write(&ei->i_mmap_sem);
4205}
4206
4207int ext4_break_layouts(struct inode *inode)
4208{
4209	struct ext4_inode_info *ei = EXT4_I(inode);
4210	struct page *page;
4211	int error;
4212
4213	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4214		return -EINVAL;
4215
4216	do {
4217		page = dax_layout_busy_page(inode->i_mapping);
4218		if (!page)
4219			return 0;
4220
4221		error = ___wait_var_event(&page->_refcount,
4222				atomic_read(&page->_refcount) == 1,
4223				TASK_INTERRUPTIBLE, 0, 0,
4224				ext4_wait_dax_page(ei));
4225	} while (error == 0);
4226
4227	return error;
4228}
4229
4230/*
4231 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4232 * associated with the given offset and length
4233 *
4234 * @inode:  File inode
4235 * @offset: The offset where the hole will begin
4236 * @len:    The length of the hole
4237 *
4238 * Returns: 0 on success or negative on failure
4239 */
4240
4241int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4242{
4243	struct super_block *sb = inode->i_sb;
4244	ext4_lblk_t first_block, stop_block;
4245	struct address_space *mapping = inode->i_mapping;
4246	loff_t first_block_offset, last_block_offset;
4247	handle_t *handle;
4248	unsigned int credits;
4249	int ret = 0;
4250
4251	if (!S_ISREG(inode->i_mode))
4252		return -EOPNOTSUPP;
4253
4254	trace_ext4_punch_hole(inode, offset, length, 0);
4255
4256	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4257	if (ext4_has_inline_data(inode)) {
4258		down_write(&EXT4_I(inode)->i_mmap_sem);
4259		ret = ext4_convert_inline_data(inode);
4260		up_write(&EXT4_I(inode)->i_mmap_sem);
4261		if (ret)
4262			return ret;
4263	}
4264
4265	/*
4266	 * Write out all dirty pages to avoid race conditions,
4267	 * then release them.
4268	 */
4269	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4270		ret = filemap_write_and_wait_range(mapping, offset,
4271						   offset + length - 1);
4272		if (ret)
4273			return ret;
4274	}
4275
4276	inode_lock(inode);
4277
4278	/* No need to punch hole beyond i_size */
4279	if (offset >= inode->i_size)
4280		goto out_mutex;
4281
4282	/*
4283	 * If the hole extends beyond i_size, set the hole
4284	 * to end after the page that contains i_size
4285	 */
4286	if (offset + length > inode->i_size) {
4287		length = inode->i_size +
4288		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4289		   offset;
4290	}
4291
4292	if (offset & (sb->s_blocksize - 1) ||
4293	    (offset + length) & (sb->s_blocksize - 1)) {
4294		/*
4295		 * Attach jinode to inode for jbd2 if we do any zeroing of
4296		 * partial block
4297		 */
4298		ret = ext4_inode_attach_jinode(inode);
4299		if (ret < 0)
4300			goto out_mutex;
4301
4302	}
4303
4304	/* Wait for all existing dio workers; newcomers will block on i_mutex */
4305	inode_dio_wait(inode);
4306
4307	/*
4308	 * Prevent page faults from reinstantiating pages we have released from
4309	 * page cache.
4310	 */
4311	down_write(&EXT4_I(inode)->i_mmap_sem);
4312
4313	ret = ext4_break_layouts(inode);
4314	if (ret)
4315		goto out_dio;
4316
4317	first_block_offset = round_up(offset, sb->s_blocksize);
4318	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4319
4320	/* Now release the pages and zero the block-aligned part of the pages */
4321	if (last_block_offset > first_block_offset) {
4322		ret = ext4_update_disksize_before_punch(inode, offset, length);
4323		if (ret)
4324			goto out_dio;
4325		truncate_pagecache_range(inode, first_block_offset,
4326					 last_block_offset);
4327	}
4328
4329	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4330		credits = ext4_writepage_trans_blocks(inode);
4331	else
4332		credits = ext4_blocks_for_truncate(inode);
4333	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4334	if (IS_ERR(handle)) {
4335		ret = PTR_ERR(handle);
4336		ext4_std_error(sb, ret);
4337		goto out_dio;
4338	}
4339
4340	ret = ext4_zero_partial_blocks(handle, inode, offset,
4341				       length);
4342	if (ret)
4343		goto out_stop;
4344
4345	first_block = (offset + sb->s_blocksize - 1) >>
4346		EXT4_BLOCK_SIZE_BITS(sb);
4347	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4348
4349	/* If there are blocks to remove, do it */
4350	if (stop_block > first_block) {
4351
4352		down_write(&EXT4_I(inode)->i_data_sem);
4353		ext4_discard_preallocations(inode);
4354
4355		ret = ext4_es_remove_extent(inode, first_block,
4356					    stop_block - first_block);
4357		if (ret) {
4358			up_write(&EXT4_I(inode)->i_data_sem);
4359			goto out_stop;
4360		}
4361
4362		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4363			ret = ext4_ext_remove_space(inode, first_block,
4364						    stop_block - 1);
4365		else
4366			ret = ext4_ind_remove_space(handle, inode, first_block,
4367						    stop_block);
4368
4369		up_write(&EXT4_I(inode)->i_data_sem);
4370	}
4371	if (IS_SYNC(inode))
4372		ext4_handle_sync(handle);
4373
4374	inode->i_mtime = inode->i_ctime = current_time(inode);
4375	ext4_mark_inode_dirty(handle, inode);
4376	if (ret >= 0)
4377		ext4_update_inode_fsync_trans(handle, inode, 1);
4378out_stop:
4379	ext4_journal_stop(handle);
4380out_dio:
4381	up_write(&EXT4_I(inode)->i_mmap_sem);
4382out_mutex:
4383	inode_unlock(inode);
4384	return ret;
4385}
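/*
 * Illustrative userspace sketch (added commentary, not part of the
 * original source): ext4_punch_hole() is reached through fallocate(2);
 * PUNCH_HOLE must be combined with KEEP_SIZE:
 *
 *	#include <fcntl.h>
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);		// punch 8KiB starting at 4KiB
 */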
4386
4387int ext4_inode_attach_jinode(struct inode *inode)
4388{
4389	struct ext4_inode_info *ei = EXT4_I(inode);
4390	struct jbd2_inode *jinode;
4391
4392	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4393		return 0;
4394
4395	jinode = jbd2_alloc_inode(GFP_KERNEL);
4396	spin_lock(&inode->i_lock);
4397	if (!ei->jinode) {
4398		if (!jinode) {
4399			spin_unlock(&inode->i_lock);
4400			return -ENOMEM;
4401		}
4402		ei->jinode = jinode;
4403		jbd2_journal_init_jbd_inode(ei->jinode, inode);
4404		jinode = NULL;
4405	}
4406	spin_unlock(&inode->i_lock);
4407	if (unlikely(jinode != NULL))
4408		jbd2_free_inode(jinode);
4409	return 0;
4410}
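/*
 * Note (added commentary): the function above uses the usual race-free
 * lazy-allocation pattern - allocate optimistically outside the
 * spinlock, recheck the field under the lock, and discard the object
 * if another task won the race.  Generic sketch:
 *
 *	obj = alloc();			// may sleep, so outside the lock
 *	spin_lock(&lock);
 *	if (!shared && obj) {
 *		shared = obj;		// we won: publish it
 *		obj = NULL;
 *	}
 *	spin_unlock(&lock);
 *	if (obj)
 *		free(obj);		// we lost: drop our copy
 */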
4411
4412/*
4413 * ext4_truncate()
4414 *
4415 * We block out ext4_get_block() block instantiations across the entire
4416 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4417 * simultaneously on behalf of the same inode.
4418 *
4419 * As we work through the truncate and commit bits of it to the journal there
4420 * is one core, guiding principle: the file's tree must always be consistent on
4421 * disk.  We must be able to restart the truncate after a crash.
4422 *
4423 * The file's tree may be transiently inconsistent in memory (although it
4424 * probably isn't), but whenever we close off and commit a journal transaction,
4425 * the contents of (the filesystem + the journal) must be consistent and
4426 * restartable.  It's pretty simple, really: bottom up, right to left (although
4427 * left-to-right works OK too).
4428 *
4429 * Note that at recovery time, journal replay occurs *before* the restart of
4430 * truncate against the orphan inode list.
4431 *
4432 * The committed inode has the new, desired i_size (which is the same as
4433 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4434 * that this inode's truncate did not complete and it will again call
4435 * ext4_truncate() to have another go.  So there will be instantiated blocks
4436 * to the right of the truncation point in a crashed ext4 filesystem.  But
4437 * that's fine - as long as they are linked from the inode, the post-crash
4438 * ext4_truncate() run will find them and release them.
4439 */
4440int ext4_truncate(struct inode *inode)
4441{
4442	struct ext4_inode_info *ei = EXT4_I(inode);
4443	unsigned int credits;
4444	int err = 0;
4445	handle_t *handle;
4446	struct address_space *mapping = inode->i_mapping;
4447
4448	/*
4449	 * There is a possibility that we're either freeing the inode
4450	 * or it's a completely new inode. In those cases we might not
4451	 * have i_mutex locked because it's not necessary.
4452	 */
4453	if (!(inode->i_state & (I_NEW|I_FREEING)))
4454		WARN_ON(!inode_is_locked(inode));
4455	trace_ext4_truncate_enter(inode);
4456
4457	if (!ext4_can_truncate(inode))
4458		return 0;
4459
4460	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4461
4462	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4463		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4464
4465	if (ext4_has_inline_data(inode)) {
4466		int has_inline = 1;
4467
4468		err = ext4_inline_data_truncate(inode, &has_inline);
4469		if (err)
4470			return err;
4471		if (has_inline)
4472			return 0;
4473	}
4474
4475	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4476	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4477		if (ext4_inode_attach_jinode(inode) < 0)
4478			return 0;
4479	}
4480
4481	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4482		credits = ext4_writepage_trans_blocks(inode);
4483	else
4484		credits = ext4_blocks_for_truncate(inode);
4485
4486	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4487	if (IS_ERR(handle))
4488		return PTR_ERR(handle);
4489
4490	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4491		ext4_block_truncate_page(handle, mapping, inode->i_size);
4492
4493	/*
4494	 * We add the inode to the orphan list, so that if this
4495	 * truncate spans multiple transactions, and we crash, we will
4496	 * resume the truncate when the filesystem recovers.  It also
4497	 * marks the inode dirty, to catch the new size.
4498	 *
4499	 * Implication: the file must always be in a sane, consistent
4500	 * truncatable state while each transaction commits.
4501	 */
4502	err = ext4_orphan_add(handle, inode);
4503	if (err)
4504		goto out_stop;
4505
4506	down_write(&EXT4_I(inode)->i_data_sem);
4507
4508	ext4_discard_preallocations(inode);
4509
4510	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4511		err = ext4_ext_truncate(handle, inode);
4512	else
4513		ext4_ind_truncate(handle, inode);
4514
4515	up_write(&ei->i_data_sem);
4516	if (err)
4517		goto out_stop;
4518
4519	if (IS_SYNC(inode))
4520		ext4_handle_sync(handle);
4521
4522out_stop:
4523	/*
4524	 * If this was a simple ftruncate() and the file will remain alive,
4525	 * then we need to clear up the orphan record which we created above.
4526	 * However, if this was a real unlink then we were called by
4527	 * ext4_evict_inode(), and we allow that function to clean up the
4528	 * orphan info for us.
4529	 */
4530	if (inode->i_nlink)
4531		ext4_orphan_del(handle, inode);
4532
4533	inode->i_mtime = inode->i_ctime = current_time(inode);
4534	ext4_mark_inode_dirty(handle, inode);
4535	ext4_journal_stop(handle);
4536
4537	trace_ext4_truncate_exit(inode);
4538	return err;
4539}
4540
4541/*
4542 * ext4_get_inode_loc returns with an extra refcount against the inode's
4543 * underlying buffer_head on success. If 'in_mem' is true, we have all
4544 * data in memory that is needed to recreate the on-disk version of this
4545 * inode.
4546 */
4547static int __ext4_get_inode_loc(struct inode *inode,
4548				struct ext4_iloc *iloc, int in_mem)
4549{
4550	struct ext4_group_desc	*gdp;
4551	struct buffer_head	*bh;
4552	struct super_block	*sb = inode->i_sb;
4553	ext4_fsblk_t		block;
4554	struct blk_plug		plug;
4555	int			inodes_per_block, inode_offset;
4556
4557	iloc->bh = NULL;
4558	if (inode->i_ino < EXT4_ROOT_INO ||
4559	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4560		return -EFSCORRUPTED;
4561
4562	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4563	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4564	if (!gdp)
4565		return -EIO;
4566
4567	/*
4568	 * Figure out the offset within the block group inode table
4569	 */
4570	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4571	inode_offset = ((inode->i_ino - 1) %
4572			EXT4_INODES_PER_GROUP(sb));
4573	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4574	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4575
4576	bh = sb_getblk(sb, block);
4577	if (unlikely(!bh))
4578		return -ENOMEM;
4579	if (!buffer_uptodate(bh)) {
4580		lock_buffer(bh);
4581
4582		/*
4583		 * If the buffer has the write error flag, we have failed
4584		 * to write out another inode in the same block.  In this
4585		 * case, we don't have to read the block because we may
4586		 * read the old inode data successfully.
4587		 */
4588		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4589			set_buffer_uptodate(bh);
4590
4591		if (buffer_uptodate(bh)) {
4592			/* someone brought it uptodate while we waited */
4593			unlock_buffer(bh);
4594			goto has_buffer;
4595		}
4596
4597		/*
4598		 * If we have all the information about the inode in memory and this
4599		 * is the only valid inode in the block, we need not read the
4600		 * block.
4601		 */
4602		if (in_mem) {
4603			struct buffer_head *bitmap_bh;
4604			int i, start;
4605
4606			start = inode_offset & ~(inodes_per_block - 1);
4607
4608			/* Is the inode bitmap in cache? */
4609			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4610			if (unlikely(!bitmap_bh))
4611				goto make_io;
4612
4613			/*
4614			 * If the inode bitmap isn't in cache then the
4615			 * optimisation may end up performing two reads instead
4616			 * of one, so skip it.
4617			 */
4618			if (!buffer_uptodate(bitmap_bh)) {
4619				brelse(bitmap_bh);
4620				goto make_io;
4621			}
4622			for (i = start; i < start + inodes_per_block; i++) {
4623				if (i == inode_offset)
4624					continue;
4625				if (ext4_test_bit(i, bitmap_bh->b_data))
4626					break;
4627			}
4628			brelse(bitmap_bh);
4629			if (i == start + inodes_per_block) {
4630				/* all other inodes are free, so skip I/O */
4631				memset(bh->b_data, 0, bh->b_size);
4632				set_buffer_uptodate(bh);
4633				unlock_buffer(bh);
4634				goto has_buffer;
4635			}
4636		}
4637
4638make_io:
4639		/*
4640		 * If we need to do any I/O, try to pre-readahead extra
4641		 * blocks from the inode table.
4642		 */
4643		blk_start_plug(&plug);
4644		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4645			ext4_fsblk_t b, end, table;
4646			unsigned num;
4647			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4648
4649			table = ext4_inode_table(sb, gdp);
4650			/* s_inode_readahead_blks is always a power of 2 */
4651			b = block & ~((ext4_fsblk_t) ra_blks - 1);
4652			if (table > b)
4653				b = table;
4654			end = b + ra_blks;
4655			num = EXT4_INODES_PER_GROUP(sb);
4656			if (ext4_has_group_desc_csum(sb))
4657				num -= ext4_itable_unused_count(sb, gdp);
4658			table += num / inodes_per_block;
4659			if (end > table)
4660				end = table;
4661			while (b <= end)
4662				sb_breadahead(sb, b++);
4663		}
4664
4665		/*
4666		 * There are other valid inodes in the buffer, this inode
4667		 * has in-inode xattrs, or we don't have this inode in memory.
4668		 * Read the block from disk.
4669		 */
4670		trace_ext4_load_inode(inode);
4671		get_bh(bh);
4672		bh->b_end_io = end_buffer_read_sync;
4673		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
4674		blk_finish_plug(&plug);
4675		wait_on_buffer(bh);
4676		if (!buffer_uptodate(bh)) {
4677			EXT4_ERROR_INODE_BLOCK(inode, block,
4678					       "unable to read itable block");
4679			brelse(bh);
4680			return -EIO;
4681		}
4682	}
4683has_buffer:
4684	iloc->bh = bh;
4685	return 0;
4686}
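/*
 * Worked example (added for illustration): with 8192 inodes per
 * group, 256-byte inodes and 4K blocks (16 inodes per block), inode
 * #12 gives block_group = (12 - 1) / 8192 = 0 and inode_offset = 11,
 * so it lives in block ext4_inode_table(sb, gdp) + 11 / 16 (the first
 * itable block) at byte offset (11 % 16) * 256 = 2816.
 */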
4687
4688int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4689{
4690	/* We have all inode data except xattrs in memory here. */
4691	return __ext4_get_inode_loc(inode, iloc,
4692		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4693}
4694
4695static bool ext4_should_use_dax(struct inode *inode)
4696{
4697	if (!test_opt(inode->i_sb, DAX))
4698		return false;
4699	if (!S_ISREG(inode->i_mode))
4700		return false;
4701	if (ext4_should_journal_data(inode))
4702		return false;
4703	if (ext4_has_inline_data(inode))
4704		return false;
4705	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4706		return false;
4707	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4708		return false;
4709	return true;
4710}
4711
4712void ext4_set_inode_flags(struct inode *inode)
4713{
4714	unsigned int flags = EXT4_I(inode)->i_flags;
4715	unsigned int new_fl = 0;
4716
4717	if (flags & EXT4_SYNC_FL)
4718		new_fl |= S_SYNC;
4719	if (flags & EXT4_APPEND_FL)
4720		new_fl |= S_APPEND;
4721	if (flags & EXT4_IMMUTABLE_FL)
4722		new_fl |= S_IMMUTABLE;
4723	if (flags & EXT4_NOATIME_FL)
4724		new_fl |= S_NOATIME;
4725	if (flags & EXT4_DIRSYNC_FL)
4726		new_fl |= S_DIRSYNC;
4727	if (ext4_should_use_dax(inode))
4728		new_fl |= S_DAX;
4729	if (flags & EXT4_ENCRYPT_FL)
4730		new_fl |= S_ENCRYPTED;
4731	if (flags & EXT4_CASEFOLD_FL)
4732		new_fl |= S_CASEFOLD;
4733	if (flags & EXT4_VERITY_FL)
4734		new_fl |= S_VERITY;
4735	inode_set_flags(inode, new_fl,
4736			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4737			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4738}
4739
4740static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4741				  struct ext4_inode_info *ei)
4742{
4743	blkcnt_t i_blocks;
4744	struct inode *inode = &(ei->vfs_inode);
4745	struct super_block *sb = inode->i_sb;
4746
4747	if (ext4_has_feature_huge_file(sb)) {
4748		/* we are using combined 48 bit field */
4749		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4750					le32_to_cpu(raw_inode->i_blocks_lo);
4751		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4752			/* i_blocks is expressed in units of file system block size */
4753			return i_blocks << (inode->i_blkbits - 9);
4754		} else {
4755			return i_blocks;
4756		}
4757	} else {
4758		return le32_to_cpu(raw_inode->i_blocks_lo);
4759	}
4760}
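/*
 * Worked example (added for illustration): with huge_file,
 * i_blocks_high = 1 and i_blocks_lo = 0 give i_blocks = 1 << 32.
 * Without EXT4_INODE_HUGE_FILE that is 2^32 512-byte units, i.e.
 * 2TiB; with the flag and 4K blocks the value is further shifted by
 * i_blkbits - 9 = 3, yielding 2^35 sectors, i.e. 16TiB.
 */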
4761
4762static inline int ext4_iget_extra_inode(struct inode *inode,
4763					 struct ext4_inode *raw_inode,
4764					 struct ext4_inode_info *ei)
4765{
4766	__le32 *magic = (void *)raw_inode +
4767			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4768
4769	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
4770	    EXT4_INODE_SIZE(inode->i_sb) &&
4771	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4772		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4773		return ext4_find_inline_data_nolock(inode);
4774	} else
4775		EXT4_I(inode)->i_inline_off = 0;
4776	return 0;
4777}
4778
4779int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4780{
4781	if (!ext4_has_feature_project(inode->i_sb))
4782		return -EOPNOTSUPP;
4783	*projid = EXT4_I(inode)->i_projid;
4784	return 0;
4785}
4786
4787/*
4788 * ext4 has a self-managed i_version for ea inodes: it stores the lower 32 bits
4789 * of the refcount in i_version, so use raw values if the inode has the
4790 * EXT4_EA_INODE_FL flag set.
4791 */
4792static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4793{
4794	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4795		inode_set_iversion_raw(inode, val);
4796	else
4797		inode_set_iversion_queried(inode, val);
4798}
4799static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4800{
4801	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4802		return inode_peek_iversion_raw(inode);
4803	else
4804		return inode_peek_iversion(inode);
4805}
4806
4807struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4808			  ext4_iget_flags flags, const char *function,
4809			  unsigned int line)
4810{
4811	struct ext4_iloc iloc;
4812	struct ext4_inode *raw_inode;
4813	struct ext4_inode_info *ei;
4814	struct inode *inode;
4815	journal_t *journal = EXT4_SB(sb)->s_journal;
4816	long ret;
4817	loff_t size;
4818	int block;
4819	uid_t i_uid;
4820	gid_t i_gid;
4821	projid_t i_projid;
4822
4823	if ((!(flags & EXT4_IGET_SPECIAL) &&
4824	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4825	    (ino < EXT4_ROOT_INO) ||
4826	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4827		if (flags & EXT4_IGET_HANDLE)
4828			return ERR_PTR(-ESTALE);
4829		__ext4_error(sb, function, line,
4830			     "inode #%lu: comm %s: iget: illegal inode #",
4831			     ino, current->comm);
4832		return ERR_PTR(-EFSCORRUPTED);
4833	}
4834
4835	inode = iget_locked(sb, ino);
4836	if (!inode)
4837		return ERR_PTR(-ENOMEM);
4838	if (!(inode->i_state & I_NEW))
4839		return inode;
4840
4841	ei = EXT4_I(inode);
4842	iloc.bh = NULL;
4843
4844	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4845	if (ret < 0)
4846		goto bad_inode;
4847	raw_inode = ext4_raw_inode(&iloc);
4848
4849	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4850		ext4_error_inode(inode, function, line, 0,
4851				 "iget: root inode unallocated");
4852		ret = -EFSCORRUPTED;
4853		goto bad_inode;
4854	}
4855
4856	if ((flags & EXT4_IGET_HANDLE) &&
4857	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4858		ret = -ESTALE;
4859		goto bad_inode;
4860	}
4861
4862	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4863		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4864		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4865			EXT4_INODE_SIZE(inode->i_sb) ||
4866		    (ei->i_extra_isize & 3)) {
4867			ext4_error_inode(inode, function, line, 0,
4868					 "iget: bad extra_isize %u "
4869					 "(inode size %u)",
4870					 ei->i_extra_isize,
4871					 EXT4_INODE_SIZE(inode->i_sb));
4872			ret = -EFSCORRUPTED;
4873			goto bad_inode;
4874		}
4875	} else
4876		ei->i_extra_isize = 0;
4877
4878	/* Precompute checksum seed for inode metadata */
4879	if (ext4_has_metadata_csum(sb)) {
4880		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4881		__u32 csum;
4882		__le32 inum = cpu_to_le32(inode->i_ino);
4883		__le32 gen = raw_inode->i_generation;
4884		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4885				   sizeof(inum));
4886		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4887					      sizeof(gen));
4888	}
4889
4890	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4891		ext4_error_inode(inode, function, line, 0,
4892				 "iget: checksum invalid");
4893		ret = -EFSBADCRC;
4894		goto bad_inode;
4895	}
4896
4897	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4898	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4899	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4900	if (ext4_has_feature_project(sb) &&
4901	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4902	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4903		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4904	else
4905		i_projid = EXT4_DEF_PROJID;
4906
4907	if (!(test_opt(inode->i_sb, NO_UID32))) {
4908		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4909		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4910	}
4911	i_uid_write(inode, i_uid);
4912	i_gid_write(inode, i_gid);
4913	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4914	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4915
4916	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4917	ei->i_inline_off = 0;
4918	ei->i_dir_start_lookup = 0;
4919	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4920	/* We now have enough fields to check if the inode was active or not.
4921	 * This is needed because nfsd might try to access dead inodes;
4922	 * the test is the same one that e2fsck uses.
4923	 * NeilBrown 1999oct15
4924	 */
4925	if (inode->i_nlink == 0) {
4926		if ((inode->i_mode == 0 ||
4927		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4928		    ino != EXT4_BOOT_LOADER_INO) {
4929			/* this inode is deleted */
4930			ret = -ESTALE;
4931			goto bad_inode;
4932		}
4933		/* The only unlinked inodes we let through here have
4934		 * valid i_mode and are being read by the orphan
4935		 * recovery code: that's fine, we're about to complete
4936		 * the process of deleting those.
4937		 * OR it is the EXT4_BOOT_LOADER_INO which is
4938		 * not initialized on a new filesystem. */
4939	}
4940	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4941	ext4_set_inode_flags(inode);
4942	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4943	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4944	if (ext4_has_feature_64bit(sb))
4945		ei->i_file_acl |=
4946			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4947	inode->i_size = ext4_isize(sb, raw_inode);
4948	if ((size = i_size_read(inode)) < 0) {
4949		ext4_error_inode(inode, function, line, 0,
4950				 "iget: bad i_size value: %lld", size);
4951		ret = -EFSCORRUPTED;
4952		goto bad_inode;
4953	}
4954	ei->i_disksize = inode->i_size;
4955#ifdef CONFIG_QUOTA
4956	ei->i_reserved_quota = 0;
4957#endif
4958	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4959	ei->i_block_group = iloc.block_group;
4960	ei->i_last_alloc_group = ~0;
4961	/*
4962	 * NOTE! The in-memory inode i_data array is in little-endian order
4963	 * even on big-endian machines: we do NOT byteswap the block numbers!
4964	 */
4965	for (block = 0; block < EXT4_N_BLOCKS; block++)
4966		ei->i_data[block] = raw_inode->i_block[block];
4967	INIT_LIST_HEAD(&ei->i_orphan);
4968
4969	/*
4970	 * Set transaction ids of transactions that have to be committed
4971	 * to finish f[data]sync. We set them to the currently running transaction
4972	 * as we cannot be sure that the inode or some of its metadata isn't
4973	 * part of the transaction - the inode could have been reclaimed and
4974	 * now it is reread from disk.
4975	 */
4976	if (journal) {
4977		transaction_t *transaction;
4978		tid_t tid;
4979
4980		read_lock(&journal->j_state_lock);
4981		if (journal->j_running_transaction)
4982			transaction = journal->j_running_transaction;
4983		else
4984			transaction = journal->j_committing_transaction;
4985		if (transaction)
4986			tid = transaction->t_tid;
4987		else
4988			tid = journal->j_commit_sequence;
4989		read_unlock(&journal->j_state_lock);
4990		ei->i_sync_tid = tid;
4991		ei->i_datasync_tid = tid;
4992	}
4993
4994	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4995		if (ei->i_extra_isize == 0) {
4996			/* The extra space is currently unused. Use it. */
4997			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4998			ei->i_extra_isize = sizeof(struct ext4_inode) -
4999					    EXT4_GOOD_OLD_INODE_SIZE;
5000		} else {
5001			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
5002			if (ret)
5003				goto bad_inode;
5004		}
5005	}
5006
5007	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
5008	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
5009	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
5010	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
5011
5012	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5013		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
5014
5015		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5016			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5017				ivers |=
5018		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
5019		}
5020		ext4_inode_set_iversion_queried(inode, ivers);
5021	}
5022
5023	ret = 0;
5024	if (ei->i_file_acl &&
5025	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
5026		ext4_error_inode(inode, function, line, 0,
5027				 "iget: bad extended attribute block %llu",
5028				 ei->i_file_acl);
5029		ret = -EFSCORRUPTED;
5030		goto bad_inode;
5031	} else if (!ext4_has_inline_data(inode)) {
5032		/* validate the block references in the inode */
5033		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5034		   (S_ISLNK(inode->i_mode) &&
5035		    !ext4_inode_is_fast_symlink(inode))) {
5036			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5037				ret = ext4_ext_check_inode(inode);
5038			else
5039				ret = ext4_ind_check_inode(inode);
5040		}
5041	}
5042	if (ret)
5043		goto bad_inode;
5044
5045	if (S_ISREG(inode->i_mode)) {
5046		inode->i_op = &ext4_file_inode_operations;
5047		inode->i_fop = &ext4_file_operations;
5048		ext4_set_aops(inode);
5049	} else if (S_ISDIR(inode->i_mode)) {
5050		inode->i_op = &ext4_dir_inode_operations;
5051		inode->i_fop = &ext4_dir_operations;
5052	} else if (S_ISLNK(inode->i_mode)) {
5053		/* VFS does not allow setting these so must be corruption */
5054		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5055			ext4_error_inode(inode, function, line, 0,
5056					 "iget: immutable or append flags "
5057					 "not allowed on symlinks");
5058			ret = -EFSCORRUPTED;
5059			goto bad_inode;
5060		}
5061		if (IS_ENCRYPTED(inode)) {
5062			inode->i_op = &ext4_encrypted_symlink_inode_operations;
5063			ext4_set_aops(inode);
5064		} else if (ext4_inode_is_fast_symlink(inode)) {
5065			inode->i_link = (char *)ei->i_data;
5066			inode->i_op = &ext4_fast_symlink_inode_operations;
5067			nd_terminate_link(ei->i_data, inode->i_size,
5068				sizeof(ei->i_data) - 1);
5069		} else {
5070			inode->i_op = &ext4_symlink_inode_operations;
5071			ext4_set_aops(inode);
5072		}
5073		inode_nohighmem(inode);
5074	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5075	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5076		inode->i_op = &ext4_special_inode_operations;
5077		if (raw_inode->i_block[0])
5078			init_special_inode(inode, inode->i_mode,
5079			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5080		else
5081			init_special_inode(inode, inode->i_mode,
5082			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5083	} else if (ino == EXT4_BOOT_LOADER_INO) {
5084		make_bad_inode(inode);
5085	} else {
5086		ret = -EFSCORRUPTED;
5087		ext4_error_inode(inode, function, line, 0,
5088				 "iget: bogus i_mode (%o)", inode->i_mode);
5089		goto bad_inode;
5090	}
5091	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
5092		ext4_error_inode(inode, function, line, 0,
5093				 "casefold flag without casefold feature");
5094	brelse(iloc.bh);
5095
5096	unlock_new_inode(inode);
5097	return inode;
5098
5099bad_inode:
5100	brelse(iloc.bh);
5101	iget_failed(inode);
5102	return ERR_PTR(ret);
5103}
5104
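/*
 * Editor's note on the encoding below: i_blocks counts 512-byte units.
 * Up to 2^32 - 1 units fit in i_blocks_lo alone; with the huge_file
 * feature, up to 2^48 - 1 units fit in i_blocks_lo plus i_blocks_high;
 * past that, the HUGE_FILE inode flag is set and i_blocks is stored in
 * filesystem-block units instead.
 */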
5105static int ext4_inode_blocks_set(handle_t *handle,
5106				struct ext4_inode *raw_inode,
5107				struct ext4_inode_info *ei)
5108{
5109	struct inode *inode = &(ei->vfs_inode);
5110	u64 i_blocks = inode->i_blocks;
5111	struct super_block *sb = inode->i_sb;
5112
5113	if (i_blocks <= ~0U) {
5114		/*
5115		 * i_blocks can be represented in a 32 bit variable
5116		 * as multiple of 512 bytes
5117		 */
5118		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5119		raw_inode->i_blocks_high = 0;
5120		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5121		return 0;
5122	}
5123	if (!ext4_has_feature_huge_file(sb))
5124		return -EFBIG;
5125
5126	if (i_blocks <= 0xffffffffffffULL) {
5127		/*
5128		 * i_blocks can be represented in a 48 bit variable
5129		 * as multiple of 512 bytes
5130		 */
5131		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5132		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5133		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5134	} else {
5135		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5136		/* i_block is stored in file system block size */
5137		i_blocks = i_blocks >> (inode->i_blkbits - 9);
5138		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5139		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5140	}
5141	return 0;
5142}
5143
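/* Context handed to other_inode_match() through find_inode_nowait(). */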
5144struct other_inode {
5145	unsigned long		orig_ino;
5146	struct ext4_inode	*raw_inode;
5147};
5148
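/*
 * Match callback for find_inode_nowait() below: returns 0 to keep
 * scanning when @inode is not the one we want (or is in a state we
 * must leave alone), and -1 to end the search once the inode has been
 * examined, after copying its timestamps into the raw inode and
 * recomputing the checksum.
 */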
5149static int other_inode_match(struct inode * inode, unsigned long ino,
5150			     void *data)
5151{
5152	struct other_inode *oi = (struct other_inode *) data;
5153
5154	if ((inode->i_ino != ino) ||
5155	    (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5156			       I_DIRTY_INODE)) ||
5157	    ((inode->i_state & I_DIRTY_TIME) == 0))
5158		return 0;
5159	spin_lock(&inode->i_lock);
5160	if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5161				I_DIRTY_INODE)) == 0) &&
5162	    (inode->i_state & I_DIRTY_TIME)) {
5163		struct ext4_inode_info	*ei = EXT4_I(inode);
5164
5165		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
5166		spin_unlock(&inode->i_lock);
5167
5168		spin_lock(&ei->i_raw_lock);
5169		EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
5170		EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
5171		EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
5172		ext4_inode_csum_set(inode, oi->raw_inode, ei);
5173		spin_unlock(&ei->i_raw_lock);
5174		trace_ext4_other_inode_update_time(inode, oi->orig_ino);
5175		return -1;
5176	}
5177	spin_unlock(&inode->i_lock);
5178	return -1;
5179}
5180
5181/*
5182 * Opportunistically update the other time fields for other inodes in
5183 * the same inode table block.
5184 */
5185static void ext4_update_other_inodes_time(struct super_block *sb,
5186					  unsigned long orig_ino, char *buf)
5187{
5188	struct other_inode oi;
5189	unsigned long ino;
5190	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5191	int inode_size = EXT4_INODE_SIZE(sb);
5192
5193	oi.orig_ino = orig_ino;
5194	/*
5195	 * Calculate the first inode in the inode table block.  Inode
5196	 * numbers are one-based.  That is, the first inode in a block
5197	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5198	 */
5199	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5200	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5201		if (ino == orig_ino)
5202			continue;
5203		oi.raw_inode = (struct ext4_inode *) buf;
5204		(void) find_inode_nowait(sb, ino, other_inode_match, &oi);
5205	}
5206}
5207
5208/*
5209 * Post the struct inode info into an on-disk inode location in the
5210 * buffer-cache.  This gobbles the caller's reference to the
5211 * buffer_head in the inode location struct.
5212 *
5213 * The caller must have write access to iloc->bh.
5214 */
5215static int ext4_do_update_inode(handle_t *handle,
5216				struct inode *inode,
5217				struct ext4_iloc *iloc)
5218{
5219	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5220	struct ext4_inode_info *ei = EXT4_I(inode);
5221	struct buffer_head *bh = iloc->bh;
5222	struct super_block *sb = inode->i_sb;
5223	int err = 0, rc, block;
5224	int need_datasync = 0, set_large_file = 0;
5225	uid_t i_uid;
5226	gid_t i_gid;
5227	projid_t i_projid;
5228
5229	spin_lock(&ei->i_raw_lock);
5230
5231	/* For fields not tracked in the in-memory inode,
5232	 * initialise them to zero for new inodes. */
5233	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5234		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5235
5236	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5237	i_uid = i_uid_read(inode);
5238	i_gid = i_gid_read(inode);
5239	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
5240	if (!(test_opt(inode->i_sb, NO_UID32))) {
5241		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
5242		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
5243/*
5244 * Fix up interoperability with old kernels. Otherwise, old inodes get
5245 * re-used with the upper 16 bits of the uid/gid intact
5246 */
5247		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
5248			raw_inode->i_uid_high = 0;
5249			raw_inode->i_gid_high = 0;
5250		} else {
5251			raw_inode->i_uid_high =
5252				cpu_to_le16(high_16_bits(i_uid));
5253			raw_inode->i_gid_high =
5254				cpu_to_le16(high_16_bits(i_gid));
5255		}
5256	} else {
5257		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
5258		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
5259		raw_inode->i_uid_high = 0;
5260		raw_inode->i_gid_high = 0;
5261	}
5262	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5263
5264	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5265	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5266	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5267	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5268
5269	err = ext4_inode_blocks_set(handle, raw_inode, ei);
5270	if (err) {
5271		spin_unlock(&ei->i_raw_lock);
5272		goto out_brelse;
5273	}
5274	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5275	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
5276	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
5277		raw_inode->i_file_acl_high =
5278			cpu_to_le16(ei->i_file_acl >> 32);
5279	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5280	if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
5281		ext4_isize_set(raw_inode, ei->i_disksize);
5282		need_datasync = 1;
5283	}
5284	if (ei->i_disksize > 0x7fffffffULL) {
5285		if (!ext4_has_feature_large_file(sb) ||
5286				EXT4_SB(sb)->s_es->s_rev_level ==
5287		    cpu_to_le32(EXT4_GOOD_OLD_REV))
5288			set_large_file = 1;
5289	}
5290	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5291	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5292		if (old_valid_dev(inode->i_rdev)) {
5293			raw_inode->i_block[0] =
5294				cpu_to_le32(old_encode_dev(inode->i_rdev));
5295			raw_inode->i_block[1] = 0;
5296		} else {
5297			raw_inode->i_block[0] = 0;
5298			raw_inode->i_block[1] =
5299				cpu_to_le32(new_encode_dev(inode->i_rdev));
5300			raw_inode->i_block[2] = 0;
5301		}
5302	} else if (!ext4_has_inline_data(inode)) {
5303		for (block = 0; block < EXT4_N_BLOCKS; block++)
5304			raw_inode->i_block[block] = ei->i_data[block];
5305	}
5306
5307	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5308		u64 ivers = ext4_inode_peek_iversion(inode);
5309
5310		raw_inode->i_disk_version = cpu_to_le32(ivers);
5311		if (ei->i_extra_isize) {
5312			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5313				raw_inode->i_version_hi =
5314					cpu_to_le32(ivers >> 32);
5315			raw_inode->i_extra_isize =
5316				cpu_to_le16(ei->i_extra_isize);
5317		}
5318	}
5319
5320	BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
5321	       i_projid != EXT4_DEF_PROJID);
5322
5323	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5324	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5325		raw_inode->i_projid = cpu_to_le32(i_projid);
5326
5327	ext4_inode_csum_set(inode, raw_inode, ei);
5328	spin_unlock(&ei->i_raw_lock);
5329	if (inode->i_sb->s_flags & SB_LAZYTIME)
5330		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5331					      bh->b_data);
5332
5333	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5334	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5335	if (!err)
5336		err = rc;
5337	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5338	if (set_large_file) {
5339		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5340		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
5341		if (err)
5342			goto out_brelse;
5343		ext4_set_feature_large_file(sb);
5344		ext4_handle_sync(handle);
5345		err = ext4_handle_dirty_super(handle, sb);
5346	}
5347	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5348out_brelse:
5349	brelse(bh);
5350	ext4_std_error(inode->i_sb, err);
5351	return err;
5352}
5353
5354/*
5355 * ext4_write_inode()
5356 *
5357 * We are called from a few places:
5358 *
5359 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5360 *   Here, there will be no transaction running. We wait for any running
5361 *   transaction to commit.
5362 *
5363 * - Within flush work (sys_sync(), kupdate and such).
5364 *   We wait on commit, if told to.
5365 *
5366 * - Within iput_final() -> write_inode_now()
5367 *   We wait on commit, if told to.
5368 *
5369 * In all cases it is actually safe for us to return without doing anything,
5370 * because the inode has been copied into a raw inode buffer in
5371 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5372 * writeback.
5373 *
5374 * Note that we are absolutely dependent upon all inode dirtiers doing the
5375 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5376 * which we are interested.
5377 *
5378 * It would be a bug for them to not do this.  The code:
5379 *
5380 *	mark_inode_dirty(inode)
5381 *	stuff();
5382 *	inode->i_size = expr;
5383 *
5384 * is in error because write_inode() could occur while `stuff()' is running,
5385 * and the new i_size will be lost.  Plus the inode will no longer be on the
5386 * superblock's dirty inode list.
5387 */
5388int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5389{
5390	int err;
5391
5392	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
5393	    sb_rdonly(inode->i_sb))
5394		return 0;
5395
5396	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5397		return -EIO;
5398
5399	if (EXT4_SB(inode->i_sb)->s_journal) {
5400		if (ext4_journal_current_handle()) {
5401			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5402			dump_stack();
5403			return -EIO;
5404		}
5405
5406		/*
5407		 * No need to force transaction in WB_SYNC_NONE mode. Also
5408		 * ext4_sync_fs() will force the commit after everything is
5409		 * written.
5410		 */
5411		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5412			return 0;
5413
5414		err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
5415						EXT4_I(inode)->i_sync_tid);
5416	} else {
5417		struct ext4_iloc iloc;
5418
5419		err = __ext4_get_inode_loc(inode, &iloc, 0);
5420		if (err)
5421			return err;
5422		/*
5423		 * sync(2) will flush the whole buffer cache. No need to do
5424		 * it here separately for each inode.
5425		 */
5426		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5427			sync_dirty_buffer(iloc.bh);
5428		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5429			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
5430					 "IO error syncing inode");
5431			err = -EIO;
5432		}
5433		brelse(iloc.bh);
5434	}
5435	return err;
5436}
5437
5438/*
5439 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
5440 * buffers that are attached to a page straddling i_size and are undergoing
5441 * commit. In that case we have to wait for commit to finish and try again.
5442 */
5443static void ext4_wait_for_tail_page_commit(struct inode *inode)
5444{
5445	struct page *page;
5446	unsigned offset;
5447	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5448	tid_t commit_tid = 0;
5449	int ret;
5450
5451	offset = inode->i_size & (PAGE_SIZE - 1);
5452	/*
5453	 * If all buffers in the last page remain valid, there's nothing to
5454	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
5455	 * blocksize case.
5456	 */
5457	if (offset > PAGE_SIZE - i_blocksize(inode))
5458		return;
5459	while (1) {
5460		page = find_lock_page(inode->i_mapping,
5461				      inode->i_size >> PAGE_SHIFT);
5462		if (!page)
5463			return;
5464		ret = __ext4_journalled_invalidatepage(page, offset,
5465						PAGE_SIZE - offset);
5466		unlock_page(page);
5467		put_page(page);
5468		if (ret != -EBUSY)
5469			return;
5470		commit_tid = 0;
5471		read_lock(&journal->j_state_lock);
5472		if (journal->j_committing_transaction)
5473			commit_tid = journal->j_committing_transaction->t_tid;
5474		read_unlock(&journal->j_state_lock);
5475		if (commit_tid)
5476			jbd2_log_wait_commit(journal, commit_tid);
5477	}
5478}
5479
5480/*
5481 * ext4_setattr()
5482 *
5483 * Called from notify_change.
5484 *
5485 * We want to trap VFS attempts to truncate the file as soon as
5486 * possible.  In particular, we want to make sure that when the VFS
5487 * shrinks i_size, we put the inode on the orphan list and modify
5488 * i_disksize immediately, so that during the subsequent flushing of
5489 * dirty pages and freeing of disk blocks, we can guarantee that any
5490 * commit will leave the blocks being flushed in an unused state on
5491 * disk.  (On recovery, the inode will get truncated and the blocks will
5492 * be freed, so we have a strong guarantee that no future commit will
5493 * leave these blocks visible to the user.)
5494 *
5495 * Another thing we have to ensure is that if we are in ordered mode
5496 * and the inode is still attached to the committing transaction, we
5497 * must start writeout of all the dirty pages which are being truncated.
5498 * This way we are sure that all the data written in the previous
5499 * transaction are already on disk (truncate waits for pages under
5500 * writeback).
5501 *
5502 * Called with inode->i_mutex down.
5503 */
5504int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5505{
5506	struct inode *inode = d_inode(dentry);
5507	int error, rc = 0;
5508	int orphan = 0;
5509	const unsigned int ia_valid = attr->ia_valid;
5510
5511	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5512		return -EIO;
5513
5514	if (unlikely(IS_IMMUTABLE(inode)))
5515		return -EPERM;
5516
5517	if (unlikely(IS_APPEND(inode) &&
5518		     (ia_valid & (ATTR_MODE | ATTR_UID |
5519				  ATTR_GID | ATTR_TIMES_SET))))
5520		return -EPERM;
5521
5522	error = setattr_prepare(dentry, attr);
5523	if (error)
5524		return error;
5525
5526	error = fscrypt_prepare_setattr(dentry, attr);
5527	if (error)
5528		return error;
5529
5530	error = fsverity_prepare_setattr(dentry, attr);
5531	if (error)
5532		return error;
5533
5534	if (is_quota_modification(inode, attr)) {
5535		error = dquot_initialize(inode);
5536		if (error)
5537			return error;
5538	}
5539	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5540	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5541		handle_t *handle;
5542
5543		/* (user+group)*(old+new) structure, inode write (sb,
5544		 * inode block, ? - but truncate inode update has it) */
5545		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5546			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5547			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5548		if (IS_ERR(handle)) {
5549			error = PTR_ERR(handle);
5550			goto err_out;
5551		}
5552
5553		/* dquot_transfer() calls back ext4_get_inode_usage() which
5554		 * counts xattr inode references.
5555		 */
5556		down_read(&EXT4_I(inode)->xattr_sem);
5557		error = dquot_transfer(inode, attr);
5558		up_read(&EXT4_I(inode)->xattr_sem);
5559
5560		if (error) {
5561			ext4_journal_stop(handle);
5562			return error;
5563		}
5564		/* Update corresponding info in inode so that everything is in
5565		 * one transaction */
5566		if (attr->ia_valid & ATTR_UID)
5567			inode->i_uid = attr->ia_uid;
5568		if (attr->ia_valid & ATTR_GID)
5569			inode->i_gid = attr->ia_gid;
5570		error = ext4_mark_inode_dirty(handle, inode);
5571		ext4_journal_stop(handle);
5572	}
5573
5574	if (attr->ia_valid & ATTR_SIZE) {
5575		handle_t *handle;
5576		loff_t oldsize = inode->i_size;
5577		int shrink = (attr->ia_size < inode->i_size);
5578
5579		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5580			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5581
5582			if (attr->ia_size > sbi->s_bitmap_maxbytes)
5583				return -EFBIG;
5584		}
5585		if (!S_ISREG(inode->i_mode))
5586			return -EINVAL;
5587
5588		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5589			inode_inc_iversion(inode);
5590
5591		if (shrink) {
5592			if (ext4_should_order_data(inode)) {
5593				error = ext4_begin_ordered_truncate(inode,
5594							    attr->ia_size);
5595				if (error)
5596					goto err_out;
5597			}
5598			/*
5599			 * Blocks are going to be removed from the inode. Wait
5600			 * for dio in flight.
5601			 */
5602			inode_dio_wait(inode);
5603		}
5604
5605		down_write(&EXT4_I(inode)->i_mmap_sem);
5606
5607		rc = ext4_break_layouts(inode);
5608		if (rc) {
5609			up_write(&EXT4_I(inode)->i_mmap_sem);
5610			return rc;
5611		}
5612
5613		if (attr->ia_size != inode->i_size) {
5614			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5615			if (IS_ERR(handle)) {
5616				error = PTR_ERR(handle);
5617				goto out_mmap_sem;
5618			}
5619			if (ext4_handle_valid(handle) && shrink) {
5620				error = ext4_orphan_add(handle, inode);
5621				orphan = 1;
5622			}
5623			/*
5624			 * Update c/mtime on truncate up, ext4_truncate() will
5625			 * update c/mtime in shrink case below
5626			 */
5627			if (!shrink) {
5628				inode->i_mtime = current_time(inode);
5629				inode->i_ctime = inode->i_mtime;
5630			}
5631			down_write(&EXT4_I(inode)->i_data_sem);
5632			EXT4_I(inode)->i_disksize = attr->ia_size;
5633			rc = ext4_mark_inode_dirty(handle, inode);
5634			if (!error)
5635				error = rc;
5636			/*
5637			 * We have to update i_size under i_data_sem together
5638			 * with i_disksize to avoid races with writeback code
5639			 * running ext4_wb_update_i_disksize().
5640			 */
5641			if (!error)
5642				i_size_write(inode, attr->ia_size);
5643			up_write(&EXT4_I(inode)->i_data_sem);
5644			ext4_journal_stop(handle);
5645			if (error)
5646				goto out_mmap_sem;
5647			if (!shrink) {
5648				pagecache_isize_extended(inode, oldsize,
5649							 inode->i_size);
5650			} else if (ext4_should_journal_data(inode)) {
5651				ext4_wait_for_tail_page_commit(inode);
5652			}
5653		}
5654
5655		/*
5656		 * Truncate pagecache after we've waited for commit
5657		 * in data=journal mode to make pages freeable.
5658		 */
5659		truncate_pagecache(inode, inode->i_size);
5660		/*
5661		 * Call ext4_truncate() even if i_size didn't change to
5662		 * truncate possible preallocated blocks.
5663		 */
5664		if (attr->ia_size <= oldsize) {
5665			rc = ext4_truncate(inode);
5666			if (rc)
5667				error = rc;
5668		}
5669out_mmap_sem:
5670		up_write(&EXT4_I(inode)->i_mmap_sem);
5671	}
5672
5673	if (!error) {
5674		setattr_copy(inode, attr);
5675		mark_inode_dirty(inode);
5676	}
5677
5678	/*
5679	 * If the call to ext4_truncate failed to get a transaction handle at
5680	 * all, we need to clean up the in-core orphan list manually.
5681	 */
5682	if (orphan && inode->i_nlink)
5683		ext4_orphan_del(NULL, inode);
5684
5685	if (!error && (ia_valid & ATTR_MODE))
5686		rc = posix_acl_chmod(inode, inode->i_mode);
5687
5688err_out:
5689	ext4_std_error(inode->i_sb, error);
5690	if (!error)
5691		error = rc;
5692	return error;
5693}
5694
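/*
 * Fill in @stat for the VFS: report the inode's creation time via
 * statx where available, and translate the EXT4_*_FL flags into their
 * STATX_ATTR_* equivalents before finishing with generic_fillattr().
 */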
5695int ext4_getattr(const struct path *path, struct kstat *stat,
5696		 u32 request_mask, unsigned int query_flags)
5697{
5698	struct inode *inode = d_inode(path->dentry);
5699	struct ext4_inode *raw_inode;
5700	struct ext4_inode_info *ei = EXT4_I(inode);
5701	unsigned int flags;
5702
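	/*
	 * Editor's note: raw_inode is never initialized here, but
	 * EXT4_FITS_IN_INODE() only inspects its type via offsetof() and
	 * sizeof(), so the pointer is not actually dereferenced.
	 */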
5703	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5704		stat->result_mask |= STATX_BTIME;
5705		stat->btime.tv_sec = ei->i_crtime.tv_sec;
5706		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5707	}
5708
5709	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5710	if (flags & EXT4_APPEND_FL)
5711		stat->attributes |= STATX_ATTR_APPEND;
5712	if (flags & EXT4_COMPR_FL)
5713		stat->attributes |= STATX_ATTR_COMPRESSED;
5714	if (flags & EXT4_ENCRYPT_FL)
5715		stat->attributes |= STATX_ATTR_ENCRYPTED;
5716	if (flags & EXT4_IMMUTABLE_FL)
5717		stat->attributes |= STATX_ATTR_IMMUTABLE;
5718	if (flags & EXT4_NODUMP_FL)
5719		stat->attributes |= STATX_ATTR_NODUMP;
5720
5721	stat->attributes_mask |= (STATX_ATTR_APPEND |
5722				  STATX_ATTR_COMPRESSED |
5723				  STATX_ATTR_ENCRYPTED |
5724				  STATX_ATTR_IMMUTABLE |
5725				  STATX_ATTR_NODUMP);
5726
5727	generic_fillattr(inode, stat);
5728	return 0;
5729}
5730
5731int ext4_file_getattr(const struct path *path, struct kstat *stat,
5732		      u32 request_mask, unsigned int query_flags)
5733{
5734	struct inode *inode = d_inode(path->dentry);
5735	u64 delalloc_blocks;
5736
5737	ext4_getattr(path, stat, request_mask, query_flags);
5738
5739	/*
5740	 * If there is inline data in the inode, the inode will normally not
5741	 * have data blocks allocated (it may have an external xattr block).
5742	 * Report at least one sector for such files, so tools like tar, rsync,
5743	 * and others don't incorrectly think the file is completely sparse.
5744	 */
5745	if (unlikely(ext4_has_inline_data(inode)))
5746		stat->blocks += (stat->size + 511) >> 9;
5747
5748	/*
5749	 * We can't update i_blocks if the block allocation is delayed;
5750	 * otherwise, in the case of a system crash before the real block
5751	 * allocation is done, we would have i_blocks inconsistent with
5752	 * the on-disk file blocks.
5753	 * We always keep i_blocks updated together with the real
5754	 * allocation. But so as not to confuse userspace, stat
5755	 * will return blocks that include the delayed allocation
5756	 * blocks for this file.
5757	 */
5758	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5759				   EXT4_I(inode)->i_reserved_data_blocks);
5760	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5761	return 0;
5762}
5763
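/*
 * Number of index/leaf blocks we need to touch when mapping @lblocks
 * logical blocks to @pextents physical extents, dispatching on whether
 * the inode uses extents or indirect blocks.
 */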
5764static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5765				   int pextents)
5766{
5767	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5768		return ext4_ind_trans_blocks(inode, lblocks);
5769	return ext4_ext_index_trans_blocks(inode, pextents);
5770}
5771
5772/*
5773 * Account for index blocks, block group bitmaps and block group
5774 * descriptor blocks if we modify data blocks and index blocks. In the
5775 * worst case, the index blocks are spread over different block groups.
5776 *
5777 * If data blocks are discontiguous, they may be spread over different
5778 * block groups too. If they are contiguous, with flexbg, they could
5779 * still cross a block group boundary.
5780 *
5781 * Also account for superblock, inode, quota and xattr blocks
5782 */
5783static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5784				  int pextents)
5785{
5786	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5787	int gdpblocks;
5788	int idxblocks;
5789	int ret = 0;
5790
5791	/*
5792	 * How many index blocks do we need to touch to map @lblocks
5793	 * logical blocks to @pextents physical extents?
5794	 */
5795	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5796
5797	ret = idxblocks;
5798
5799	/*
5800	 * Now let's see how many group bitmaps and group descriptors
5801	 * we need to account for.
5802	 */
5803	groups = idxblocks + pextents;
5804	gdpblocks = groups;
5805	if (groups > ngroups)
5806		groups = ngroups;
5807	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5808		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5809
5810	/* bitmaps and block group descriptor blocks */
5811	ret += groups + gdpblocks;
5812
5813	/* Blocks for super block, inode, quota and xattr blocks */
5814	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5815
5816	return ret;
5817}
5818
5819/*
5820 * Calculate the total number of credits to reserve to fit
5821 * the modification of a single page into a single transaction,
5822 * which may include multiple chunks of block allocations.
5823 *
5824 * This could be called via ext4_write_begin().
5825 *
5826 * We need to consider the worst case, when
5827 * one new block is allocated per extent.
5828 */
5829int ext4_writepage_trans_blocks(struct inode *inode)
5830{
5831	int bpp = ext4_journal_blocks_per_page(inode);
5832	int ret;
5833
5834	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5835
5836	/* Account for data blocks for journalled mode */
5837	if (ext4_should_journal_data(inode))
5838		ret += bpp;
5839	return ret;
5840}
5841
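/*
 * Editor's note: schematically, the credit count built up above is
 *
 *	credits = idxblocks			(index blocks touched)
 *		+ groups + gdpblocks		(bitmaps + descriptors)
 *		+ EXT4_META_TRANS_BLOCKS(sb)	(sb, inode, quota, xattr)
 *		+ bpp				(data=journal mode only)
 */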
5842/*
5843 * Calculate the journal credits for a chunk of data modification.
5844 *
5845 * This is called from DIO, fallocate or whoever calls
5846 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5847 *
5848 * Journal buffers for data blocks are not included here, as DIO
5849 * and fallocate do not need to journal data buffers.
5850 */
5851int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5852{
5853	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5854}
5855
5856/*
5857 * The caller must have previously called ext4_reserve_inode_write().
5858 * Given this, we know that the caller already has write access to iloc->bh.
5859 */
5860int ext4_mark_iloc_dirty(handle_t *handle,
5861			 struct inode *inode, struct ext4_iloc *iloc)
5862{
5863	int err = 0;
5864
5865	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5866		put_bh(iloc->bh);
5867		return -EIO;
5868	}
5869	if (IS_I_VERSION(inode))
5870		inode_inc_iversion(inode);
5871
5872	/* the do_update_inode consumes one bh->b_count */
5873	get_bh(iloc->bh);
5874
5875	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5876	err = ext4_do_update_inode(handle, inode, iloc);
5877	put_bh(iloc->bh);
5878	return err;
5879}
5880
5881/*
5882 * On success, we end up with an outstanding reference count against
5883 * iloc->bh.  This _must_ be cleaned up later.
5884 */
5885
5886int
5887ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5888			 struct ext4_iloc *iloc)
5889{
5890	int err;
5891
5892	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5893		return -EIO;
5894
5895	err = ext4_get_inode_loc(inode, iloc);
5896	if (!err) {
5897		BUFFER_TRACE(iloc->bh, "get_write_access");
5898		err = ext4_journal_get_write_access(handle, iloc->bh);
5899		if (err) {
5900			brelse(iloc->bh);
5901			iloc->bh = NULL;
5902		}
5903	}
5904	ext4_std_error(inode->i_sb, err);
5905	return err;
5906}
5907
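/*
 * Worker shared by the expand helpers below.  When the inode carries
 * no in-inode xattrs, just zero the newly claimed space and bump
 * i_extra_isize; otherwise ask the xattr code to shift the entries out
 * of the way, setting *no_expand on failure so we don't retry forever.
 */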
5908static int __ext4_expand_extra_isize(struct inode *inode,
5909				     unsigned int new_extra_isize,
5910				     struct ext4_iloc *iloc,
5911				     handle_t *handle, int *no_expand)
5912{
5913	struct ext4_inode *raw_inode;
5914	struct ext4_xattr_ibody_header *header;
5915	int error;
5916
5917	raw_inode = ext4_raw_inode(iloc);
5918
5919	header = IHDR(inode, raw_inode);
5920
5921	/* No extended attributes present */
5922	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5923	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5924		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5925		       EXT4_I(inode)->i_extra_isize, 0,
5926		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
5927		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5928		return 0;
5929	}
5930
5931	/* try to expand with EAs present */
5932	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5933					   raw_inode, handle);
5934	if (error) {
5935		/*
5936		 * Inode size expansion failed; don't try again
5937		 */
5938		*no_expand = 1;
5939	}
5940
5941	return error;
5942}
5943
5944/*
5945 * Expand an inode by new_extra_isize bytes.
5946 * Returns 0 on success or negative error number on failure.
5947 */
5948static int ext4_try_to_expand_extra_isize(struct inode *inode,
5949					  unsigned int new_extra_isize,
5950					  struct ext4_iloc iloc,
5951					  handle_t *handle)
5952{
5953	int no_expand;
5954	int error;
5955
5956	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5957		return -EOVERFLOW;
5958
5959	/*
5960	 * In nojournal mode, we can immediately attempt to expand
5961	 * the inode.  When journaled, we first need to obtain extra
5962	 * buffer credits since we may write into the EA block
5963	 * with this same handle. If journal_extend fails, then it will
5964	 * only result in a minor loss of functionality for that inode.
5965	 * If this is felt to be critical, then e2fsck should be run to
5966	 * force a large enough s_min_extra_isize.
5967	 */
5968	if (ext4_handle_valid(handle) &&
5969	    jbd2_journal_extend(handle,
5970				EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
5971		return -ENOSPC;
5972
5973	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5974		return -EBUSY;
5975
5976	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5977					  handle, &no_expand);
5978	ext4_write_unlock_xattr(inode, &no_expand);
5979
5980	return error;
5981}
5982
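/*
 * As ext4_try_to_expand_extra_isize(), but for callers without a
 * running handle: starts its own transaction, takes the xattr lock
 * unconditionally, and consumes the caller's reference on iloc->bh.
 */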
5983int ext4_expand_extra_isize(struct inode *inode,
5984			    unsigned int new_extra_isize,
5985			    struct ext4_iloc *iloc)
5986{
5987	handle_t *handle;
5988	int no_expand;
5989	int error, rc;
5990
5991	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5992		brelse(iloc->bh);
5993		return -EOVERFLOW;
5994	}
5995
5996	handle = ext4_journal_start(inode, EXT4_HT_INODE,
5997				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5998	if (IS_ERR(handle)) {
5999		error = PTR_ERR(handle);
6000		brelse(iloc->bh);
6001		return error;
6002	}
6003
6004	ext4_write_lock_xattr(inode, &no_expand);
6005
6006	BUFFER_TRACE(iloc->bh, "get_write_access");
6007	error = ext4_journal_get_write_access(handle, iloc->bh);
6008	if (error) {
6009		brelse(iloc->bh);
6010		goto out_stop;
6011	}
6012
6013	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
6014					  handle, &no_expand);
6015
6016	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
6017	if (!error)
6018		error = rc;
6019
6020	ext4_write_unlock_xattr(inode, &no_expand);
6021out_stop:
6022	ext4_journal_stop(handle);
6023	return error;
6024}
6025
6026/*
6027 * What we do here is to mark the in-core inode as clean with respect to inode
6028 * dirtiness (it may still be data-dirty).
6029 * This means that the in-core inode may be reaped by prune_icache
6030 * without having to perform any I/O.  This is a very good thing,
6031 * because *any* task may call prune_icache - even ones which
6032 * have a transaction open against a different journal.
6033 *
6034 * Is this cheating?  Not really.  Sure, we haven't written the
6035 * inode out, but prune_icache isn't a user-visible syncing function.
6036 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
6037 * we start and wait on commits.
6038 */
6039int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
6040{
6041	struct ext4_iloc iloc;
6042	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6043	int err;
6044
6045	might_sleep();
6046	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6047	err = ext4_reserve_inode_write(handle, inode, &iloc);
6048	if (err)
6049		return err;
6050
6051	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6052		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6053					       iloc, handle);
6054
6055	return ext4_mark_iloc_dirty(handle, inode, &iloc);
6056}
6057
6058/*
6059 * ext4_dirty_inode() is called from __mark_inode_dirty()
6060 *
6061 * We're really interested in the case where a file is being extended.
6062 * i_size has been changed by generic_commit_write() and we thus need
6063 * to include the updated inode in the current transaction.
6064 *
6065 * Also, dquot_alloc_block() will always dirty the inode when blocks
6066 * are allocated to the file.
6067 *
6068 * If the inode is marked synchronous, we don't honour that here - doing
6069 * so would cause a commit on atime updates, which we don't bother doing.
6070 * We handle synchronous inodes at the highest possible level.
6071 *
6072 * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
6073 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
6074 * to copy into the on-disk inode structure are the timestamp fields.
6075 */
6076void ext4_dirty_inode(struct inode *inode, int flags)
6077{
6078	handle_t *handle;
6079
6080	if (flags == I_DIRTY_TIME)
6081		return;
6082	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6083	if (IS_ERR(handle))
6084		goto out;
6085
6086	ext4_mark_inode_dirty(handle, inode);
6087
6088	ext4_journal_stop(handle);
6089out:
6090	return;
6091}
6092
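/*
 * Switch per-inode data journaling on (@val != 0) or off (@val == 0).
 * Turning it off flushes the journal first, so that stale log records
 * for data blocks cannot be replayed over reallocated blocks later.
 */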
6093int ext4_change_inode_journal_flag(struct inode *inode, int val)
6094{
6095	journal_t *journal;
6096	handle_t *handle;
6097	int err;
6098	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6099
6100	/*
6101	 * We have to be very careful here: changing a data block's
6102	 * journaling status dynamically is dangerous.  If we write a
6103	 * data block to the journal, change the status and then delete
6104	 * that block, we risk forgetting to revoke the old log record
6105	 * from the journal and so a subsequent replay can corrupt data.
6106	 * So, first we make sure that the journal is empty and that
6107	 * nobody is changing anything.
6108	 */
6109
6110	journal = EXT4_JOURNAL(inode);
6111	if (!journal)
6112		return 0;
6113	if (is_journal_aborted(journal))
6114		return -EROFS;
6115
6116	/* Wait for all existing dio workers */
6117	inode_dio_wait(inode);
6118
6119	/*
6120	 * Before flushing the journal and switching inode's aops, we have
6121	 * to flush all dirty data the inode has. There can be outstanding
6122	 * delayed allocations, there can be unwritten extents created by
6123	 * fallocate or buffered writes in dioread_nolock mode covered by
6124	 * dirty data which can be converted only after flushing the dirty
6125	 * data (and journalled aops don't know how to handle these cases).
6126	 */
6127	if (val) {
6128		down_write(&EXT4_I(inode)->i_mmap_sem);
6129		err = filemap_write_and_wait(inode->i_mapping);
6130		if (err < 0) {
6131			up_write(&EXT4_I(inode)->i_mmap_sem);
6132			return err;
6133		}
6134	}
6135
6136	percpu_down_write(&sbi->s_journal_flag_rwsem);
6137	jbd2_journal_lock_updates(journal);
6138
6139	/*
6140	 * OK, there are no updates running now, and all cached data is
6141	 * synced to disk.  We are now in a completely consistent state
6142	 * which doesn't have anything in the journal, and we know that
6143	 * no filesystem updates are running, so it is safe to modify
6144	 * the inode's in-core data-journaling state flag now.
6145	 */
6146
6147	if (val)
6148		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6149	else {
6150		err = jbd2_journal_flush(journal);
6151		if (err < 0) {
6152			jbd2_journal_unlock_updates(journal);
6153			percpu_up_write(&sbi->s_journal_flag_rwsem);
6154			return err;
6155		}
6156		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6157	}
6158	ext4_set_aops(inode);
6159
6160	jbd2_journal_unlock_updates(journal);
6161	percpu_up_write(&sbi->s_journal_flag_rwsem);
6162
6163	if (val)
6164		up_write(&EXT4_I(inode)->i_mmap_sem);
6165
6166	/* Finally we can mark the inode as dirty. */
6167
6168	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6169	if (IS_ERR(handle))
6170		return PTR_ERR(handle);
6171
6172	err = ext4_mark_inode_dirty(handle, inode);
6173	ext4_handle_sync(handle);
6174	ext4_journal_stop(handle);
6175	ext4_std_error(inode->i_sb, err);
6176
6177	return err;
6178}
6179
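/* Page-buffer walk callback: true if @bh is not yet mapped to disk. */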
6180static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
6181{
6182	return !buffer_mapped(bh);
6183}
6184
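/*
 * Write fault handler.  The delalloc path below can reserve blocks
 * without a handle; every other case starts a transaction sized by
 * ext4_writepage_trans_blocks(), and data=journal mode additionally
 * walks the page's buffers with do_journal_get_write_access().
 */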
6185vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6186{
6187	struct vm_area_struct *vma = vmf->vma;
6188	struct page *page = vmf->page;
6189	loff_t size;
6190	unsigned long len;
6191	int err;
6192	vm_fault_t ret;
6193	struct file *file = vma->vm_file;
6194	struct inode *inode = file_inode(file);
6195	struct address_space *mapping = inode->i_mapping;
6196	handle_t *handle;
6197	get_block_t *get_block;
6198	int retries = 0;
6199
6200	if (unlikely(IS_IMMUTABLE(inode)))
6201		return VM_FAULT_SIGBUS;
6202
6203	sb_start_pagefault(inode->i_sb);
6204	file_update_time(vma->vm_file);
6205
6206	down_read(&EXT4_I(inode)->i_mmap_sem);
6207
6208	err = ext4_convert_inline_data(inode);
6209	if (err)
6210		goto out_ret;
6211
6212	/* Delalloc case is easy... */
6213	if (test_opt(inode->i_sb, DELALLOC) &&
6214	    !ext4_should_journal_data(inode) &&
6215	    !ext4_nonda_switch(inode->i_sb)) {
6216		do {
6217			err = block_page_mkwrite(vma, vmf,
6218						   ext4_da_get_block_prep);
6219		} while (err == -ENOSPC &&
6220		       ext4_should_retry_alloc(inode->i_sb, &retries));
6221		goto out_ret;
6222	}
6223
6224	lock_page(page);
6225	size = i_size_read(inode);
6226	/* Page got truncated from under us? */
6227	if (page->mapping != mapping || page_offset(page) > size) {
6228		unlock_page(page);
6229		ret = VM_FAULT_NOPAGE;
6230		goto out;
6231	}
6232
6233	if (page->index == size >> PAGE_SHIFT)
6234		len = size & ~PAGE_MASK;
6235	else
6236		len = PAGE_SIZE;
6237	/*
6238	 * Return if we have all the buffers mapped. This avoids the need to do
6239	 * journal_start/journal_stop, which can block and take a long time.
6240	 */
6241	if (page_has_buffers(page)) {
6242		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6243					    0, len, NULL,
6244					    ext4_bh_unmapped)) {
6245			/* Wait so that we don't change page under IO */
6246			wait_for_stable_page(page);
6247			ret = VM_FAULT_LOCKED;
6248			goto out;
6249		}
6250	}
6251	unlock_page(page);
6252	/* OK, we need to fill the hole... */
6253	if (ext4_should_dioread_nolock(inode))
6254		get_block = ext4_get_block_unwritten;
6255	else
6256		get_block = ext4_get_block;
6257retry_alloc:
6258	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6259				    ext4_writepage_trans_blocks(inode));
6260	if (IS_ERR(handle)) {
6261		ret = VM_FAULT_SIGBUS;
6262		goto out;
6263	}
6264	err = block_page_mkwrite(vma, vmf, get_block);
6265	if (!err && ext4_should_journal_data(inode)) {
6266		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
6267			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
6268			unlock_page(page);
6269			ret = VM_FAULT_SIGBUS;
6270			ext4_journal_stop(handle);
6271			goto out;
6272		}
6273		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6274	}
6275	ext4_journal_stop(handle);
6276	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6277		goto retry_alloc;
6278out_ret:
6279	ret = block_page_mkwrite_return(err);
6280out:
6281	up_read(&EXT4_I(inode)->i_mmap_sem);
6282	sb_end_pagefault(inode->i_sb);
6283	return ret;
6284}
6285
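/*
 * Read fault: plain filemap_fault() bracketed by i_mmap_sem, so the
 * mapping cannot be truncated or punched out from under the fault.
 */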
6286vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
6287{
6288	struct inode *inode = file_inode(vmf->vma->vm_file);
6289	vm_fault_t ret;
6290
6291	down_read(&EXT4_I(inode)->i_mmap_sem);
6292	ret = filemap_fault(vmf);
6293	up_read(&EXT4_I(inode)->i_mmap_sem);
6294
6295	return ret;
6296}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ext4/inode.c
   4 *
   5 * Copyright (C) 1992, 1993, 1994, 1995
   6 * Remy Card (card@masi.ibp.fr)
   7 * Laboratoire MASI - Institut Blaise Pascal
   8 * Universite Pierre et Marie Curie (Paris VI)
   9 *
  10 *  from
  11 *
  12 *  linux/fs/minix/inode.c
  13 *
  14 *  Copyright (C) 1991, 1992  Linus Torvalds
  15 *
  16 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  17 *	(jj@sunsite.ms.mff.cuni.cz)
  18 *
  19 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  20 */
  21
  22#include <linux/fs.h>
  23#include <linux/mount.h>
  24#include <linux/time.h>
  25#include <linux/highuid.h>
  26#include <linux/pagemap.h>
  27#include <linux/dax.h>
  28#include <linux/quotaops.h>
  29#include <linux/string.h>
  30#include <linux/buffer_head.h>
  31#include <linux/writeback.h>
  32#include <linux/pagevec.h>
  33#include <linux/mpage.h>
  34#include <linux/namei.h>
  35#include <linux/uio.h>
  36#include <linux/bio.h>
  37#include <linux/workqueue.h>
  38#include <linux/kernel.h>
  39#include <linux/printk.h>
  40#include <linux/slab.h>
  41#include <linux/bitops.h>
  42#include <linux/iomap.h>
  43#include <linux/iversion.h>
  44
  45#include "ext4_jbd2.h"
  46#include "xattr.h"
  47#include "acl.h"
  48#include "truncate.h"
  49
  50#include <trace/events/ext4.h>
  51
 
 
  52static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  53			      struct ext4_inode_info *ei)
  54{
  55	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  56	__u32 csum;
  57	__u16 dummy_csum = 0;
  58	int offset = offsetof(struct ext4_inode, i_checksum_lo);
  59	unsigned int csum_size = sizeof(dummy_csum);
  60
  61	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
  62	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
  63	offset += csum_size;
  64	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  65			   EXT4_GOOD_OLD_INODE_SIZE - offset);
  66
  67	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  68		offset = offsetof(struct ext4_inode, i_checksum_hi);
  69		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
  70				   EXT4_GOOD_OLD_INODE_SIZE,
  71				   offset - EXT4_GOOD_OLD_INODE_SIZE);
  72		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  73			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
  74					   csum_size);
  75			offset += csum_size;
  76		}
  77		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  78				   EXT4_INODE_SIZE(inode->i_sb) - offset);
  79	}
  80
  81	return csum;
  82}
  83
  84static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  85				  struct ext4_inode_info *ei)
  86{
  87	__u32 provided, calculated;
  88
  89	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  90	    cpu_to_le32(EXT4_OS_LINUX) ||
  91	    !ext4_has_metadata_csum(inode->i_sb))
  92		return 1;
  93
  94	provided = le16_to_cpu(raw->i_checksum_lo);
  95	calculated = ext4_inode_csum(inode, raw, ei);
  96	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  97	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  98		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
  99	else
 100		calculated &= 0xFFFF;
 101
 102	return provided == calculated;
 103}
 104
 105void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 106			 struct ext4_inode_info *ei)
 107{
 108	__u32 csum;
 109
 110	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 111	    cpu_to_le32(EXT4_OS_LINUX) ||
 112	    !ext4_has_metadata_csum(inode->i_sb))
 113		return;
 114
 115	csum = ext4_inode_csum(inode, raw, ei);
 116	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 117	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 118	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 119		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 120}
 121
 122static inline int ext4_begin_ordered_truncate(struct inode *inode,
 123					      loff_t new_size)
 124{
 125	trace_ext4_begin_ordered_truncate(inode, new_size);
 126	/*
 127	 * If jinode is zero, then we never opened the file for
 128	 * writing, so there's no need to call
 129	 * jbd2_journal_begin_ordered_truncate() since there's no
 130	 * outstanding writes we need to flush.
 131	 */
 132	if (!EXT4_I(inode)->jinode)
 133		return 0;
 134	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 135						   EXT4_I(inode)->jinode,
 136						   new_size);
 137}
 138
 
 
 
 
 139static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 140				  int pextents);
 141
 142/*
 143 * Test whether an inode is a fast symlink.
 144 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 145 */
 146int ext4_inode_is_fast_symlink(struct inode *inode)
 147{
 148	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
 149		int ea_blocks = EXT4_I(inode)->i_file_acl ?
 150				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
 151
 152		if (ext4_has_inline_data(inode))
 153			return 0;
 154
 155		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 156	}
 157	return S_ISLNK(inode->i_mode) && inode->i_size &&
 158	       (inode->i_size < EXT4_N_BLOCKS * 4);
 159}
 160
 161/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162 * Called at the last iput() if i_nlink is zero.
 163 */
 164void ext4_evict_inode(struct inode *inode)
 165{
 166	handle_t *handle;
 167	int err;
 168	/*
 169	 * Credits for final inode cleanup and freeing:
 170	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
 171	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
 172	 */
 173	int extra_credits = 6;
 174	struct ext4_xattr_inode_array *ea_inode_array = NULL;
 175	bool freeze_protected = false;
 176
 177	trace_ext4_evict_inode(inode);
 178
 179	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
 180		ext4_evict_ea_inode(inode);
 181	if (inode->i_nlink) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 182		truncate_inode_pages_final(&inode->i_data);
 183
 184		goto no_delete;
 185	}
 186
 187	if (is_bad_inode(inode))
 188		goto no_delete;
 189	dquot_initialize(inode);
 190
 191	if (ext4_should_order_data(inode))
 192		ext4_begin_ordered_truncate(inode, 0);
 193	truncate_inode_pages_final(&inode->i_data);
 194
 195	/*
 196	 * For inodes with journalled data, transaction commit could have
 197	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
 198	 * extents converting worker could merge extents and also have dirtied
 199	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
 200	 * we still need to remove the inode from the writeback lists.
 201	 */
 202	if (!list_empty_careful(&inode->i_io_list))
 203		inode_io_list_del(inode);
 204
 205	/*
 206	 * Protect us against freezing - iput() caller didn't have to have any
 207	 * protection against it. When we are in a running transaction though,
 208	 * we are already protected against freezing and we cannot grab further
 209	 * protection due to lock ordering constraints.
 210	 */
 211	if (!ext4_journal_current_handle()) {
 212		sb_start_intwrite(inode->i_sb);
 213		freeze_protected = true;
 214	}
 215
 216	if (!IS_NOQUOTA(inode))
 217		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
 218
 219	/*
 220	 * Block bitmap, group descriptor, and inode are accounted in both
 221	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
 222	 */
 223	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
 224			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
 225	if (IS_ERR(handle)) {
 226		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 227		/*
 228		 * If we're going to skip the normal cleanup, we still need to
 229		 * make sure that the in-core orphan linked list is properly
 230		 * cleaned up.
 231		 */
 232		ext4_orphan_del(NULL, inode);
 233		if (freeze_protected)
 234			sb_end_intwrite(inode->i_sb);
 235		goto no_delete;
 236	}
 237
 238	if (IS_SYNC(inode))
 239		ext4_handle_sync(handle);
 240
 241	/*
 242	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
 243	 * special handling of symlinks here because i_size is used to
 244	 * determine whether ext4_inode_info->i_data contains symlink data or
 245	 * block mappings. Setting i_size to 0 will remove its fast symlink
 246	 * status. Erase i_data so that it becomes a valid empty block map.
 247	 */
 248	if (ext4_inode_is_fast_symlink(inode))
 249		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
 250	inode->i_size = 0;
 251	err = ext4_mark_inode_dirty(handle, inode);
 252	if (err) {
 253		ext4_warning(inode->i_sb,
 254			     "couldn't mark inode dirty (err %d)", err);
 255		goto stop_handle;
 256	}
 257	if (inode->i_blocks) {
 258		err = ext4_truncate(inode);
 259		if (err) {
 260			ext4_error_err(inode->i_sb, -err,
 261				       "couldn't truncate inode %lu (err %d)",
 262				       inode->i_ino, err);
 263			goto stop_handle;
 264		}
 265	}
 266
 267	/* Remove xattr references. */
 268	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
 269				      extra_credits);
 270	if (err) {
 271		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
 272stop_handle:
 273		ext4_journal_stop(handle);
 274		ext4_orphan_del(NULL, inode);
 275		if (freeze_protected)
 276			sb_end_intwrite(inode->i_sb);
 277		ext4_xattr_inode_array_free(ea_inode_array);
 278		goto no_delete;
 279	}
 280
 281	/*
 282	 * Kill off the orphan record which ext4_truncate created.
 283	 * AKPM: I think this can be inside the above `if'.
 284	 * Note that ext4_orphan_del() has to be able to cope with the
 285	 * deletion of a non-existent orphan - this is because we don't
 286	 * know if ext4_truncate() actually created an orphan record.
 287	 * (Well, we could do this if we need to, but heck - it works)
 288	 */
 289	ext4_orphan_del(handle, inode);
 290	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();
 291
 292	/*
 293	 * One subtle ordering requirement: if anything has gone wrong
 294	 * (transaction abort, IO errors, whatever), then we can still
 295	 * do these next steps (the fs will already have been marked as
 296	 * having errors), but we can't free the inode if the mark_dirty
 297	 * fails.
 298	 */
 299	if (ext4_mark_inode_dirty(handle, inode))
 300		/* If that failed, just do the required in-core inode clear. */
 301		ext4_clear_inode(inode);
 302	else
 303		ext4_free_inode(handle, inode);
 304	ext4_journal_stop(handle);
 305	if (freeze_protected)
 306		sb_end_intwrite(inode->i_sb);
 307	ext4_xattr_inode_array_free(ea_inode_array);
 308	return;
 309no_delete:
 310	/*
 311	 * Check out some where else accidentally dirty the evicting inode,
 312	 * which may probably cause inode use-after-free issues later.
 313	 */
 314	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
 315
 316	if (!list_empty(&EXT4_I(inode)->i_fc_list))
 317		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
 318	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
 319}
 320
 321#ifdef CONFIG_QUOTA
 322qsize_t *ext4_get_reserved_space(struct inode *inode)
 323{
 324	return &EXT4_I(inode)->i_reserved_quota;
 325}
 326#endif
 327
 328/*
 329 * Called with i_data_sem down, which is important since we can call
 330 * ext4_discard_preallocations() from here.
 331 */
 332void ext4_da_update_reserve_space(struct inode *inode,
 333					int used, int quota_claim)
 334{
 335	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 336	struct ext4_inode_info *ei = EXT4_I(inode);
 337
 338	spin_lock(&ei->i_block_reservation_lock);
 339	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 340	if (unlikely(used > ei->i_reserved_data_blocks)) {
 341		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
 342			 "with only %d reserved data blocks",
 343			 __func__, inode->i_ino, used,
 344			 ei->i_reserved_data_blocks);
 345		WARN_ON(1);
 346		used = ei->i_reserved_data_blocks;
 347	}
 348
 349	/* Update per-inode reservations */
 350	ei->i_reserved_data_blocks -= used;
 351	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 352
 353	spin_unlock(&ei->i_block_reservation_lock);
 354
 355	/* Update quota subsystem for data blocks */
 356	if (quota_claim)
 357		dquot_claim_block(inode, EXT4_C2B(sbi, used));
 358	else {
 359		/*
 360		 * We did fallocate with an offset that is already delayed
 361		 * allocated. So on delayed allocated writeback we should
 362		 * not re-claim the quota for fallocated blocks.
 363		 */
 364		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 365	}
 366
 367	/*
 368	 * If we have done all the pending block allocations and if
 369	 * there aren't any writers on the inode, we can discard the
 370	 * inode's preallocations.
 371	 */
 372	if ((ei->i_reserved_data_blocks == 0) &&
 373	    !inode_is_open_for_write(inode))
 374		ext4_discard_preallocations(inode);
 375}
 376
 377static int __check_block_validity(struct inode *inode, const char *func,
 378				unsigned int line,
 379				struct ext4_map_blocks *map)
 380{
 381	if (ext4_has_feature_journal(inode->i_sb) &&
 382	    (inode->i_ino ==
 383	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
 384		return 0;
 385	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
 
 386		ext4_error_inode(inode, func, line, map->m_pblk,
 387				 "lblock %lu mapped to illegal pblock %llu "
 388				 "(length %d)", (unsigned long) map->m_lblk,
 389				 map->m_pblk, map->m_len);
 390		return -EFSCORRUPTED;
 391	}
 392	return 0;
 393}
 394
 395int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 396		       ext4_lblk_t len)
 397{
 398	int ret;
 399
 400	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
 401		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 402
 403	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
 404	if (ret > 0)
 405		ret = 0;
 406
 407	return ret;
 408}
 409
 410#define check_block_validity(inode, map)	\
 411	__check_block_validity((inode), __func__, __LINE__, (map))
 412
 413#ifdef ES_AGGRESSIVE_TEST
 414static void ext4_map_blocks_es_recheck(handle_t *handle,
 415				       struct inode *inode,
 416				       struct ext4_map_blocks *es_map,
 417				       struct ext4_map_blocks *map,
 418				       int flags)
 419{
 420	int retval;
 421
 422	map->m_flags = 0;
 423	/*
 424	 * There is a race window that the result is not the same.
 425	 * e.g. xfstests #223 when dioread_nolock enables.  The reason
 426	 * is that we lookup a block mapping in extent status tree with
 427	 * out taking i_data_sem.  So at the time the unwritten extent
 428	 * could be converted.
 429	 */
 430	down_read(&EXT4_I(inode)->i_data_sem);
 431	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 432		retval = ext4_ext_map_blocks(handle, inode, map, 0);
 
 433	} else {
 434		retval = ext4_ind_map_blocks(handle, inode, map, 0);
 
 435	}
 436	up_read((&EXT4_I(inode)->i_data_sem));
 437
 438	/*
 439	 * We don't check m_len because extent will be collpased in status
 440	 * tree.  So the m_len might not equal.
 441	 */
 442	if (es_map->m_lblk != map->m_lblk ||
 443	    es_map->m_flags != map->m_flags ||
 444	    es_map->m_pblk != map->m_pblk) {
 445		printk("ES cache assertion failed for inode: %lu "
 446		       "es_cached ex [%d/%d/%llu/%x] != "
 447		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 448		       inode->i_ino, es_map->m_lblk, es_map->m_len,
 449		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
 450		       map->m_len, map->m_pblk, map->m_flags,
 451		       retval, flags);
 452	}
 453}
 454#endif /* ES_AGGRESSIVE_TEST */
 455
 456	/*
 457	 * The ext4_map_blocks() function tries to look up the requested blocks
 458	 * and returns right away if the blocks are already mapped.
 459	 *
 460	 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 461	 * stores the allocated blocks in the result buffer head and marks it
 462	 * mapped.
 463	 *
 464	 * If the file is extent based, it calls ext4_ext_map_blocks();
 465	 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 466	 * files.
 467	 *
 468	 * On success, it returns the number of blocks being mapped or allocated.
 469	 * If the flags don't contain EXT4_GET_BLOCKS_CREATE and the blocks are
 470	 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
 471	 * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
 472	 *
 473	 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 474	 * in that case, @map is returned as unmapped but we still fill map->m_len
 475	 * to indicate the length of the hole starting at map->m_lblk.
 476	 *
 477	 * It returns the error in case of allocation failure.
 478	 */
 479int ext4_map_blocks(handle_t *handle, struct inode *inode,
 480		    struct ext4_map_blocks *map, int flags)
 481{
 482	struct extent_status es;
 483	int retval;
 484	int ret = 0;
 485#ifdef ES_AGGRESSIVE_TEST
 486	struct ext4_map_blocks orig_map;
 487
 488	memcpy(&orig_map, map, sizeof(*map));
 489#endif
 490
 491	map->m_flags = 0;
 492	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
 493		  flags, map->m_len, (unsigned long) map->m_lblk);
 494
 495	/*
 496	 * ext4_map_blocks returns an int, and m_len is an unsigned int
 497	 */
 498	if (unlikely(map->m_len > INT_MAX))
 499		map->m_len = INT_MAX;
 500
 501	/* We can only handle logical block numbers below EXT_MAX_BLOCKS */
 502	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
 503		return -EFSCORRUPTED;
 504
 505	/* Lookup extent status tree firstly */
 506	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
 507	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 508		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 509			map->m_pblk = ext4_es_pblock(&es) +
 510					map->m_lblk - es.es_lblk;
 511			map->m_flags |= ext4_es_is_written(&es) ?
 512					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
 513			retval = es.es_len - (map->m_lblk - es.es_lblk);
 514			if (retval > map->m_len)
 515				retval = map->m_len;
 516			map->m_len = retval;
 517		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
 518			map->m_pblk = 0;
 519			map->m_flags |= ext4_es_is_delayed(&es) ?
 520					EXT4_MAP_DELAYED : 0;
 521			retval = es.es_len - (map->m_lblk - es.es_lblk);
 522			if (retval > map->m_len)
 523				retval = map->m_len;
 524			map->m_len = retval;
 525			retval = 0;
 526		} else {
 527			BUG();
 528		}
 529
 530		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
 531			return retval;
 532#ifdef ES_AGGRESSIVE_TEST
 533		ext4_map_blocks_es_recheck(handle, inode, map,
 534					   &orig_map, flags);
 535#endif
 536		goto found;
 537	}
 538	/*
 539	 * In cached no-wait lookup mode, there is nothing more we can do
 540	 * if the extent is not found in the cache.
 541	 */
 542	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
 543		return 0;
 544
 545	/*
 546	 * Try to see if we can get the block without requesting a new
 547	 * file system block.
 548	 */
 549	down_read(&EXT4_I(inode)->i_data_sem);
 550	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 551		retval = ext4_ext_map_blocks(handle, inode, map, 0);
 552	} else {
 553		retval = ext4_ind_map_blocks(handle, inode, map, 0);
 554	}
 555	if (retval > 0) {
 556		unsigned int status;
 557
 558		if (unlikely(retval != map->m_len)) {
 559			ext4_warning(inode->i_sb,
 560				     "ES len assertion failed for inode "
 561				     "%lu: retval %d != map->m_len %d",
 562				     inode->i_ino, retval, map->m_len);
 563			WARN_ON(1);
 564		}
 565
 566		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 567				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 568		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 569		    !(status & EXTENT_STATUS_WRITTEN) &&
 570		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 571				       map->m_lblk + map->m_len - 1))
 572			status |= EXTENT_STATUS_DELAYED;
 573		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 574				      map->m_pblk, status);
 575	}
 576	up_read((&EXT4_I(inode)->i_data_sem));
 577
 578found:
 579	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 580		ret = check_block_validity(inode, map);
 581		if (ret != 0)
 582			return ret;
 583	}
 584
 585	/* If this was only a block(s) lookup, we are done */
 586	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 587		return retval;
 588
 589	/*
 590	 * Return if the blocks have already been allocated.
 591	 *
 592	 * Note that if blocks have been preallocated,
 593	 * ext4_ext_map_blocks() returns with the buffer head unmapped.
 594	 */
 595	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 596		/*
 597		 * If we need to convert extent to unwritten
 598		 * we continue and do the actual work in
 599		 * ext4_ext_map_blocks()
 600		 */
 601		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
 602			return retval;
 603
 604	/*
 605	 * Here we clear m_flags because after allocating a new extent,
 606	 * it will be set again.
 607	 */
 608	map->m_flags &= ~EXT4_MAP_FLAGS;
 609
 610	/*
 611	 * Allocating new blocks and/or writing to an unwritten extent
 612	 * will possibly result in updating i_data, so we take
 613	 * the write lock of i_data_sem and call get_block()
 614	 * with the create == 1 flag.
 615	 */
 616	down_write(&EXT4_I(inode)->i_data_sem);
 617
 618	/*
 619	 * We need to re-check the extents flag here because a migration
 620	 * could have changed the inode type in the meantime.
 621	 */
 622	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 623		retval = ext4_ext_map_blocks(handle, inode, map, flags);
 624	} else {
 625		retval = ext4_ind_map_blocks(handle, inode, map, flags);
 626
 627		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 628			/*
 629			 * We allocated new blocks which will result in
 630			 * i_data's format changing.  Force the migrate
 631			 * to fail by clearing migrate flags
 632			 */
 633			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 634		}
 635	}
 636
 637	if (retval > 0) {
 638		unsigned int status;
 639
 640		if (unlikely(retval != map->m_len)) {
 641			ext4_warning(inode->i_sb,
 642				     "ES len assertion failed for inode "
 643				     "%lu: retval %d != map->m_len %d",
 644				     inode->i_ino, retval, map->m_len);
 645			WARN_ON(1);
 646		}
 647
 648		/*
 649		 * We have to zero out blocks before inserting them into the
 650		 * extent status tree. Otherwise someone could look them up there
 651		 * and use them before they are really zeroed. We also have to
 652		 * unmap metadata before zeroing as otherwise writeback can
 653		 * overwrite zeros with stale data from the block device.
 654		 */
 655		if (flags & EXT4_GET_BLOCKS_ZERO &&
 656		    map->m_flags & EXT4_MAP_MAPPED &&
 657		    map->m_flags & EXT4_MAP_NEW) {
 658			ret = ext4_issue_zeroout(inode, map->m_lblk,
 659						 map->m_pblk, map->m_len);
 660			if (ret) {
 661				retval = ret;
 662				goto out_sem;
 663			}
 664		}
 665
 666		/*
 667		 * If the extent has been zeroed out, we don't need to update
 668		 * the extent status tree.
 669		 */
 670		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
 671		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 672			if (ext4_es_is_written(&es))
 673				goto out_sem;
 674		}
 675		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 676				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 677		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 678		    !(status & EXTENT_STATUS_WRITTEN) &&
 679		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 680				       map->m_lblk + map->m_len - 1))
 681			status |= EXTENT_STATUS_DELAYED;
 682		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 683				      map->m_pblk, status);
 684	}
 685
 686out_sem:
 687	up_write((&EXT4_I(inode)->i_data_sem));
 688	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 689		ret = check_block_validity(inode, map);
 690		if (ret != 0)
 691			return ret;
 692
 693		/*
 694		 * Inodes with freshly allocated blocks where contents will be
 695		 * visible after transaction commit must be on the transaction's
 696		 * ordered data list.
 697		 */
 698		if (map->m_flags & EXT4_MAP_NEW &&
 699		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
 700		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
 701		    !ext4_is_quota_file(inode) &&
 702		    ext4_should_order_data(inode)) {
 703			loff_t start_byte =
 704				(loff_t)map->m_lblk << inode->i_blkbits;
 705			loff_t length = (loff_t)map->m_len << inode->i_blkbits;
 706
 707			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
 708				ret = ext4_jbd2_inode_add_wait(handle, inode,
 709						start_byte, length);
 710			else
 711				ret = ext4_jbd2_inode_add_write(handle, inode,
 712						start_byte, length);
 713			if (ret)
 714				return ret;
 715		}
 716	}
 717	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
 718				map->m_flags & EXT4_MAP_MAPPED))
 719		ext4_fc_track_range(handle, inode, map->m_lblk,
 720					map->m_lblk + map->m_len - 1);
 721	if (retval < 0)
 722		ext_debug(inode, "failed with err %d\n", retval);
 723	return retval;
 724}
 725
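/*
 * A minimal usage sketch for ext4_map_blocks() (illustrative only, not
 * lifted from any real caller; 'lblk' stands for a caller-supplied
 * logical block number):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		read_block(map.m_pblk);		(mapped: use the pblock)
 *	else if (ret == 0)
 *		skip_hole(map.m_len);		(hole of m_len blocks)
 *
 * where read_block() and skip_hole() are hypothetical helpers.
 */
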
 726/*
 727 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 728 * we have to be careful as someone else may be manipulating b_state as well.
 729 */
 730static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
 731{
 732	unsigned long old_state;
 733	unsigned long new_state;
 734
 735	flags &= EXT4_MAP_FLAGS;
 736
 737	/* Dummy buffer_head? Set non-atomically. */
 738	if (!bh->b_page) {
 739		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
 740		return;
 741	}
 742	/*
 743	 * Someone else may be modifying b_state. Be careful! This is ugly but
 744	 * once we get rid of using bh as a container for mapping information
 745	 * to pass to / from get_block functions, this can go away.
 746	 */
 747	old_state = READ_ONCE(bh->b_state);
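	/*
	 * Classic lock-free read-modify-write: retry with the freshly
	 * observed old_state until the cmpxchg succeeds without a
	 * concurrent update sneaking in between the read and the store.
	 */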
 748	do {
 749		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
 750	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
 751}
 752
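/*
 * Translate one get_block-style request into an ext4_map_blocks() call
 * and copy the result back into the buffer_head. For a hole we only
 * fill in b_size so the caller knows how much can be skipped.
 */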
 753static int _ext4_get_block(struct inode *inode, sector_t iblock,
 754			   struct buffer_head *bh, int flags)
 755{
 756	struct ext4_map_blocks map;
 757	int ret = 0;
 758
 759	if (ext4_has_inline_data(inode))
 760		return -ERANGE;
 761
 762	map.m_lblk = iblock;
 763	map.m_len = bh->b_size >> inode->i_blkbits;
 764
 765	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
 766			      flags);
 767	if (ret > 0) {
 768		map_bh(bh, inode->i_sb, map.m_pblk);
 769		ext4_update_bh_state(bh, map.m_flags);
 770		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 771		ret = 0;
 772	} else if (ret == 0) {
 773		/* hole case, need to fill in bh->b_size */
 774		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 775	}
 776	return ret;
 777}
 778
 779int ext4_get_block(struct inode *inode, sector_t iblock,
 780		   struct buffer_head *bh, int create)
 781{
 782	return _ext4_get_block(inode, iblock, bh,
 783			       create ? EXT4_GET_BLOCKS_CREATE : 0);
 784}
 785
 786	/*
 787	 * Get block function used when preparing for a buffered write, in case we
 788	 * need to create an unwritten extent where blocks haven't been allocated.
 789	 * The extent will be converted to written after the IO is complete.
 790	 */
 791int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 792			     struct buffer_head *bh_result, int create)
 793{
 794	int ret = 0;
 795
 796	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
 797		   inode->i_ino, create);
 798	ret = _ext4_get_block(inode, iblock, bh_result,
 799			       EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
 800
 801	/*
 802	 * If the buffer is marked unwritten, mark it as new to make sure it is
 803	 * zeroed out correctly in case of partial writes. Otherwise, there is
 804	 * a chance of stale data getting exposed.
 805	 */
 806	if (ret == 0 && buffer_unwritten(bh_result))
 807		set_buffer_new(bh_result);
 808
 809	return ret;
 810}
 811
 812/* Maximum number of blocks we map for direct IO at once. */
 813#define DIO_MAX_BLOCKS 4096
 814
 815/*
 816 * `handle' can be NULL if create is zero
 817 */
 818struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 819				ext4_lblk_t block, int map_flags)
 820{
 821	struct ext4_map_blocks map;
 822	struct buffer_head *bh;
 823	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
 824	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
 825	int err;
 826
 827	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
 828		    || handle != NULL || create == 0);
 829	ASSERT(create == 0 || !nowait);
 830
 831	map.m_lblk = block;
 832	map.m_len = 1;
 833	err = ext4_map_blocks(handle, inode, &map, map_flags);
 834
 835	if (err == 0)
 836		return create ? ERR_PTR(-ENOSPC) : NULL;
 837	if (err < 0)
 838		return ERR_PTR(err);
 839
 840	if (nowait)
 841		return sb_find_get_block(inode->i_sb, map.m_pblk);
 842
 843	bh = sb_getblk(inode->i_sb, map.m_pblk);
 844	if (unlikely(!bh))
 845		return ERR_PTR(-ENOMEM);
 846	if (map.m_flags & EXT4_MAP_NEW) {
 847		ASSERT(create != 0);
 848		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
 849			    || (handle != NULL));
 850
 851		/*
 852		 * Now that we do not always journal data, we should
 853		 * keep in mind whether this should always journal the
 854		 * new buffer as metadata.  For now, regular file
 855		 * writes use ext4_get_block instead, so it's not a
 856		 * problem.
 857		 */
 858		lock_buffer(bh);
 859		BUFFER_TRACE(bh, "call get_create_access");
 860		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
 861						     EXT4_JTR_NONE);
 862		if (unlikely(err)) {
 863			unlock_buffer(bh);
 864			goto errout;
 865		}
 866		if (!buffer_uptodate(bh)) {
 867			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 868			set_buffer_uptodate(bh);
 869		}
 870		unlock_buffer(bh);
 871		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 872		err = ext4_handle_dirty_metadata(handle, inode, bh);
 873		if (unlikely(err))
 874			goto errout;
 875	} else
 876		BUFFER_TRACE(bh, "not a new buffer");
 877	return bh;
 878errout:
 879	brelse(bh);
 880	return ERR_PTR(err);
 881}
 882
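/*
 * Like ext4_getblk(), but additionally reads the buffer from disk when
 * it is not already up to date. Returns NULL for a hole, an ERR_PTR on
 * failure, and an up-to-date buffer head otherwise.
 */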
 883struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 884			       ext4_lblk_t block, int map_flags)
 885{
 886	struct buffer_head *bh;
 887	int ret;
 888
 889	bh = ext4_getblk(handle, inode, block, map_flags);
 890	if (IS_ERR(bh))
 891		return bh;
 892	if (!bh || ext4_buffer_uptodate(bh))
 893		return bh;
 894
 895	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
 896	if (ret) {
 897		put_bh(bh);
 898		return ERR_PTR(ret);
 899	}
 900	return bh;
 901}
 902
 903/* Read a contiguous batch of blocks. */
 904int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
 905		     bool wait, struct buffer_head **bhs)
 906{
 907	int i, err;
 908
 909	for (i = 0; i < bh_count; i++) {
 910		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
 911		if (IS_ERR(bhs[i])) {
 912			err = PTR_ERR(bhs[i]);
 913			bh_count = i;
 914			goto out_brelse;
 915		}
 916	}
 917
 918	for (i = 0; i < bh_count; i++)
 919		/* Note that NULL bhs[i] is valid because of holes. */
 920		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
 921			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);
 922
 923	if (!wait)
 924		return 0;
 925
 926	for (i = 0; i < bh_count; i++)
 927		if (bhs[i])
 928			wait_on_buffer(bhs[i]);
 929
 930	for (i = 0; i < bh_count; i++) {
 931		if (bhs[i] && !buffer_uptodate(bhs[i])) {
 932			err = -EIO;
 933			goto out_brelse;
 934		}
 935	}
 936	return 0;
 937
 938out_brelse:
 939	for (i = 0; i < bh_count; i++) {
 940		brelse(bhs[i]);
 941		bhs[i] = NULL;
 942	}
 943	return err;
 944}
 945
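/*
 * Walk the buffers of a folio that overlap the byte range [from, to) and
 * apply @fn to each of them. Buffers outside the range are skipped,
 * except that *partial is set if any of them is not uptodate. The walk
 * stops at the first error, which is then returned.
 */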
 946int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
 947			   struct buffer_head *head,
 948			   unsigned from,
 949			   unsigned to,
 950			   int *partial,
 951			   int (*fn)(handle_t *handle, struct inode *inode,
 952				     struct buffer_head *bh))
 953{
 954	struct buffer_head *bh;
 955	unsigned block_start, block_end;
 956	unsigned blocksize = head->b_size;
 957	int err, ret = 0;
 958	struct buffer_head *next;
 959
 960	for (bh = head, block_start = 0;
 961	     ret == 0 && (bh != head || !block_start);
 962	     block_start = block_end, bh = next) {
 963		next = bh->b_this_page;
 964		block_end = block_start + blocksize;
 965		if (block_end <= from || block_start >= to) {
 966			if (partial && !buffer_uptodate(bh))
 967				*partial = 1;
 968			continue;
 969		}
 970		err = (*fn)(handle, inode, bh);
 971		if (!ret)
 972			ret = err;
 973	}
 974	return ret;
 975}
 976
 977	/*
 978	 * Helper for handling dirtying of journalled data. We also mark the folio
 979	 * as dirty so that the writeback code knows that this folio (and inode)
 980	 * contains dirty data. ext4_writepages() then commits the appropriate
 981	 * transaction to make the data stable.
 982	 */
 983static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
 984{
 985	folio_mark_dirty(bh->b_folio);
 986	return ext4_handle_dirty_metadata(handle, NULL, bh);
 987}
 988
 989int do_journal_get_write_access(handle_t *handle, struct inode *inode,
 990				struct buffer_head *bh)
 991{
 992	int dirty = buffer_dirty(bh);
 993	int ret;
 994
 995	if (!buffer_mapped(bh) || buffer_freed(bh))
 996		return 0;
 997	/*
 998	 * __block_write_begin() could have dirtied some buffers. Clean
 999	 * the dirty bit as jbd2_journal_get_write_access() could complain
1000	 * otherwise about fs integrity issues. Setting of the dirty bit
1001	 * by __block_write_begin() isn't a real problem here as we clear
1002	 * the bit before releasing a page lock and thus writeback cannot
1003	 * ever write the buffer.
1004	 */
1005	if (dirty)
1006		clear_buffer_dirty(bh);
1007	BUFFER_TRACE(bh, "get write access");
1008	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
1009					    EXT4_JTR_NONE);
1010	if (!ret && dirty)
1011		ret = ext4_dirty_journalled_data(handle, bh);
1012	return ret;
1013}
1014
1015#ifdef CONFIG_FS_ENCRYPTION
1016static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
1017				  get_block_t *get_block)
1018{
1019	unsigned from = pos & (PAGE_SIZE - 1);
1020	unsigned to = from + len;
1021	struct inode *inode = folio->mapping->host;
1022	unsigned block_start, block_end;
1023	sector_t block;
1024	int err = 0;
1025	unsigned blocksize = inode->i_sb->s_blocksize;
1026	unsigned bbits;
1027	struct buffer_head *bh, *head, *wait[2];
1028	int nr_wait = 0;
1029	int i;
1030
1031	BUG_ON(!folio_test_locked(folio));
1032	BUG_ON(from > PAGE_SIZE);
1033	BUG_ON(to > PAGE_SIZE);
1034	BUG_ON(from > to);
1035
1036	head = folio_buffers(folio);
1037	if (!head)
1038		head = create_empty_buffers(folio, blocksize, 0);
1039	bbits = ilog2(blocksize);
1040	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1041
1042	for (bh = head, block_start = 0; bh != head || !block_start;
1043	    block++, block_start = block_end, bh = bh->b_this_page) {
1044		block_end = block_start + blocksize;
1045		if (block_end <= from || block_start >= to) {
1046			if (folio_test_uptodate(folio)) {
1047				set_buffer_uptodate(bh);
1048			}
1049			continue;
1050		}
1051		if (buffer_new(bh))
1052			clear_buffer_new(bh);
1053		if (!buffer_mapped(bh)) {
1054			WARN_ON(bh->b_size != blocksize);
1055			err = get_block(inode, block, bh, 1);
1056			if (err)
1057				break;
1058			if (buffer_new(bh)) {
1059				if (folio_test_uptodate(folio)) {
1060					clear_buffer_new(bh);
1061					set_buffer_uptodate(bh);
1062					mark_buffer_dirty(bh);
1063					continue;
1064				}
1065				if (block_end > to || block_start < from)
1066					folio_zero_segments(folio, to,
1067							    block_end,
1068							    block_start, from);
1069				continue;
1070			}
1071		}
1072		if (folio_test_uptodate(folio)) {
1073			set_buffer_uptodate(bh);
1074			continue;
1075		}
1076		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1077		    !buffer_unwritten(bh) &&
1078		    (block_start < from || block_end > to)) {
1079			ext4_read_bh_lock(bh, 0, false);
1080			wait[nr_wait++] = bh;
1081		}
1082	}
1083	/*
1084	 * If we issued read requests, let them complete.
1085	 */
1086	for (i = 0; i < nr_wait; i++) {
1087		wait_on_buffer(wait[i]);
1088		if (!buffer_uptodate(wait[i]))
1089			err = -EIO;
1090	}
1091	if (unlikely(err)) {
1092		folio_zero_new_buffers(folio, from, to);
1093	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1094		for (i = 0; i < nr_wait; i++) {
1095			int err2;
1096
1097			err2 = fscrypt_decrypt_pagecache_blocks(folio,
1098						blocksize, bh_offset(wait[i]));
1099			if (err2) {
1100				clear_buffer_uptodate(wait[i]);
1101				err = err2;
1102			}
1103		}
1104	}
1105
1106	return err;
1107}
1108#endif
1109
1110/*
1111 * To preserve ordering, it is essential that the hole instantiation and
1112 * the data write be encapsulated in a single transaction.  We cannot
1113 * close off a transaction and start a new one between the ext4_get_block()
1114 * and the ext4_write_end().  So doing the jbd2_journal_start at the start of
1115 * ext4_write_begin() is the right place.
1116 */
1117static int ext4_write_begin(struct file *file, struct address_space *mapping,
1118			    loff_t pos, unsigned len,
1119			    struct page **pagep, void **fsdata)
1120{
1121	struct inode *inode = mapping->host;
1122	int ret, needed_blocks;
1123	handle_t *handle;
1124	int retries = 0;
1125	struct folio *folio;
1126	pgoff_t index;
1127	unsigned from, to;
1128
1129	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
1130		return -EIO;
1131
1132	trace_ext4_write_begin(inode, pos, len);
1133	/*
1134	 * Reserve one extra block for adding the inode to the orphan list
1135	 * in case we allocate blocks but the write fails for some reason.
1136	 */
1137	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1138	index = pos >> PAGE_SHIFT;
1139	from = pos & (PAGE_SIZE - 1);
1140	to = from + len;
1141
1142	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1143		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1144						    pagep);
1145		if (ret < 0)
1146			return ret;
1147		if (ret == 1)
1148			return 0;
1149	}
1150
1151	/*
1152	 * __filemap_get_folio() can take a long time if the
1153	 * system is thrashing due to memory pressure, or if the folio
1154	 * is being written back.  So grab it first before we start
1155	 * the transaction handle.  This also allows us to allocate
1156	 * the folio (if needed) without using GFP_NOFS.
1157	 */
1158retry_grab:
1159	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
1160					mapping_gfp_mask(mapping));
1161	if (IS_ERR(folio))
1162		return PTR_ERR(folio);
1163	/*
1164	 * Just like the folio allocation above, we preallocate buffer heads
1165	 * before starting the handle.
1166	 */
1167	if (!folio_buffers(folio))
1168		create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);
1169
1170	folio_unlock(folio);
1171
1172retry_journal:
1173	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1174	if (IS_ERR(handle)) {
1175		folio_put(folio);
1176		return PTR_ERR(handle);
1177	}
1178
1179	folio_lock(folio);
1180	if (folio->mapping != mapping) {
1181		/* The folio got truncated from under us */
1182		folio_unlock(folio);
1183		folio_put(folio);
1184		ext4_journal_stop(handle);
1185		goto retry_grab;
1186	}
1187	/* In case writeback began while the folio was unlocked */
1188	folio_wait_stable(folio);
1189
1190#ifdef CONFIG_FS_ENCRYPTION
1191	if (ext4_should_dioread_nolock(inode))
1192		ret = ext4_block_write_begin(folio, pos, len,
1193					     ext4_get_block_unwritten);
1194	else
1195		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
1196#else
1197	if (ext4_should_dioread_nolock(inode))
1198		ret = __block_write_begin(&folio->page, pos, len,
1199					  ext4_get_block_unwritten);
1200	else
1201		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
1202#endif
1203	if (!ret && ext4_should_journal_data(inode)) {
1204		ret = ext4_walk_page_buffers(handle, inode,
1205					     folio_buffers(folio), from, to,
1206					     NULL, do_journal_get_write_access);
1207	}
1208
1209	if (ret) {
1210		bool extended = (pos + len > inode->i_size) &&
1211				!ext4_verity_in_progress(inode);
1212
1213		folio_unlock(folio);
1214		/*
1215		 * __block_write_begin may have instantiated a few blocks
1216		 * outside i_size.  Trim these off again. We don't need
1217		 * i_size_read because we hold i_rwsem.
1218		 *
1219		 * Add the inode to the orphan list in case we crash before
1220		 * the truncate finishes.
1221		 */
1222		if (extended && ext4_can_truncate(inode))
1223			ext4_orphan_add(handle, inode);
1224
1225		ext4_journal_stop(handle);
1226		if (extended) {
1227			ext4_truncate_failed_write(inode);
1228			/*
1229			 * If truncate failed early the inode might
1230			 * still be on the orphan list; we need to
1231			 * make sure the inode is removed from the
1232			 * orphan list in that case.
1233			 */
1234			if (inode->i_nlink)
1235				ext4_orphan_del(NULL, inode);
1236		}
1237
1238		if (ret == -ENOSPC &&
1239		    ext4_should_retry_alloc(inode->i_sb, &retries))
1240			goto retry_journal;
1241		folio_put(folio);
1242		return ret;
1243	}
1244	*pagep = &folio->page;
1245	return ret;
1246}
1247
1248/* For write_end() in data=journal mode */
1249static int write_end_fn(handle_t *handle, struct inode *inode,
1250			struct buffer_head *bh)
1251{
1252	int ret;
1253	if (!buffer_mapped(bh) || buffer_freed(bh))
1254		return 0;
1255	set_buffer_uptodate(bh);
1256	ret = ext4_dirty_journalled_data(handle, bh);
1257	clear_buffer_meta(bh);
1258	clear_buffer_prio(bh);
1259	return ret;
1260}
1261
1262	/*
1263	 * We need to pick up the new inode size which generic_commit_write gave
1264	 * us.  `file' can be NULL - e.g., when called from page_symlink().
1265	 *
1266	 * ext4 never places buffers on inode->i_mapping->i_private_list; metadata
1267	 * buffers are managed internally.
1268	 */
1269static int ext4_write_end(struct file *file,
1270			  struct address_space *mapping,
1271			  loff_t pos, unsigned len, unsigned copied,
1272			  struct page *page, void *fsdata)
1273{
1274	struct folio *folio = page_folio(page);
1275	handle_t *handle = ext4_journal_current_handle();
1276	struct inode *inode = mapping->host;
1277	loff_t old_size = inode->i_size;
1278	int ret = 0, ret2;
1279	int i_size_changed = 0;
1280	bool verity = ext4_verity_in_progress(inode);
1281
1282	trace_ext4_write_end(inode, pos, len, copied);
1283
1284	if (ext4_has_inline_data(inode) &&
1285	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
1286		return ext4_write_inline_data_end(inode, pos, len, copied,
1287						  folio);
1288
1289	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1290	/*
1291	 * it's important to update i_size while still holding folio lock:
1292	 * page writeout could otherwise come in and zero beyond i_size.
1293	 *
1294	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1295	 * blocks are being written past EOF, so skip the i_size update.
1296	 */
1297	if (!verity)
1298		i_size_changed = ext4_update_inode_size(inode, pos + copied);
1299	folio_unlock(folio);
1300	folio_put(folio);
1301
1302	if (old_size < pos && !verity)
1303		pagecache_isize_extended(inode, old_size, pos);
1304	/*
1305	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
1306	 * makes the holding time of folio lock longer. Second, it forces lock
1307	 * ordering of folio lock and transaction start for journaling
1308	 * filesystems.
1309	 */
1310	if (i_size_changed)
1311		ret = ext4_mark_inode_dirty(handle, inode);
1312
1313	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1314		/* If we have allocated more blocks than we have copied,
1315		 * we will have blocks allocated outside
1316		 * inode->i_size, so truncate them.
1317		 */
1318		ext4_orphan_add(handle, inode);
1319
1320	ret2 = ext4_journal_stop(handle);
1321	if (!ret)
1322		ret = ret2;
1323
1324	if (pos + len > inode->i_size && !verity) {
1325		ext4_truncate_failed_write(inode);
1326		/*
1327		 * If truncate failed early the inode might still be
1328		 * on the orphan list; we need to make sure the inode
1329		 * is removed from the orphan list in that case.
1330		 */
1331		if (inode->i_nlink)
1332			ext4_orphan_del(NULL, inode);
1333	}
1334
1335	return ret ? ret : copied;
1336}
1337
1338	/*
1339	 * This is a private version of folio_zero_new_buffers() which doesn't
1340	 * mark the buffers dirty, since in data=journalled mode we need
1341	 * to call ext4_dirty_journalled_data() instead.
1342	 */
1343static void ext4_journalled_zero_new_buffers(handle_t *handle,
1344					    struct inode *inode,
1345					    struct folio *folio,
1346					    unsigned from, unsigned to)
1347{
1348	unsigned int block_start = 0, block_end;
1349	struct buffer_head *head, *bh;
1350
1351	bh = head = folio_buffers(folio);
1352	do {
1353		block_end = block_start + bh->b_size;
1354		if (buffer_new(bh)) {
1355			if (block_end > from && block_start < to) {
1356				if (!folio_test_uptodate(folio)) {
1357					unsigned start, size;
1358
1359					start = max(from, block_start);
1360					size = min(to, block_end) - start;
1361
1362					folio_zero_range(folio, start, size);
1363					write_end_fn(handle, inode, bh);
1364				}
1365				clear_buffer_new(bh);
1366			}
1367		}
1368		block_start = block_end;
1369		bh = bh->b_this_page;
1370	} while (bh != head);
1371}
1372
1373static int ext4_journalled_write_end(struct file *file,
1374				     struct address_space *mapping,
1375				     loff_t pos, unsigned len, unsigned copied,
1376				     struct page *page, void *fsdata)
1377{
1378	struct folio *folio = page_folio(page);
1379	handle_t *handle = ext4_journal_current_handle();
1380	struct inode *inode = mapping->host;
1381	loff_t old_size = inode->i_size;
1382	int ret = 0, ret2;
1383	int partial = 0;
1384	unsigned from, to;
1385	int size_changed = 0;
1386	bool verity = ext4_verity_in_progress(inode);
1387
1388	trace_ext4_journalled_write_end(inode, pos, len, copied);
1389	from = pos & (PAGE_SIZE - 1);
1390	to = from + len;
1391
1392	BUG_ON(!ext4_handle_valid(handle));
1393
1394	if (ext4_has_inline_data(inode))
1395		return ext4_write_inline_data_end(inode, pos, len, copied,
1396						  folio);
1397
1398	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
1399		copied = 0;
1400		ext4_journalled_zero_new_buffers(handle, inode, folio,
1401						 from, to);
1402	} else {
1403		if (unlikely(copied < len))
1404			ext4_journalled_zero_new_buffers(handle, inode, folio,
1405							 from + copied, to);
1406		ret = ext4_walk_page_buffers(handle, inode,
1407					     folio_buffers(folio),
1408					     from, from + copied, &partial,
1409					     write_end_fn);
1410		if (!partial)
1411			folio_mark_uptodate(folio);
1412	}
1413	if (!verity)
1414		size_changed = ext4_update_inode_size(inode, pos + copied);
1415	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1416	folio_unlock(folio);
1417	folio_put(folio);
1418
1419	if (old_size < pos && !verity)
1420		pagecache_isize_extended(inode, old_size, pos);
1421
1422	if (size_changed) {
1423		ret2 = ext4_mark_inode_dirty(handle, inode);
1424		if (!ret)
1425			ret = ret2;
1426	}
1427
1428	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1429		/* If we have allocated more blocks than we have copied,
1430		 * we will have blocks allocated outside
1431		 * inode->i_size, so truncate them.
1432		 */
1433		ext4_orphan_add(handle, inode);
1434
1435	ret2 = ext4_journal_stop(handle);
1436	if (!ret)
1437		ret = ret2;
1438	if (pos + len > inode->i_size && !verity) {
1439		ext4_truncate_failed_write(inode);
1440		/*
1441		 * If truncate failed early the inode might still be
1442		 * on the orphan list; we need to make sure the inode
1443		 * is removed from the orphan list in that case.
1444		 */
1445		if (inode->i_nlink)
1446			ext4_orphan_del(NULL, inode);
1447	}
1448
1449	return ret ? ret : copied;
1450}
1451
1452/*
1453 * Reserve space for a single cluster
1454 */
1455static int ext4_da_reserve_space(struct inode *inode)
1456{
1457	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1458	struct ext4_inode_info *ei = EXT4_I(inode);
1459	int ret;
1460
1461	/*
1462	 * We will charge metadata quota at writeout time; this saves
1463	 * us from metadata over-estimation, though we may go over by
1464	 * a small amount in the end.  Here we just reserve for data.
1465	 */
1466	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1467	if (ret)
1468		return ret;
1469
1470	spin_lock(&ei->i_block_reservation_lock);
1471	if (ext4_claim_free_clusters(sbi, 1, 0)) {
1472		spin_unlock(&ei->i_block_reservation_lock);
1473		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1474		return -ENOSPC;
1475	}
1476	ei->i_reserved_data_blocks++;
1477	trace_ext4_da_reserve_space(inode);
1478	spin_unlock(&ei->i_block_reservation_lock);
1479
1480	return 0;       /* success */
1481}
1482
1483void ext4_da_release_space(struct inode *inode, int to_free)
1484{
1485	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1486	struct ext4_inode_info *ei = EXT4_I(inode);
1487
1488	if (!to_free)
1489		return;		/* Nothing to release, exit */
1490
1491	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1492
1493	trace_ext4_da_release_space(inode, to_free);
1494	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1495		/*
1496		 * If there aren't enough reserved blocks, then the
1497		 * counter is messed up somewhere.  Since this
1498		 * function is called from the invalidate path, it's
1499		 * harmless to return without any action.
1500		 */
1501		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1502			 "ino %lu, to_free %d with only %d reserved "
1503			 "data blocks", inode->i_ino, to_free,
1504			 ei->i_reserved_data_blocks);
1505		WARN_ON(1);
1506		to_free = ei->i_reserved_data_blocks;
1507	}
1508	ei->i_reserved_data_blocks -= to_free;
1509
1510	/* update fs dirty data blocks counter */
1511	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1512
1513	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1514
1515	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1516}
1517
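/*
 * Illustrative pairing of the two helpers above (a sketch, not taken
 * from a real caller): the delalloc write path reserves one cluster per
 * newly dirtied cluster and releases it again if the folio is
 * invalidated before writeback gets to allocate it:
 *
 *	err = ext4_da_reserve_space(inode);
 *	if (err)
 *		return err;
 *	... dirty the folio ...
 *	ext4_da_release_space(inode, 1);	(on invalidate)
 */
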
1518/*
1519 * Delayed allocation stuff
1520 */
1521
1522struct mpage_da_data {
1523	/* These are input fields for ext4_do_writepages() */
1524	struct inode *inode;
1525	struct writeback_control *wbc;
1526	unsigned int can_map:1;	/* Can writepages call map blocks? */
1527
1528	/* These are internal state of ext4_do_writepages() */
1529	pgoff_t first_page;	/* The first page to write */
1530	pgoff_t next_page;	/* Current page to examine */
1531	pgoff_t last_page;	/* Last page to examine */
1532	/*
1533	 * Extent to map - this can be after first_page because that can be
1534	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1535	 * is delalloc or unwritten.
1536	 */
1537	struct ext4_map_blocks map;
1538	struct ext4_io_submit io_submit;	/* IO submission data */
1539	unsigned int do_map:1;
1540	unsigned int scanned_until_end:1;
1541	unsigned int journalled_more_data:1;
1542};
1543
1544static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1545				       bool invalidate)
1546{
1547	unsigned nr, i;
1548	pgoff_t index, end;
1549	struct folio_batch fbatch;
1550	struct inode *inode = mpd->inode;
1551	struct address_space *mapping = inode->i_mapping;
1552
1553	/* This is necessary when next_page == 0. */
1554	if (mpd->first_page >= mpd->next_page)
1555		return;
1556
1557	mpd->scanned_until_end = 0;
1558	index = mpd->first_page;
1559	end   = mpd->next_page - 1;
1560	if (invalidate) {
1561		ext4_lblk_t start, last;
1562		start = index << (PAGE_SHIFT - inode->i_blkbits);
1563		last = end << (PAGE_SHIFT - inode->i_blkbits);
1564
1565		/*
1566		 * avoid racing with extent status tree scans made by
1567		 * ext4_insert_delayed_block()
1568		 */
1569		down_write(&EXT4_I(inode)->i_data_sem);
1570		ext4_es_remove_extent(inode, start, last - start + 1);
1571		up_write(&EXT4_I(inode)->i_data_sem);
1572	}
1573
1574	folio_batch_init(&fbatch);
1575	while (index <= end) {
1576		nr = filemap_get_folios(mapping, &index, end, &fbatch);
1577		if (nr == 0)
1578			break;
1579		for (i = 0; i < nr; i++) {
1580			struct folio *folio = fbatch.folios[i];
1581
1582			if (folio->index < mpd->first_page)
1583				continue;
1584			if (folio_next_index(folio) - 1 > end)
1585				continue;
1586			BUG_ON(!folio_test_locked(folio));
1587			BUG_ON(folio_test_writeback(folio));
1588			if (invalidate) {
1589				if (folio_mapped(folio))
1590					folio_clear_dirty_for_io(folio);
1591				block_invalidate_folio(folio, 0,
1592						folio_size(folio));
1593				folio_clear_uptodate(folio);
1594			}
1595			folio_unlock(folio);
1596		}
1597		folio_batch_release(&fbatch);
1598	}
1599}
1600
1601static void ext4_print_free_blocks(struct inode *inode)
1602{
1603	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1604	struct super_block *sb = inode->i_sb;
1605	struct ext4_inode_info *ei = EXT4_I(inode);
1606
1607	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1608	       EXT4_C2B(EXT4_SB(inode->i_sb),
1609			ext4_count_free_clusters(sb)));
1610	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1611	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1612	       (long long) EXT4_C2B(EXT4_SB(sb),
1613		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1614	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1615	       (long long) EXT4_C2B(EXT4_SB(sb),
1616		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1617	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1618	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1619		 ei->i_reserved_data_blocks);
1620	return;
1621}
1622
1623/*
1624 * ext4_insert_delayed_block - adds a delayed block to the extents status
1625 *                             tree, incrementing the reserved cluster/block
1626 *                             count or making a pending reservation
1627 *                             where needed
1628 *
1629 * @inode - file containing the newly added block
1630 * @lblk - logical block to be added
1631 *
1632 * Returns 0 on success, negative error code on failure.
1633 */
1634static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1635{
1636	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1637	int ret;
1638	bool allocated = false;
1639
1640	/*
1641	 * If the cluster containing lblk is shared with a delayed,
1642	 * written, or unwritten extent in a bigalloc file system, it's
1643	 * already been accounted for and does not need to be reserved.
1644	 * A pending reservation must be made for the cluster if it's
1645	 * shared with a written or unwritten extent and doesn't already
1646	 * have one.  Written and unwritten extents can be purged from the
1647	 * extents status tree if the system is under memory pressure, so
1648	 * it's necessary to examine the extent tree if a search of the
1649	 * extents status tree doesn't get a match.
1650	 */
1651	if (sbi->s_cluster_ratio == 1) {
1652		ret = ext4_da_reserve_space(inode);
1653		if (ret != 0)   /* ENOSPC */
1654			return ret;
1655	} else {   /* bigalloc */
1656		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1657			if (!ext4_es_scan_clu(inode,
1658					      &ext4_es_is_mapped, lblk)) {
1659				ret = ext4_clu_mapped(inode,
1660						      EXT4_B2C(sbi, lblk));
1661				if (ret < 0)
1662					return ret;
1663				if (ret == 0) {
1664					ret = ext4_da_reserve_space(inode);
1665					if (ret != 0)   /* ENOSPC */
1666						return ret;
1667				} else {
1668					allocated = true;
1669				}
1670			} else {
1671				allocated = true;
1672			}
1673		}
1674	}
1675
1676	ext4_es_insert_delayed_block(inode, lblk, allocated);
1677	return 0;
1678}
1679
1680	/*
1681	 * This function borrows code from the very beginning of
1682	 * ext4_map_blocks(), but assumes that the caller is in the delayed
1683	 * write path. It looks up the requested blocks and sets the
1684	 * buffer delay bit under the protection of i_data_sem.
1685	 */
1686static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1687			      struct ext4_map_blocks *map,
1688			      struct buffer_head *bh)
1689{
1690	struct extent_status es;
1691	int retval;
1692	sector_t invalid_block = ~((sector_t) 0xffff);
1693#ifdef ES_AGGRESSIVE_TEST
1694	struct ext4_map_blocks orig_map;
1695
1696	memcpy(&orig_map, map, sizeof(*map));
1697#endif
1698
1699	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1700		invalid_block = ~0;
1701
1702	map->m_flags = 0;
1703	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1704		  (unsigned long) map->m_lblk);
1705
1706	/* Lookup extent status tree firstly */
1707	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1708		if (ext4_es_is_hole(&es))
1709			goto add_delayed;
1710
1711		/*
1712		 * A delayed extent could have been allocated by fallocate,
1713		 * so we need to check for it.
1714		 */
1715		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1716			map_bh(bh, inode->i_sb, invalid_block);
1717			set_buffer_new(bh);
1718			set_buffer_delay(bh);
1719			return 0;
1720		}
1721
1722		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1723		retval = es.es_len - (iblock - es.es_lblk);
1724		if (retval > map->m_len)
1725			retval = map->m_len;
1726		map->m_len = retval;
1727		if (ext4_es_is_written(&es))
1728			map->m_flags |= EXT4_MAP_MAPPED;
1729		else if (ext4_es_is_unwritten(&es))
1730			map->m_flags |= EXT4_MAP_UNWRITTEN;
1731		else
1732			BUG();
1733
1734#ifdef ES_AGGRESSIVE_TEST
1735		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1736#endif
1737		return retval;
1738	}
1739
1740	/*
1741	 * Try to see if we can get the block without requesting a new
1742	 * file system block.
1743	 */
1744	down_read(&EXT4_I(inode)->i_data_sem);
1745	if (ext4_has_inline_data(inode))
1746		retval = 0;
1747	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1748		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1749	else
1750		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1751	if (retval < 0) {
1752		up_read(&EXT4_I(inode)->i_data_sem);
1753		return retval;
1754	}
1755	if (retval > 0) {
1756		unsigned int status;
1757
1758		if (unlikely(retval != map->m_len)) {
1759			ext4_warning(inode->i_sb,
1760				     "ES len assertion failed for inode "
1761				     "%lu: retval %d != map->m_len %d",
1762				     inode->i_ino, retval, map->m_len);
1763			WARN_ON(1);
1764		}
1765
1766		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1767				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1768		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1769				      map->m_pblk, status);
1770		up_read(&EXT4_I(inode)->i_data_sem);
1771		return retval;
1772	}
1773	up_read(&EXT4_I(inode)->i_data_sem);
1774
1775add_delayed:
1776	down_write(&EXT4_I(inode)->i_data_sem);
1777	retval = ext4_insert_delayed_block(inode, map->m_lblk);
1778	up_write(&EXT4_I(inode)->i_data_sem);
1779	if (retval)
1780		return retval;
1781
1782	map_bh(bh, inode->i_sb, invalid_block);
1783	set_buffer_new(bh);
1784	set_buffer_delay(bh);
1785	return retval;
1786}
1787
1788	/*
1789	 * This is a special get_block_t callback which is used by
1790	 * ext4_da_write_begin().  It will either return a mapped block or
1791	 * reserve space for a single block.
1792	 *
1793	 * For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
1794	 * We also have b_blocknr = -1 and b_bdev initialized properly.
1795	 *
1796	 * For an unwritten buffer_head we have BH_Mapped, BH_New and BH_Unwritten
1797	 * set.  We also have b_blocknr = the physical block mapping the unwritten
1798	 * extent and b_bdev initialized properly.
1799	 */
1800int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1801			   struct buffer_head *bh, int create)
1802{
1803	struct ext4_map_blocks map;
1804	int ret = 0;
1805
1806	BUG_ON(create == 0);
1807	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1808
1809	map.m_lblk = iblock;
1810	map.m_len = 1;
1811
1812	/*
1813	 * First, we need to know whether the block is already allocated;
1814	 * preallocated blocks are unmapped but should be treated
1815	 * the same as allocated blocks.
1816	 */
1817	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1818	if (ret <= 0)
1819		return ret;
1820
1821	map_bh(bh, inode->i_sb, map.m_pblk);
1822	ext4_update_bh_state(bh, map.m_flags);
1823
1824	if (buffer_unwritten(bh)) {
1825		/* A delayed write to unwritten bh should be marked
1826		 * new and mapped.  Mapped ensures that we don't do
1827		 * get_block multiple times when we write to the same
1828		 * offset and new ensures that we do proper zero out
1829		 * for partial write.
1830		 */
1831		set_buffer_new(bh);
1832		set_buffer_mapped(bh);
1833	}
1834	return 0;
1835}
1836
1837static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1838{
1839	mpd->first_page += folio_nr_pages(folio);
1840	folio_unlock(folio);
1841}
1842
1843static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1844{
1845	size_t len;
1846	loff_t size;
1847	int err;
1848
1849	BUG_ON(folio->index != mpd->first_page);
1850	folio_clear_dirty_for_io(folio);
1851	/*
1852	 * We have to be very careful here!  Nothing protects writeback path
1853	 * against i_size changes and the page can be writeably mapped into
1854	 * page tables. So an application can be growing i_size and writing
1855	 * data through mmap while writeback runs. folio_clear_dirty_for_io()
1856	 * write-protects our page in page tables and the page cannot get
1857	 * written to again until we release folio lock. So only after
1858	 * folio_clear_dirty_for_io() we are safe to sample i_size for
1859	 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1860	 * on the barrier provided by folio_test_clear_dirty() in
1861	 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
1862	 * after page tables are updated.
1863	 */
1864	size = i_size_read(mpd->inode);
1865	len = folio_size(folio);
1866	if (folio_pos(folio) + len > size &&
1867	    !ext4_verity_in_progress(mpd->inode))
1868		len = size & ~PAGE_MASK;
1869	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
1870	if (!err)
1871		mpd->wbc->nr_to_write--;
1872
1873	return err;
1874}
1875
1876#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
1877
1878/*
1879 * mballoc gives us at most this number of blocks...
1880 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1881 * The rest of mballoc seems to handle chunks up to full group size.
1882 */
1883#define MAX_WRITEPAGES_EXTENT_LEN 2048
1884
1885/*
1886 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1887 *
1888 * @mpd - extent of blocks
1889 * @lblk - logical number of the block in the file
1890 * @bh - buffer head we want to add to the extent
1891 *
1892	 * The function is used to collect contiguous blocks in the same state. If the
1893 * buffer doesn't require mapping for writeback and we haven't started the
1894 * extent of buffers to map yet, the function returns 'true' immediately - the
1895 * caller can write the buffer right away. Otherwise the function returns true
1896 * if the block has been added to the extent, false if the block couldn't be
1897 * added.
1898 */
1899static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1900				   struct buffer_head *bh)
1901{
1902	struct ext4_map_blocks *map = &mpd->map;
1903
1904	/* Buffer that doesn't need mapping for writeback? */
1905	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1906	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1907		/* So far no extent to map => we write the buffer right away */
1908		if (map->m_len == 0)
1909			return true;
1910		return false;
1911	}
1912
1913	/* First block in the extent? */
1914	if (map->m_len == 0) {
1915		/* We cannot map unless handle is started... */
1916		if (!mpd->do_map)
1917			return false;
1918		map->m_lblk = lblk;
1919		map->m_len = 1;
1920		map->m_flags = bh->b_state & BH_FLAGS;
1921		return true;
1922	}
1923
1924	/* Don't go larger than mballoc is willing to allocate */
1925	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1926		return false;
1927
1928	/* Can we merge the block to our big extent? */
1929	if (lblk == map->m_lblk + map->m_len &&
1930	    (bh->b_state & BH_FLAGS) == map->m_flags) {
1931		map->m_len++;
1932		return true;
1933	}
1934	return false;
1935}
1936
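/*
 * A worked example for mpage_add_bh_to_extent() (illustrative numbers):
 * with map->m_lblk == 100 and map->m_len == 3, a delayed/unwritten
 * buffer at lblk 103 whose BH_FLAGS state matches map->m_flags is merged
 * and m_len becomes 4. A buffer at lblk 105, or one whose state differs,
 * cannot be merged and the function returns false so that the current
 * extent gets mapped first.
 */
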
1937/*
1938 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1939 *
1940 * @mpd - extent of blocks for mapping
1941 * @head - the first buffer in the page
1942 * @bh - buffer we should start processing from
1943 * @lblk - logical number of the block in the file corresponding to @bh
1944 *
1945	 * Walk through page buffers from @bh up to @head (exclusive) and either submit
1946 * the page for IO if all buffers in this page were mapped and there's no
1947 * accumulated extent of buffers to map or add buffers in the page to the
1948 * extent of buffers to map. The function returns 1 if the caller can continue
1949 * by processing the next page, 0 if it should stop adding buffers to the
1950	 * extent to map because we cannot extend it anymore. It can also return a
1951	 * value < 0 in case of an error during IO submission.
1952 */
1953static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1954				   struct buffer_head *head,
1955				   struct buffer_head *bh,
1956				   ext4_lblk_t lblk)
1957{
1958	struct inode *inode = mpd->inode;
1959	int err;
1960	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
1961							>> inode->i_blkbits;
1962
1963	if (ext4_verity_in_progress(inode))
1964		blocks = EXT_MAX_BLOCKS;
1965
1966	do {
1967		BUG_ON(buffer_locked(bh));
1968
1969		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
1970			/* Found extent to map? */
1971			if (mpd->map.m_len)
1972				return 0;
1973			/* Buffer needs mapping and handle is not started? */
1974			if (!mpd->do_map)
1975				return 0;
1976			/* Everything mapped so far and we hit EOF */
1977			break;
1978		}
1979	} while (lblk++, (bh = bh->b_this_page) != head);
1980	/* So far everything mapped? Submit the page for IO. */
1981	if (mpd->map.m_len == 0) {
1982		err = mpage_submit_folio(mpd, head->b_folio);
1983		if (err < 0)
1984			return err;
1985		mpage_folio_done(mpd, head->b_folio);
1986	}
1987	if (lblk >= blocks) {
1988		mpd->scanned_until_end = 1;
1989		return 0;
1990	}
1991	return 1;
1992}
1993
1994/*
1995 * mpage_process_folio - update folio buffers corresponding to changed extent
1996	 *			 and possibly submit the fully mapped folio for IO
1997 * @mpd: description of extent to map, on return next extent to map
1998 * @folio: Contains these buffers.
1999 * @m_lblk: logical block mapping.
2000 * @m_pblk: corresponding physical mapping.
2001 * @map_bh: determines on return whether this page requires any further
2002 *		  mapping or not.
2003 *
2004 * Scan given folio buffers corresponding to changed extent and update buffer
2005 * state according to new extent state.
2006 * We map delalloc buffers to their physical location, clear unwritten bits.
2007 * If the given folio is not fully mapped, we update @mpd to the next extent in
2008	 * the given folio that needs mapping and return @map_bh as true.
2009 */
2010static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2011			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2012			      bool *map_bh)
2013{
2014	struct buffer_head *head, *bh;
2015	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2016	ext4_lblk_t lblk = *m_lblk;
2017	ext4_fsblk_t pblock = *m_pblk;
2018	int err = 0;
2019	int blkbits = mpd->inode->i_blkbits;
2020	ssize_t io_end_size = 0;
2021	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2022
2023	bh = head = folio_buffers(folio);
2024	do {
2025		if (lblk < mpd->map.m_lblk)
2026			continue;
2027		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2028			/*
2029			 * Buffer after end of mapped extent.
2030			 * Find next buffer in the folio to map.
2031			 */
2032			mpd->map.m_len = 0;
2033			mpd->map.m_flags = 0;
2034			io_end_vec->size += io_end_size;
2035
2036			err = mpage_process_page_bufs(mpd, head, bh, lblk);
2037			if (err > 0)
2038				err = 0;
2039			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2040				io_end_vec = ext4_alloc_io_end_vec(io_end);
2041				if (IS_ERR(io_end_vec)) {
2042					err = PTR_ERR(io_end_vec);
2043					goto out;
2044				}
2045				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2046			}
2047			*map_bh = true;
2048			goto out;
2049		}
2050		if (buffer_delay(bh)) {
2051			clear_buffer_delay(bh);
2052			bh->b_blocknr = pblock++;
2053		}
2054		clear_buffer_unwritten(bh);
2055		io_end_size += (1 << blkbits);
2056	} while (lblk++, (bh = bh->b_this_page) != head);
2057
2058	io_end_vec->size += io_end_size;
2059	*map_bh = false;
2060out:
2061	*m_lblk = lblk;
2062	*m_pblk = pblock;
2063	return err;
2064}
2065
2066/*
2067 * mpage_map_buffers - update buffers corresponding to changed extent and
2068 *		       submit fully mapped pages for IO
2069 *
2070 * @mpd - description of extent to map, on return next extent to map
2071 *
2072 * Scan buffers corresponding to changed extent (we expect corresponding pages
2073 * to be already locked) and update buffer state according to new extent state.
2074 * We map delalloc buffers to their physical location, clear unwritten bits,
2075 * and mark buffers as uninit when we perform writes to unwritten extents
2076 * and do extent conversion after IO is finished. If the last page is not fully
2077 * mapped, we update @map to the next extent in the last page that needs
2078 * mapping. Otherwise we submit the page for IO.
2079 */
2080static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2081{
2082	struct folio_batch fbatch;
2083	unsigned nr, i;
2084	struct inode *inode = mpd->inode;
2085	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2086	pgoff_t start, end;
2087	ext4_lblk_t lblk;
2088	ext4_fsblk_t pblock;
2089	int err;
2090	bool map_bh = false;
2091
2092	start = mpd->map.m_lblk >> bpp_bits;
2093	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2094	lblk = start << bpp_bits;
2095	pblock = mpd->map.m_pblk;
2096
2097	folio_batch_init(&fbatch);
2098	while (start <= end) {
2099		nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2100		if (nr == 0)
2101			break;
2102		for (i = 0; i < nr; i++) {
2103			struct folio *folio = fbatch.folios[i];
2104
2105			err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2106						 &map_bh);
2107			/*
2108			 * If map_bh is true, the folio may require further
2109			 * buffer mapping, or it may have been submitted for
2110			 * IO, so we return to do further extent mapping.
2111			 */
2112			if (err < 0 || map_bh)
2113				goto out;
2114			/* Page fully mapped - let IO run! */
2115			err = mpage_submit_folio(mpd, folio);
2116			if (err < 0)
2117				goto out;
2118			mpage_folio_done(mpd, folio);
2119		}
2120		folio_batch_release(&fbatch);
2121	}
2122	/* Extent fully mapped and matches with page boundary. We are done. */
2123	mpd->map.m_len = 0;
2124	mpd->map.m_flags = 0;
2125	return 0;
2126out:
2127	folio_batch_release(&fbatch);
2128	return err;
2129}
2130
2131static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2132{
2133	struct inode *inode = mpd->inode;
2134	struct ext4_map_blocks *map = &mpd->map;
2135	int get_blocks_flags;
2136	int err, dioread_nolock;
2137
2138	trace_ext4_da_write_pages_extent(inode, map);
2139	/*
2140	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2141	 * to convert an unwritten extent to be initialized (in the case
2142	 * where we have written into one or more preallocated blocks).  It is
2143	 * possible that we're going to need more metadata blocks than
2144	 * previously reserved. However we must not fail because we're in
2145	 * writeback and there is nothing we can do about it so it might result
2146	 * in data loss.  So use reserved blocks to allocate metadata if
2147	 * possible.
2148	 *
2149	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2150	 * the blocks in question are delalloc blocks.  This indicates
2151	 * that the blocks and quotas have already been checked when
2152	 * the data was copied into the page cache.
2153	 */
2154	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2155			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
2156			   EXT4_GET_BLOCKS_IO_SUBMIT;
2157	dioread_nolock = ext4_should_dioread_nolock(inode);
2158	if (dioread_nolock)
2159		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2160	if (map->m_flags & BIT(BH_Delay))
2161		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2162
2163	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2164	if (err < 0)
2165		return err;
2166	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2167		if (!mpd->io_submit.io_end->handle &&
2168		    ext4_handle_valid(handle)) {
2169			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2170			handle->h_rsv_handle = NULL;
2171		}
2172		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2173	}
2174
2175	BUG_ON(map->m_len == 0);
2176	return 0;
2177}
2178
2179/*
2180 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2181 *				 mpd->len and submit pages underlying it for IO
2182 *
2183 * @handle - handle for journal operations
2184 * @mpd - extent to map
2185 * @give_up_on_write - we set this to true iff there is a fatal error and there
2186 *                     is no hope of writing the data. The caller should discard
2187 *                     dirty pages to avoid infinite loops.
2188 *
2189 * The function maps an extent starting at mpd->lblk of length mpd->len. If it
2190 * is delayed, blocks are allocated; if it is unwritten, we may need to convert
2191 * them to initialized or split the described range out of a larger unwritten
2192 * extent. Note that we need not map all of the described range since allocation
2193 * can return fewer blocks or the range may be covered by more unwritten extents. We
2194 * cannot map more because we are limited by reserved transaction credits. On
2195 * the other hand we always make sure that the last touched page is fully
2196 * mapped so that it can be written out (and thus forward progress is
2197 * guaranteed). After mapping we submit all mapped pages for IO.
2198 */
2199static int mpage_map_and_submit_extent(handle_t *handle,
2200				       struct mpage_da_data *mpd,
2201				       bool *give_up_on_write)
2202{
2203	struct inode *inode = mpd->inode;
2204	struct ext4_map_blocks *map = &mpd->map;
2205	int err;
2206	loff_t disksize;
2207	int progress = 0;
2208	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2209	struct ext4_io_end_vec *io_end_vec;
2210
2211	io_end_vec = ext4_alloc_io_end_vec(io_end);
2212	if (IS_ERR(io_end_vec))
2213		return PTR_ERR(io_end_vec);
2214	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2215	do {
2216		err = mpage_map_one_extent(handle, mpd);
2217		if (err < 0) {
2218			struct super_block *sb = inode->i_sb;
2219
2220			if (ext4_forced_shutdown(sb))
2221				goto invalidate_dirty_pages;
2222			/*
2223			 * Let the upper layers retry transient errors.
2224			 * In the case of ENOSPC, if ext4_count_free_blocks()
2225			 * is non-zero, a commit should free up blocks.
2226			 */
2227			if ((err == -ENOMEM) ||
2228			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2229				if (progress)
2230					goto update_disksize;
2231				return err;
2232			}
2233			ext4_msg(sb, KERN_CRIT,
2234				 "Delayed block allocation failed for "
2235				 "inode %lu at logical offset %llu with"
2236				 " max blocks %u with error %d",
2237				 inode->i_ino,
2238				 (unsigned long long)map->m_lblk,
2239				 (unsigned)map->m_len, -err);
2240			ext4_msg(sb, KERN_CRIT,
2241				 "This should not happen!! Data will "
2242				 "be lost\n");
2243			if (err == -ENOSPC)
2244				ext4_print_free_blocks(inode);
2245		invalidate_dirty_pages:
2246			*give_up_on_write = true;
2247			return err;
2248		}
2249		progress = 1;
2250		/*
2251		 * Update buffer state, submit mapped pages, and get us new
2252		 * extent to map
2253		 */
2254		err = mpage_map_and_submit_buffers(mpd);
2255		if (err < 0)
2256			goto update_disksize;
2257	} while (map->m_len);
2258
2259update_disksize:
2260	/*
2261	 * Update on-disk size after IO is submitted.  Races with
2262	 * truncate are avoided by checking i_size under i_data_sem.
2263	 */
2264	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2265	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2266		int err2;
2267		loff_t i_size;
2268
2269		down_write(&EXT4_I(inode)->i_data_sem);
2270		i_size = i_size_read(inode);
2271		if (disksize > i_size)
2272			disksize = i_size;
2273		if (disksize > EXT4_I(inode)->i_disksize)
2274			EXT4_I(inode)->i_disksize = disksize;
2275		up_write(&EXT4_I(inode)->i_data_sem);
2276		err2 = ext4_mark_inode_dirty(handle, inode);
2277		if (err2) {
2278			ext4_error_err(inode->i_sb, -err2,
2279				       "Failed to mark inode %lu dirty",
2280				       inode->i_ino);
2281		}
2282		if (!err)
2283			err = err2;
2284	}
2285	return err;
2286}
2287
2288/*
2289 * Calculate the total number of credits to reserve for one writepages
2290 * iteration. This is called from ext4_writepages(). We map an extent of
2291 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2292 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2293 * bpp - 1 blocks in bpp different extents.
2294 */
2295static int ext4_da_writepages_trans_blocks(struct inode *inode)
2296{
2297	int bpp = ext4_journal_blocks_per_page(inode);
2298
2299	return ext4_meta_trans_blocks(inode,
2300				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2301}
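/*
 * Numeric sketch (editor's note, assuming PAGE_SIZE 4096 and a 1K block
 * size, so bpp = 4): the reservation above asks for enough credits to
 * map MAX_WRITEPAGES_EXTENT_LEN + 3 blocks spread over up to 4 separate
 * extents, i.e. roughly
 *
 *	int bpp = ext4_journal_blocks_per_page(inode);		// 4 here
 *	int nrblocks = MAX_WRITEPAGES_EXTENT_LEN + bpp - 1;
 *	int credits = ext4_meta_trans_blocks(inode, nrblocks, bpp);
 */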
2302
2303static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2304				     size_t len)
2305{
2306	struct buffer_head *page_bufs = folio_buffers(folio);
2307	struct inode *inode = folio->mapping->host;
2308	int ret, err;
2309
2310	ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2311				     NULL, do_journal_get_write_access);
2312	err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2313				     NULL, write_end_fn);
2314	if (ret == 0)
2315		ret = err;
2316	err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2317	if (ret == 0)
2318		ret = err;
2319	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2320
2321	return ret;
2322}
2323
2324static int mpage_journal_page_buffers(handle_t *handle,
2325				      struct mpage_da_data *mpd,
2326				      struct folio *folio)
2327{
2328	struct inode *inode = mpd->inode;
2329	loff_t size = i_size_read(inode);
2330	size_t len = folio_size(folio);
2331
2332	folio_clear_checked(folio);
2333	mpd->wbc->nr_to_write--;
2334
2335	if (folio_pos(folio) + len > size &&
2336	    !ext4_verity_in_progress(inode))
2337		len = size - folio_pos(folio);
2338
2339	return ext4_journal_folio_buffers(handle, folio, len);
2340}
2341
2342/*
2343 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2344 * 				 needing mapping, submit mapped pages
2345 *
2346 * @mpd - where to look for pages
2347 *
2348 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2349 * IO immediately. If we cannot map blocks, we submit just already mapped
2350 * buffers in the page for IO and keep page dirty. When we can map blocks and
2351 * we find a page which isn't mapped we start accumulating an extent of buffers
2352 * underlying these pages that need mapping (formed by either delayed or
2353 * unwritten buffers). We also lock the pages containing these buffers. The
2354 * extent found is returned in @mpd structure (starting at mpd->lblk with
2355 * length mpd->len blocks).
2356 *
2357 * Note that this function can attach bios to one io_end structure which are
2358 * neither logically nor physically contiguous. Although it may seem like an
2359 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2360 * case as we need to track IO to all buffers underlying a page in one io_end.
2361 */
2362static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2363{
2364	struct address_space *mapping = mpd->inode->i_mapping;
2365	struct folio_batch fbatch;
2366	unsigned int nr_folios;
2367	pgoff_t index = mpd->first_page;
2368	pgoff_t end = mpd->last_page;
2369	xa_mark_t tag;
2370	int i, err = 0;
2371	int blkbits = mpd->inode->i_blkbits;
2372	ext4_lblk_t lblk;
2373	struct buffer_head *head;
2374	handle_t *handle = NULL;
2375	int bpp = ext4_journal_blocks_per_page(mpd->inode);
2376
2377	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2378		tag = PAGECACHE_TAG_TOWRITE;
2379	else
2380		tag = PAGECACHE_TAG_DIRTY;
2381
2382	mpd->map.m_len = 0;
2383	mpd->next_page = index;
2384	if (ext4_should_journal_data(mpd->inode)) {
2385		handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2386					    bpp);
2387		if (IS_ERR(handle))
2388			return PTR_ERR(handle);
2389	}
2390	folio_batch_init(&fbatch);
2391	while (index <= end) {
2392		nr_folios = filemap_get_folios_tag(mapping, &index, end,
2393				tag, &fbatch);
2394		if (nr_folios == 0)
2395			break;
2396
2397		for (i = 0; i < nr_folios; i++) {
2398			struct folio *folio = fbatch.folios[i];
2399
2400			/*
2401			 * Accumulated enough dirty pages? This doesn't apply
2402			 * to WB_SYNC_ALL mode. For integrity sync we have to
2403			 * keep going because someone may be concurrently
2404			 * dirtying pages, and we might have synced a lot of
2405			 * newly appeared dirty pages, but have not synced all
2406			 * of the old dirty pages.
2407			 */
2408			if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2409			    mpd->wbc->nr_to_write <=
2410			    mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2411				goto out;
2412
2413			/* If we can't merge this page, we are done. */
2414			if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2415				goto out;
2416
2417			if (handle) {
2418				err = ext4_journal_ensure_credits(handle, bpp,
2419								  0);
2420				if (err < 0)
2421					goto out;
2422			}
2423
2424			folio_lock(folio);
2425			/*
2426			 * If the page is no longer dirty, or its mapping no
2427			 * longer corresponds to the inode we are writing (which
2428			 * means it has been truncated or invalidated), or the
2429			 * page is already under writeback and we are not doing
2430			 * a data integrity writeback, skip the page
2431			 */
2432			if (!folio_test_dirty(folio) ||
2433			    (folio_test_writeback(folio) &&
2434			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2435			    unlikely(folio->mapping != mapping)) {
2436				folio_unlock(folio);
2437				continue;
2438			}
2439
2440			folio_wait_writeback(folio);
2441			BUG_ON(folio_test_writeback(folio));
2442
2443			/*
2444			 * Should never happen but for buggy code in
2445			 * other subsystems that call
2446			 * set_page_dirty() without properly warning
2447			 * the file system first.  See [1] for more
2448			 * information.
2449			 *
2450			 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2451			 */
2452			if (!folio_buffers(folio)) {
2453				ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2454				folio_clear_dirty(folio);
2455				folio_unlock(folio);
2456				continue;
2457			}
2458
2459			if (mpd->map.m_len == 0)
2460				mpd->first_page = folio->index;
2461			mpd->next_page = folio_next_index(folio);
2462			/*
2463			 * Writeout when we cannot modify metadata is simple.
2464			 * Just submit the page. For data=journal mode we
2465			 * first handle writeout of the page for checkpoint and
2466			 * only after that handle delayed page dirtying. This
2467			 * makes sure current data is checkpointed to the final
2468			 * location before possibly journalling it again which
2469			 * is desirable when the page is frequently dirtied
2470			 * through a pin.
2471			 */
2472			if (!mpd->can_map) {
2473				err = mpage_submit_folio(mpd, folio);
2474				if (err < 0)
2475					goto out;
2476				/* Pending dirtying of journalled data? */
2477				if (folio_test_checked(folio)) {
2478					err = mpage_journal_page_buffers(handle,
2479						mpd, folio);
2480					if (err < 0)
2481						goto out;
2482					mpd->journalled_more_data = 1;
2483				}
2484				mpage_folio_done(mpd, folio);
2485			} else {
2486				/* Add all dirty buffers to mpd */
2487				lblk = ((ext4_lblk_t)folio->index) <<
2488					(PAGE_SHIFT - blkbits);
2489				head = folio_buffers(folio);
2490				err = mpage_process_page_bufs(mpd, head, head,
2491						lblk);
2492				if (err <= 0)
2493					goto out;
2494				err = 0;
2495			}
2496		}
2497		folio_batch_release(&fbatch);
2498		cond_resched();
2499	}
2500	mpd->scanned_until_end = 1;
2501	if (handle)
2502		ext4_journal_stop(handle);
2503	return 0;
2504out:
2505	folio_batch_release(&fbatch);
2506	if (handle)
2507		ext4_journal_stop(handle);
2508	return err;
2509}
2510
2511static int ext4_do_writepages(struct mpage_da_data *mpd)
2512{
2513	struct writeback_control *wbc = mpd->wbc;
2514	pgoff_t	writeback_index = 0;
2515	long nr_to_write = wbc->nr_to_write;
2516	int range_whole = 0;
2517	int cycled = 1;
2518	handle_t *handle = NULL;
2519	struct inode *inode = mpd->inode;
2520	struct address_space *mapping = inode->i_mapping;
2521	int needed_blocks, rsv_blocks = 0, ret = 0;
2522	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2523	struct blk_plug plug;
2524	bool give_up_on_write = false;
2525
2526	trace_ext4_writepages(inode, wbc);
2527
2528	/*
2529	 * No pages to write? This is mainly a kludge to avoid starting
2530	 * a transaction for special inodes like journal inode on last iput()
2531	 * because that could violate lock ordering on umount
2532	 */
2533	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2534		goto out_writepages;
2535
2536	/*
2537	 * If the filesystem has aborted, it is read-only, so return
2538	 * right away instead of dumping stack traces later on that
2539	 * will obscure the real source of the problem.  We test
2540	 * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2541	 * the latter could be true if the filesystem is mounted
2542	 * read-only, and in that case, ext4_writepages should
2543	 * *never* be called, so if that ever happens, we would want
2544	 * the stack trace.
2545	 */
2546	if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2547		ret = -EROFS;
2548		goto out_writepages;
2549	}
2550
2551	/*
2552	 * If we have inline data and arrive here, it means that
2553	 * we will soon create the block for the 1st page, so
2554	 * we'd better clear the inline data here.
2555	 */
2556	if (ext4_has_inline_data(inode)) {
2557		/* Just inode will be modified... */
2558		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2559		if (IS_ERR(handle)) {
2560			ret = PTR_ERR(handle);
2561			goto out_writepages;
2562		}
2563		BUG_ON(ext4_test_inode_state(inode,
2564				EXT4_STATE_MAY_INLINE_DATA));
2565		ext4_destroy_inline_data(handle, inode);
2566		ext4_journal_stop(handle);
2567	}
2568
2569	/*
2570	 * data=journal mode does not do delalloc so we just need to writeout /
2571 * journal already mapped buffers. On the other hand we need to commit the
2572 * transaction to make data stable. We expect all the data to be
2573 * already in the journal (the only exceptions are DMA pinned pages
2574	 * dirtied behind our back) so we commit transaction here and run the
2575	 * writeback loop to checkpoint them. The checkpointing is not actually
2576	 * necessary to make data persistent *but* quite a few places (extent
2577	 * shifting operations, fsverity, ...) depend on being able to drop
2578	 * pagecache pages after calling filemap_write_and_wait() and for that
2579	 * checkpointing needs to happen.
2580	 */
2581	if (ext4_should_journal_data(inode)) {
2582		mpd->can_map = 0;
2583		if (wbc->sync_mode == WB_SYNC_ALL)
2584			ext4_fc_commit(sbi->s_journal,
2585				       EXT4_I(inode)->i_datasync_tid);
2586	}
2587	mpd->journalled_more_data = 0;
2588
2589	if (ext4_should_dioread_nolock(inode)) {
2590		/*
2591		 * We may need to convert up to one extent per block in
2592		 * the page and we may dirty the inode.
2593		 */
2594		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2595						PAGE_SIZE >> inode->i_blkbits);
2596	}
2597
2598	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2599		range_whole = 1;
2600
2601	if (wbc->range_cyclic) {
2602		writeback_index = mapping->writeback_index;
2603		if (writeback_index)
2604			cycled = 0;
2605		mpd->first_page = writeback_index;
2606		mpd->last_page = -1;
2607	} else {
2608		mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2609		mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2610	}
2611
2612	ext4_io_submit_init(&mpd->io_submit, wbc);
2613retry:
2614	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2615		tag_pages_for_writeback(mapping, mpd->first_page,
2616					mpd->last_page);
2617	blk_start_plug(&plug);
2618
2619	/*
2620	 * First writeback pages that don't need mapping - we can avoid
2621	 * starting a transaction unnecessarily and also avoid being blocked
2622	 * in the block layer on device congestion while having transaction
2623	 * started.
2624	 */
2625	mpd->do_map = 0;
2626	mpd->scanned_until_end = 0;
2627	mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2628	if (!mpd->io_submit.io_end) {
2629		ret = -ENOMEM;
2630		goto unplug;
2631	}
2632	ret = mpage_prepare_extent_to_map(mpd);
2633	/* Unlock pages we didn't use */
2634	mpage_release_unused_pages(mpd, false);
2635	/* Submit prepared bio */
2636	ext4_io_submit(&mpd->io_submit);
2637	ext4_put_io_end_defer(mpd->io_submit.io_end);
2638	mpd->io_submit.io_end = NULL;
2639	if (ret < 0)
2640		goto unplug;
2641
2642	while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2643		/* For each extent of pages we use new io_end */
2644		mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2645		if (!mpd->io_submit.io_end) {
2646			ret = -ENOMEM;
2647			break;
2648		}
2649
2650		WARN_ON_ONCE(!mpd->can_map);
2651		/*
2652		 * We have two constraints: We find one extent to map and we
2653		 * must always write out the whole page (makes a difference when
2654		 * blocksize < pagesize) so that we don't block on IO when we
2655		 * try to write out the rest of the page. Journalled mode is
2656		 * not supported by delalloc.
2657		 */
2658		BUG_ON(ext4_should_journal_data(inode));
2659		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2660
2661		/* start a new transaction */
2662		handle = ext4_journal_start_with_reserve(inode,
2663				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2664		if (IS_ERR(handle)) {
2665			ret = PTR_ERR(handle);
2666			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2667			       "%ld pages, ino %lu; err %d", __func__,
2668				wbc->nr_to_write, inode->i_ino, ret);
2669			/* Release allocated io_end */
2670			ext4_put_io_end(mpd->io_submit.io_end);
2671			mpd->io_submit.io_end = NULL;
2672			break;
2673		}
2674		mpd->do_map = 1;
2675
2676		trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2677		ret = mpage_prepare_extent_to_map(mpd);
2678		if (!ret && mpd->map.m_len)
2679			ret = mpage_map_and_submit_extent(handle, mpd,
2680					&give_up_on_write);
2681		/*
2682		 * Caution: If the handle is synchronous,
2683		 * ext4_journal_stop() can wait for transaction commit
2684		 * to finish which may depend on writeback of pages to
2685		 * complete or on page lock to be released.  In that
2686		 * case, we have to wait until after we have
2687		 * submitted all the IO, released page locks we hold,
2688		 * and dropped io_end reference (for extent conversion
2689		 * to be able to complete) before stopping the handle.
2690		 */
2691		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2692			ext4_journal_stop(handle);
2693			handle = NULL;
2694			mpd->do_map = 0;
2695		}
2696		/* Unlock pages we didn't use */
2697		mpage_release_unused_pages(mpd, give_up_on_write);
2698		/* Submit prepared bio */
2699		ext4_io_submit(&mpd->io_submit);
2700
2701		/*
2702		 * Drop our io_end reference we got from init. We have
2703		 * to be careful and use deferred io_end finishing if
2704		 * we are still holding the transaction as we can
2705		 * release the last reference to io_end which may end
2706		 * up doing unwritten extent conversion.
2707		 */
2708		if (handle) {
2709			ext4_put_io_end_defer(mpd->io_submit.io_end);
2710			ext4_journal_stop(handle);
2711		} else
2712			ext4_put_io_end(mpd->io_submit.io_end);
2713		mpd->io_submit.io_end = NULL;
2714
2715		if (ret == -ENOSPC && sbi->s_journal) {
2716			/*
2717			 * Commit the transaction which would
2718			 * free blocks released in the transaction
2719			 * and try again
2720			 */
2721			jbd2_journal_force_commit_nested(sbi->s_journal);
2722			ret = 0;
2723			continue;
2724		}
2725		/* Fatal error - ENOMEM, EIO... */
2726		if (ret)
2727			break;
2728	}
2729unplug:
2730	blk_finish_plug(&plug);
2731	if (!ret && !cycled && wbc->nr_to_write > 0) {
2732		cycled = 1;
2733		mpd->last_page = writeback_index - 1;
2734		mpd->first_page = 0;
2735		goto retry;
2736	}
2737
2738	/* Update index */
2739	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2740		/*
2741		 * Set the writeback_index so that range_cyclic
2742		 * mode will write it back later
2743		 */
2744		mapping->writeback_index = mpd->first_page;
2745
2746out_writepages:
2747	trace_ext4_writepages_result(inode, wbc, ret,
2748				     nr_to_write - wbc->nr_to_write);
2749	return ret;
2750}
2751
2752static int ext4_writepages(struct address_space *mapping,
2753			   struct writeback_control *wbc)
2754{
2755	struct super_block *sb = mapping->host->i_sb;
2756	struct mpage_da_data mpd = {
2757		.inode = mapping->host,
2758		.wbc = wbc,
2759		.can_map = 1,
2760	};
2761	int ret;
2762	int alloc_ctx;
2763
2764	if (unlikely(ext4_forced_shutdown(sb)))
2765		return -EIO;
2766
2767	alloc_ctx = ext4_writepages_down_read(sb);
2768	ret = ext4_do_writepages(&mpd);
2769	/*
2770	 * For data=journal writeback we could have come across pages marked
2771	 * for delayed dirtying (PageChecked) which were just added to the
2772	 * running transaction. Try once more to get them to stable storage.
2773	 */
2774	if (!ret && mpd.journalled_more_data)
2775		ret = ext4_do_writepages(&mpd);
2776	ext4_writepages_up_read(sb, alloc_ctx);
2777
2778	return ret;
2779}
2780
2781int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2782{
2783	struct writeback_control wbc = {
2784		.sync_mode = WB_SYNC_ALL,
2785		.nr_to_write = LONG_MAX,
2786		.range_start = jinode->i_dirty_start,
2787		.range_end = jinode->i_dirty_end,
2788	};
2789	struct mpage_da_data mpd = {
2790		.inode = jinode->i_vfs_inode,
2791		.wbc = &wbc,
2792		.can_map = 0,
2793	};
2794	return ext4_do_writepages(&mpd);
2795}
2796
2797static int ext4_dax_writepages(struct address_space *mapping,
2798			       struct writeback_control *wbc)
2799{
2800	int ret;
2801	long nr_to_write = wbc->nr_to_write;
2802	struct inode *inode = mapping->host;
2803	int alloc_ctx;
2804
2805	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2806		return -EIO;
2807
2808	alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2809	trace_ext4_writepages(inode, wbc);
2810
2811	ret = dax_writeback_mapping_range(mapping,
2812					  EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2813	trace_ext4_writepages_result(inode, wbc, ret,
2814				     nr_to_write - wbc->nr_to_write);
2815	ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2816	return ret;
2817}
2818
2819static int ext4_nonda_switch(struct super_block *sb)
2820{
2821	s64 free_clusters, dirty_clusters;
2822	struct ext4_sb_info *sbi = EXT4_SB(sb);
2823
2824	/*
2825	 * Switch to non-delalloc mode if we are running low
2826	 * on free blocks. The free block accounting via percpu
2827	 * counters can get slightly wrong with percpu_counter_batch getting
2828	 * accumulated on each CPU without updating global counters.
2829	 * Delalloc needs accurate free block accounting. So switch
2830	 * to non-delalloc when we are near the error range.
2831	 */
2832	free_clusters =
2833		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2834	dirty_clusters =
2835		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2836	/*
2837	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2838	 */
2839	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2840		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2841
2842	if (2 * free_clusters < 3 * dirty_clusters ||
2843	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2844		/*
2845		 * free block count is less than 150% of dirty blocks
2846		 * or free block count is less than the watermark
2847		 */
2848		return 1;
2849	}
2850	return 0;
2851}
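/*
 * Numeric sketch (editor's note): with free_clusters = 1000, writeback
 * is nudged as soon as 1000 < 2 * dirty_clusters (dirty_clusters > 500),
 * and the switch to nodelalloc triggers once
 *
 *	2 * 1000 < 3 * dirty_clusters	// i.e. dirty_clusters > 666
 *
 * or once free_clusters drops under dirty_clusters +
 * EXT4_FREECLUSTERS_WATERMARK.
 */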
2852
2853static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2854			       loff_t pos, unsigned len,
2855			       struct page **pagep, void **fsdata)
2856{
2857	int ret, retries = 0;
2858	struct folio *folio;
2859	pgoff_t index;
2860	struct inode *inode = mapping->host;
2861
2862	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2863		return -EIO;
2864
2865	index = pos >> PAGE_SHIFT;
2866
2867	if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2868		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2869		return ext4_write_begin(file, mapping, pos,
2870					len, pagep, fsdata);
2871	}
2872	*fsdata = (void *)0;
2873	trace_ext4_da_write_begin(inode, pos, len);
2874
2875	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2876		ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2877						      pagep, fsdata);
2878		if (ret < 0)
2879			return ret;
2880		if (ret == 1)
2881			return 0;
2882	}
2883
2884retry:
2885	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2886			mapping_gfp_mask(mapping));
2887	if (IS_ERR(folio))
2888		return PTR_ERR(folio);
2889
2890#ifdef CONFIG_FS_ENCRYPTION
2891	ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2892#else
2893	ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2894#endif
2895	if (ret < 0) {
2896		folio_unlock(folio);
2897		folio_put(folio);
2898		/*
2899		 * block_write_begin may have instantiated a few blocks
2900		 * outside i_size.  Trim these off again. We don't need
2901		 * i_size_read because we hold the inode lock.
2902		 */
2903		if (pos + len > inode->i_size)
2904			ext4_truncate_failed_write(inode);
2905
2906		if (ret == -ENOSPC &&
2907		    ext4_should_retry_alloc(inode->i_sb, &retries))
2908			goto retry;
2909		return ret;
2910	}
2911
2912	*pagep = &folio->page;
2913	return ret;
2914}
2915
2916/*
2917 * Check if we should update i_disksize
2918 * when writing to the end of file without requiring block allocation
2919 */
2920static int ext4_da_should_update_i_disksize(struct folio *folio,
2921					    unsigned long offset)
2922{
2923	struct buffer_head *bh;
2924	struct inode *inode = folio->mapping->host;
2925	unsigned int idx;
2926	int i;
2927
2928	bh = folio_buffers(folio);
2929	idx = offset >> inode->i_blkbits;
2930
2931	for (i = 0; i < idx; i++)
2932		bh = bh->b_this_page;
2933
2934	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2935		return 0;
2936	return 1;
2937}
2938
2939static int ext4_da_do_write_end(struct address_space *mapping,
2940			loff_t pos, unsigned len, unsigned copied,
2941			struct folio *folio)
2942{
2943	struct inode *inode = mapping->host;
2944	loff_t old_size = inode->i_size;
2945	bool disksize_changed = false;
2946	loff_t new_i_size;
2947
2948	/*
2949	 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
2950	 * flag, which is all that's needed to trigger page writeback.
2951	 */
2952	copied = block_write_end(NULL, mapping, pos, len, copied,
2953			&folio->page, NULL);
2954	new_i_size = pos + copied;
2955
2956	/*
2957	 * It's important to update i_size while still holding folio lock,
2958	 * because folio writeout could otherwise come in and zero beyond
2959	 * i_size.
2960	 *
2961	 * Since we are holding inode lock, we are sure i_disksize <=
2962	 * i_size. We also know that if i_disksize < i_size, there are
2963	 * delalloc writes pending in the range up to i_size. If the end of
2964	 * the current write is <= i_size, there's no need to touch
2965	 * i_disksize since writeback will push i_disksize up to i_size
2966	 * eventually. If the end of the current write is > i_size and
2967	 * inside an allocated block which ext4_da_should_update_i_disksize()
2968	 * checked, we need to update i_disksize here, as certain
2969	 * ext4_writepages() paths that do not allocate blocks do not update it.
2970	 */
2971	if (new_i_size > inode->i_size) {
2972		unsigned long end;
2973
2974		i_size_write(inode, new_i_size);
2975		end = (new_i_size - 1) & (PAGE_SIZE - 1);
2976		if (copied && ext4_da_should_update_i_disksize(folio, end)) {
2977			ext4_update_i_disksize(inode, new_i_size);
2978			disksize_changed = true;
2979		}
2980	}
2981
2982	folio_unlock(folio);
2983	folio_put(folio);
2984
2985	if (old_size < pos)
2986		pagecache_isize_extended(inode, old_size, pos);
2987
2988	if (disksize_changed) {
2989		handle_t *handle;
2990
2991		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
2992		if (IS_ERR(handle))
2993			return PTR_ERR(handle);
2994		ext4_mark_inode_dirty(handle, inode);
2995		ext4_journal_stop(handle);
2996	}
2997
2998	return copied;
2999}
3000
3001static int ext4_da_write_end(struct file *file,
3002			     struct address_space *mapping,
3003			     loff_t pos, unsigned len, unsigned copied,
3004			     struct page *page, void *fsdata)
3005{
3006	struct inode *inode = mapping->host;
3007	int write_mode = (int)(unsigned long)fsdata;
3008	struct folio *folio = page_folio(page);
3009
3010	if (write_mode == FALL_BACK_TO_NONDELALLOC)
3011		return ext4_write_end(file, mapping, pos,
3012				      len, copied, &folio->page, fsdata);
3013
3014	trace_ext4_da_write_end(inode, pos, len, copied);
3015
3016	if (write_mode != CONVERT_INLINE_DATA &&
3017	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3018	    ext4_has_inline_data(inode))
3019		return ext4_write_inline_data_end(inode, pos, len, copied,
3020						  folio);
3021
3022	if (unlikely(copied < len) && !folio_test_uptodate(folio))
3023		copied = 0;
3024
3025	return ext4_da_do_write_end(mapping, pos, len, copied, folio);
3026}
3027
3028/*
3029 * Force all delayed allocation blocks to be allocated for a given inode.
3030 */
3031int ext4_alloc_da_blocks(struct inode *inode)
3032{
3033	trace_ext4_alloc_da_blocks(inode);
3034
3035	if (!EXT4_I(inode)->i_reserved_data_blocks)
3036		return 0;
3037
3038	/*
3039	 * We do something simple for now.  The filemap_flush() will
3040	 * also start triggering a write of the data blocks, which is
3041	 * not strictly speaking necessary (and for users of
3042	 * laptop_mode, not even desirable).  However, to do otherwise
3043	 * would require replicating code paths in:
3044	 *
3045	 * ext4_writepages() ->
3046	 *    write_cache_pages() ---> (via passed in callback function)
3047	 *        __mpage_da_writepage() -->
3048	 *           mpage_add_bh_to_extent()
3049	 *           mpage_da_map_blocks()
3050	 *
3051	 * The problem is that write_cache_pages(), located in
3052	 * mm/page-writeback.c, marks pages clean in preparation for
3053	 * doing I/O, which is not desirable if we're not planning on
3054	 * doing I/O at all.
3055	 *
3056	 * We could call write_cache_pages(), and then redirty all of
3057	 * the pages by calling redirty_page_for_writepage() but that
3058	 * would be ugly in the extreme.  So instead we would need to
3059	 * replicate parts of the code in the above functions,
3060	 * simplifying them because we wouldn't actually intend to
3061	 * write out the pages, but rather only collect contiguous
3062	 * logical block extents, call the multi-block allocator, and
3063	 * then update the buffer heads with the block allocations.
3064	 *
3065	 * For now, though, we'll cheat by calling filemap_flush(),
3066	 * which will map the blocks, and start the I/O, but not
3067	 * actually wait for the I/O to complete.
3068	 */
3069	return filemap_flush(inode->i_mapping);
3070}
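/*
 * Editor's note: this helper backs the EXT4_IOC_ALLOC_DA_BLKS ioctl and
 * the auto_da_alloc replace-via-rename/replace-via-truncate heuristics,
 * which want delayed allocations resolved early. A user-space sketch
 * (assumes the ioctl number from fs/ext4/ext4.h, which is not an
 * exported uapi header):
 *
 *	ioctl(fd, EXT4_IOC_ALLOC_DA_BLKS);
 */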
3071
3072/*
3073 * bmap() is special.  It gets used by applications such as lilo and by
3074 * the swapper to find the on-disk block of a specific piece of data.
3075 *
3076 * Naturally, this is dangerous if the block concerned is still in the
3077 * journal.  If somebody makes a swapfile on an ext4 data-journaling
3078 * filesystem and enables swap, then they may get a nasty shock when the
3079 * data getting swapped to that swapfile suddenly gets overwritten by
3080 * the original zeros written out previously to the journal and
3081 * awaiting writeback in the kernel's buffer cache.
3082 *
3083 * So, if we see any bmap calls here on a modified, data-journaled file,
3084 * take extra steps to flush any blocks which might be in the cache.
3085 */
3086static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3087{
3088	struct inode *inode = mapping->host;
3089	sector_t ret = 0;
3090
3091	inode_lock_shared(inode);
3092	/*
3093	 * We can get here for an inline file via the FIBMAP ioctl
3094	 */
3095	if (ext4_has_inline_data(inode))
3096		goto out;
3097
3098	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3099	    (test_opt(inode->i_sb, DELALLOC) ||
3100	     ext4_should_journal_data(inode))) {
3101		/*
3102		 * With delalloc or journalled data we want to sync the file so
3103		 * that we can make sure we allocate blocks for the file and the
3104		 * data is in place for the user to see it.
3105		 */
3106		filemap_write_and_wait(mapping);
3107	}
3108
3109	ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3110
3111out:
3112	inode_unlock_shared(inode);
3113	return ret;
3114}
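/*
 * How this is typically reached from user space (editor's sketch, error
 * handling omitted): the FIBMAP ioctl, which requires CAP_SYS_RAWIO,
 * resolves a logical block index to a physical block number via ->bmap():
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	int blk = 0;			// logical block on input
 *	ioctl(fd, FIBMAP, &blk);	// physical block (0 for a hole) on output
 */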
3115
3116static int ext4_read_folio(struct file *file, struct folio *folio)
3117{
3118	int ret = -EAGAIN;
3119	struct inode *inode = folio->mapping->host;
3120
3121	trace_ext4_read_folio(inode, folio);
3122
3123	if (ext4_has_inline_data(inode))
3124		ret = ext4_readpage_inline(inode, folio);
3125
3126	if (ret == -EAGAIN)
3127		return ext4_mpage_readpages(inode, NULL, folio);
3128
3129	return ret;
3130}
3131
3132static void ext4_readahead(struct readahead_control *rac)
3133{
3134	struct inode *inode = rac->mapping->host;
3135
3136	/* If the file has inline data, no need to do readahead. */
3137	if (ext4_has_inline_data(inode))
3138		return;
3139
3140	ext4_mpage_readpages(inode, rac, NULL);
3141}
3142
3143static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3144				size_t length)
3145{
3146	trace_ext4_invalidate_folio(folio, offset, length);
3147
3148	/* No journalling happens on data buffers when this function is used */
3149	WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3150
3151	block_invalidate_folio(folio, offset, length);
3152}
3153
3154static int __ext4_journalled_invalidate_folio(struct folio *folio,
3155					    size_t offset, size_t length)
3156{
3157	journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3158
3159	trace_ext4_journalled_invalidate_folio(folio, offset, length);
3160
3161	/*
3162	 * If it's a full truncate we just forget about the pending dirtying
3163	 */
3164	if (offset == 0 && length == folio_size(folio))
3165		folio_clear_checked(folio);
3166
3167	return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3168}
3169
3170/* Wrapper for aops... */
3171static void ext4_journalled_invalidate_folio(struct folio *folio,
3172					   size_t offset,
3173					   size_t length)
3174{
3175	WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3176}
3177
3178static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3179{
3180	struct inode *inode = folio->mapping->host;
3181	journal_t *journal = EXT4_JOURNAL(inode);
3182
3183	trace_ext4_release_folio(inode, folio);
3184
3185	/* Page has dirty journalled data -> cannot release */
3186	if (folio_test_checked(folio))
3187		return false;
3188	if (journal)
3189		return jbd2_journal_try_to_free_buffers(journal, folio);
3190	else
3191		return try_to_free_buffers(folio);
3192}
3193
3194static bool ext4_inode_datasync_dirty(struct inode *inode)
3195{
3196	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3197
3198	if (journal) {
3199		if (jbd2_transaction_committed(journal,
3200			EXT4_I(inode)->i_datasync_tid))
3201			return false;
3202		if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3203			return !list_empty(&EXT4_I(inode)->i_fc_list);
3204		return true;
3205	}
3206
3207	/* Any metadata buffers to write? */
3208	if (!list_empty(&inode->i_mapping->i_private_list))
3209		return true;
3210	return inode->i_state & I_DIRTY_DATASYNC;
3211}
3212
3213static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3214			   struct ext4_map_blocks *map, loff_t offset,
3215			   loff_t length, unsigned int flags)
3216{
3217	u8 blkbits = inode->i_blkbits;
3218
3219	/*
3220	 * Writes that span EOF might trigger an I/O size update on completion,
3221	 * so consider them to be dirty for the purpose of O_DSYNC, even if
3222	 * no other metadata changes are being made or pending.
3223	 */
3224	iomap->flags = 0;
3225	if (ext4_inode_datasync_dirty(inode) ||
3226	    offset + length > i_size_read(inode))
3227		iomap->flags |= IOMAP_F_DIRTY;
3228
3229	if (map->m_flags & EXT4_MAP_NEW)
3230		iomap->flags |= IOMAP_F_NEW;
3231
3232	if (flags & IOMAP_DAX)
3233		iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3234	else
3235		iomap->bdev = inode->i_sb->s_bdev;
3236	iomap->offset = (u64) map->m_lblk << blkbits;
3237	iomap->length = (u64) map->m_len << blkbits;
3238
3239	if ((map->m_flags & EXT4_MAP_MAPPED) &&
3240	    !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3241		iomap->flags |= IOMAP_F_MERGED;
3242
3243	/*
3244	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3245	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3246	 * set. In order for any allocated unwritten extents to be converted
3247	 * into written extents correctly within the ->end_io() handler, we
3248	 * need to ensure that the iomap->type is set appropriately. Hence
3249	 * we need to check whether the EXT4_MAP_UNWRITTEN bit has
3250	 * been set first.
3251	 */
3252	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3253		iomap->type = IOMAP_UNWRITTEN;
3254		iomap->addr = (u64) map->m_pblk << blkbits;
3255		if (flags & IOMAP_DAX)
3256			iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3257	} else if (map->m_flags & EXT4_MAP_MAPPED) {
3258		iomap->type = IOMAP_MAPPED;
3259		iomap->addr = (u64) map->m_pblk << blkbits;
3260		if (flags & IOMAP_DAX)
3261			iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3262	} else if (map->m_flags & EXT4_MAP_DELAYED) {
3263		iomap->type = IOMAP_DELALLOC;
3264		iomap->addr = IOMAP_NULL_ADDR;
3265	} else {
3266		iomap->type = IOMAP_HOLE;
3267		iomap->addr = IOMAP_NULL_ADDR;
3268	}
3269}
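/*
 * Summary of the translation above (editor's note):
 *
 *	EXT4_MAP_UNWRITTEN	-> IOMAP_UNWRITTEN, real disk address
 *	EXT4_MAP_MAPPED		-> IOMAP_MAPPED,    real disk address
 *	EXT4_MAP_DELAYED	-> IOMAP_DELALLOC,  IOMAP_NULL_ADDR
 *	none of the above	-> IOMAP_HOLE,      IOMAP_NULL_ADDR
 *
 * with UNWRITTEN tested first because direct I/O writes can set both the
 * MAPPED and UNWRITTEN bits at once.
 */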
3270
3271static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3272			    unsigned int flags)
3273{
3274	handle_t *handle;
3275	u8 blkbits = inode->i_blkbits;
3276	int ret, dio_credits, m_flags = 0, retries = 0;
3277
3278	/*
3279	 * Trim the mapping request to the maximum value that we can map at
3280	 * once for direct I/O.
3281	 */
3282	if (map->m_len > DIO_MAX_BLOCKS)
3283		map->m_len = DIO_MAX_BLOCKS;
3284	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3285
3286retry:
3287	/*
3288	 * Either we allocate blocks and then don't get an unwritten extent, in
3289	 * which case we have reserved enough credits; or the blocks are
3290	 * already allocated and unwritten, in which case the extent conversion
3291	 * fits into the credits as well.
3292	 */
3293	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3294	if (IS_ERR(handle))
3295		return PTR_ERR(handle);
3296
3297	/*
3298	 * DAX and direct I/O are the only two operations that are currently
3299	 * supported with IOMAP_WRITE.
3300	 */
3301	WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3302	if (flags & IOMAP_DAX)
3303		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3304	/*
3305	 * We use i_size instead of i_disksize here because delalloc writeback
3306	 * can complete at any point during the I/O and subsequently push the
3307	 * i_disksize out to i_size. This could be beyond where direct I/O is
3308	 * happening and thus expose allocated blocks to direct I/O reads.
3309	 */
3310	else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3311		m_flags = EXT4_GET_BLOCKS_CREATE;
3312	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3313		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3314
3315	ret = ext4_map_blocks(handle, inode, map, m_flags);
3316
3317	/*
3318	 * We cannot fill holes in indirect tree based inodes as that could
3319	 * expose stale data in the case of a crash. Use the magic error code
3320	 * to fall back to buffered I/O.
3321	 */
3322	if (!m_flags && !ret)
3323		ret = -ENOTBLK;
3324
3325	ext4_journal_stop(handle);
3326	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3327		goto retry;
3328
3329	return ret;
3330}
3331
3332
3333static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3334		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3335{
3336	int ret;
3337	struct ext4_map_blocks map;
3338	u8 blkbits = inode->i_blkbits;
3339
3340	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3341		return -EINVAL;
3342
3343	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3344		return -ERANGE;
3345
3346	/*
3347	 * Calculate the first and last logical blocks respectively.
3348	 */
3349	map.m_lblk = offset >> blkbits;
3350	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3351			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3352
3353	if (flags & IOMAP_WRITE) {
3354		/*
3355		 * We check here if the blocks are already allocated; if so, we
3356		 * don't need to start a journal txn and can directly return
3357		 * the mapping information. This could boost performance
3358		 * especially in multi-threaded overwrite requests.
3359		 */
3360		if (offset + length <= i_size_read(inode)) {
3361			ret = ext4_map_blocks(NULL, inode, &map, 0);
3362			if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3363				goto out;
3364		}
3365		ret = ext4_iomap_alloc(inode, &map, flags);
3366	} else {
3367		ret = ext4_map_blocks(NULL, inode, &map, 0);
3368	}
3369
3370	if (ret < 0)
3371		return ret;
3372out:
3373	/*
3374	 * When inline encryption is enabled, sometimes I/O to an encrypted file
3375	 * has to be broken up to guarantee DUN contiguity.  Handle this by
3376	 * limiting the length of the mapping returned.
3377	 */
3378	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3379
3380	ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3381
3382	return 0;
3383}
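/*
 * Worked example (editor's sketch): on a 4K-block filesystem
 * (blkbits = 12), a request with offset = 5000 and length = 10000 covers
 * bytes 5000..14999 and is rounded out to whole blocks as
 *
 *	map.m_lblk = 5000 >> 12 = 1;
 *	map.m_len  = (14999 >> 12) - 1 + 1 = 3;		// blocks 1..3
 */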
3384
3385static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3386		loff_t length, unsigned flags, struct iomap *iomap,
3387		struct iomap *srcmap)
3388{
3389	int ret;
3390
3391	/*
3392	 * Even for writes we don't need to allocate blocks, so just pretend
3393	 * we are reading to save the overhead of starting a transaction.
3394	 */
3395	flags &= ~IOMAP_WRITE;
3396	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3397	WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3398	return ret;
3399}
3400
3401static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3402			  ssize_t written, unsigned flags, struct iomap *iomap)
3403{
3404	/*
3405	 * Check to see whether an error occurred while writing out the data to
3406	 * the allocated blocks. If so, return the magic error code so that we
3407	 * fall back to buffered I/O and attempt to complete the remainder of
3408	 * the I/O. Any blocks that may have been allocated in preparation for
3409	 * the direct I/O will be reused during buffered I/O.
3410	 */
3411	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3412		return -ENOTBLK;
3413
3414	return 0;
3415}
3416
3417const struct iomap_ops ext4_iomap_ops = {
3418	.iomap_begin		= ext4_iomap_begin,
3419	.iomap_end		= ext4_iomap_end,
3420};
3421
3422const struct iomap_ops ext4_iomap_overwrite_ops = {
3423	.iomap_begin		= ext4_iomap_overwrite_begin,
3424	.iomap_end		= ext4_iomap_end,
3425};
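/*
 * Editor's note (hedged): these two tables are what ext4's direct I/O
 * path hands to the generic iomap machinery, roughly as fs/ext4/file.c
 * does for writes:
 *
 *	ret = iomap_dio_rw(iocb, from,
 *			   overwrite ? &ext4_iomap_overwrite_ops
 *				     : &ext4_iomap_ops,
 *			   &ext4_dio_write_ops, ...);
 */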
3426
3427static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3428				   loff_t length, unsigned int flags,
3429				   struct iomap *iomap, struct iomap *srcmap)
3430{
3431	int ret;
3432	struct ext4_map_blocks map;
3433	u8 blkbits = inode->i_blkbits;
3434
3435	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3436		return -EINVAL;
3437
3438	if (ext4_has_inline_data(inode)) {
3439		ret = ext4_inline_data_iomap(inode, iomap);
3440		if (ret != -EAGAIN) {
3441			if (ret == 0 && offset >= iomap->length)
3442				ret = -ENOENT;
3443			return ret;
3444		}
3445	}
3446
3447	/*
3448	 * Calculate the first and last logical blocks respectively.
3449	 */
3450	map.m_lblk = offset >> blkbits;
3451	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3452			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3453
3454	/*
3455	 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3456	 * so handle it here instead of querying ext4_map_blocks(),
3457	 * which would warn about such an offset and return an
3458	 * -EIO error.
3459	 */
3460	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3461		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3462
3463		if (offset >= sbi->s_bitmap_maxbytes) {
3464			map.m_flags = 0;
3465			goto set_iomap;
3466		}
3467	}
3468
3469	ret = ext4_map_blocks(NULL, inode, &map, 0);
3470	if (ret < 0)
3471		return ret;
3472set_iomap:
3473	ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3474
3475	return 0;
3476}
3477
3478const struct iomap_ops ext4_iomap_report_ops = {
3479	.iomap_begin = ext4_iomap_begin_report,
3480};
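/*
 * Editor's note (hedged): the report ops are the read-only flavour used
 * for queries rather than I/O, e.g. FIEMAP and SEEK_HOLE/SEEK_DATA,
 * along the lines of
 *
 *	error = iomap_fiemap(inode, fieinfo, start, len,
 *			     &ext4_iomap_report_ops);
 */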
3481
3482/*
3483 * For data=journal mode, folio should be marked dirty only when it was
3484 * writeably mapped. When that happens, it was already attached to the
3485 * transaction and marked as jbddirty (we take care of this in
3486 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3487 * so we should have nothing to do here, except for the case when someone
3488 * had the page pinned and dirtied the page through this pin (e.g. by doing
3489 * direct IO to it). In that case we'd need to attach buffers here to the
3490 * transaction but we cannot due to lock ordering.  We cannot just dirty the
3491 * folio and leave attached buffers clean, because the buffers' dirty state is
3492 * "definitive".  We cannot just set the buffers dirty or jbddirty because all
3493 * the journalling code will explode.  So what we do is to mark the folio
3494 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3495 * to the transaction appropriately.
3496 */
3497static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3498		struct folio *folio)
3499{
3500	WARN_ON_ONCE(!folio_buffers(folio));
3501	if (folio_maybe_dma_pinned(folio))
3502		folio_set_checked(folio);
3503	return filemap_dirty_folio(mapping, folio);
3504}
3505
3506static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3507{
3508	WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3509	WARN_ON_ONCE(!folio_buffers(folio));
3510	return block_dirty_folio(mapping, folio);
3511}
3512
3513static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3514				    struct file *file, sector_t *span)
3515{
3516	return iomap_swapfile_activate(sis, file, span,
3517				       &ext4_iomap_report_ops);
3518}
3519
3520static const struct address_space_operations ext4_aops = {
3521	.read_folio		= ext4_read_folio,
3522	.readahead		= ext4_readahead,
3523	.writepages		= ext4_writepages,
3524	.write_begin		= ext4_write_begin,
3525	.write_end		= ext4_write_end,
3526	.dirty_folio		= ext4_dirty_folio,
3527	.bmap			= ext4_bmap,
3528	.invalidate_folio	= ext4_invalidate_folio,
3529	.release_folio		= ext4_release_folio,
3530	.direct_IO		= noop_direct_IO,
3531	.migrate_folio		= buffer_migrate_folio,
3532	.is_partially_uptodate  = block_is_partially_uptodate,
3533	.error_remove_folio	= generic_error_remove_folio,
3534	.swap_activate		= ext4_iomap_swap_activate,
3535};
3536
3537static const struct address_space_operations ext4_journalled_aops = {
3538	.read_folio		= ext4_read_folio,
3539	.readahead		= ext4_readahead,
3540	.writepages		= ext4_writepages,
3541	.write_begin		= ext4_write_begin,
3542	.write_end		= ext4_journalled_write_end,
3543	.dirty_folio		= ext4_journalled_dirty_folio,
3544	.bmap			= ext4_bmap,
3545	.invalidate_folio	= ext4_journalled_invalidate_folio,
3546	.release_folio		= ext4_release_folio,
3547	.direct_IO		= noop_direct_IO,
3548	.migrate_folio		= buffer_migrate_folio_norefs,
3549	.is_partially_uptodate  = block_is_partially_uptodate,
3550	.error_remove_folio	= generic_error_remove_folio,
3551	.swap_activate		= ext4_iomap_swap_activate,
3552};
3553
3554static const struct address_space_operations ext4_da_aops = {
3555	.read_folio		= ext4_read_folio,
3556	.readahead		= ext4_readahead,
3557	.writepages		= ext4_writepages,
3558	.write_begin		= ext4_da_write_begin,
3559	.write_end		= ext4_da_write_end,
3560	.dirty_folio		= ext4_dirty_folio,
3561	.bmap			= ext4_bmap,
3562	.invalidate_folio	= ext4_invalidate_folio,
3563	.release_folio		= ext4_release_folio,
3564	.direct_IO		= noop_direct_IO,
3565	.migrate_folio		= buffer_migrate_folio,
3566	.is_partially_uptodate  = block_is_partially_uptodate,
3567	.error_remove_folio	= generic_error_remove_folio,
3568	.swap_activate		= ext4_iomap_swap_activate,
3569};
3570
3571static const struct address_space_operations ext4_dax_aops = {
3572	.writepages		= ext4_dax_writepages,
3573	.direct_IO		= noop_direct_IO,
3574	.dirty_folio		= noop_dirty_folio,
3575	.bmap			= ext4_bmap,
3576	.swap_activate		= ext4_iomap_swap_activate,
3577};
3578
3579void ext4_set_aops(struct inode *inode)
3580{
3581	switch (ext4_inode_journal_mode(inode)) {
3582	case EXT4_INODE_ORDERED_DATA_MODE:
3583	case EXT4_INODE_WRITEBACK_DATA_MODE:
3584		break;
3585	case EXT4_INODE_JOURNAL_DATA_MODE:
3586		inode->i_mapping->a_ops = &ext4_journalled_aops;
3587		return;
3588	default:
3589		BUG();
3590	}
3591	if (IS_DAX(inode))
3592		inode->i_mapping->a_ops = &ext4_dax_aops;
3593	else if (test_opt(inode->i_sb, DELALLOC))
3594		inode->i_mapping->a_ops = &ext4_da_aops;
3595	else
3596		inode->i_mapping->a_ops = &ext4_aops;
3597}
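/*
 * Selection summary (editor's note), restating the switch above:
 *
 *	data=journal			-> ext4_journalled_aops
 *	DAX inode			-> ext4_dax_aops
 *	delalloc, ordered/writeback	-> ext4_da_aops
 *	nodelalloc, ordered/writeback	-> ext4_aops
 */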
3598
3599/*
3600 * Here we can't skip an unwritten buffer even though it usually reads zero
3601 * because it might have data in pagecache (e.g., if called from ext4_zero_range,
3602 * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
3603 * racing writeback can come later and flush the stale pagecache to disk.
3604 */
3605static int __ext4_block_zero_page_range(handle_t *handle,
3606		struct address_space *mapping, loff_t from, loff_t length)
3607{
3608	ext4_fsblk_t index = from >> PAGE_SHIFT;
3609	unsigned offset = from & (PAGE_SIZE-1);
3610	unsigned blocksize, pos;
3611	ext4_lblk_t iblock;
3612	struct inode *inode = mapping->host;
3613	struct buffer_head *bh;
3614	struct folio *folio;
3615	int err = 0;
3616
3617	folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3618				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3619				    mapping_gfp_constraint(mapping, ~__GFP_FS));
3620	if (IS_ERR(folio))
3621		return PTR_ERR(folio);
3622
3623	blocksize = inode->i_sb->s_blocksize;
3624
3625	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3626
3627	bh = folio_buffers(folio);
3628	if (!bh)
3629		bh = create_empty_buffers(folio, blocksize, 0);
3630
3631	/* Find the buffer that contains "offset" */
3632	pos = blocksize;
3633	while (offset >= pos) {
3634		bh = bh->b_this_page;
3635		iblock++;
3636		pos += blocksize;
3637	}
3638	if (buffer_freed(bh)) {
3639		BUFFER_TRACE(bh, "freed: skip");
3640		goto unlock;
3641	}
3642	if (!buffer_mapped(bh)) {
3643		BUFFER_TRACE(bh, "unmapped");
3644		ext4_get_block(inode, iblock, bh, 0);
3645		/* unmapped? It's a hole - nothing to do */
3646		if (!buffer_mapped(bh)) {
3647			BUFFER_TRACE(bh, "still unmapped");
3648			goto unlock;
3649		}
3650	}
3651
3652	/* Ok, it's mapped. Make sure it's up-to-date */
3653	if (folio_test_uptodate(folio))
3654		set_buffer_uptodate(bh);
3655
3656	if (!buffer_uptodate(bh)) {
3657		err = ext4_read_bh_lock(bh, 0, true);
3658		if (err)
3659			goto unlock;
3660		if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3661			/* We expect the key to be set. */
3662			BUG_ON(!fscrypt_has_encryption_key(inode));
3663			err = fscrypt_decrypt_pagecache_blocks(folio,
3664							       blocksize,
3665							       bh_offset(bh));
3666			if (err) {
3667				clear_buffer_uptodate(bh);
3668				goto unlock;
3669			}
3670		}
3671	}
3672	if (ext4_should_journal_data(inode)) {
3673		BUFFER_TRACE(bh, "get write access");
3674		err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3675						    EXT4_JTR_NONE);
3676		if (err)
3677			goto unlock;
3678	}
3679	folio_zero_range(folio, offset, length);
3680	BUFFER_TRACE(bh, "zeroed end of block");
3681
3682	if (ext4_should_journal_data(inode)) {
3683		err = ext4_dirty_journalled_data(handle, bh);
3684	} else {
3685		err = 0;
3686		mark_buffer_dirty(bh);
3687		if (ext4_should_order_data(inode))
3688			err = ext4_jbd2_inode_add_write(handle, inode, from,
3689					length);
3690	}
3691
3692unlock:
3693	folio_unlock(folio);
3694	folio_put(folio);
3695	return err;
3696}
3697
3698/*
3699 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3700 * starting from file offset 'from'.  The range to be zeroed must
3701 * be contained within one block.  If the specified range exceeds
3702 * the end of the block, it will be shortened to the end of the block
3703 * that corresponds to 'from'.
3704 */
3705static int ext4_block_zero_page_range(handle_t *handle,
3706		struct address_space *mapping, loff_t from, loff_t length)
3707{
3708	struct inode *inode = mapping->host;
3709	unsigned offset = from & (PAGE_SIZE-1);
3710	unsigned blocksize = inode->i_sb->s_blocksize;
3711	unsigned max = blocksize - (offset & (blocksize - 1));
3712
3713	/*
3714	 * correct length if it does not fall between
3715	 * 'from' and the end of the block
3716	 */
3717	if (length > max || length < 0)
3718		length = max;
3719
3720	if (IS_DAX(inode)) {
3721		return dax_zero_range(inode, from, length, NULL,
3722				      &ext4_iomap_ops);
3723	}
3724	return __ext4_block_zero_page_range(handle, mapping, from, length);
3725}
3726
3727/*
3728 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3729 * up to the end of the block which corresponds to `from'.
3730 * This is required during truncate. We need to physically zero the tail end
3731 * of that block so it doesn't yield old data if the file is later grown.
3732 */
3733static int ext4_block_truncate_page(handle_t *handle,
3734		struct address_space *mapping, loff_t from)
3735{
3736	unsigned offset = from & (PAGE_SIZE-1);
3737	unsigned length;
3738	unsigned blocksize;
3739	struct inode *inode = mapping->host;
3740
3741	/* If we are processing an encrypted inode during orphan list handling */
3742	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3743		return 0;
3744
3745	blocksize = inode->i_sb->s_blocksize;
3746	length = blocksize - (offset & (blocksize - 1));
3747
3748	return ext4_block_zero_page_range(handle, mapping, from, length);
3749}
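/*
 * Worked example (editor's sketch): with 4K pages and 1K blocks,
 * truncating at from = 2500 zeroes the tail of the block holding that
 * byte:
 *
 *	offset = 2500 & (PAGE_SIZE - 1) = 2500;
 *	length = 1024 - (2500 & 1023)   = 1024 - 452 = 572;
 *
 * i.e. bytes 2500..3071, the remainder of block 2, are zeroed.
 */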
3750
3751int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3752			     loff_t lstart, loff_t length)
3753{
3754	struct super_block *sb = inode->i_sb;
3755	struct address_space *mapping = inode->i_mapping;
3756	unsigned partial_start, partial_end;
3757	ext4_fsblk_t start, end;
3758	loff_t byte_end = (lstart + length - 1);
3759	int err = 0;
3760
3761	partial_start = lstart & (sb->s_blocksize - 1);
3762	partial_end = byte_end & (sb->s_blocksize - 1);
3763
3764	start = lstart >> sb->s_blocksize_bits;
3765	end = byte_end >> sb->s_blocksize_bits;
3766
3767	/* Handle partial zero within the single block */
3768	if (start == end &&
3769	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
3770		err = ext4_block_zero_page_range(handle, mapping,
3771						 lstart, length);
3772		return err;
3773	}
3774	/* Handle partial zero out on the start of the range */
3775	if (partial_start) {
3776		err = ext4_block_zero_page_range(handle, mapping,
3777						 lstart, sb->s_blocksize);
3778		if (err)
3779			return err;
3780	}
3781	/* Handle partial zero out on the end of the range */
3782	if (partial_end != sb->s_blocksize - 1)
3783		err = ext4_block_zero_page_range(handle, mapping,
3784						 byte_end - partial_end,
3785						 partial_end + 1);
3786	return err;
3787}
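
/*
 * Illustrative sketch (standalone userspace C, not kernel code): how
 * ext4_zero_partial_blocks() splits a request into at most two
 * partial-block ranges. split_partial_blocks() is a hypothetical name;
 * blocksize is assumed to be a power of two (e.g. 4096).
 */
#include <stdint.h>
#include <stdio.h>

static void split_partial_blocks(uint64_t lstart, uint64_t length,
				 unsigned int blocksize)
{
	uint64_t byte_end = lstart + length - 1;
	unsigned int partial_start = lstart & (blocksize - 1);
	unsigned int partial_end = byte_end & (blocksize - 1);

	if (lstart / blocksize == byte_end / blocksize) {
		/* Both ends in one block: zero a single range, if partial. */
		if (partial_start || partial_end != blocksize - 1)
			printf("zero [%llu, %llu]\n",
			       (unsigned long long)lstart,
			       (unsigned long long)byte_end);
		return;
	}
	if (partial_start)	/* unaligned head, up to its block's end */
		printf("zero head [%llu, %llu]\n",
		       (unsigned long long)lstart,
		       (unsigned long long)(lstart | (blocksize - 1)));
	if (partial_end != blocksize - 1)	/* unaligned tail */
		printf("zero tail [%llu, %llu]\n",
		       (unsigned long long)(byte_end - partial_end),
		       (unsigned long long)byte_end);
}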
3788
3789int ext4_can_truncate(struct inode *inode)
3790{
3791	if (S_ISREG(inode->i_mode))
3792		return 1;
3793	if (S_ISDIR(inode->i_mode))
3794		return 1;
3795	if (S_ISLNK(inode->i_mode))
3796		return !ext4_inode_is_fast_symlink(inode);
3797	return 0;
3798}
3799
3800/*
3801 * We have to make sure i_disksize gets properly updated before we truncate
3802 * page cache due to hole punching or zero range. Otherwise the i_disksize
3803 * update can get lost as it may have been postponed until writeback
3804 * submission, which will never happen after we truncate the page cache.
3805 */
3806int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3807				      loff_t len)
3808{
3809	handle_t *handle;
3810	int ret;
3811
3812	loff_t size = i_size_read(inode);
3813
3814	WARN_ON(!inode_is_locked(inode));
3815	if (offset > size || offset + len < size)
3816		return 0;
3817
3818	if (EXT4_I(inode)->i_disksize >= size)
3819		return 0;
3820
3821	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3822	if (IS_ERR(handle))
3823		return PTR_ERR(handle);
3824	ext4_update_i_disksize(inode, size);
3825	ret = ext4_mark_inode_dirty(handle, inode);
3826	ext4_journal_stop(handle);
3827
3828	return ret;
3829}
3830
3831static void ext4_wait_dax_page(struct inode *inode)
3832{
3833	filemap_invalidate_unlock(inode->i_mapping);
3834	schedule();
3835	filemap_invalidate_lock(inode->i_mapping);
3836}
3837
3838int ext4_break_layouts(struct inode *inode)
3839{
3840	struct page *page;
3841	int error;
3842
3843	if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3844		return -EINVAL;
3845
3846	do {
3847		page = dax_layout_busy_page(inode->i_mapping);
3848		if (!page)
3849			return 0;
3850
3851		error = ___wait_var_event(&page->_refcount,
3852				atomic_read(&page->_refcount) == 1,
3853				TASK_INTERRUPTIBLE, 0, 0,
3854				ext4_wait_dax_page(inode));
3855	} while (error == 0);
3856
3857	return error;
3858}
3859
3860/*
3861 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3862 * associated with the given offset and length
3863 *
3864 * @inode:  File inode
3865 * @offset: The offset where the hole will begin
3866 * @len:    The length of the hole
3867 *
3868 * Returns: 0 on success or negative on failure
3869 */
3870
3871int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3872{
3873	struct inode *inode = file_inode(file);
3874	struct super_block *sb = inode->i_sb;
3875	ext4_lblk_t first_block, stop_block;
3876	struct address_space *mapping = inode->i_mapping;
3877	loff_t first_block_offset, last_block_offset, max_length;
3878	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3879	handle_t *handle;
3880	unsigned int credits;
3881	int ret = 0, ret2 = 0;
3882
3883	trace_ext4_punch_hole(inode, offset, length, 0);
3884
3885	/*
3886	 * Write out all dirty pages to avoid race conditions
3887	 * Then release them.
3888	 */
3889	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3890		ret = filemap_write_and_wait_range(mapping, offset,
3891						   offset + length - 1);
3892		if (ret)
3893			return ret;
3894	}
3895
3896	inode_lock(inode);
3897
3898	/* No need to punch hole beyond i_size */
3899	if (offset >= inode->i_size)
3900		goto out_mutex;
3901
3902	/*
3903	 * If the hole extends beyond i_size, set the hole
3904	 * to end after the page that contains i_size
3905	 */
3906	if (offset + length > inode->i_size) {
3907		length = inode->i_size +
3908		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3909		   offset;
3910	}
3911
3912	/*
3913	 * For punch hole, offset + length must end at least one block before
3914	 * the maximum byte limit. Adjust the length if it goes beyond that limit.
3915	 */
3916	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3917	if (offset + length > max_length)
3918		length = max_length - offset;
3919
3920	if (offset & (sb->s_blocksize - 1) ||
3921	    (offset + length) & (sb->s_blocksize - 1)) {
3922		/*
3923		 * Attach jinode to inode for jbd2 if we do any zeroing of
3924		 * partial block
3925		 */
3926		ret = ext4_inode_attach_jinode(inode);
3927		if (ret < 0)
3928			goto out_mutex;
3929
3930	}
3931
3932	/* Wait for all existing DIO workers; newcomers will block on i_rwsem */
3933	inode_dio_wait(inode);
3934
3935	ret = file_modified(file);
3936	if (ret)
3937		goto out_mutex;
3938
3939	/*
3940	 * Prevent page faults from reinstantiating pages we have released from
3941	 * page cache.
3942	 */
3943	filemap_invalidate_lock(mapping);
3944
3945	ret = ext4_break_layouts(inode);
3946	if (ret)
3947		goto out_dio;
3948
3949	first_block_offset = round_up(offset, sb->s_blocksize);
3950	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3951
3952	/* Now release the pages and zero the block-aligned part of the pages */
3953	if (last_block_offset > first_block_offset) {
3954		ret = ext4_update_disksize_before_punch(inode, offset, length);
3955		if (ret)
3956			goto out_dio;
3957		truncate_pagecache_range(inode, first_block_offset,
3958					 last_block_offset);
3959	}
3960
3961	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3962		credits = ext4_writepage_trans_blocks(inode);
3963	else
3964		credits = ext4_blocks_for_truncate(inode);
3965	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3966	if (IS_ERR(handle)) {
3967		ret = PTR_ERR(handle);
3968		ext4_std_error(sb, ret);
3969		goto out_dio;
3970	}
3971
3972	ret = ext4_zero_partial_blocks(handle, inode, offset,
3973				       length);
3974	if (ret)
3975		goto out_stop;
3976
3977	first_block = (offset + sb->s_blocksize - 1) >>
3978		EXT4_BLOCK_SIZE_BITS(sb);
3979	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3980
3981	/* If there are blocks to remove, do it */
3982	if (stop_block > first_block) {
3983		ext4_lblk_t hole_len = stop_block - first_block;
3984
3985		down_write(&EXT4_I(inode)->i_data_sem);
3986		ext4_discard_preallocations(inode);
3987
3988		ext4_es_remove_extent(inode, first_block, hole_len);
3989
3990		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3991			ret = ext4_ext_remove_space(inode, first_block,
3992						    stop_block - 1);
3993		else
3994			ret = ext4_ind_remove_space(handle, inode, first_block,
3995						    stop_block);
3996
3997		ext4_es_insert_extent(inode, first_block, hole_len, ~0,
3998				      EXTENT_STATUS_HOLE);
3999		up_write(&EXT4_I(inode)->i_data_sem);
4000	}
4001	ext4_fc_track_range(handle, inode, first_block, stop_block);
4002	if (IS_SYNC(inode))
4003		ext4_handle_sync(handle);
4004
4005	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4006	ret2 = ext4_mark_inode_dirty(handle, inode);
4007	if (unlikely(ret2))
4008		ret = ret2;
4009	if (ret >= 0)
4010		ext4_update_inode_fsync_trans(handle, inode, 1);
4011out_stop:
4012	ext4_journal_stop(handle);
4013out_dio:
4014	filemap_invalidate_unlock(mapping);
4015out_mutex:
4016	inode_unlock(inode);
4017	return ret;
4018}
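
/*
 * Usage sketch (userspace, not kernel code): ext4_punch_hole() is
 * typically reached via fallocate(2) with FALLOC_FL_PUNCH_HOLE. The
 * path, offset and length below are hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int punch_hole_demo(const char *path)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* Deallocate 1 MiB starting at offset 4096; reads return zeroes. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}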
4019
4020int ext4_inode_attach_jinode(struct inode *inode)
4021{
4022	struct ext4_inode_info *ei = EXT4_I(inode);
4023	struct jbd2_inode *jinode;
4024
4025	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4026		return 0;
4027
4028	jinode = jbd2_alloc_inode(GFP_KERNEL);
4029	spin_lock(&inode->i_lock);
4030	if (!ei->jinode) {
4031		if (!jinode) {
4032			spin_unlock(&inode->i_lock);
4033			return -ENOMEM;
4034		}
4035		ei->jinode = jinode;
4036		jbd2_journal_init_jbd_inode(ei->jinode, inode);
4037		jinode = NULL;
4038	}
4039	spin_unlock(&inode->i_lock);
4040	if (unlikely(jinode != NULL))
4041		jbd2_free_inode(jinode);
4042	return 0;
4043}
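
/*
 * The function above follows the usual allocate-then-recheck pattern:
 * jbd2_alloc_inode(GFP_KERNEL) may sleep, so it is called before taking
 * i_lock; the spinlock only covers rechecking ei->jinode and publishing
 * the new one, and a caller that loses the race frees its now-unneeded
 * allocation after dropping the lock.
 */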
4044
4045/*
4046 * ext4_truncate()
4047 *
4048 * We block out ext4_get_block() block instantiations across the entire
4049 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4050 * simultaneously on behalf of the same inode.
4051 *
4052 * As we work through the truncate and commit bits of it to the journal there
4053 * is one core, guiding principle: the file's tree must always be consistent on
4054 * disk.  We must be able to restart the truncate after a crash.
4055 *
4056 * The file's tree may be transiently inconsistent in memory (although it
4057 * probably isn't), but whenever we close off and commit a journal transaction,
4058 * the contents of (the filesystem + the journal) must be consistent and
4059 * restartable.  It's pretty simple, really: bottom up, right to left (although
4060 * left-to-right works OK too).
4061 *
4062 * Note that at recovery time, journal replay occurs *before* the restart of
4063 * truncate against the orphan inode list.
4064 *
4065 * The committed inode has the new, desired i_size (which is the same as
4066 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4067 * that this inode's truncate did not complete and it will again call
4068 * ext4_truncate() to have another go.  So there will be instantiated blocks
4069 * to the right of the truncation point in a crashed ext4 filesystem.  But
4070 * that's fine - as long as they are linked from the inode, the post-crash
4071 * ext4_truncate() run will find them and release them.
4072 */
4073int ext4_truncate(struct inode *inode)
4074{
4075	struct ext4_inode_info *ei = EXT4_I(inode);
4076	unsigned int credits;
4077	int err = 0, err2;
4078	handle_t *handle;
4079	struct address_space *mapping = inode->i_mapping;
4080
4081	/*
4082	 * There is a possibility that we're either freeing the inode
4083	 * or it's a completely new inode. In those cases we might not
4084	 * have i_rwsem locked because it's not necessary.
4085	 */
4086	if (!(inode->i_state & (I_NEW|I_FREEING)))
4087		WARN_ON(!inode_is_locked(inode));
4088	trace_ext4_truncate_enter(inode);
4089
4090	if (!ext4_can_truncate(inode))
4091		goto out_trace;
4092
4093	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4094		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4095
4096	if (ext4_has_inline_data(inode)) {
4097		int has_inline = 1;
4098
4099		err = ext4_inline_data_truncate(inode, &has_inline);
4100		if (err || has_inline)
4101			goto out_trace;
4102	}
4103
4104	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4105	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4106		err = ext4_inode_attach_jinode(inode);
4107		if (err)
4108			goto out_trace;
4109	}
4110
4111	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4112		credits = ext4_writepage_trans_blocks(inode);
4113	else
4114		credits = ext4_blocks_for_truncate(inode);
4115
4116	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4117	if (IS_ERR(handle)) {
4118		err = PTR_ERR(handle);
4119		goto out_trace;
4120	}
4121
4122	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4123		ext4_block_truncate_page(handle, mapping, inode->i_size);
4124
4125	/*
4126	 * We add the inode to the orphan list, so that if this
4127	 * truncate spans multiple transactions, and we crash, we will
4128	 * resume the truncate when the filesystem recovers.  It also
4129	 * marks the inode dirty, to catch the new size.
4130	 *
4131	 * Implication: the file must always be in a sane, consistent
4132	 * truncatable state while each transaction commits.
4133	 */
4134	err = ext4_orphan_add(handle, inode);
4135	if (err)
4136		goto out_stop;
4137
4138	down_write(&EXT4_I(inode)->i_data_sem);
4139
4140	ext4_discard_preallocations(inode);
4141
4142	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4143		err = ext4_ext_truncate(handle, inode);
4144	else
4145		ext4_ind_truncate(handle, inode);
4146
4147	up_write(&ei->i_data_sem);
4148	if (err)
4149		goto out_stop;
4150
4151	if (IS_SYNC(inode))
4152		ext4_handle_sync(handle);
4153
4154out_stop:
4155	/*
4156	 * If this was a simple ftruncate() and the file will remain alive,
4157	 * then we need to clear up the orphan record which we created above.
4158	 * However, if this was a real unlink then we were called by
4159	 * ext4_evict_inode(), and we allow that function to clean up the
4160	 * orphan info for us.
4161	 */
4162	if (inode->i_nlink)
4163		ext4_orphan_del(handle, inode);
4164
4165	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4166	err2 = ext4_mark_inode_dirty(handle, inode);
4167	if (unlikely(err2 && !err))
4168		err = err2;
4169	ext4_journal_stop(handle);
4170
4171out_trace:
4172	trace_ext4_truncate_exit(inode);
4173	return err;
4174}
4175
4176static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4177{
4178	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4179		return inode_peek_iversion_raw(inode);
4180	else
4181		return inode_peek_iversion(inode);
4182}
4183
4184static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4185				 struct ext4_inode_info *ei)
4186{
4187	struct inode *inode = &(ei->vfs_inode);
4188	u64 i_blocks = READ_ONCE(inode->i_blocks);
4189	struct super_block *sb = inode->i_sb;
4190
4191	if (i_blocks <= ~0U) {
4192		/*
4193		 * i_blocks can be represented in a 32 bit variable
4194		 * as multiple of 512 bytes
4195		 */
4196		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4197		raw_inode->i_blocks_high = 0;
4198		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4199		return 0;
4200	}
4201
4202	/*
4203	 * This should never happen since sb->s_maxbytes should not have
4204	 * allowed this; sb->s_maxbytes was set according to the huge_file
4205	 * feature in ext4_fill_super().
4206	 */
4207	if (!ext4_has_feature_huge_file(sb))
4208		return -EFSCORRUPTED;
4209
4210	if (i_blocks <= 0xffffffffffffULL) {
4211		/*
4212		 * i_blocks can be represented in a 48 bit variable
4213		 * as multiple of 512 bytes
4214		 */
4215		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4216		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4217		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4218	} else {
4219		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4220		/* i_blocks is stored in units of the filesystem block size */
4221		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4222		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4223		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4224	}
4225	return 0;
4226}
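
/*
 * Worked example (standalone sketch, not kernel code) of the 48-bit
 * split used above: encode_i_blocks_48() is a hypothetical helper that
 * mirrors the i_blocks_lo/i_blocks_high layout.
 */
#include <stdint.h>

struct raw_blocks {
	uint32_t lo;	/* like i_blocks_lo */
	uint16_t high;	/* like i_blocks_high */
};

static struct raw_blocks encode_i_blocks_48(uint64_t i_blocks)
{
	struct raw_blocks r;

	/* e.g. i_blocks = 0x123456789ULL: lo = 0x23456789, high = 0x1 */
	r.lo = (uint32_t)i_blocks;
	r.high = (uint16_t)(i_blocks >> 32);
	return r;
}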
4227
4228static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4229{
4230	struct ext4_inode_info *ei = EXT4_I(inode);
4231	uid_t i_uid;
4232	gid_t i_gid;
4233	projid_t i_projid;
4234	int block;
4235	int err;
4236
4237	err = ext4_inode_blocks_set(raw_inode, ei);
4238
4239	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4240	i_uid = i_uid_read(inode);
4241	i_gid = i_gid_read(inode);
4242	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4243	if (!(test_opt(inode->i_sb, NO_UID32))) {
4244		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4245		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4246		/*
4247		 * Fix up interoperability with old kernels. Otherwise,
4248		 * old inodes get re-used with the upper 16 bits of the
4249		 * uid/gid intact.
4250		 */
4251		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4252			raw_inode->i_uid_high = 0;
4253			raw_inode->i_gid_high = 0;
4254		} else {
4255			raw_inode->i_uid_high =
4256				cpu_to_le16(high_16_bits(i_uid));
4257			raw_inode->i_gid_high =
4258				cpu_to_le16(high_16_bits(i_gid));
4259		}
4260	} else {
4261		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4262		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4263		raw_inode->i_uid_high = 0;
4264		raw_inode->i_gid_high = 0;
4265	}
4266	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4267
4268	EXT4_INODE_SET_CTIME(inode, raw_inode);
4269	EXT4_INODE_SET_MTIME(inode, raw_inode);
4270	EXT4_INODE_SET_ATIME(inode, raw_inode);
4271	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4272
4273	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4274	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4275	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4276		raw_inode->i_file_acl_high =
4277			cpu_to_le16(ei->i_file_acl >> 32);
4278	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4279	ext4_isize_set(raw_inode, ei->i_disksize);
4280
4281	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4282	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4283		if (old_valid_dev(inode->i_rdev)) {
4284			raw_inode->i_block[0] =
4285				cpu_to_le32(old_encode_dev(inode->i_rdev));
4286			raw_inode->i_block[1] = 0;
4287		} else {
4288			raw_inode->i_block[0] = 0;
4289			raw_inode->i_block[1] =
4290				cpu_to_le32(new_encode_dev(inode->i_rdev));
4291			raw_inode->i_block[2] = 0;
4292		}
4293	} else if (!ext4_has_inline_data(inode)) {
4294		for (block = 0; block < EXT4_N_BLOCKS; block++)
4295			raw_inode->i_block[block] = ei->i_data[block];
4296	}
4297
4298	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4299		u64 ivers = ext4_inode_peek_iversion(inode);
4300
4301		raw_inode->i_disk_version = cpu_to_le32(ivers);
4302		if (ei->i_extra_isize) {
4303			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4304				raw_inode->i_version_hi =
4305					cpu_to_le32(ivers >> 32);
4306			raw_inode->i_extra_isize =
4307				cpu_to_le16(ei->i_extra_isize);
4308		}
4309	}
4310
4311	if (i_projid != EXT4_DEF_PROJID &&
4312	    !ext4_has_feature_project(inode->i_sb))
4313		err = err ?: -EFSCORRUPTED;
4314
4315	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4316	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4317		raw_inode->i_projid = cpu_to_le32(i_projid);
4318
4319	ext4_inode_csum_set(inode, raw_inode, ei);
4320	return err;
4321}
4322
4323/*
4324 * ext4_get_inode_loc returns with an extra refcount against the inode's
4325 * underlying buffer_head on success. If we pass 'inode' and it does not
4326 * have in-inode xattr, we have all inode data in memory that is needed
4327 * to recreate the on-disk version of this inode.
4328 */
4329static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4330				struct inode *inode, struct ext4_iloc *iloc,
4331				ext4_fsblk_t *ret_block)
4332{
4333	struct ext4_group_desc	*gdp;
4334	struct buffer_head	*bh;
4335	ext4_fsblk_t		block;
4336	struct blk_plug		plug;
4337	int			inodes_per_block, inode_offset;
4338
4339	iloc->bh = NULL;
4340	if (ino < EXT4_ROOT_INO ||
4341	    ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4342		return -EFSCORRUPTED;
4343
4344	iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4345	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4346	if (!gdp)
4347		return -EIO;
4348
4349	/*
4350	 * Figure out the offset within the block group inode table
4351	 */
4352	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4353	inode_offset = ((ino - 1) %
4354			EXT4_INODES_PER_GROUP(sb));
4355	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4356
4357	block = ext4_inode_table(sb, gdp);
4358	if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4359	    (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4360		ext4_error(sb, "Invalid inode table block %llu in "
4361			   "block_group %u", block, iloc->block_group);
4362		return -EFSCORRUPTED;
4363	}
4364	block += (inode_offset / inodes_per_block);
4365
4366	bh = sb_getblk(sb, block);
4367	if (unlikely(!bh))
4368		return -ENOMEM;
4369	if (ext4_buffer_uptodate(bh))
4370		goto has_buffer;
4371
4372	lock_buffer(bh);
4373	if (ext4_buffer_uptodate(bh)) {
4374		/* Someone brought it uptodate while we waited */
4375		unlock_buffer(bh);
4376		goto has_buffer;
4377	}
4378
4379	/*
4380	 * If we have all of the inode's information in memory and this
4381	 * is the only valid inode in the block, we need not read the
4382	 * block.
4383	 */
4384	if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4385		struct buffer_head *bitmap_bh;
4386		int i, start;
4387
4388		start = inode_offset & ~(inodes_per_block - 1);
4389
4390		/* Is the inode bitmap in cache? */
4391		bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4392		if (unlikely(!bitmap_bh))
4393			goto make_io;
4394
4395		/*
4396		 * If the inode bitmap isn't in cache then the
4397		 * optimisation may end up performing two reads instead
4398		 * of one, so skip it.
4399		 */
4400		if (!buffer_uptodate(bitmap_bh)) {
4401			brelse(bitmap_bh);
4402			goto make_io;
4403		}
4404		for (i = start; i < start + inodes_per_block; i++) {
4405			if (i == inode_offset)
4406				continue;
4407			if (ext4_test_bit(i, bitmap_bh->b_data))
4408				break;
4409		}
4410		brelse(bitmap_bh);
4411		if (i == start + inodes_per_block) {
4412			struct ext4_inode *raw_inode =
4413				(struct ext4_inode *) (bh->b_data + iloc->offset);
4414
4415			/* all other inodes are free, so skip I/O */
4416			memset(bh->b_data, 0, bh->b_size);
4417			if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4418				ext4_fill_raw_inode(inode, raw_inode);
4419			set_buffer_uptodate(bh);
4420			unlock_buffer(bh);
4421			goto has_buffer;
4422		}
4423	}
4424
4425make_io:
4426	/*
4427	 * If we need to do any I/O, try to pre-readahead extra
4428	 * blocks from the inode table.
4429	 */
4430	blk_start_plug(&plug);
4431	if (EXT4_SB(sb)->s_inode_readahead_blks) {
4432		ext4_fsblk_t b, end, table;
4433		unsigned num;
4434		__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4435
4436		table = ext4_inode_table(sb, gdp);
4437		/* s_inode_readahead_blks is always a power of 2 */
4438		b = block & ~((ext4_fsblk_t) ra_blks - 1);
4439		if (table > b)
4440			b = table;
4441		end = b + ra_blks;
4442		num = EXT4_INODES_PER_GROUP(sb);
4443		if (ext4_has_group_desc_csum(sb))
4444			num -= ext4_itable_unused_count(sb, gdp);
4445		table += num / inodes_per_block;
4446		if (end > table)
4447			end = table;
4448		while (b <= end)
4449			ext4_sb_breadahead_unmovable(sb, b++);
4450	}
4451
4452	/*
4453	 * There are other valid inodes in the buffer, this inode
4454	 * has in-inode xattrs, or we don't have this inode in memory.
4455	 * Read the block from disk.
4456	 */
4457	trace_ext4_load_inode(sb, ino);
4458	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4459	blk_finish_plug(&plug);
4460	wait_on_buffer(bh);
4461	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4462	if (!buffer_uptodate(bh)) {
4463		if (ret_block)
4464			*ret_block = block;
4465		brelse(bh);
4466		return -EIO;
4467	}
4468has_buffer:
4469	iloc->bh = bh;
4470	return 0;
4471}
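
/*
 * Illustrative arithmetic (standalone sketch, not kernel code): locating
 * inode #ino in the inode table, mirroring the computation above. The
 * geometry (4096-byte blocks, 256-byte inodes, 8192 inodes per group)
 * and itable_start (the group's inode table start, normally read from
 * the group descriptor) are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

static void locate_inode(uint64_t ino, uint64_t itable_start)
{
	const unsigned int inodes_per_group = 8192;
	const unsigned int inode_size = 256;
	const unsigned int block_size = 4096;
	const unsigned int inodes_per_block = block_size / inode_size; /* 16 */
	uint64_t group = (ino - 1) / inodes_per_group;
	unsigned int off_in_group = (ino - 1) % inodes_per_group;

	printf("group %llu, block %llu, byte offset %u\n",
	       (unsigned long long)group,
	       (unsigned long long)(itable_start +
				    off_in_group / inodes_per_block),
	       (off_in_group % inodes_per_block) * inode_size);
}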
4472
4473static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4474					struct ext4_iloc *iloc)
4475{
4476	ext4_fsblk_t err_blk = 0;
4477	int ret;
4478
4479	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4480					&err_blk);
4481
4482	if (ret == -EIO)
4483		ext4_error_inode_block(inode, err_blk, EIO,
4484					"unable to read itable block");
4485
4486	return ret;
4487}
4488
4489int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4490{
4491	ext4_fsblk_t err_blk = 0;
4492	int ret;
4493
4494	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4495					&err_blk);
4496
4497	if (ret == -EIO)
4498		ext4_error_inode_block(inode, err_blk, EIO,
4499					"unable to read itable block");
4500
4501	return ret;
4502}
4503
4504
4505int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4506			  struct ext4_iloc *iloc)
4507{
4508	return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
4509}
4510
4511static bool ext4_should_enable_dax(struct inode *inode)
4512{
4513	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4514
4515	if (test_opt2(inode->i_sb, DAX_NEVER))
4516		return false;
4517	if (!S_ISREG(inode->i_mode))
4518		return false;
4519	if (ext4_should_journal_data(inode))
4520		return false;
4521	if (ext4_has_inline_data(inode))
4522		return false;
4523	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4524		return false;
4525	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4526		return false;
4527	if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4528		return false;
4529	if (test_opt(inode->i_sb, DAX_ALWAYS))
4530		return true;
4531
4532	return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4533}
4534
4535void ext4_set_inode_flags(struct inode *inode, bool init)
4536{
4537	unsigned int flags = EXT4_I(inode)->i_flags;
4538	unsigned int new_fl = 0;
4539
4540	WARN_ON_ONCE(IS_DAX(inode) && init);
4541
4542	if (flags & EXT4_SYNC_FL)
4543		new_fl |= S_SYNC;
4544	if (flags & EXT4_APPEND_FL)
4545		new_fl |= S_APPEND;
4546	if (flags & EXT4_IMMUTABLE_FL)
4547		new_fl |= S_IMMUTABLE;
4548	if (flags & EXT4_NOATIME_FL)
4549		new_fl |= S_NOATIME;
4550	if (flags & EXT4_DIRSYNC_FL)
4551		new_fl |= S_DIRSYNC;
4552
4553	/* Because of the way inode_set_flags() works we must preserve S_DAX
4554	 * here if already set. */
4555	new_fl |= (inode->i_flags & S_DAX);
4556	if (init && ext4_should_enable_dax(inode))
4557		new_fl |= S_DAX;
4558
4559	if (flags & EXT4_ENCRYPT_FL)
4560		new_fl |= S_ENCRYPTED;
4561	if (flags & EXT4_CASEFOLD_FL)
4562		new_fl |= S_CASEFOLD;
4563	if (flags & EXT4_VERITY_FL)
4564		new_fl |= S_VERITY;
4565	inode_set_flags(inode, new_fl,
4566			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4567			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4568}
4569
4570static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4571				  struct ext4_inode_info *ei)
4572{
4573	blkcnt_t i_blocks;
4574	struct inode *inode = &(ei->vfs_inode);
4575	struct super_block *sb = inode->i_sb;
4576
4577	if (ext4_has_feature_huge_file(sb)) {
4578		/* we are using combined 48 bit field */
4579		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4580					le32_to_cpu(raw_inode->i_blocks_lo);
4581		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4582			/* i_blocks is in units of the filesystem block size */
4583			return i_blocks << (inode->i_blkbits - 9);
4584		} else {
4585			return i_blocks;
4586		}
4587	} else {
4588		return le32_to_cpu(raw_inode->i_blocks_lo);
4589	}
4590}
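
/*
 * Decode example for the function above: with EXT4_INODE_HUGE_FILE set
 * and 4096-byte blocks (i_blkbits = 12), a raw 48-bit count of 10 is in
 * filesystem blocks, so it becomes 10 << (12 - 9) = 80 sectors of 512
 * bytes; without the flag the raw count is already in 512-byte units.
 */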
4591
4592static inline int ext4_iget_extra_inode(struct inode *inode,
4593					 struct ext4_inode *raw_inode,
4594					 struct ext4_inode_info *ei)
4595{
4596	__le32 *magic = (void *)raw_inode +
4597			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4598
4599	if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4600	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4601		int err;
4602
4603		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4604		err = ext4_find_inline_data_nolock(inode);
4605		if (!err && ext4_has_inline_data(inode))
4606			ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4607		return err;
4608	} else
4609		EXT4_I(inode)->i_inline_off = 0;
4610	return 0;
4611}
4612
4613int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4614{
4615	if (!ext4_has_feature_project(inode->i_sb))
4616		return -EOPNOTSUPP;
4617	*projid = EXT4_I(inode)->i_projid;
4618	return 0;
4619}
4620
4621/*
4622 * ext4 has self-managed i_version for ea inodes: it stores the lower 32 bits
4623 * of the refcount in i_version, so use raw values if the inode has the
4624 * EXT4_EA_INODE_FL flag set.
4625 */
4626static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4627{
4628	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4629		inode_set_iversion_raw(inode, val);
4630	else
4631		inode_set_iversion_queried(inode, val);
4632}
4633
4634static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4636{
4637	if (flags & EXT4_IGET_EA_INODE) {
4638		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4639			return "missing EA_INODE flag";
4640		if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4641		    EXT4_I(inode)->i_file_acl)
4642			return "ea_inode with extended attributes";
4643	} else {
4644		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4645			return "unexpected EA_INODE flag";
4646	}
4647	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4648		return "unexpected bad inode w/o EXT4_IGET_BAD";
4649	return NULL;
4650}
4651
4652struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4653			  ext4_iget_flags flags, const char *function,
4654			  unsigned int line)
4655{
4656	struct ext4_iloc iloc;
4657	struct ext4_inode *raw_inode;
4658	struct ext4_inode_info *ei;
4659	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4660	struct inode *inode;
4661	const char *err_str;
4662	journal_t *journal = EXT4_SB(sb)->s_journal;
4663	long ret;
4664	loff_t size;
4665	int block;
4666	uid_t i_uid;
4667	gid_t i_gid;
4668	projid_t i_projid;
4669
4670	if ((!(flags & EXT4_IGET_SPECIAL) &&
4671	     ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4672	      ino == le32_to_cpu(es->s_usr_quota_inum) ||
4673	      ino == le32_to_cpu(es->s_grp_quota_inum) ||
4674	      ino == le32_to_cpu(es->s_prj_quota_inum) ||
4675	      ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4676	    (ino < EXT4_ROOT_INO) ||
4677	    (ino > le32_to_cpu(es->s_inodes_count))) {
4678		if (flags & EXT4_IGET_HANDLE)
4679			return ERR_PTR(-ESTALE);
4680		__ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4681			     "inode #%lu: comm %s: iget: illegal inode #",
4682			     ino, current->comm);
4683		return ERR_PTR(-EFSCORRUPTED);
4684	}
4685
4686	inode = iget_locked(sb, ino);
4687	if (!inode)
4688		return ERR_PTR(-ENOMEM);
4689	if (!(inode->i_state & I_NEW)) {
4690		if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4691			ext4_error_inode(inode, function, line, 0, err_str);
4692			iput(inode);
4693			return ERR_PTR(-EFSCORRUPTED);
4694		}
4695		return inode;
4696	}
4697
4698	ei = EXT4_I(inode);
4699	iloc.bh = NULL;
4700
4701	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4702	if (ret < 0)
4703		goto bad_inode;
4704	raw_inode = ext4_raw_inode(&iloc);
4705
4706	if ((flags & EXT4_IGET_HANDLE) &&
4707	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4708		ret = -ESTALE;
4709		goto bad_inode;
4710	}
4711
4712	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4713		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4714		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4715			EXT4_INODE_SIZE(inode->i_sb) ||
4716		    (ei->i_extra_isize & 3)) {
4717			ext4_error_inode(inode, function, line, 0,
4718					 "iget: bad extra_isize %u "
4719					 "(inode size %u)",
4720					 ei->i_extra_isize,
4721					 EXT4_INODE_SIZE(inode->i_sb));
4722			ret = -EFSCORRUPTED;
4723			goto bad_inode;
4724		}
4725	} else
4726		ei->i_extra_isize = 0;
4727
4728	/* Precompute checksum seed for inode metadata */
4729	if (ext4_has_metadata_csum(sb)) {
4730		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4731		__u32 csum;
4732		__le32 inum = cpu_to_le32(inode->i_ino);
4733		__le32 gen = raw_inode->i_generation;
4734		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4735				   sizeof(inum));
4736		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4737					      sizeof(gen));
4738	}
4739
4740	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4741	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4742	     (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4743		ext4_error_inode_err(inode, function, line, 0,
4744				EFSBADCRC, "iget: checksum invalid");
4745		ret = -EFSBADCRC;
4746		goto bad_inode;
4747	}
4748
4749	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4750	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4751	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4752	if (ext4_has_feature_project(sb) &&
4753	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4754	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4755		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4756	else
4757		i_projid = EXT4_DEF_PROJID;
4758
4759	if (!(test_opt(inode->i_sb, NO_UID32))) {
4760		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4761		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4762	}
4763	i_uid_write(inode, i_uid);
4764	i_gid_write(inode, i_gid);
4765	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4766	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4767
4768	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4769	ei->i_inline_off = 0;
4770	ei->i_dir_start_lookup = 0;
4771	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4772	/* We now have enough fields to check if the inode was active or not.
4773	 * This is needed because nfsd might try to access dead inodes;
4774	 * the test is the same one that e2fsck uses.
4775	 * NeilBrown 1999oct15
4776	 */
4777	if (inode->i_nlink == 0) {
4778		if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4779		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4780		    ino != EXT4_BOOT_LOADER_INO) {
4781			/* this inode is deleted or unallocated */
4782			if (flags & EXT4_IGET_SPECIAL) {
4783				ext4_error_inode(inode, function, line, 0,
4784						 "iget: special inode unallocated");
4785				ret = -EFSCORRUPTED;
4786			} else
4787				ret = -ESTALE;
4788			goto bad_inode;
4789		}
4790		/* The only unlinked inodes we let through here have
4791		 * valid i_mode and are being read by the orphan
4792		 * recovery code: that's fine, we're about to complete
4793		 * the process of deleting those.
4794		 * OR it is the EXT4_BOOT_LOADER_INO which is
4795		 * not initialized on a new filesystem. */
4796	}
4797	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4798	ext4_set_inode_flags(inode, true);
4799	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4800	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4801	if (ext4_has_feature_64bit(sb))
4802		ei->i_file_acl |=
4803			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4804	inode->i_size = ext4_isize(sb, raw_inode);
4805	if ((size = i_size_read(inode)) < 0) {
4806		ext4_error_inode(inode, function, line, 0,
4807				 "iget: bad i_size value: %lld", size);
4808		ret = -EFSCORRUPTED;
4809		goto bad_inode;
4810	}
4811	/*
4812	 * If dir_index is not enabled but there's a dir with the INDEX flag set,
4813	 * we'd normally treat htree data as empty space. But with metadata
4814	 * checksumming that would corrupt the checksums, so forbid it.
4815	 */
4816	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4817	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4818		ext4_error_inode(inode, function, line, 0,
4819			 "iget: Dir with htree data on filesystem without dir_index feature.");
4820		ret = -EFSCORRUPTED;
4821		goto bad_inode;
4822	}
4823	ei->i_disksize = inode->i_size;
4824#ifdef CONFIG_QUOTA
4825	ei->i_reserved_quota = 0;
4826#endif
4827	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4828	ei->i_block_group = iloc.block_group;
4829	ei->i_last_alloc_group = ~0;
4830	/*
4831	 * NOTE! The in-memory inode i_data array is in little-endian order
4832	 * even on big-endian machines: we do NOT byteswap the block numbers!
4833	 */
4834	for (block = 0; block < EXT4_N_BLOCKS; block++)
4835		ei->i_data[block] = raw_inode->i_block[block];
4836	INIT_LIST_HEAD(&ei->i_orphan);
4837	ext4_fc_init_inode(&ei->vfs_inode);
4838
4839	/*
4840	 * Set transaction IDs of transactions that have to be committed
4841	 * to finish f[data]sync. We set them to currently running transaction
4842	 * as we cannot be sure that the inode or some of its metadata isn't
4843	 * part of the transaction - the inode could have been reclaimed and
4844	 * now it is reread from disk.
4845	 */
4846	if (journal) {
4847		transaction_t *transaction;
4848		tid_t tid;
4849
4850		read_lock(&journal->j_state_lock);
4851		if (journal->j_running_transaction)
4852			transaction = journal->j_running_transaction;
4853		else
4854			transaction = journal->j_committing_transaction;
4855		if (transaction)
4856			tid = transaction->t_tid;
4857		else
4858			tid = journal->j_commit_sequence;
4859		read_unlock(&journal->j_state_lock);
4860		ei->i_sync_tid = tid;
4861		ei->i_datasync_tid = tid;
4862	}
4863
4864	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4865		if (ei->i_extra_isize == 0) {
4866			/* The extra space is currently unused. Use it. */
4867			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4868			ei->i_extra_isize = sizeof(struct ext4_inode) -
4869					    EXT4_GOOD_OLD_INODE_SIZE;
4870		} else {
4871			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4872			if (ret)
4873				goto bad_inode;
4874		}
4875	}
4876
4877	EXT4_INODE_GET_CTIME(inode, raw_inode);
4878	EXT4_INODE_GET_ATIME(inode, raw_inode);
4879	EXT4_INODE_GET_MTIME(inode, raw_inode);
4880	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4881
4882	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4883		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4884
4885		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4886			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4887				ivers |=
4888		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4889		}
4890		ext4_inode_set_iversion_queried(inode, ivers);
4891	}
4892
4893	ret = 0;
4894	if (ei->i_file_acl &&
4895	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4896		ext4_error_inode(inode, function, line, 0,
4897				 "iget: bad extended attribute block %llu",
4898				 ei->i_file_acl);
4899		ret = -EFSCORRUPTED;
4900		goto bad_inode;
4901	} else if (!ext4_has_inline_data(inode)) {
4902		/* validate the block references in the inode */
4903		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4904			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4905			(S_ISLNK(inode->i_mode) &&
4906			!ext4_inode_is_fast_symlink(inode)))) {
4907			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4908				ret = ext4_ext_check_inode(inode);
4909			else
4910				ret = ext4_ind_check_inode(inode);
4911		}
4912	}
4913	if (ret)
4914		goto bad_inode;
4915
4916	if (S_ISREG(inode->i_mode)) {
4917		inode->i_op = &ext4_file_inode_operations;
4918		inode->i_fop = &ext4_file_operations;
4919		ext4_set_aops(inode);
4920	} else if (S_ISDIR(inode->i_mode)) {
4921		inode->i_op = &ext4_dir_inode_operations;
4922		inode->i_fop = &ext4_dir_operations;
4923	} else if (S_ISLNK(inode->i_mode)) {
4924		/* VFS does not allow setting these, so this must be corruption */
4925		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4926			ext4_error_inode(inode, function, line, 0,
4927					 "iget: immutable or append flags "
4928					 "not allowed on symlinks");
4929			ret = -EFSCORRUPTED;
4930			goto bad_inode;
4931		}
4932		if (IS_ENCRYPTED(inode)) {
4933			inode->i_op = &ext4_encrypted_symlink_inode_operations;
4934		} else if (ext4_inode_is_fast_symlink(inode)) {
4935			inode->i_link = (char *)ei->i_data;
4936			inode->i_op = &ext4_fast_symlink_inode_operations;
4937			nd_terminate_link(ei->i_data, inode->i_size,
4938				sizeof(ei->i_data) - 1);
4939		} else {
4940			inode->i_op = &ext4_symlink_inode_operations;
4941		}
4942	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4943	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4944		inode->i_op = &ext4_special_inode_operations;
4945		if (raw_inode->i_block[0])
4946			init_special_inode(inode, inode->i_mode,
4947			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4948		else
4949			init_special_inode(inode, inode->i_mode,
4950			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4951	} else if (ino == EXT4_BOOT_LOADER_INO) {
4952		make_bad_inode(inode);
4953	} else {
4954		ret = -EFSCORRUPTED;
4955		ext4_error_inode(inode, function, line, 0,
4956				 "iget: bogus i_mode (%o)", inode->i_mode);
4957		goto bad_inode;
4958	}
4959	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
4960		ext4_error_inode(inode, function, line, 0,
4961				 "casefold flag without casefold feature");
4962		ret = -EFSCORRUPTED;
4963		goto bad_inode;
4964	}
4965	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4966		ext4_error_inode(inode, function, line, 0, err_str);
4967		ret = -EFSCORRUPTED;
4968		goto bad_inode;
4969	}
4970
4971	brelse(iloc.bh);
4972	unlock_new_inode(inode);
4973	return inode;
4974
4975bad_inode:
4976	brelse(iloc.bh);
4977	iget_failed(inode);
4978	return ERR_PTR(ret);
4979}
4980
4981static void __ext4_update_other_inode_time(struct super_block *sb,
4982					   unsigned long orig_ino,
4983					   unsigned long ino,
4984					   struct ext4_inode *raw_inode)
4985{
4986	struct inode *inode;
4987
4988	inode = find_inode_by_ino_rcu(sb, ino);
4989	if (!inode)
4990		return;
4991
4992	if (!inode_is_dirtytime_only(inode))
4993		return;
4994
4995	spin_lock(&inode->i_lock);
4996	if (inode_is_dirtytime_only(inode)) {
4997		struct ext4_inode_info	*ei = EXT4_I(inode);
4998
4999		inode->i_state &= ~I_DIRTY_TIME;
5000		spin_unlock(&inode->i_lock);
5001
5002		spin_lock(&ei->i_raw_lock);
5003		EXT4_INODE_SET_CTIME(inode, raw_inode);
5004		EXT4_INODE_SET_MTIME(inode, raw_inode);
5005		EXT4_INODE_SET_ATIME(inode, raw_inode);
5006		ext4_inode_csum_set(inode, raw_inode, ei);
5007		spin_unlock(&ei->i_raw_lock);
5008		trace_ext4_other_inode_update_time(inode, orig_ino);
5009		return;
5010	}
5011	spin_unlock(&inode->i_lock);
5012}
5013
5014/*
5015 * Opportunistically update the other time fields for other inodes in
5016 * the same inode table block.
5017 */
5018static void ext4_update_other_inodes_time(struct super_block *sb,
5019					  unsigned long orig_ino, char *buf)
5020{
5021	unsigned long ino;
5022	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5023	int inode_size = EXT4_INODE_SIZE(sb);
5024
5025	/*
5026	 * Calculate the first inode in the inode table block.  Inode
5027	 * numbers are one-based.  That is, the first inode in a block
5028	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5029	 */
5030	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5031	rcu_read_lock();
5032	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5033		if (ino == orig_ino)
5034			continue;
5035		__ext4_update_other_inode_time(sb, orig_ino, ino,
5036					       (struct ext4_inode *)buf);
5037	}
5038	rcu_read_unlock();
5039}
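
/*
 * Example of the first-inode computation above: with 16 inodes per
 * block, orig_ino = 35 gives ((35 - 1) & ~15) + 1 = 33, so the block
 * holds inodes 33..48 and the loop skips only inode 35 itself.
 */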
5040
5041/*
5042 * Post the struct inode info into an on-disk inode location in the
5043 * buffer-cache.  This gobbles the caller's reference to the
5044 * buffer_head in the inode location struct.
5045 *
5046 * The caller must have write access to iloc->bh.
5047 */
5048static int ext4_do_update_inode(handle_t *handle,
5049				struct inode *inode,
5050				struct ext4_iloc *iloc)
5051{
5052	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5053	struct ext4_inode_info *ei = EXT4_I(inode);
5054	struct buffer_head *bh = iloc->bh;
5055	struct super_block *sb = inode->i_sb;
5056	int err;
5057	int need_datasync = 0, set_large_file = 0;
5058
5059	spin_lock(&ei->i_raw_lock);
5060
5061	/*
5062	 * For fields not tracked in the in-memory inode, initialise them
5063	 * to zero for new inodes.
5064	 */
5065	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5066		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5067
5068	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5069		need_datasync = 1;
5070	if (ei->i_disksize > 0x7fffffffULL) {
5071		if (!ext4_has_feature_large_file(sb) ||
5072		    EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5073			set_large_file = 1;
5074	}
5075
5076	err = ext4_fill_raw_inode(inode, raw_inode);
5077	spin_unlock(&ei->i_raw_lock);
5078	if (err) {
5079		EXT4_ERROR_INODE(inode, "corrupted inode contents");
5080		goto out_brelse;
5081	}
5082
5083	if (inode->i_sb->s_flags & SB_LAZYTIME)
5084		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5085					      bh->b_data);
5086
5087	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5088	err = ext4_handle_dirty_metadata(handle, NULL, bh);
5089	if (err)
5090		goto out_error;
5091	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5092	if (set_large_file) {
5093		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5094		err = ext4_journal_get_write_access(handle, sb,
5095						    EXT4_SB(sb)->s_sbh,
5096						    EXT4_JTR_NONE);
5097		if (err)
5098			goto out_error;
5099		lock_buffer(EXT4_SB(sb)->s_sbh);
5100		ext4_set_feature_large_file(sb);
5101		ext4_superblock_csum_set(sb);
5102		unlock_buffer(EXT4_SB(sb)->s_sbh);
5103		ext4_handle_sync(handle);
5104		err = ext4_handle_dirty_metadata(handle, NULL,
5105						 EXT4_SB(sb)->s_sbh);
5106	}
5107	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5108out_error:
5109	ext4_std_error(inode->i_sb, err);
5110out_brelse:
5111	brelse(bh);
5112	return err;
5113}
5114
5115/*
5116 * ext4_write_inode()
5117 *
5118 * We are called from a few places:
5119 *
5120 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5121 *   Here, there will be no transaction running. We wait for any running
5122 *   transaction to commit.
5123 *
5124 * - Within flush work (sys_sync(), kupdate and such).
5125 *   We wait on commit, if told to.
5126 *
5127 * - Within iput_final() -> write_inode_now()
5128 *   We wait on commit, if told to.
5129 *
5130 * In all cases it is actually safe for us to return without doing anything,
5131 * because the inode has been copied into a raw inode buffer in
5132 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5133 * writeback.
5134 *
5135 * Note that we are absolutely dependent upon all inode dirtiers doing the
5136 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5137 * which we are interested.
5138 *
5139 * It would be a bug for them to not do this.  The code:
5140 *
5141 *	mark_inode_dirty(inode)
5142 *	stuff();
5143 *	inode->i_size = expr;
5144 *
5145 * is in error because write_inode() could occur while `stuff()' is running,
5146 * and the new i_size will be lost.  Plus the inode will no longer be on the
5147 * superblock's dirty inode list.
5148 */
5149int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5150{
5151	int err;
5152
5153	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5154		return 0;
5155
5156	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5157		return -EIO;
5158
5159	if (EXT4_SB(inode->i_sb)->s_journal) {
5160		if (ext4_journal_current_handle()) {
5161			ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5162			dump_stack();
5163			return -EIO;
5164		}
5165
5166		/*
5167		 * No need to force transaction in WB_SYNC_NONE mode. Also
5168		 * ext4_sync_fs() will force the commit after everything is
5169		 * written.
5170		 */
5171		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5172			return 0;
5173
5174		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5175						EXT4_I(inode)->i_sync_tid);
5176	} else {
5177		struct ext4_iloc iloc;
5178
5179		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5180		if (err)
5181			return err;
5182		/*
5183		 * sync(2) will flush the whole buffer cache. No need to do
5184		 * it here separately for each inode.
5185		 */
5186		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5187			sync_dirty_buffer(iloc.bh);
5188		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5189			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5190					       "IO error syncing inode");
5191			err = -EIO;
5192		}
5193		brelse(iloc.bh);
5194	}
5195	return err;
5196}
5197
5198/*
5199 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5200 * buffers that are attached to a folio straddling i_size and are undergoing
5201 * commit. In that case we have to wait for commit to finish and try again.
5202 */
5203static void ext4_wait_for_tail_page_commit(struct inode *inode)
5204{
5205	unsigned offset;
5206	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5207	tid_t commit_tid = 0;
5208	int ret;
5209
5210	offset = inode->i_size & (PAGE_SIZE - 1);
5211	/*
5212	 * If the folio is fully truncated, we don't need to wait for any commit
5213	 * (and we even should not as __ext4_journalled_invalidate_folio() may
5214	 * strip all buffers from the folio but keep the folio dirty which can then
5215	 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5216	 * buffers). Also we don't need to wait for any commit if all buffers in
5217	 * the folio remain valid. This is most beneficial for the common case of
5218	 * blocksize == PAGESIZE.
5219	 */
5220	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5221		return;
5222	while (1) {
5223		struct folio *folio = filemap_lock_folio(inode->i_mapping,
5224				      inode->i_size >> PAGE_SHIFT);
5225		if (IS_ERR(folio))
5226			return;
5227		ret = __ext4_journalled_invalidate_folio(folio, offset,
5228						folio_size(folio) - offset);
5229		folio_unlock(folio);
5230		folio_put(folio);
5231		if (ret != -EBUSY)
5232			return;
5233		commit_tid = 0;
5234		read_lock(&journal->j_state_lock);
5235		if (journal->j_committing_transaction)
5236			commit_tid = journal->j_committing_transaction->t_tid;
5237		read_unlock(&journal->j_state_lock);
5238		if (commit_tid)
5239			jbd2_log_wait_commit(journal, commit_tid);
5240	}
5241}
5242
5243/*
5244 * ext4_setattr()
5245 *
5246 * Called from notify_change.
5247 *
5248 * We want to trap VFS attempts to truncate the file as soon as
5249 * possible.  In particular, we want to make sure that when the VFS
5250 * shrinks i_size, we put the inode on the orphan list and modify
5251 * i_disksize immediately, so that during the subsequent flushing of
5252 * dirty pages and freeing of disk blocks, we can guarantee that any
5253 * commit will leave the blocks being flushed in an unused state on
5254 * disk.  (On recovery, the inode will get truncated and the blocks will
5255 * be freed, so we have a strong guarantee that no future commit will
5256 * leave these blocks visible to the user.)
5257 *
5258 * Another thing we have to ensure is that if we are in ordered mode
5259 * and the inode is still attached to the committing transaction, we
5260 * must start writeout of all the dirty pages which are being truncated.
5261 * This way we are sure that all the data written in the previous
5262 * transaction are already on disk (truncate waits for pages under
5263 * writeback).
5264 *
5265 * Called with inode->i_rwsem down.
5266 */
5267int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5268		 struct iattr *attr)
5269{
5270	struct inode *inode = d_inode(dentry);
5271	int error, rc = 0;
5272	int orphan = 0;
5273	const unsigned int ia_valid = attr->ia_valid;
5274	bool inc_ivers = true;
5275
5276	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5277		return -EIO;
5278
5279	if (unlikely(IS_IMMUTABLE(inode)))
5280		return -EPERM;
5281
5282	if (unlikely(IS_APPEND(inode) &&
5283		     (ia_valid & (ATTR_MODE | ATTR_UID |
5284				  ATTR_GID | ATTR_TIMES_SET))))
5285		return -EPERM;
5286
5287	error = setattr_prepare(idmap, dentry, attr);
5288	if (error)
5289		return error;
5290
5291	error = fscrypt_prepare_setattr(dentry, attr);
5292	if (error)
5293		return error;
5294
5295	error = fsverity_prepare_setattr(dentry, attr);
5296	if (error)
5297		return error;
5298
5299	if (is_quota_modification(idmap, inode, attr)) {
5300		error = dquot_initialize(inode);
5301		if (error)
5302			return error;
5303	}
5304
5305	if (i_uid_needs_update(idmap, attr, inode) ||
5306	    i_gid_needs_update(idmap, attr, inode)) {
5307		handle_t *handle;
5308
5309		/* (user+group)*(old+new) structure, inode write (sb,
5310		 * inode block, ? - but truncate inode update has it) */
5311		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5312			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5313			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5314		if (IS_ERR(handle)) {
5315			error = PTR_ERR(handle);
5316			goto err_out;
5317		}
5318
5319		/* dquot_transfer() calls back ext4_get_inode_usage() which
5320		 * counts xattr inode references.
5321		 */
5322		down_read(&EXT4_I(inode)->xattr_sem);
5323		error = dquot_transfer(idmap, inode, attr);
5324		up_read(&EXT4_I(inode)->xattr_sem);
5325
5326		if (error) {
5327			ext4_journal_stop(handle);
5328			return error;
5329		}
5330		/* Update corresponding info in inode so that everything is in
5331		 * one transaction */
5332		i_uid_update(idmap, attr, inode);
5333		i_gid_update(idmap, attr, inode);
5334		error = ext4_mark_inode_dirty(handle, inode);
5335		ext4_journal_stop(handle);
5336		if (unlikely(error)) {
5337			return error;
5338		}
5339	}
5340
5341	if (attr->ia_valid & ATTR_SIZE) {
5342		handle_t *handle;
5343		loff_t oldsize = inode->i_size;
5344		loff_t old_disksize;
5345		int shrink = (attr->ia_size < inode->i_size);
5346
5347		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5348			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5349
5350			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5351				return -EFBIG;
5352			}
5353		}
5354		if (!S_ISREG(inode->i_mode)) {
5355			return -EINVAL;
5356		}
5357
5358		if (attr->ia_size == inode->i_size)
5359			inc_ivers = false;
5360
5361		if (shrink) {
5362			if (ext4_should_order_data(inode)) {
5363				error = ext4_begin_ordered_truncate(inode,
5364							    attr->ia_size);
5365				if (error)
5366					goto err_out;
5367			}
5368			/*
5369			 * Blocks are going to be removed from the inode. Wait
5370			 * for dio in flight.
5371			 */
5372			inode_dio_wait(inode);
5373		}
5374
5375		filemap_invalidate_lock(inode->i_mapping);
5376
5377		rc = ext4_break_layouts(inode);
5378		if (rc) {
5379			filemap_invalidate_unlock(inode->i_mapping);
5380			goto err_out;
5381		}
5382
5383		if (attr->ia_size != inode->i_size) {
5384			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5385			if (IS_ERR(handle)) {
5386				error = PTR_ERR(handle);
5387				goto out_mmap_sem;
5388			}
5389			if (ext4_handle_valid(handle) && shrink) {
5390				error = ext4_orphan_add(handle, inode);
5391				orphan = 1;
5392			}
5393			/*
5394			 * Update c/mtime on truncate up; ext4_truncate() will
5395			 * update c/mtime in the shrink case below.
5396			 */
5397			if (!shrink)
5398				inode_set_mtime_to_ts(inode,
5399						      inode_set_ctime_current(inode));
5400
5401			if (shrink)
5402				ext4_fc_track_range(handle, inode,
5403					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5404					inode->i_sb->s_blocksize_bits,
5405					EXT_MAX_BLOCKS - 1);
5406			else
5407				ext4_fc_track_range(
5408					handle, inode,
5409					(oldsize > 0 ? oldsize - 1 : oldsize) >>
5410					inode->i_sb->s_blocksize_bits,
5411					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5412					inode->i_sb->s_blocksize_bits);
5413
5414			down_write(&EXT4_I(inode)->i_data_sem);
5415			old_disksize = EXT4_I(inode)->i_disksize;
5416			EXT4_I(inode)->i_disksize = attr->ia_size;
5417			rc = ext4_mark_inode_dirty(handle, inode);
5418			if (!error)
5419				error = rc;
5420			/*
5421			 * We have to update i_size under i_data_sem together
5422			 * with i_disksize to avoid races with writeback code
5423			 * running ext4_wb_update_i_disksize().
5424			 */
5425			if (!error)
5426				i_size_write(inode, attr->ia_size);
5427			else
5428				EXT4_I(inode)->i_disksize = old_disksize;
5429			up_write(&EXT4_I(inode)->i_data_sem);
5430			ext4_journal_stop(handle);
5431			if (error)
5432				goto out_mmap_sem;
5433			if (!shrink) {
5434				pagecache_isize_extended(inode, oldsize,
5435							 inode->i_size);
5436			} else if (ext4_should_journal_data(inode)) {
5437				ext4_wait_for_tail_page_commit(inode);
5438			}
5439		}
5440
5441		/*
5442		 * Truncate pagecache after we've waited for commit
5443		 * in data=journal mode to make pages freeable.
5444		 */
5445		truncate_pagecache(inode, inode->i_size);
5446		/*
5447		 * Call ext4_truncate() even if i_size didn't change to
5448		 * truncate possible preallocated blocks.
5449		 */
5450		if (attr->ia_size <= oldsize) {
5451			rc = ext4_truncate(inode);
5452			if (rc)
5453				error = rc;
5454		}
5455out_mmap_sem:
5456		filemap_invalidate_unlock(inode->i_mapping);
5457	}
5458
5459	if (!error) {
5460		if (inc_ivers)
5461			inode_inc_iversion(inode);
5462		setattr_copy(idmap, inode, attr);
5463		mark_inode_dirty(inode);
5464	}
5465
5466	/*
5467	 * If the call to ext4_truncate failed to get a transaction handle at
5468	 * all, we need to clean up the in-core orphan list manually.
5469	 */
5470	if (orphan && inode->i_nlink)
5471		ext4_orphan_del(NULL, inode);
5472
5473	if (!error && (ia_valid & ATTR_MODE))
5474		rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5475
5476err_out:
5477	if (error)
5478		ext4_std_error(inode->i_sb, error);
5479	if (!error)
5480		error = rc;
5481	return error;
5482}
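
/*
 * Editorial example (not part of the kernel source): the ATTR_SIZE path
 * above is what services a plain ftruncate(2) on an ext4 regular file.
 * A minimal userspace sketch exercising both the extend and the shrink
 * branch; the path /mnt/ext4/testfile is purely illustrative.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Extend: no blocks are removed, only c/mtime is updated above. */
	if (ftruncate(fd, 1 << 20) != 0)
		perror("ftruncate extend");
	/* Shrink: the orphan list protects against a crash mid-truncate. */
	if (ftruncate(fd, 4096) != 0)
		perror("ftruncate shrink");
	close(fd);
	return 0;
}
#endif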
5483
5484u32 ext4_dio_alignment(struct inode *inode)
5485{
5486	if (fsverity_active(inode))
5487		return 0;
5488	if (ext4_should_journal_data(inode))
5489		return 0;
5490	if (ext4_has_inline_data(inode))
5491		return 0;
5492	if (IS_ENCRYPTED(inode)) {
5493		if (!fscrypt_dio_supported(inode))
5494			return 0;
5495		return i_blocksize(inode);
5496	}
5497	return 1; /* use the iomap defaults */
5498}
5499
5500int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5501		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
5502{
5503	struct inode *inode = d_inode(path->dentry);
5504	struct ext4_inode *raw_inode;
5505	struct ext4_inode_info *ei = EXT4_I(inode);
5506	unsigned int flags;
5507
5508	if ((request_mask & STATX_BTIME) &&
5509	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5510		stat->result_mask |= STATX_BTIME;
5511		stat->btime.tv_sec = ei->i_crtime.tv_sec;
5512		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5513	}
5514
5515	/*
5516	 * Return the DIO alignment restrictions if requested.  We only return
5517	 * this information when requested, since on encrypted files it might
5518	 * take a fair bit of work to get if the file wasn't opened recently.
5519	 */
5520	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5521		u32 dio_align = ext4_dio_alignment(inode);
5522
5523		stat->result_mask |= STATX_DIOALIGN;
5524		if (dio_align == 1) {
5525			struct block_device *bdev = inode->i_sb->s_bdev;
5526
5527			/* iomap defaults */
5528			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5529			stat->dio_offset_align = bdev_logical_block_size(bdev);
5530		} else {
5531			stat->dio_mem_align = dio_align;
5532			stat->dio_offset_align = dio_align;
5533		}
5534	}
5535
5536	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5537	if (flags & EXT4_APPEND_FL)
5538		stat->attributes |= STATX_ATTR_APPEND;
5539	if (flags & EXT4_COMPR_FL)
5540		stat->attributes |= STATX_ATTR_COMPRESSED;
5541	if (flags & EXT4_ENCRYPT_FL)
5542		stat->attributes |= STATX_ATTR_ENCRYPTED;
5543	if (flags & EXT4_IMMUTABLE_FL)
5544		stat->attributes |= STATX_ATTR_IMMUTABLE;
5545	if (flags & EXT4_NODUMP_FL)
5546		stat->attributes |= STATX_ATTR_NODUMP;
5547	if (flags & EXT4_VERITY_FL)
5548		stat->attributes |= STATX_ATTR_VERITY;
5549
5550	stat->attributes_mask |= (STATX_ATTR_APPEND |
5551				  STATX_ATTR_COMPRESSED |
5552				  STATX_ATTR_ENCRYPTED |
5553				  STATX_ATTR_IMMUTABLE |
5554				  STATX_ATTR_NODUMP |
5555				  STATX_ATTR_VERITY);
5556
5557	generic_fillattr(idmap, request_mask, inode, stat);
5558	return 0;
5559}
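
/*
 * Editorial example (not part of the kernel source): the STATX_DIOALIGN
 * branch above is queried from userspace with statx(2). A minimal sketch,
 * assuming glibc and kernel support for STATX_DIOALIGN (Linux 6.1+); the
 * path is purely illustrative.
 */
#if 0	/* illustrative userspace code, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/mnt/ext4/testfile", 0, STATX_DIOALIGN, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("DIO mem align %u, offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		printf("DIO not supported on this file\n");
	return 0;
}
#endif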
5560
5561int ext4_file_getattr(struct mnt_idmap *idmap,
5562		      const struct path *path, struct kstat *stat,
5563		      u32 request_mask, unsigned int query_flags)
5564{
5565	struct inode *inode = d_inode(path->dentry);
5566	u64 delalloc_blocks;
5567
5568	ext4_getattr(idmap, path, stat, request_mask, query_flags);
5569
5570	/*
5571	 * If there is inline data in the inode, the inode will normally not
5572	 * have data blocks allocated (it may have an external xattr block).
5573	 * Report at least one sector for such files, so tools like tar, rsync
5574	 * and others don't incorrectly think the file is completely sparse.
5575	 */
5576	if (unlikely(ext4_has_inline_data(inode)))
5577		stat->blocks += (stat->size + 511) >> 9;
5578
5579	/*
5580	 * We can't update i_blocks if the block allocation is delayed;
5581	 * otherwise, in the case of a system crash before the real block
5582	 * allocation is done, we would have i_blocks inconsistent with
5583	 * the on-disk file blocks.
5584	 * We always keep i_blocks updated together with the real
5585	 * allocation. But so as not to confuse the user, stat
5586	 * will return a block count that includes the delayed allocation
5587	 * blocks for this file.
5588	 */
5589	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5590				   EXT4_I(inode)->i_reserved_data_blocks);
5591	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5592	return 0;
5593}
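
/*
 * Editorial note (not in the kernel source): the shift above converts
 * filesystem blocks into the 512-byte sectors that st_blocks reports. A
 * worked example, assuming a 4 KiB block size (s_blocksize_bits == 12):
 * each delayed-allocation block contributes 1 << (12 - 9) == 8 sectors,
 * so three reserved delalloc blocks add 24 sectors to stat->blocks.
 */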
5594
5595static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5596				   int pextents)
5597{
5598	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5599		return ext4_ind_trans_blocks(inode, lblocks);
5600	return ext4_ext_index_trans_blocks(inode, pextents);
5601}
5602
5603/*
5604 * Account for index blocks, block group bitmaps and block group
5605 * descriptor blocks when both data blocks and index blocks are modified.
5606 * In the worst case the index blocks are spread over different block groups.
5607 *
5608 * If the data blocks are discontiguous, they may be spread over
5609 * different block groups too. Even if they are contiguous, with flexbg
5610 * they could still cross a block group boundary.
5611 *
5612 * Also account for the superblock, inode, quota and xattr blocks.
5613 */
5614static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5615				  int pextents)
5616{
5617	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5618	int gdpblocks;
5619	int idxblocks;
5620	int ret;
5621
5622	/*
5623	 * How many index blocks do we need to touch to map @lblocks logical
5624	 * blocks to @pextents physical extents?
5625	 */
5626	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5627
5628	ret = idxblocks;
5629
5630	/*
5631	 * Now let's see how many group bitmaps and group descriptors need
5632	 * to be accounted for.
5633	 */
5634	groups = idxblocks + pextents;
5635	gdpblocks = groups;
5636	if (groups > ngroups)
5637		groups = ngroups;
5638	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5639		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5640
5641	/* bitmaps and block group descriptor blocks */
5642	ret += groups + gdpblocks;
5643
5644	/* Blocks for super block, inode, quota and xattr blocks */
5645	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5646
5647	return ret;
5648}
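
/*
 * Editorial note (not in the kernel source): a worked instance of the
 * accounting above. Assume an extent-mapped inode for which
 * ext4_index_trans_blocks() returns 1, with pextents == 4, ngroups == 16
 * and s_gdb_count == 1 (all numbers illustrative):
 *
 *	idxblocks = 1
 *	groups    = idxblocks + pextents = 5	(5 <= ngroups, kept)
 *	gdpblocks = min(groups, s_gdb_count) = 1
 *	ret       = 1 + 5 + 1 + EXT4_META_TRANS_BLOCKS(sb)
 *
 * i.e. one index block, five bitmap blocks, one group descriptor block,
 * plus the fixed superblock/inode/quota/xattr reservation.
 */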
5649
5650/*
5651 * Calculate the total number of credits to reserve so that the
5652 * modification of a single page fits into a single transaction,
5653 * which may include multiple chunks of block allocations.
5654 *
5655 * This could be called via ext4_write_begin().
5656 *
5657 * We need to consider the worst case, when
5658 * each extent gets one new block.
5659 */
5660int ext4_writepage_trans_blocks(struct inode *inode)
5661{
5662	int bpp = ext4_journal_blocks_per_page(inode);
5663	int ret;
5664
5665	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5666
5667	/* Account for data blocks for journalled mode */
5668	if (ext4_should_journal_data(inode))
5669		ret += bpp;
5670	return ret;
5671}
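
/*
 * Editorial note (not in the kernel source): a worked instance, assuming
 * 1 KiB filesystem blocks and 4 KiB pages, so that
 * ext4_journal_blocks_per_page() returns bpp == 4. The reservation then
 * covers metadata for up to four separately allocated blocks, and in
 * data=journal mode four further credits are added for the data blocks
 * themselves.
 */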
5672
5673/*
5674 * Calculate the journal credits for a chunk of data modification.
5675 *
5676 * This is called from DIO, fallocate or whoever else calls
5677 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5678 *
5679 * Journal buffers for data blocks are not included here, as DIO
5680 * and fallocate do not need to journal data buffers.
5681 */
5682int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5683{
5684	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5685}
5686
5687/*
5688 * The caller must have previously called ext4_reserve_inode_write().
5689 * Given this, we know that the caller already has write access to iloc->bh.
5690 */
5691int ext4_mark_iloc_dirty(handle_t *handle,
5692			 struct inode *inode, struct ext4_iloc *iloc)
5693{
5694	int err = 0;
5695
5696	if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5697		put_bh(iloc->bh);
5698		return -EIO;
5699	}
5700	ext4_fc_track_inode(handle, inode);
5701
5702	/* ext4_do_update_inode() consumes one bh->b_count */
5703	get_bh(iloc->bh);
5704
5705	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5706	err = ext4_do_update_inode(handle, inode, iloc);
5707	put_bh(iloc->bh);
5708	return err;
5709}
5710
5711/*
5712 * On success, we end up with an outstanding reference count against
5713 * iloc->bh.  This _must_ be cleaned up later.
5714 */
5715
5716int
5717ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5718			 struct ext4_iloc *iloc)
5719{
5720	int err;
5721
5722	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5723		return -EIO;
5724
5725	err = ext4_get_inode_loc(inode, iloc);
5726	if (!err) {
5727		BUFFER_TRACE(iloc->bh, "get_write_access");
5728		err = ext4_journal_get_write_access(handle, inode->i_sb,
5729						    iloc->bh, EXT4_JTR_NONE);
5730		if (err) {
5731			brelse(iloc->bh);
5732			iloc->bh = NULL;
5733		}
5734	}
5735	ext4_std_error(inode->i_sb, err);
5736	return err;
5737}
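
/*
 * Editorial example (not part of the kernel source): the typical pairing
 * of the two helpers above, as __ext4_mark_inode_dirty() does further
 * down. A minimal sketch, assuming a running handle with enough credits:
 */
#if 0	/* illustrative sketch of the reserve/dirty pattern */
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;	/* no bh reference is held on failure */
	/* ... modify the in-core inode while holding the bh ... */
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	/* ext4_mark_iloc_dirty() dropped our iloc.bh reference */
	return err;
#endif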
5738
5739static int __ext4_expand_extra_isize(struct inode *inode,
5740				     unsigned int new_extra_isize,
5741				     struct ext4_iloc *iloc,
5742				     handle_t *handle, int *no_expand)
5743{
5744	struct ext4_inode *raw_inode;
5745	struct ext4_xattr_ibody_header *header;
5746	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5747	struct ext4_inode_info *ei = EXT4_I(inode);
5748	int error;
5749
5750	/* this was checked at iget time, but double check for good measure */
5751	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5752	    (ei->i_extra_isize & 3)) {
5753		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5754				 ei->i_extra_isize,
5755				 EXT4_INODE_SIZE(inode->i_sb));
5756		return -EFSCORRUPTED;
5757	}
5758	if ((new_extra_isize < ei->i_extra_isize) ||
5759	    (new_extra_isize < 4) ||
5760	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5761		return -EINVAL;	/* Should never happen */
5762
5763	raw_inode = ext4_raw_inode(iloc);
5764
5765	header = IHDR(inode, raw_inode);
5766
5767	/* No extended attributes present */
5768	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5769	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5770		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5771		       EXT4_I(inode)->i_extra_isize, 0,
5772		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
5773		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5774		return 0;
5775	}
5776
5777	/*
5778	 * We may need to allocate an external xattr block, so we need quotas
5779	 * initialized. Here we can be called with various locks held, so we
5780	 * cannot afford to initialize quotas ourselves. So just bail.
5781	 */
5782	if (dquot_initialize_needed(inode))
5783		return -EAGAIN;
5784
5785	/* try to expand with EAs present */
5786	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5787					   raw_inode, handle);
5788	if (error) {
5789		/*
5790		 * Inode size expansion failed; don't try again
5791		 */
5792		*no_expand = 1;
5793	}
5794
5795	return error;
5796}
5797
5798/*
5799 * Expand an inode by new_extra_isize bytes.
5800 * Returns 0 on success or negative error number on failure.
5801 */
5802static int ext4_try_to_expand_extra_isize(struct inode *inode,
5803					  unsigned int new_extra_isize,
5804					  struct ext4_iloc iloc,
5805					  handle_t *handle)
5806{
5807	int no_expand;
5808	int error;
5809
5810	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5811		return -EOVERFLOW;
5812
5813	/*
5814	 * In nojournal mode, we can immediately attempt to expand
5815	 * the inode.  When journaled, we first need to obtain extra
5816	 * buffer credits since we may write into the EA block
5817	 * with this same handle. If journal_extend fails, then it will
5818	 * only result in a minor loss of functionality for that inode.
5819	 * If this is felt to be critical, then e2fsck should be run to
5820	 * force a large enough s_min_extra_isize.
5821	 */
5822	if (ext4_journal_extend(handle,
5823				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5824		return -ENOSPC;
5825
5826	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5827		return -EBUSY;
5828
5829	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5830					  handle, &no_expand);
5831	ext4_write_unlock_xattr(inode, &no_expand);
5832
5833	return error;
5834}
5835
5836int ext4_expand_extra_isize(struct inode *inode,
5837			    unsigned int new_extra_isize,
5838			    struct ext4_iloc *iloc)
5839{
5840	handle_t *handle;
5841	int no_expand;
5842	int error, rc;
5843
5844	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5845		brelse(iloc->bh);
5846		return -EOVERFLOW;
5847	}
5848
5849	handle = ext4_journal_start(inode, EXT4_HT_INODE,
5850				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5851	if (IS_ERR(handle)) {
5852		error = PTR_ERR(handle);
5853		brelse(iloc->bh);
5854		return error;
5855	}
5856
5857	ext4_write_lock_xattr(inode, &no_expand);
5858
5859	BUFFER_TRACE(iloc->bh, "get_write_access");
5860	error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5861					      EXT4_JTR_NONE);
5862	if (error) {
5863		brelse(iloc->bh);
5864		goto out_unlock;
5865	}
5866
5867	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5868					  handle, &no_expand);
5869
5870	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5871	if (!error)
5872		error = rc;
5873
5874out_unlock:
5875	ext4_write_unlock_xattr(inode, &no_expand);
5876	ext4_journal_stop(handle);
5877	return error;
5878}
5879
5880/*
5881 * What we do here is to mark the in-core inode as clean with respect to inode
5882 * dirtiness (it may still be data-dirty).
5883 * This means that the in-core inode may be reaped by prune_icache
5884 * without having to perform any I/O.  This is a very good thing,
5885 * because *any* task may call prune_icache - even ones which
5886 * have a transaction open against a different journal.
5887 *
5888 * Is this cheating?  Not really.  Sure, we haven't written the
5889 * inode out, but prune_icache isn't a user-visible syncing function.
5890 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5891 * we start and wait on commits.
5892 */
5893int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5894				const char *func, unsigned int line)
5895{
5896	struct ext4_iloc iloc;
5897	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5898	int err;
5899
5900	might_sleep();
5901	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5902	err = ext4_reserve_inode_write(handle, inode, &iloc);
5903	if (err)
5904		goto out;
5905
5906	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5907		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5908					       iloc, handle);
5909
5910	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5911out:
5912	if (unlikely(err))
5913		ext4_error_inode_err(inode, func, line, 0, err,
5914					"mark_inode_dirty error");
5915	return err;
5916}
5917
5918/*
5919 * ext4_dirty_inode() is called from __mark_inode_dirty()
5920 *
5921 * We're really interested in the case where a file is being extended.
5922 * i_size has been changed by generic_commit_write() and we thus need
5923 * to include the updated inode in the current transaction.
5924 *
5925 * Also, dquot_alloc_block() will always dirty the inode when blocks
5926 * are allocated to the file.
5927 *
5928 * If the inode is marked synchronous, we don't honour that here - doing
5929 * so would cause a commit on atime updates, which we don't bother doing.
5930 * We handle synchronous inodes at the highest possible level.
5931 */
5932void ext4_dirty_inode(struct inode *inode, int flags)
5933{
5934	handle_t *handle;
5935
5936	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5937	if (IS_ERR(handle))
5938		return;
5939	ext4_mark_inode_dirty(handle, inode);
5940	ext4_journal_stop(handle);
5941}
5942
5943int ext4_change_inode_journal_flag(struct inode *inode, int val)
5944{
5945	journal_t *journal;
5946	handle_t *handle;
5947	int err;
5948	int alloc_ctx;
5949
5950	/*
5951	 * We have to be very careful here: changing a data block's
5952	 * journaling status dynamically is dangerous.  If we write a
5953	 * data block to the journal, change the status and then delete
5954	 * that block, we risk forgetting to revoke the old log record
5955	 * from the journal and so a subsequent replay can corrupt data.
5956	 * So, first we make sure that the journal is empty and that
5957	 * nobody is changing anything.
5958	 */
5959
5960	journal = EXT4_JOURNAL(inode);
5961	if (!journal)
5962		return 0;
5963	if (is_journal_aborted(journal))
5964		return -EROFS;
5965
5966	/* Wait for all existing dio workers */
5967	inode_dio_wait(inode);
5968
5969	/*
5970	 * Before flushing the journal and switching the inode's aops, we
5971	 * have to flush all dirty data the inode has. There can be
5972	 * outstanding delayed allocations, or unwritten extents created by
5973	 * fallocate or buffered writes in dioread_nolock mode covered by
5974	 * dirty data, which can be converted only after flushing the dirty
5975	 * data (and journalled aops don't know how to handle these cases).
5976	 */
5977	if (val) {
5978		filemap_invalidate_lock(inode->i_mapping);
5979		err = filemap_write_and_wait(inode->i_mapping);
5980		if (err < 0) {
5981			filemap_invalidate_unlock(inode->i_mapping);
5982			return err;
5983		}
5984	}
5985
5986	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
5987	jbd2_journal_lock_updates(journal);
5988
5989	/*
5990	 * OK, there are no updates running now, and all cached data is
5991	 * synced to disk.  We are now in a completely consistent state
5992	 * which doesn't have anything in the journal, and we know that
5993	 * no filesystem updates are running, so it is safe to modify
5994	 * the inode's in-core data-journaling state flag now.
5995	 */
5996
5997	if (val)
5998		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5999	else {
6000		err = jbd2_journal_flush(journal, 0);
6001		if (err < 0) {
6002			jbd2_journal_unlock_updates(journal);
6003			ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6004			return err;
6005		}
6006		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6007	}
6008	ext4_set_aops(inode);
6009
6010	jbd2_journal_unlock_updates(journal);
6011	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6012
6013	if (val)
6014		filemap_invalidate_unlock(inode->i_mapping);
6015
6016	/* Finally we can mark the inode as dirty. */
6017
6018	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6019	if (IS_ERR(handle))
6020		return PTR_ERR(handle);
6021
6022	ext4_fc_mark_ineligible(inode->i_sb,
6023		EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6024	err = ext4_mark_inode_dirty(handle, inode);
6025	ext4_handle_sync(handle);
6026	ext4_journal_stop(handle);
6027	ext4_std_error(inode->i_sb, err);
6028
6029	return err;
6030}
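
/*
 * Editorial example (not part of the kernel source): this function is
 * reached when userspace toggles per-file data journalling, e.g. via
 * chattr +j or the FS_IOC_SETFLAGS ioctl. A minimal sketch; error
 * handling is trimmed and the path is purely illustrative.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <fcntl.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_RDONLY);
	int attr;

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &attr) != 0)
		return 1;
	attr |= FS_JOURNAL_DATA_FL;	/* equivalent of chattr +j */
	if (ioctl(fd, FS_IOC_SETFLAGS, &attr) != 0)
		return 1;
	close(fd);
	return 0;
}
#endif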
6031
6032static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6033			    struct buffer_head *bh)
6034{
6035	return !buffer_mapped(bh);
6036}
6037
6038vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6039{
6040	struct vm_area_struct *vma = vmf->vma;
6041	struct folio *folio = page_folio(vmf->page);
6042	loff_t size;
6043	unsigned long len;
6044	int err;
6045	vm_fault_t ret;
6046	struct file *file = vma->vm_file;
6047	struct inode *inode = file_inode(file);
6048	struct address_space *mapping = inode->i_mapping;
6049	handle_t *handle;
6050	get_block_t *get_block;
6051	int retries = 0;
6052
6053	if (unlikely(IS_IMMUTABLE(inode)))
6054		return VM_FAULT_SIGBUS;
6055
6056	sb_start_pagefault(inode->i_sb);
6057	file_update_time(vma->vm_file);
6058
6059	filemap_invalidate_lock_shared(mapping);
6060
6061	err = ext4_convert_inline_data(inode);
6062	if (err)
6063		goto out_ret;
6064
6065	/*
6066	 * On data journalling we skip straight to the transaction handle:
6067	 * there's no delalloc; page truncation will be checked later; the
6068	 * early return with all buffers mapped (which calculates size/len)
6069	 * can't be used; and there's no dioread_nolock, so only ext4_get_block.
6070	 */
6071	if (ext4_should_journal_data(inode))
6072		goto retry_alloc;
6073
6074	/* Delalloc case is easy... */
6075	if (test_opt(inode->i_sb, DELALLOC) &&
6076	    !ext4_nonda_switch(inode->i_sb)) {
6077		do {
6078			err = block_page_mkwrite(vma, vmf,
6079						   ext4_da_get_block_prep);
6080		} while (err == -ENOSPC &&
6081		       ext4_should_retry_alloc(inode->i_sb, &retries));
6082		goto out_ret;
6083	}
6084
6085	folio_lock(folio);
6086	size = i_size_read(inode);
6087	/* Page got truncated from under us? */
6088	if (folio->mapping != mapping || folio_pos(folio) > size) {
6089		folio_unlock(folio);
6090		ret = VM_FAULT_NOPAGE;
6091		goto out;
6092	}
6093
6094	len = folio_size(folio);
6095	if (folio_pos(folio) + len > size)
6096		len = size - folio_pos(folio);
6097	/*
6098	 * Return if we have all the buffers mapped. This avoids the need to do
6099	 * journal_start/journal_stop which can block and take a long time.
6100	 *
6101	 * This cannot be done for data journalling, as we have to add the
6102	 * inode to the transaction's list to writeprotect pages on commit.
6103	 */
6104	if (folio_buffers(folio)) {
6105		if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6106					    0, len, NULL,
6107					    ext4_bh_unmapped)) {
6108			/* Wait so that we don't change the page under IO */
6109			folio_wait_stable(folio);
6110			ret = VM_FAULT_LOCKED;
6111			goto out;
6112		}
6113	}
6114	folio_unlock(folio);
6115	/* OK, we need to fill the hole... */
6116	if (ext4_should_dioread_nolock(inode))
6117		get_block = ext4_get_block_unwritten;
6118	else
6119		get_block = ext4_get_block;
6120retry_alloc:
6121	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6122				    ext4_writepage_trans_blocks(inode));
6123	if (IS_ERR(handle)) {
6124		ret = VM_FAULT_SIGBUS;
6125		goto out;
6126	}
6127	/*
6128	 * Data journalling can't use block_page_mkwrite() because it
6129	 * will set_buffer_dirty() before do_journal_get_write_access()
6130	 * and thus might hit warning messages for dirty metadata buffers.
6131	 */
6132	if (!ext4_should_journal_data(inode)) {
6133		err = block_page_mkwrite(vma, vmf, get_block);
6134	} else {
6135		folio_lock(folio);
6136		size = i_size_read(inode);
6137		/* Page got truncated from under us? */
6138		if (folio->mapping != mapping || folio_pos(folio) > size) {
6139			ret = VM_FAULT_NOPAGE;
6140			goto out_error;
6141		}
6142
6143		len = folio_size(folio);
6144		if (folio_pos(folio) + len > size)
6145			len = size - folio_pos(folio);
6146
6147		err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6148		if (!err) {
6149			ret = VM_FAULT_SIGBUS;
6150			if (ext4_journal_folio_buffers(handle, folio, len))
6151				goto out_error;
6152		} else {
6153			folio_unlock(folio);
6154		}
6155	}
6156	ext4_journal_stop(handle);
6157	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6158		goto retry_alloc;
6159out_ret:
6160	ret = vmf_fs_error(err);
6161out:
6162	filemap_invalidate_unlock_shared(mapping);
6163	sb_end_pagefault(inode->i_sb);
6164	return ret;
6165out_error:
6166	folio_unlock(folio);
6167	ext4_journal_stop(handle);
6168	goto out;
6169}
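
/*
 * Editorial example (not part of the kernel source): ext4_page_mkwrite()
 * above runs when a shared, file-backed mapping takes its first write
 * fault on a page. A minimal userspace sketch that triggers that path;
 * the path and sizes are purely illustrative.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_RDWR);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* The first store write-faults the page -> ->page_mkwrite runs. */
	memset(p, 'x', 4096);
	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif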