   1/*
   2 * fs/f2fs/data.c
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/fs.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/buffer_head.h>
  14#include <linux/mpage.h>
  15#include <linux/aio.h>
  16#include <linux/writeback.h>
  17#include <linux/backing-dev.h>
  18#include <linux/blkdev.h>
  19#include <linux/bio.h>
  20#include <linux/prefetch.h>
  21
  22#include "f2fs.h"
  23#include "node.h"
  24#include "segment.h"
  25#include <trace/events/f2fs.h>
  26
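/*
 * Read completion handler: mark every page in the bio uptodate on success,
 * or flag it as an error page on failure, then unlock it for waiting readers
 * and drop the bio reference.
 */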
  27static void f2fs_read_end_io(struct bio *bio, int err)
  28{
  29	struct bio_vec *bvec;
  30	int i;
  31
  32	bio_for_each_segment_all(bvec, bio, i) {
  33		struct page *page = bvec->bv_page;
  34
  35		if (!err) {
  36			SetPageUptodate(page);
  37		} else {
  38			ClearPageUptodate(page);
  39			SetPageError(page);
  40		}
  41		unlock_page(page);
  42	}
  43	bio_put(bio);
  44}
  45
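/*
 * Write completion handler: on error, flag the page and its mapping with
 * AS_EIO and stop further checkpoints; in all cases end page writeback,
 * decrement the in-flight writeback count, signal a META_FLUSH submitter
 * waiting in __submit_merged_bio(), and wake up the checkpoint path once no
 * pages remain under writeback.
 */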
  46static void f2fs_write_end_io(struct bio *bio, int err)
  47{
  48	struct f2fs_sb_info *sbi = bio->bi_private;
  49	struct bio_vec *bvec;
  50	int i;
  51
  52	bio_for_each_segment_all(bvec, bio, i) {
  53		struct page *page = bvec->bv_page;
  54
  55		if (unlikely(err)) {
  56			SetPageError(page);
  57			set_bit(AS_EIO, &page->mapping->flags);
  58			f2fs_stop_checkpoint(sbi);
  59		}
  60		end_page_writeback(page);
  61		dec_page_count(sbi, F2FS_WRITEBACK);
  62	}
  63
  64	if (sbi->wait_io) {
  65		complete(sbi->wait_io);
  66		sbi->wait_io = NULL;
  67	}
  68
  69	if (!get_pages(sbi, F2FS_WRITEBACK) &&
  70			!list_empty(&sbi->cp_wait.task_list))
  71		wake_up(&sbi->cp_wait);
  72
  73	bio_put(bio);
  74}
  75
  76/*
  77 * Low-level block read/write IO operations.
  78 */
  79static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
  80				int npages, bool is_read)
  81{
  82	struct bio *bio;
  83
  84	/* No failure on bio allocation */
  85	bio = bio_alloc(GFP_NOIO, npages);
  86
  87	bio->bi_bdev = sbi->sb->s_bdev;
  88	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
  89	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
  90	bio->bi_private = sbi;
  91
  92	return bio;
  93}
  94
  95static void __submit_merged_bio(struct f2fs_bio_info *io)
  96{
  97	struct f2fs_io_info *fio = &io->fio;
  98	int rw;
  99
 100	if (!io->bio)
 101		return;
 102
 103	rw = fio->rw;
 104
 105	if (is_read_io(rw)) {
 106		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
 107						fio->type, io->bio);
 108		submit_bio(rw, io->bio);
 109	} else {
 110		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
 111						fio->type, io->bio);
 112		/*
  113		 * META_FLUSH comes only from the checkpoint procedure, and we
  114		 * must wait for this metadata bio to complete for FS consistency.
 115		 */
 116		if (fio->type == META_FLUSH) {
 117			DECLARE_COMPLETION_ONSTACK(wait);
 118			io->sbi->wait_io = &wait;
 119			submit_bio(rw, io->bio);
 120			wait_for_completion(&wait);
 121		} else {
 122			submit_bio(rw, io->bio);
 123		}
 124	}
 125
 126	io->bio = NULL;
 127}
 128
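/*
 * Submit whatever bio is currently being merged for the given page type.
 * The checkpoint procedure passes META_FLUSH so that the pending metadata
 * bio is issued with FLUSH/FUA semantics and waited on in
 * __submit_merged_bio().
 */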
 129void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 130				enum page_type type, int rw)
 131{
 132	enum page_type btype = PAGE_TYPE_OF_BIO(type);
 133	struct f2fs_bio_info *io;
 134
 135	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
 136
 137	down_write(&io->io_rwsem);
 138
 139	/* change META to META_FLUSH in the checkpoint procedure */
 140	if (type >= META_FLUSH) {
 141		io->fio.type = META_FLUSH;
 142		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
 143	}
 144	__submit_merged_bio(io);
 145	up_write(&io->io_rwsem);
 146}
 147
 148/*
  149 * Fill the locked page with data located at the given block address.
  150 * The page is returned unlocked.
 151 */
 152int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
 153					block_t blk_addr, int rw)
 154{
 155	struct bio *bio;
 156
 157	trace_f2fs_submit_page_bio(page, blk_addr, rw);
 158
 159	/* Allocate a new bio */
 160	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));
 161
 162	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 163		bio_put(bio);
 164		f2fs_put_page(page, 1);
 165		return -EFAULT;
 166	}
 167
 168	submit_bio(rw, bio);
 169	return 0;
 170}
 171
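/*
 * Merge the page into the in-flight bio for its page type.  The pending bio
 * is submitted first when the new block is not contiguous with the last one
 * or the rw flags differ; a fresh bio of up to MAX_BIO_BLOCKS pages is then
 * allocated as needed.
 */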
 172void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
 173			block_t blk_addr, struct f2fs_io_info *fio)
 174{
 175	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 176	struct f2fs_bio_info *io;
 177	bool is_read = is_read_io(fio->rw);
 178
 179	io = is_read ? &sbi->read_io : &sbi->write_io[btype];
 180
 181	verify_block_addr(sbi, blk_addr);
 182
 183	down_write(&io->io_rwsem);
 184
 185	if (!is_read)
 186		inc_page_count(sbi, F2FS_WRITEBACK);
 187
 188	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
 189						io->fio.rw != fio->rw))
 190		__submit_merged_bio(io);
 191alloc_new:
 192	if (io->bio == NULL) {
 193		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 194
 195		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
 196		io->fio = *fio;
 197	}
 198
 199	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
 200							PAGE_CACHE_SIZE) {
 201		__submit_merged_bio(io);
 202		goto alloc_new;
 203	}
 204
 205	io->last_block_in_bio = blk_addr;
 206
 207	up_write(&io->io_rwsem);
 208	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
 209}
 210
 211/*
 212 * Lock ordering for the change of data block address:
 213 * ->data_page
 214 *  ->node_page
 215 *    update block addresses in the node page
 216 */
 217static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
 218{
 219	struct f2fs_node *rn;
 220	__le32 *addr_array;
 221	struct page *node_page = dn->node_page;
 222	unsigned int ofs_in_node = dn->ofs_in_node;
 223
 224	f2fs_wait_on_page_writeback(node_page, NODE);
 225
 226	rn = F2FS_NODE(node_page);
 227
 228	/* Get physical address of data block */
 229	addr_array = blkaddr_in_node(rn);
 230	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
 231	set_page_dirty(node_page);
 232}
 233
 234int reserve_new_block(struct dnode_of_data *dn)
 235{
 236	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 237
 238	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 239		return -EPERM;
 240	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
 241		return -ENOSPC;
 242
 243	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
 244
 245	__set_data_blkaddr(dn, NEW_ADDR);
 246	dn->data_blkaddr = NEW_ADDR;
 247	mark_inode_dirty(dn->inode);
 248	sync_inode_page(dn);
 249	return 0;
 250}
 251
 252int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 253{
 254	bool need_put = dn->inode_page ? false : true;
 255	int err;
 256
 257	/* if inode_page exists, index should be zero */
 258	f2fs_bug_on(!need_put && index);
 259
 260	err = get_dnode_of_data(dn, index, ALLOC_NODE);
 261	if (err)
 262		return err;
 263
 264	if (dn->data_blkaddr == NULL_ADDR)
 265		err = reserve_new_block(dn);
 266	if (err || need_put)
 267		f2fs_put_dnode(dn);
 268	return err;
 269}
 270
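/*
 * Look up the inode's single-extent cache.  If pgofs falls inside the cached
 * extent, map bh_result to the matching block address (covering the blocks
 * remaining in the extent) and return 1; otherwise return 0 so that the
 * caller walks the node tree instead.
 */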
 271static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
 272					struct buffer_head *bh_result)
 273{
 274	struct f2fs_inode_info *fi = F2FS_I(inode);
 275	pgoff_t start_fofs, end_fofs;
 276	block_t start_blkaddr;
 277
 278	if (is_inode_flag_set(fi, FI_NO_EXTENT))
 279		return 0;
 280
 281	read_lock(&fi->ext.ext_lock);
 282	if (fi->ext.len == 0) {
 283		read_unlock(&fi->ext.ext_lock);
 284		return 0;
 285	}
 286
 287	stat_inc_total_hit(inode->i_sb);
 288
 289	start_fofs = fi->ext.fofs;
 290	end_fofs = fi->ext.fofs + fi->ext.len - 1;
 291	start_blkaddr = fi->ext.blk_addr;
 292
 293	if (pgofs >= start_fofs && pgofs <= end_fofs) {
 294		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
 295		size_t count;
 296
 297		clear_buffer_new(bh_result);
 298		map_bh(bh_result, inode->i_sb,
 299				start_blkaddr + pgofs - start_fofs);
 300		count = end_fofs - pgofs + 1;
 301		if (count < (UINT_MAX >> blkbits))
 302			bh_result->b_size = (count << blkbits);
 303		else
 304			bh_result->b_size = UINT_MAX;
 305
 306		stat_inc_read_hit(inode->i_sb);
 307		read_unlock(&fi->ext.ext_lock);
 308		return 1;
 309	}
 310	read_unlock(&fi->ext.ext_lock);
 311	return 0;
 312}
 313
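/*
 * Record the new block address in the node page, then keep the in-memory
 * extent cache consistent: start a new extent, merge at the front or back,
 * split on an overwrite in the middle, and drop the cache entirely once it
 * becomes too small to be useful.
 */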
 314void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
 315{
 316	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
 317	pgoff_t fofs, start_fofs, end_fofs;
 318	block_t start_blkaddr, end_blkaddr;
 319	int need_update = true;
 320
 321	f2fs_bug_on(blk_addr == NEW_ADDR);
 322	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
 323							dn->ofs_in_node;
 324
 325	/* Update the page address in the parent node */
 326	__set_data_blkaddr(dn, blk_addr);
 327
 328	if (is_inode_flag_set(fi, FI_NO_EXTENT))
 329		return;
 330
 331	write_lock(&fi->ext.ext_lock);
 332
 333	start_fofs = fi->ext.fofs;
 334	end_fofs = fi->ext.fofs + fi->ext.len - 1;
 335	start_blkaddr = fi->ext.blk_addr;
 336	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
 337
 338	/* Drop and initialize the matched extent */
 339	if (fi->ext.len == 1 && fofs == start_fofs)
 340		fi->ext.len = 0;
 341
 342	/* Initial extent */
 343	if (fi->ext.len == 0) {
 344		if (blk_addr != NULL_ADDR) {
 345			fi->ext.fofs = fofs;
 346			fi->ext.blk_addr = blk_addr;
 347			fi->ext.len = 1;
 348		}
 349		goto end_update;
 350	}
 351
 352	/* Front merge */
 353	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
 354		fi->ext.fofs--;
 355		fi->ext.blk_addr--;
 356		fi->ext.len++;
 357		goto end_update;
 358	}
 359
 360	/* Back merge */
 361	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
 362		fi->ext.len++;
 363		goto end_update;
 364	}
 365
 366	/* Split the existing extent */
 367	if (fi->ext.len > 1 &&
 368		fofs >= start_fofs && fofs <= end_fofs) {
 369		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
 370			fi->ext.len = fofs - start_fofs;
 371		} else {
 372			fi->ext.fofs = fofs + 1;
 373			fi->ext.blk_addr = start_blkaddr +
 374					fofs - start_fofs + 1;
 375			fi->ext.len -= fofs - start_fofs + 1;
 376		}
 377	} else {
 378		need_update = false;
 379	}
 380
 381	/* Finally, if the extent is very fragmented, let's drop the cache. */
 382	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
 383		fi->ext.len = 0;
 384		set_inode_flag(fi, FI_NO_EXTENT);
 385		need_update = true;
 386	}
 387end_update:
 388	write_unlock(&fi->ext.ext_lock);
 389	if (need_update)
 390		sync_inode_page(dn);
 391	return;
 392}
 393
 394struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 395{
 396	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 397	struct address_space *mapping = inode->i_mapping;
 398	struct dnode_of_data dn;
 399	struct page *page;
 400	int err;
 401
 402	page = find_get_page(mapping, index);
 403	if (page && PageUptodate(page))
 404		return page;
 405	f2fs_put_page(page, 0);
 406
 407	set_new_dnode(&dn, inode, NULL, NULL, 0);
 408	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 409	if (err)
 410		return ERR_PTR(err);
 411	f2fs_put_dnode(&dn);
 412
 413	if (dn.data_blkaddr == NULL_ADDR)
 414		return ERR_PTR(-ENOENT);
 415
  416	/* A block preallocated by fallocate() has no cached page, but its address is NEW_ADDR */
 417	if (unlikely(dn.data_blkaddr == NEW_ADDR))
 418		return ERR_PTR(-EINVAL);
 419
 420	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
 421	if (!page)
 422		return ERR_PTR(-ENOMEM);
 423
 424	if (PageUptodate(page)) {
 425		unlock_page(page);
 426		return page;
 427	}
 428
 429	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
 430					sync ? READ_SYNC : READA);
 431	if (err)
 432		return ERR_PTR(err);
 433
 434	if (sync) {
 435		wait_on_page_locked(page);
 436		if (unlikely(!PageUptodate(page))) {
 437			f2fs_put_page(page, 0);
 438			return ERR_PTR(-EIO);
 439		}
 440	}
 441	return page;
 442}
 443
 444/*
  445 * If it tries to access a hole, return an error, because the callers
  446 * (functions in dir.c and GC) should be able to know
  447 * whether this page exists or not.
 448 */
 449struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
 450{
 451	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 452	struct address_space *mapping = inode->i_mapping;
 453	struct dnode_of_data dn;
 454	struct page *page;
 455	int err;
 456
 457repeat:
 458	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
 459	if (!page)
 460		return ERR_PTR(-ENOMEM);
 461
 462	set_new_dnode(&dn, inode, NULL, NULL, 0);
 463	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 464	if (err) {
 465		f2fs_put_page(page, 1);
 466		return ERR_PTR(err);
 467	}
 468	f2fs_put_dnode(&dn);
 469
 470	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
 471		f2fs_put_page(page, 1);
 472		return ERR_PTR(-ENOENT);
 473	}
 474
 475	if (PageUptodate(page))
 476		return page;
 477
 478	/*
  479	 * A new dentry page may have been allocated but never written, because
  480	 * its new inode page couldn't be allocated due to -ENOSPC.
  481	 * In such a case, its blkaddr remains NEW_ADDR.
  482	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
 483	 */
 484	if (dn.data_blkaddr == NEW_ADDR) {
 485		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 486		SetPageUptodate(page);
 487		return page;
 488	}
 489
 490	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
 491	if (err)
 492		return ERR_PTR(err);
 493
 494	lock_page(page);
 495	if (unlikely(!PageUptodate(page))) {
 496		f2fs_put_page(page, 1);
 497		return ERR_PTR(-EIO);
 498	}
 499	if (unlikely(page->mapping != mapping)) {
 500		f2fs_put_page(page, 1);
 501		goto repeat;
 502	}
 503	return page;
 504}
 505
 506/*
 507 * Caller ensures that this data page is never allocated.
 508 * A new zero-filled data page is allocated in the page cache.
 509 *
 510 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 511 * f2fs_unlock_op().
  512 * Note that ipage is set only by make_empty_dir.
 513 */
 514struct page *get_new_data_page(struct inode *inode,
 515		struct page *ipage, pgoff_t index, bool new_i_size)
 516{
 517	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 518	struct address_space *mapping = inode->i_mapping;
 519	struct page *page;
 520	struct dnode_of_data dn;
 521	int err;
 522
 523	set_new_dnode(&dn, inode, ipage, NULL, 0);
 524	err = f2fs_reserve_block(&dn, index);
 525	if (err)
 526		return ERR_PTR(err);
 527repeat:
 528	page = grab_cache_page(mapping, index);
 529	if (!page) {
 530		err = -ENOMEM;
 531		goto put_err;
 532	}
 533
 534	if (PageUptodate(page))
 535		return page;
 536
 537	if (dn.data_blkaddr == NEW_ADDR) {
 538		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 539		SetPageUptodate(page);
 540	} else {
 541		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
 542								READ_SYNC);
 543		if (err)
 544			goto put_err;
 545
 546		lock_page(page);
 547		if (unlikely(!PageUptodate(page))) {
 548			f2fs_put_page(page, 1);
 549			err = -EIO;
 550			goto put_err;
 551		}
 552		if (unlikely(page->mapping != mapping)) {
 553			f2fs_put_page(page, 1);
 554			goto repeat;
 555		}
 556	}
 557
 558	if (new_i_size &&
 559		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
 560		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
 561		/* Only the directory inode sets new_i_size */
 562		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 563	}
 564	return page;
 565
 566put_err:
 567	f2fs_put_dnode(&dn);
 568	return ERR_PTR(err);
 569}
 570
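/*
 * Allocate one new data block for get_data_block(): charge the block against
 * the inode's valid block count, pick a new address in the warm data segment,
 * and record it in both the node page and the dnode.
 */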
 571static int __allocate_data_block(struct dnode_of_data *dn)
 572{
 573	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 574	struct f2fs_summary sum;
 575	block_t new_blkaddr;
 576	struct node_info ni;
 577	int type;
 578
 579	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 580		return -EPERM;
 581	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
 582		return -ENOSPC;
 583
 584	__set_data_blkaddr(dn, NEW_ADDR);
 585	dn->data_blkaddr = NEW_ADDR;
 586
 587	get_node_info(sbi, dn->nid, &ni);
 588	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 589
 590	type = CURSEG_WARM_DATA;
 591
 592	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);
 593
  594	/* direct IO doesn't use the extent cache, to maximize performance */
 595	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
 596	update_extent_cache(new_blkaddr, dn);
 597	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
 598
 599	dn->data_blkaddr = new_blkaddr;
 600	return 0;
 601}
 602
 603/*
  604 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
  605 * If the original data blocks are already allocated, give them to the blockdev.
 606 * Otherwise,
 607 *     a. preallocate requested block addresses
 608 *     b. do not use extent cache for better performance
 609 *     c. give the block addresses to blockdev
 610 */
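/*
 * Illustration (hypothetical numbers): with 4KB blocks, a 16KB direct read at
 * file offset 0 arrives here as iblock 0 with bh_result->b_size == 16KB; the
 * loop below extends b_size over the run of consecutive block addresses, so a
 * fully contiguous file returns one mapped bh for all four blocks, while a
 * fragmented one returns only the contiguous prefix.
 */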
 611static int get_data_block(struct inode *inode, sector_t iblock,
 612			struct buffer_head *bh_result, int create)
 613{
 614	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 615	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
 616	unsigned maxblocks = bh_result->b_size >> blkbits;
 617	struct dnode_of_data dn;
 618	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
 619	pgoff_t pgofs, end_offset;
 620	int err = 0, ofs = 1;
 621	bool allocated = false;
 622
 623	/* Get the page offset from the block offset(iblock) */
 624	pgofs =	(pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
 625
 626	if (check_extent_cache(inode, pgofs, bh_result))
 627		goto out;
 628
 629	if (create)
 630		f2fs_lock_op(sbi);
 631
 632	/* When reading holes, we need its node page */
 633	set_new_dnode(&dn, inode, NULL, NULL, 0);
 634	err = get_dnode_of_data(&dn, pgofs, mode);
 635	if (err) {
 636		if (err == -ENOENT)
 637			err = 0;
 638		goto unlock_out;
 639	}
 640	if (dn.data_blkaddr == NEW_ADDR)
 641		goto put_out;
 642
 643	if (dn.data_blkaddr != NULL_ADDR) {
 644		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
 645	} else if (create) {
 646		err = __allocate_data_block(&dn);
 647		if (err)
 648			goto put_out;
 649		allocated = true;
 650		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
 651	} else {
 652		goto put_out;
 653	}
 654
 655	end_offset = IS_INODE(dn.node_page) ?
 656			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
 657	bh_result->b_size = (((size_t)1) << blkbits);
 658	dn.ofs_in_node++;
 659	pgofs++;
 660
 661get_next:
 662	if (dn.ofs_in_node >= end_offset) {
 663		if (allocated)
 664			sync_inode_page(&dn);
 665		allocated = false;
 666		f2fs_put_dnode(&dn);
 667
 668		set_new_dnode(&dn, inode, NULL, NULL, 0);
 669		err = get_dnode_of_data(&dn, pgofs, mode);
 670		if (err) {
 671			if (err == -ENOENT)
 672				err = 0;
 673			goto unlock_out;
 674		}
 675		if (dn.data_blkaddr == NEW_ADDR)
 676			goto put_out;
 677
 678		end_offset = IS_INODE(dn.node_page) ?
 679			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
 680	}
 681
 682	if (maxblocks > (bh_result->b_size >> blkbits)) {
 683		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 684		if (blkaddr == NULL_ADDR && create) {
 685			err = __allocate_data_block(&dn);
 686			if (err)
 687				goto sync_out;
 688			allocated = true;
 689			blkaddr = dn.data_blkaddr;
 690		}
 691		/* Give more consecutive addresses for the read ahead */
 692		if (blkaddr == (bh_result->b_blocknr + ofs)) {
 693			ofs++;
 694			dn.ofs_in_node++;
 695			pgofs++;
 696			bh_result->b_size += (((size_t)1) << blkbits);
 697			goto get_next;
 698		}
 699	}
 700sync_out:
 701	if (allocated)
 702		sync_inode_page(&dn);
 703put_out:
 704	f2fs_put_dnode(&dn);
 705unlock_out:
 706	if (create)
 707		f2fs_unlock_op(sbi);
 708out:
 709	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
 710	return err;
 711}
 712
 713static int f2fs_read_data_page(struct file *file, struct page *page)
 714{
 715	struct inode *inode = page->mapping->host;
 716	int ret;
 717
  718	/* If the file has inline data, try to read it directly */
 719	if (f2fs_has_inline_data(inode))
 720		ret = f2fs_read_inline_data(inode, page);
 721	else
 722		ret = mpage_readpage(page, get_data_block);
 723
 724	return ret;
 725}
 726
 727static int f2fs_read_data_pages(struct file *file,
 728			struct address_space *mapping,
 729			struct list_head *pages, unsigned nr_pages)
 730{
 731	struct inode *inode = file->f_mapping->host;
 732
 733	/* If the file has inline data, skip readpages */
 734	if (f2fs_has_inline_data(inode))
 735		return 0;
 736
 737	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
 738}
 739
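/*
 * Write out one dirty data page: look up its current block address; if the
 * page was truncated meanwhile (NULL_ADDR) do nothing, otherwise either
 * rewrite the block in place when SSR makes that preferable, or write it to
 * a newly allocated address and update the extent cache.
 */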
 740int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
 741{
 742	struct inode *inode = page->mapping->host;
 743	block_t old_blkaddr, new_blkaddr;
 744	struct dnode_of_data dn;
 745	int err = 0;
 746
 747	set_new_dnode(&dn, inode, NULL, NULL, 0);
 748	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 749	if (err)
 750		return err;
 751
 752	old_blkaddr = dn.data_blkaddr;
 753
 754	/* This page is already truncated */
 755	if (old_blkaddr == NULL_ADDR)
 756		goto out_writepage;
 757
 758	set_page_writeback(page);
 759
 760	/*
  761	 * If the current allocation needs SSR,
  762	 * it is better to do in-place writes for the updated data.
 763	 */
 764	if (unlikely(old_blkaddr != NEW_ADDR &&
 765			!is_cold_data(page) &&
 766			need_inplace_update(inode))) {
 767		rewrite_data_page(page, old_blkaddr, fio);
 768	} else {
 769		write_data_page(page, &dn, &new_blkaddr, fio);
 770		update_extent_cache(new_blkaddr, &dn);
 771	}
 772out_writepage:
 773	f2fs_put_dnode(&dn);
 774	return err;
 775}
 776
 777static int f2fs_write_data_page(struct page *page,
 778					struct writeback_control *wbc)
 779{
 780	struct inode *inode = page->mapping->host;
 781	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 782	loff_t i_size = i_size_read(inode);
 783	const pgoff_t end_index = ((unsigned long long) i_size)
 784							>> PAGE_CACHE_SHIFT;
 785	unsigned offset = 0;
 786	bool need_balance_fs = false;
 787	int err = 0;
 788	struct f2fs_io_info fio = {
 789		.type = DATA,
 790		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
 791	};
 792
 793	if (page->index < end_index)
 794		goto write;
 795
 796	/*
  797	 * If the offset is beyond the end of the file,
 798	 * this page does not have to be written to disk.
 799	 */
 800	offset = i_size & (PAGE_CACHE_SIZE - 1);
 801	if ((page->index >= end_index + 1) || !offset) {
 802		inode_dec_dirty_dents(inode);
 803		goto out;
 804	}
 805
 806	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 807write:
 808	if (unlikely(sbi->por_doing))
 809		goto redirty_out;
 810
 811	/* Dentry blocks are controlled by checkpoint */
 812	if (S_ISDIR(inode->i_mode)) {
 813		inode_dec_dirty_dents(inode);
 814		err = do_write_data_page(page, &fio);
 815		goto done;
 816	}
 817
 818	if (!wbc->for_reclaim)
 819		need_balance_fs = true;
 820	else if (has_not_enough_free_secs(sbi, 0))
 821		goto redirty_out;
 822
 823	f2fs_lock_op(sbi);
 824	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
 825		err = f2fs_write_inline_data(inode, page, offset);
 826	else
 827		err = do_write_data_page(page, &fio);
 828	f2fs_unlock_op(sbi);
 829done:
 830	if (err && err != -ENOENT)
 831		goto redirty_out;
 832
 833	clear_cold_data(page);
 834out:
 835	unlock_page(page);
 836	if (need_balance_fs)
 837		f2fs_balance_fs(sbi);
 838	return 0;
 839
 840redirty_out:
 841	wbc->pages_skipped++;
 842	account_page_redirty(page);
 843	set_page_dirty(page);
 844	return AOP_WRITEPAGE_ACTIVATE;
 845}
 846
 847static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
 848			void *data)
 849{
 850	struct address_space *mapping = data;
 851	int ret = mapping->a_ops->writepage(page, wbc);
 852	mapping_set_error(mapping, ret);
 853	return ret;
 854}
 855
 856static int f2fs_write_data_pages(struct address_space *mapping,
 857			    struct writeback_control *wbc)
 858{
 859	struct inode *inode = mapping->host;
 860	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 861	bool locked = false;
 862	int ret;
 863	long diff;
 864
  865	/* deal with chardevs and other special files */
 866	if (!mapping->a_ops->writepage)
 867		return 0;
 868
 869	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
 870			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA))
 871		goto skip_write;
 872
 873	diff = nr_pages_to_write(sbi, DATA, wbc);
 874
 875	if (!S_ISDIR(inode->i_mode)) {
 876		mutex_lock(&sbi->writepages);
 877		locked = true;
 878	}
 879	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
 880	if (locked)
 881		mutex_unlock(&sbi->writepages);
 882
 883	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 884
 885	remove_dirty_dir_inode(inode);
 886
 887	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
 888	return ret;
 889
 890skip_write:
 891	wbc->pages_skipped += get_dirty_dents(inode);
 892	return 0;
 893}
 894
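/*
 * Prepare a page for a buffered write: convert inline data once the write
 * outgrows it, reserve a block for the page, and zero or read in the existing
 * contents unless the write covers the whole page.
 */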
 895static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 896		loff_t pos, unsigned len, unsigned flags,
 897		struct page **pagep, void **fsdata)
 898{
 899	struct inode *inode = mapping->host;
 900	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 901	struct page *page;
 902	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 903	struct dnode_of_data dn;
 904	int err = 0;
 905
 906	f2fs_balance_fs(sbi);
 907repeat:
 908	err = f2fs_convert_inline_data(inode, pos + len);
 909	if (err)
 910		return err;
 911
 912	page = grab_cache_page_write_begin(mapping, index, flags);
 913	if (!page)
 914		return -ENOMEM;
 915	*pagep = page;
 916
 917	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
 918		goto inline_data;
 919
 920	f2fs_lock_op(sbi);
 921	set_new_dnode(&dn, inode, NULL, NULL, 0);
 922	err = f2fs_reserve_block(&dn, index);
 923	f2fs_unlock_op(sbi);
 924
 925	if (err) {
 926		f2fs_put_page(page, 1);
 927		return err;
 928	}
 929inline_data:
 930	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
 931		return 0;
 932
 933	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 934		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
 935		unsigned end = start + len;
 936
 937		/* Reading beyond i_size is simple: memset to zero */
 938		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
 939		goto out;
 940	}
 941
 942	if (dn.data_blkaddr == NEW_ADDR) {
 943		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 944	} else {
 945		if (f2fs_has_inline_data(inode)) {
 946			err = f2fs_read_inline_data(inode, page);
 947			if (err) {
 948				page_cache_release(page);
 949				return err;
 950			}
 951		} else {
 952			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
 953							READ_SYNC);
 954			if (err)
 955				return err;
 956		}
 957
 958		lock_page(page);
 959		if (unlikely(!PageUptodate(page))) {
 960			f2fs_put_page(page, 1);
 961			return -EIO;
 962		}
 963		if (unlikely(page->mapping != mapping)) {
 964			f2fs_put_page(page, 1);
 965			goto repeat;
 966		}
 967	}
 968out:
 969	SetPageUptodate(page);
 970	clear_cold_data(page);
 971	return 0;
 972}
 973
 974static int f2fs_write_end(struct file *file,
 975			struct address_space *mapping,
 976			loff_t pos, unsigned len, unsigned copied,
 977			struct page *page, void *fsdata)
 978{
 979	struct inode *inode = page->mapping->host;
 980
 981	SetPageUptodate(page);
 982	set_page_dirty(page);
 983
 984	if (pos + copied > i_size_read(inode)) {
 985		i_size_write(inode, pos + copied);
 986		mark_inode_dirty(inode);
 987		update_inode_page(inode);
 988	}
 989
 990	f2fs_put_page(page, 1);
 991	return copied;
 992}
 993
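/*
 * Direct I/O sanity check: reads are always allowed, while writes whose
 * offset or iovec lengths are not block-aligned are rejected, which makes
 * f2fs_direct_IO() fall back to buffered I/O.
 */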
 994static int check_direct_IO(struct inode *inode, int rw,
 995		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
 996{
 997	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 998	int i;
 999
1000	if (rw == READ)
1001		return 0;
1002
1003	if (offset & blocksize_mask)
1004		return -EINVAL;
1005
1006	for (i = 0; i < nr_segs; i++)
1007		if (iov[i].iov_len & blocksize_mask)
1008			return -EINVAL;
1009	return 0;
1010}
1011
1012static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
1013		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
1014{
1015	struct file *file = iocb->ki_filp;
1016	struct inode *inode = file->f_mapping->host;
1017
1018	/* Let buffer I/O handle the inline data case. */
1019	if (f2fs_has_inline_data(inode))
1020		return 0;
1021
1022	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
1023		return 0;
1024
1025	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1026							get_data_block);
1027}
1028
1029static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
1030				      unsigned int length)
1031{
1032	struct inode *inode = page->mapping->host;
1033	if (PageDirty(page))
1034		inode_dec_dirty_dents(inode);
1035	ClearPagePrivate(page);
1036}
1037
1038static int f2fs_release_data_page(struct page *page, gfp_t wait)
1039{
1040	ClearPagePrivate(page);
1041	return 1;
1042}
1043
1044static int f2fs_set_data_page_dirty(struct page *page)
1045{
1046	struct address_space *mapping = page->mapping;
1047	struct inode *inode = mapping->host;
1048
1049	trace_f2fs_set_page_dirty(page, DATA);
1050
1051	SetPageUptodate(page);
1052	mark_inode_dirty(inode);
1053
1054	if (!PageDirty(page)) {
1055		__set_page_dirty_nobuffers(page);
1056		set_dirty_dir_page(inode, page);
1057		return 1;
1058	}
1059	return 0;
1060}
1061
1062static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
1063{
1064	return generic_block_bmap(mapping, block, get_data_block);
1065}
1066
1067const struct address_space_operations f2fs_dblock_aops = {
1068	.readpage	= f2fs_read_data_page,
1069	.readpages	= f2fs_read_data_pages,
1070	.writepage	= f2fs_write_data_page,
1071	.writepages	= f2fs_write_data_pages,
1072	.write_begin	= f2fs_write_begin,
1073	.write_end	= f2fs_write_end,
1074	.set_page_dirty	= f2fs_set_data_page_dirty,
1075	.invalidatepage	= f2fs_invalidate_data_page,
1076	.releasepage	= f2fs_release_data_page,
1077	.direct_IO	= f2fs_direct_IO,
1078	.bmap		= f2fs_bmap,
1079};