v3.15 (fs/gfs2/aops.c)
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/sched.h>
  11#include <linux/slab.h>
  12#include <linux/spinlock.h>
  13#include <linux/completion.h>
  14#include <linux/buffer_head.h>
  15#include <linux/pagemap.h>
  16#include <linux/pagevec.h>
  17#include <linux/mpage.h>
  18#include <linux/fs.h>
  19#include <linux/writeback.h>
  20#include <linux/swap.h>
  21#include <linux/gfs2_ondisk.h>
  22#include <linux/backing-dev.h>
  23#include <linux/aio.h>
  24#include <trace/events/writeback.h>
  25
  26#include "gfs2.h"
  27#include "incore.h"
  28#include "bmap.h"
  29#include "glock.h"
  30#include "inode.h"
  31#include "log.h"
  32#include "meta_io.h"
  33#include "quota.h"
  34#include "trans.h"
  35#include "rgrp.h"
  36#include "super.h"
  37#include "util.h"
  38#include "glops.h"
  39
  40
  41static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
  42				   unsigned int from, unsigned int to)
  43{
  44	struct buffer_head *head = page_buffers(page);
  45	unsigned int bsize = head->b_size;
  46	struct buffer_head *bh;
  47	unsigned int start, end;
  48
  49	for (bh = head, start = 0; bh != head || !start;
  50	     bh = bh->b_this_page, start = end) {
  51		end = start + bsize;
  52		if (end <= from || start >= to)
  53			continue;
  54		if (gfs2_is_jdata(ip))
  55			set_buffer_uptodate(bh);
  56		gfs2_trans_add_data(ip->i_gl, bh);
  57	}
  58}
  59
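A minimal standalone sketch (not from the kernel tree) of the overlap test in the loop above, assuming 512-byte buffer_heads in a 4096-byte page; only buffers that intersect the written byte range [from, to) get added to the transaction:

#include <stdio.h>

int main(void)
{
	unsigned int bsize = 512, psize = 4096;	/* assumed geometry */
	unsigned int from = 700, to = 1500;	/* byte range written */
	unsigned int start, end;

	for (start = 0; start < psize; start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;	/* buffer entirely outside the write */
		printf("buffer [%u,%u) overlaps write [%u,%u)\n",
		       start, end, from, to);
	}
	return 0;	/* prints [512,1024) and [1024,1536) */
}
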
  60/**
  61 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
  62 * @inode: The inode
  63 * @lblock: The block number to look up
  64 * @bh_result: The buffer head to return the result in
  65 * @create: Non-zero if we may add block to the file
  66 *
  67 * Returns: errno
  68 */
  69
  70static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
  71				  struct buffer_head *bh_result, int create)
  72{
  73	int error;
  74
  75	error = gfs2_block_map(inode, lblock, bh_result, 0);
  76	if (error)
  77		return error;
  78	if (!buffer_mapped(bh_result))
  79		return -EIO;
  80	return 0;
  81}
  82
  83static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
  84				 struct buffer_head *bh_result, int create)
  85{
  86	return gfs2_block_map(inode, lblock, bh_result, 0);
  87}
  88
  89/**
  90 * gfs2_writepage_common - Common bits of writepage
  91 * @page: The page to be written
  92 * @wbc: The writeback control
  93 *
  94 * Returns: 1 if writepage is ok, otherwise an error code, or zero if no error.
  95 */
  96
  97static int gfs2_writepage_common(struct page *page,
  98				 struct writeback_control *wbc)
  99{
 100	struct inode *inode = page->mapping->host;
 101	struct gfs2_inode *ip = GFS2_I(inode);
 102	struct gfs2_sbd *sdp = GFS2_SB(inode);
 103	loff_t i_size = i_size_read(inode);
 104	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 105	unsigned offset;
 106
 107	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
 108		goto out;
 109	if (current->journal_info)
 110		goto redirty;
 111	/* Is the page fully outside i_size? (truncate in progress) */
 112	offset = i_size & (PAGE_CACHE_SIZE-1);
 113	if (page->index > end_index || (page->index == end_index && !offset)) {
 114		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 115		goto out;
 116	}
 117	return 1;
 118redirty:
 119	redirty_page_for_writepage(wbc, page);
 120out:
 121	unlock_page(page);
 122	return 0;
 123}
 124
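A worked example of the EOF arithmetic above, assuming 4096-byte pages (PAGE_CACHE_SHIFT == 12): a page whose index is past end_index, or equal to it when i_size is page aligned, lies fully beyond EOF and is invalidated rather than written.

#include <stdio.h>

int main(void)
{
	unsigned long long i_size = 10000;	/* example file size */
	unsigned long end_index = i_size >> 12;	/* == 2 */
	unsigned offset = i_size & (4096 - 1);	/* == 1808 */

	/* page 2 holds the 1808-byte tail; pages 3+ are fully past EOF */
	printf("end_index=%lu offset=%u\n", end_index, offset);
	return 0;
}
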
 125/**
 126 * gfs2_writepage - Write page for writeback mappings
 127 * @page: The page
 128 * @wbc: The writeback control
 129 *
 130 */
 131
 132static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
 133{
 134	int ret;
 135
 136	ret = gfs2_writepage_common(page, wbc);
 137	if (ret <= 0)
 138		return ret;
 139
 140	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
 141}
 142
 143/**
 144 * __gfs2_jdata_writepage - The core of jdata writepage
 145 * @page: The page to write
 146 * @wbc: The writeback control
 147 *
 148 * This is shared between writepage and writepages and implements the
 149 * core of the writepage operation. If a transaction is required then
 150 * PageChecked will have been set and the transaction will have
 151 * already been started before this is called.
 152 */
 153
 154static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
 155{
 156	struct inode *inode = page->mapping->host;
 157	struct gfs2_inode *ip = GFS2_I(inode);
 158	struct gfs2_sbd *sdp = GFS2_SB(inode);
 159
 160	if (PageChecked(page)) {
 161		ClearPageChecked(page);
 162		if (!page_has_buffers(page)) {
 163			create_empty_buffers(page, inode->i_sb->s_blocksize,
 164					     (1 << BH_Dirty)|(1 << BH_Uptodate));
 165		}
 166		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
 167	}
 168	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
 169}
 170
 171/**
 172 * gfs2_jdata_writepage - Write complete page
 173 * @page: Page to write
 174 *
 175 * Returns: errno
 176 *
 177 */
 178
 179static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
 180{
 181	struct inode *inode = page->mapping->host;
 182	struct gfs2_sbd *sdp = GFS2_SB(inode);
 183	int ret;
 184	int done_trans = 0;
 185
 186	if (PageChecked(page)) {
 187		if (wbc->sync_mode != WB_SYNC_ALL)
 188			goto out_ignore;
 189		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
 190		if (ret)
 191			goto out_ignore;
 192		done_trans = 1;
 193	}
 194	ret = gfs2_writepage_common(page, wbc);
 195	if (ret > 0)
 196		ret = __gfs2_jdata_writepage(page, wbc);
 197	if (done_trans)
 198		gfs2_trans_end(sdp);
 199	return ret;
 200
 201out_ignore:
 202	redirty_page_for_writepage(wbc, page);
 203	unlock_page(page);
 204	return 0;
 205}
 206
 207/**
 208 * gfs2_writepages - Write a bunch of dirty pages back to disk
 209 * @mapping: The mapping to write
 210 * @wbc: Write-back control
 211 *
 212 * Used for both ordered and writeback modes.
 213 */
 214static int gfs2_writepages(struct address_space *mapping,
 215			   struct writeback_control *wbc)
 216{
 217	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
 218}
 219
 220/**
 221 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 222 * @mapping: The mapping
 223 * @wbc: The writeback control
 224 * @writepage: The writepage function to call for each page
 225 * @pvec: The vector of pages
 226 * @nr_pages: The number of pages to write
 227 *
 228 * Returns: non-zero if loop should terminate, zero otherwise
 229 */
 230
 231static int gfs2_write_jdata_pagevec(struct address_space *mapping,
 232				    struct writeback_control *wbc,
 233				    struct pagevec *pvec,
 234				    int nr_pages, pgoff_t end,
 235				    pgoff_t *done_index)
 236{
 237	struct inode *inode = mapping->host;
 238	struct gfs2_sbd *sdp = GFS2_SB(inode);
 239	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
 240	int i;
 241	int ret;
 242
 243	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
 244	if (ret < 0)
 245		return ret;
 246
 247	for(i = 0; i < nr_pages; i++) {
 248		struct page *page = pvec->pages[i];
 249
 250		/*
 251		 * At this point, the page may be truncated or
 252		 * invalidated (changing page->mapping to NULL), or
 253		 * even swizzled back from swapper_space to tmpfs file
 254		 * mapping. However, page->index will not change
 255		 * because we have a reference on the page.
 256		 */
 257		if (page->index > end) {
 258			/*
 259			 * can't be range_cyclic (1st pass) because
 260			 * end == -1 in that case.
 261			 */
 262			ret = 1;
 263			break;
 264		}
 265
 266		*done_index = page->index;
 267
 268		lock_page(page);
 269
 270		if (unlikely(page->mapping != mapping)) {
 271continue_unlock:
 272			unlock_page(page);
 273			continue;
 274		}
 275
 276		if (!PageDirty(page)) {
 277			/* someone wrote it for us */
 278			goto continue_unlock;
 279		}
 280
 281		if (PageWriteback(page)) {
 282			if (wbc->sync_mode != WB_SYNC_NONE)
 283				wait_on_page_writeback(page);
 284			else
 285				goto continue_unlock;
 286		}
 287
 288		BUG_ON(PageWriteback(page));
 289		if (!clear_page_dirty_for_io(page))
 290			goto continue_unlock;
 291
 292		trace_wbc_writepage(wbc, mapping->backing_dev_info);
 293
 294		ret = __gfs2_jdata_writepage(page, wbc);
 295		if (unlikely(ret)) {
 296			if (ret == AOP_WRITEPAGE_ACTIVATE) {
 297				unlock_page(page);
 298				ret = 0;
 299			} else {
 300
 301				/*
 302				 * done_index is set past this page,
 303				 * so media errors will not choke
 304				 * background writeout for the entire
 305				 * file. This has consequences for
 306				 * range_cyclic semantics (ie. it may
 307				 * not be suitable for data integrity
 308				 * writeout).
 309				 */
 310				*done_index = page->index + 1;
 311				ret = 1;
 312				break;
 313			}
 314		}
 315
 316		/*
 317		 * We stop writing back only if we are not doing
 318		 * integrity sync. In case of integrity sync we have to
 319		 * keep going until we have written all the pages
 320		 * we tagged for writeback prior to entering this loop.
 321		 */
 322		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
 323			ret = 1;
 324			break;
 325		}
 326
 327	}
 328	gfs2_trans_end(sdp);
 329	return ret;
 330}
 331
 332/**
 333 * gfs2_write_cache_jdata - Like write_cache_pages but different
 334 * @mapping: The mapping to write
 335 * @wbc: The writeback control
 336 * @writepage: The writepage function to call
 337 * @data: The data to pass to writepage
 338 *
 339 * The reason that we use our own function here is that we need to
 340 * start transactions before we grab page locks. This allows us
 341 * to get the ordering right.
 342 */
 343
 344static int gfs2_write_cache_jdata(struct address_space *mapping,
 345				  struct writeback_control *wbc)
 346{
 347	int ret = 0;
 348	int done = 0;
 349	struct pagevec pvec;
 350	int nr_pages;
 351	pgoff_t uninitialized_var(writeback_index);
 352	pgoff_t index;
 353	pgoff_t end;
 354	pgoff_t done_index;
 355	int cycled;
 356	int range_whole = 0;
 357	int tag;
 358
 359	pagevec_init(&pvec, 0);
 360	if (wbc->range_cyclic) {
 361		writeback_index = mapping->writeback_index; /* prev offset */
 362		index = writeback_index;
 363		if (index == 0)
 364			cycled = 1;
 365		else
 366			cycled = 0;
 367		end = -1;
 368	} else {
 369		index = wbc->range_start >> PAGE_CACHE_SHIFT;
 370		end = wbc->range_end >> PAGE_CACHE_SHIFT;
 371		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 372			range_whole = 1;
 373		cycled = 1; /* ignore range_cyclic tests */
 374	}
 375	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 376		tag = PAGECACHE_TAG_TOWRITE;
 377	else
 378		tag = PAGECACHE_TAG_DIRTY;
 379
 380retry:
 381	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 382		tag_pages_for_writeback(mapping, index, end);
 383	done_index = index;
 384	while (!done && (index <= end)) {
 385		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
 386			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
 387		if (nr_pages == 0)
 388			break;
 389
 390		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
 391		if (ret)
 392			done = 1;
 393		if (ret > 0)
 394			ret = 0;
 395		pagevec_release(&pvec);
 396		cond_resched();
 397	}
 398
 399	if (!cycled && !done) {
 400		/*
 401		 * range_cyclic:
 402		 * We hit the last page and there is more work to be done: wrap
 403		 * back to the start of the file
 404		 */
 405		cycled = 1;
 406		index = 0;
 407		end = writeback_index - 1;
 408		goto retry;
 409	}
 410
 411	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 412		mapping->writeback_index = done_index;
 413
 414	return ret;
 415}
 416
 417
 418/**
 419 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 420 * @mapping: The mapping to write
 421 * @wbc: The writeback control
 422 * 
 423 */
 424
 425static int gfs2_jdata_writepages(struct address_space *mapping,
 426				 struct writeback_control *wbc)
 427{
 428	struct gfs2_inode *ip = GFS2_I(mapping->host);
 429	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
 430	int ret;
 431
 432	ret = gfs2_write_cache_jdata(mapping, wbc);
 433	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
 434		gfs2_log_flush(sdp, ip->i_gl);
 435		ret = gfs2_write_cache_jdata(mapping, wbc);
 436	}
 437	return ret;
 438}
 439
 440/**
 441 * stuffed_readpage - Fill in a Linux page with stuffed file data
 442 * @ip: the inode
 443 * @page: the page
 444 *
 445 * Returns: errno
 446 */
 447
 448static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 449{
 450	struct buffer_head *dibh;
 451	u64 dsize = i_size_read(&ip->i_inode);
 452	void *kaddr;
 453	int error;
 454
 455	/*
 456	 * Due to the order of unstuffing files and ->fault(), we can be
 457	 * asked for a zero page in the case of a stuffed file being extended,
 458	 * so we need to supply one here. It doesn't happen often.
 459	 */
 460	if (unlikely(page->index)) {
 461		zero_user(page, 0, PAGE_CACHE_SIZE);
 462		SetPageUptodate(page);
 463		return 0;
 464	}
 465
 466	error = gfs2_meta_inode_buffer(ip, &dibh);
 467	if (error)
 468		return error;
 469
 470	kaddr = kmap_atomic(page);
 471	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
 472		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
 473	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
 474	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
 475	kunmap_atomic(kaddr);
 476	flush_dcache_page(page);
 477	brelse(dibh);
 478	SetPageUptodate(page);
 479
 480	return 0;
 481}
 482
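stuffed_readpage() copies at most dibh->b_size - sizeof(struct gfs2_dinode) bytes, i.e. whatever fits in the inode block after the on-disk dinode header. A sketch of that capacity calculation, with an illustrative header size (the real value comes from struct gfs2_dinode in <linux/gfs2_ondisk.h>):

#include <stdio.h>

int main(void)
{
	unsigned int sb_bsize = 4096;	/* assumed filesystem block size */
	unsigned int dinode_hdr = 232;	/* illustrative header size only */

	printf("max stuffed file size: %u bytes\n", sb_bsize - dinode_hdr);
	return 0;
}
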
 483
 484/**
 485 * __gfs2_readpage - readpage
 486 * @file: The file to read a page for
 487 * @page: The page to read
 488 *
 489 * This is the core of gfs2's readpage. It's used by the internal file
 490 * reading code, as in that case we already hold the glock. It's also
 491 * called by gfs2_readpage() once the required lock has been granted.
 492 *
 493 */
 494
 495static int __gfs2_readpage(void *file, struct page *page)
 496{
 497	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
 498	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 499	int error;
 500
 501	if (gfs2_is_stuffed(ip)) {
 502		error = stuffed_readpage(ip, page);
 503		unlock_page(page);
 504	} else {
 505		error = mpage_readpage(page, gfs2_block_map);
 506	}
 507
 508	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 509		return -EIO;
 510
 511	return error;
 512}
 513
 514/**
 515 * gfs2_readpage - read a page of a file
 516 * @file: The file to read
 517 * @page: The page of the file
 518 *
 519 * This deals with the locking required. We have to unlock and
 520 * relock the page in order to get the locking in the right
 521 * order.
 522 */
 523
 524static int gfs2_readpage(struct file *file, struct page *page)
 525{
 526	struct address_space *mapping = page->mapping;
 527	struct gfs2_inode *ip = GFS2_I(mapping->host);
 528	struct gfs2_holder gh;
 529	int error;
 530
 531	unlock_page(page);
 532	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 533	error = gfs2_glock_nq(&gh);
 534	if (unlikely(error))
 535		goto out;
 536	error = AOP_TRUNCATED_PAGE;
 537	lock_page(page);
 538	if (page->mapping == mapping && !PageUptodate(page))
 539		error = __gfs2_readpage(file, page);
 540	else
 541		unlock_page(page);
 542	gfs2_glock_dq(&gh);
 543out:
 544	gfs2_holder_uninit(&gh);
 545	if (error && error != AOP_TRUNCATED_PAGE)
 546		lock_page(page);
 547	return error;
 548}
 549
 550/**
 551 * gfs2_internal_read - read an internal file
 552 * @ip: The gfs2 inode
 553 * @buf: The buffer to fill
 554 * @pos: The file position
 555 * @size: The amount to read
 556 *
 557 */
 558
 559int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
 560                       unsigned size)
 561{
 562	struct address_space *mapping = ip->i_inode.i_mapping;
 563	unsigned long index = *pos / PAGE_CACHE_SIZE;
 564	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
 565	unsigned copied = 0;
 566	unsigned amt;
 567	struct page *page;
 568	void *p;
 569
 570	do {
 571		amt = size - copied;
 572		if (offset + size > PAGE_CACHE_SIZE)
 573			amt = PAGE_CACHE_SIZE - offset;
 574		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
 575		if (IS_ERR(page))
 576			return PTR_ERR(page);
 577		p = kmap_atomic(page);
 578		memcpy(buf + copied, p + offset, amt);
 579		kunmap_atomic(p);
 580		mark_page_accessed(page);
 581		page_cache_release(page);
 582		copied += amt;
 583		index++;
 584		offset = 0;
 585	} while(copied < size);
 586	(*pos) += size;
 587	return size;
 588}
 589
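gfs2_internal_read() copies page by page, taking only the tail of the first page when *pos is not page aligned. A standalone sketch of that per-page arithmetic, with the chunk clamped to the amount remaining and 4096-byte pages assumed:

#include <stdio.h>

int main(void)
{
	unsigned long pos = 4000, size = 300;	/* read straddles a page */
	unsigned long index = pos / 4096;	/* first page index: 0 */
	unsigned long offset = pos & 4095;	/* 4000 */
	unsigned long copied = 0;

	do {
		unsigned long amt = size - copied;

		if (offset + amt > 4096)
			amt = 4096 - offset;	/* 96 bytes from page 0 */
		printf("page %lu: copy %lu bytes at offset %lu\n",
		       index, amt, offset);	/* then 204 from page 1 */
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	return 0;
}
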
 590/**
 591 * gfs2_readpages - Read a bunch of pages at once
 592 *
 593 * Some notes:
 594 * 1. This is only for readahead, so we can simply ignore any things
 595 *    which are slightly inconvenient (such as locking conflicts between
 596 *    the page lock and the glock) and return having done no I/O. It's
 597 *    obviously not something we'd want to do on too regular a basis.
 598 *    Any I/O we ignore at this time will be done via readpage later.
 599 * 2. We don't handle stuffed files here; we let readpage do the honours.
 600 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 601 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 602 */
 603
 604static int gfs2_readpages(struct file *file, struct address_space *mapping,
 605			  struct list_head *pages, unsigned nr_pages)
 606{
 607	struct inode *inode = mapping->host;
 608	struct gfs2_inode *ip = GFS2_I(inode);
 609	struct gfs2_sbd *sdp = GFS2_SB(inode);
 610	struct gfs2_holder gh;
 611	int ret;
 612
 613	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 614	ret = gfs2_glock_nq(&gh);
 615	if (unlikely(ret))
 616		goto out_uninit;
 617	if (!gfs2_is_stuffed(ip))
 618		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
 619	gfs2_glock_dq(&gh);
 620out_uninit:
 621	gfs2_holder_uninit(&gh);
 622	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 623		ret = -EIO;
 624	return ret;
 625}
 626
 627/**
 628 * gfs2_write_begin - Begin to write to a file
 629 * @file: The file to write to
 630 * @mapping: The mapping in which to write
 631 * @pos: The file offset at which to start writing
 632 * @len: Length of the write
 633 * @flags: Various flags
 634 * @pagep: Pointer to return the page
 635 * @fsdata: Pointer to return fs data (unused by GFS2)
 636 *
 637 * Returns: errno
 638 */
 639
 640static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 641			    loff_t pos, unsigned len, unsigned flags,
 642			    struct page **pagep, void **fsdata)
 643{
 644	struct gfs2_inode *ip = GFS2_I(mapping->host);
 645	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
 646	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 647	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 648	unsigned requested = 0;
 649	int alloc_required;
 650	int error = 0;
 651	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 652	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 653	struct page *page;
 654
 655	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 656	error = gfs2_glock_nq(&ip->i_gh);
 657	if (unlikely(error))
 658		goto out_uninit;
 659	if (&ip->i_inode == sdp->sd_rindex) {
 660		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
 661					   GL_NOCACHE, &m_ip->i_gh);
 662		if (unlikely(error)) {
 663			gfs2_glock_dq(&ip->i_gh);
 664			goto out_uninit;
 665		}
 666	}
 667
 668	alloc_required = gfs2_write_alloc_required(ip, pos, len);
 669
 670	if (alloc_required || gfs2_is_jdata(ip))
 671		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
 672
 673	if (alloc_required) {
 674		struct gfs2_alloc_parms ap = { .aflags = 0, };
 675		error = gfs2_quota_lock_check(ip);
 676		if (error)
 677			goto out_unlock;
 678
 679		requested = data_blocks + ind_blocks;
 680		ap.target = requested;
 681		error = gfs2_inplace_reserve(ip, &ap);
 682		if (error)
 683			goto out_qunlock;
 684	}
 685
 686	rblocks = RES_DINODE + ind_blocks;
 687	if (gfs2_is_jdata(ip))
 688		rblocks += data_blocks ? data_blocks : 1;
 689	if (ind_blocks || data_blocks)
 690		rblocks += RES_STATFS + RES_QUOTA;
 691	if (&ip->i_inode == sdp->sd_rindex)
 692		rblocks += 2 * RES_STATFS;
 693	if (alloc_required)
 694		rblocks += gfs2_rg_blocks(ip, requested);
 695
 696	error = gfs2_trans_begin(sdp, rblocks,
 697				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
 698	if (error)
 699		goto out_trans_fail;
 700
 701	error = -ENOMEM;
 702	flags |= AOP_FLAG_NOFS;
 703	page = grab_cache_page_write_begin(mapping, index, flags);
 704	*pagep = page;
 705	if (unlikely(!page))
 706		goto out_endtrans;
 707
 708	if (gfs2_is_stuffed(ip)) {
 709		error = 0;
 710		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
 711			error = gfs2_unstuff_dinode(ip, page);
 712			if (error == 0)
 713				goto prepare_write;
 714		} else if (!PageUptodate(page)) {
 715			error = stuffed_readpage(ip, page);
 716		}
 717		goto out;
 718	}
 719
 720prepare_write:
 721	error = __block_write_begin(page, from, len, gfs2_block_map);
 722out:
 723	if (error == 0)
 724		return 0;
 725
 726	unlock_page(page);
 727	page_cache_release(page);
 728
 729	gfs2_trans_end(sdp);
 730	if (pos + len > ip->i_inode.i_size)
 731		gfs2_trim_blocks(&ip->i_inode);
 732	goto out_trans_fail;
 733
 734out_endtrans:
 735	gfs2_trans_end(sdp);
 736out_trans_fail:
 737	if (alloc_required) {
 738		gfs2_inplace_release(ip);
 739out_qunlock:
 740		gfs2_quota_unlock(ip);
 741	}
 742out_unlock:
 743	if (&ip->i_inode == sdp->sd_rindex) {
 744		gfs2_glock_dq(&m_ip->i_gh);
 745		gfs2_holder_uninit(&m_ip->i_gh);
 746	}
 747	gfs2_glock_dq(&ip->i_gh);
 748out_uninit:
 749	gfs2_holder_uninit(&ip->i_gh);
 750	return error;
 751}
 752
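The rblocks sum above reserves journal space for each block type the write may dirty. A worked example with illustrative stand-ins for the RES_* constants (the real values live in the gfs2 headers), for a two-block jdata write needing one indirect block and an allocation:

#include <stdio.h>

#define RES_DINODE 1	/* illustrative stand-ins, not the gfs2 values */
#define RES_STATFS 1
#define RES_QUOTA  2

int main(void)
{
	unsigned int data_blocks = 2, ind_blocks = 1, rg_blocks = 1;
	int jdata = 1, alloc_required = 1;
	unsigned int rblocks = RES_DINODE + ind_blocks;

	if (jdata)
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (alloc_required)
		rblocks += rg_blocks;
	printf("rblocks = %u\n", rblocks);	/* 1+1+2+1+2+1 == 8 */
	return 0;
}
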
 753/**
 754 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 755 * @inode: the rindex inode
 756 */
 757static void adjust_fs_space(struct inode *inode)
 758{
 759	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
 760	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 761	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 762	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 763	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 764	struct buffer_head *m_bh, *l_bh;
 765	u64 fs_total, new_free;
 766
 767	/* Total up the file system space, according to the latest rindex. */
 768	fs_total = gfs2_ri_total(sdp);
 769	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
 770		return;
 771
 772	spin_lock(&sdp->sd_statfs_spin);
 773	gfs2_statfs_change_in(m_sc, m_bh->b_data +
 774			      sizeof(struct gfs2_dinode));
 775	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
 776		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
 777	else
 778		new_free = 0;
 779	spin_unlock(&sdp->sd_statfs_spin);
 780	fs_warn(sdp, "File system extended by %llu blocks.\n",
 781		(unsigned long long)new_free);
 782	gfs2_statfs_change(sdp, new_free, new_free, 0);
 783
 784	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
 785		goto out;
 786	update_statfs(sdp, m_bh, l_bh);
 787	brelse(l_bh);
 788out:
 789	brelse(m_bh);
 790}
 791
 792/**
 793 * gfs2_stuffed_write_end - Write end for stuffed files
 794 * @inode: The inode
 795 * @dibh: The buffer_head containing the on-disk inode
 796 * @pos: The file position
 797 * @len: The length of the write
 798 * @copied: How much was actually copied by the VFS
 799 * @page: The page
 800 *
 801 * This copies the data from the page into the inode block after
 802 * the inode data structure itself.
 803 *
 804 * Returns: errno
 805 */
 806static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 807				  loff_t pos, unsigned len, unsigned copied,
 808				  struct page *page)
 809{
 810	struct gfs2_inode *ip = GFS2_I(inode);
 811	struct gfs2_sbd *sdp = GFS2_SB(inode);
 812	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 813	u64 to = pos + copied;
 814	void *kaddr;
 815	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
 816
 817	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
 818	kaddr = kmap_atomic(page);
 819	memcpy(buf + pos, kaddr + pos, copied);
 820	memset(kaddr + pos + copied, 0, len - copied);
 821	flush_dcache_page(page);
 822	kunmap_atomic(kaddr);
 823
 824	if (!PageUptodate(page))
 825		SetPageUptodate(page);
 826	unlock_page(page);
 827	page_cache_release(page);
 828
 829	if (copied) {
 830		if (inode->i_size < to)
 831			i_size_write(inode, to);
 832		mark_inode_dirty(inode);
 833	}
 834
 835	if (inode == sdp->sd_rindex) {
 836		adjust_fs_space(inode);
 837		sdp->sd_rindex_uptodate = 0;
 838	}
 839
 840	brelse(dibh);
 841	gfs2_trans_end(sdp);
 842	if (inode == sdp->sd_rindex) {
 843		gfs2_glock_dq(&m_ip->i_gh);
 844		gfs2_holder_uninit(&m_ip->i_gh);
 845	}
 846	gfs2_glock_dq(&ip->i_gh);
 847	gfs2_holder_uninit(&ip->i_gh);
 848	return copied;
 849}
 850
 851/**
 852 * gfs2_write_end
 853 * @file: The file to write to
 854 * @mapping: The address space to write to
 855 * @pos: The file position
 856 * @len: The length of the data
 857 * @copied:
 858 * @page: The page that has been written
 859 * @fsdata: The fsdata (unused in GFS2)
 860 *
 861 * The main write_end function for GFS2. We have a separate one for
 862 * stuffed files as they are slightly different, otherwise we just
 863 * put our locking around the VFS provided functions.
 864 *
 865 * Returns: errno
 866 */
 867
 868static int gfs2_write_end(struct file *file, struct address_space *mapping,
 869			  loff_t pos, unsigned len, unsigned copied,
 870			  struct page *page, void *fsdata)
 871{
 872	struct inode *inode = page->mapping->host;
 873	struct gfs2_inode *ip = GFS2_I(inode);
 874	struct gfs2_sbd *sdp = GFS2_SB(inode);
 875	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 876	struct buffer_head *dibh;
 877	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
 878	unsigned int to = from + len;
 879	int ret;
 880	struct gfs2_trans *tr = current->journal_info;
 881	BUG_ON(!tr);
 882
 883	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
 884
 885	ret = gfs2_meta_inode_buffer(ip, &dibh);
 886	if (unlikely(ret)) {
 887		unlock_page(page);
 888		page_cache_release(page);
 889		goto failed;
 890	}
 891
 892	if (gfs2_is_stuffed(ip))
 893		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
 894
 895	if (!gfs2_is_writeback(ip))
 896		gfs2_page_add_databufs(ip, page, from, to);
 897
 898	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 899	if (tr->tr_num_buf_new)
 900		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 901	else
 902		gfs2_trans_add_meta(ip->i_gl, dibh);
 903
 904
 905	if (inode == sdp->sd_rindex) {
 906		adjust_fs_space(inode);
 907		sdp->sd_rindex_uptodate = 0;
 908	}
 909
 910	brelse(dibh);
 911failed:
 912	gfs2_trans_end(sdp);
 913	gfs2_inplace_release(ip);
 914	if (ip->i_res->rs_qa_qd_num)
 915		gfs2_quota_unlock(ip);
 916	if (inode == sdp->sd_rindex) {
 917		gfs2_glock_dq(&m_ip->i_gh);
 918		gfs2_holder_uninit(&m_ip->i_gh);
 919	}
 920	gfs2_glock_dq(&ip->i_gh);
 921	gfs2_holder_uninit(&ip->i_gh);
 922	return ret;
 923}
 924
 925/**
 926 * gfs2_set_page_dirty - Page dirtying function
 927 * @page: The page to dirty
 928 *
 929 * Returns: 1 if it dirtied the page, or 0 otherwise
 930 */
 931 
 932static int gfs2_set_page_dirty(struct page *page)
 933{
 934	SetPageChecked(page);
 935	return __set_page_dirty_buffers(page);
 936}
 937
 938/**
 939 * gfs2_bmap - Block map function
 940 * @mapping: Address space info
 941 * @lblock: The block to map
 942 *
 943 * Returns: The disk address for the block or 0 on hole or error
 944 */
 945
 946static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
 947{
 948	struct gfs2_inode *ip = GFS2_I(mapping->host);
 949	struct gfs2_holder i_gh;
 950	sector_t dblock = 0;
 951	int error;
 952
 953	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 954	if (error)
 955		return 0;
 956
 957	if (!gfs2_is_stuffed(ip))
 958		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);
 959
 960	gfs2_glock_dq_uninit(&i_gh);
 961
 962	return dblock;
 963}
 964
 965static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
 966{
 967	struct gfs2_bufdata *bd;
 968
 969	lock_buffer(bh);
 970	gfs2_log_lock(sdp);
 971	clear_buffer_dirty(bh);
 972	bd = bh->b_private;
 973	if (bd) {
 974		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
 975			list_del_init(&bd->bd_list);
 976		else
 977			gfs2_remove_from_journal(bh, current->journal_info, 0);
 978	}
 979	bh->b_bdev = NULL;
 980	clear_buffer_mapped(bh);
 981	clear_buffer_req(bh);
 982	clear_buffer_new(bh);
 983	gfs2_log_unlock(sdp);
 984	unlock_buffer(bh);
 985}
 986
 987static void gfs2_invalidatepage(struct page *page, unsigned int offset,
 988				unsigned int length)
 989{
 990	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 991	unsigned int stop = offset + length;
 992	int partial_page = (offset || length < PAGE_CACHE_SIZE);
 993	struct buffer_head *bh, *head;
 994	unsigned long pos = 0;
 995
 996	BUG_ON(!PageLocked(page));
 997	if (!partial_page)
 998		ClearPageChecked(page);
 999	if (!page_has_buffers(page))
1000		goto out;
1001
1002	bh = head = page_buffers(page);
1003	do {
1004		if (pos + bh->b_size > stop)
1005			return;
1006
1007		if (offset <= pos)
1008			gfs2_discard(sdp, bh);
1009		pos += bh->b_size;
1010		bh = bh->b_this_page;
1011	} while (bh != head);
1012out:
1013	if (!partial_page)
1014		try_to_release_page(page, 0);
1015}
1016
1017/**
1018 * gfs2_ok_for_dio - check that dio is valid on this file
1019 * @ip: The inode
1020 * @rw: READ or WRITE
1021 * @offset: The offset at which we are reading or writing
1022 *
1023 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
1024 *          1 (to accept the i/o request)
1025 */
1026static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
1027{
1028	/*
1029	 * Should we return an error here? I can't see that O_DIRECT for
1030	 * a stuffed file makes any sense. For now we'll silently fall
1031	 * back to buffered I/O
1032	 */
1033	if (gfs2_is_stuffed(ip))
1034		return 0;
1035
1036	if (offset >= i_size_read(&ip->i_inode))
1037		return 0;
1038	return 1;
1039}
1040
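From userspace this path is reached via O_DIRECT; when gfs2_ok_for_dio() returns 0 (a stuffed file, or an offset at or past EOF) the request silently falls back to buffered I/O. A userspace sketch, with the path and the 4096-byte alignment as assumptions:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/gfs2/file", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
		return 1;
	/* small inline files are served through the page cache instead */
	ssize_t n = read(fd, buf, 4096);

	printf("read %zd bytes\n", n);
	free(buf);
	close(fd);
	return 0;
}
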
1041
1042
1043static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
1044			      const struct iovec *iov, loff_t offset,
1045			      unsigned long nr_segs)
1046{
1047	struct file *file = iocb->ki_filp;
1048	struct inode *inode = file->f_mapping->host;
1049	struct address_space *mapping = inode->i_mapping;
1050	struct gfs2_inode *ip = GFS2_I(inode);
1051	struct gfs2_holder gh;
1052	int rv;
1053
1054	/*
1055 * Deferred lock, even if it's a write, since we do no allocation
1056	 * on this path. All we need change is atime, and this lock mode
1057	 * ensures that other nodes have flushed their buffered read caches
1058 * (i.e. their page cache entries for this inode). We do not,
1059 * unfortunately, have the option of only flushing a range like
1060	 * the VFS does.
1061	 */
1062	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
1063	rv = gfs2_glock_nq(&gh);
1064	if (rv)
1065		return rv;
1066	rv = gfs2_ok_for_dio(ip, rw, offset);
1067	if (rv != 1)
1068		goto out; /* dio not valid, fall back to buffered i/o */
1069
1070	/*
1071	 * Now since we are holding a deferred (CW) lock at this point, you
1072	 * might be wondering why this is ever needed. There is a case however
1073	 * where we've granted a deferred local lock against a cached exclusive
1074	 * glock. That is ok provided all granted local locks are deferred, but
1075	 * it also means that it is possible to encounter pages which are
1076	 * cached and possibly also mapped. So here we check for that and sort
1077	 * them out ahead of the dio. The glock state machine will take care of
1078	 * everything else.
1079	 *
1080	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
1081 * the first place, mapping->nrpages will always be zero.
1082	 */
1083	if (mapping->nrpages) {
1084		loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
1085		loff_t len = iov_length(iov, nr_segs);
1086		loff_t end = PAGE_ALIGN(offset + len) - 1;
1087
1088		rv = 0;
1089		if (len == 0)
1090			goto out;
1091		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
1092			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
1093		rv = filemap_write_and_wait_range(mapping, lstart, end);
1094		if (rv)
1095			goto out;
1096		if (rw == WRITE)
1097			truncate_inode_pages_range(mapping, lstart, end);
1098	}
1099
1100	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1101				  offset, nr_segs, gfs2_get_block_direct,
1102				  NULL, NULL, 0);
1103out:
1104	gfs2_glock_dq(&gh);
1105	gfs2_holder_uninit(&gh);
1106	return rv;
1107}
1108
1109/**
1110 * gfs2_releasepage - free the metadata associated with a page
1111 * @page: the page that's being released
1112 * @gfp_mask: passed from Linux VFS, ignored by us
1113 *
1114 * Call try_to_free_buffers() if the buffers in this page can be
1115 * released.
1116 *
1117 * Returns: 0
1118 */
1119
1120int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
1121{
1122	struct address_space *mapping = page->mapping;
1123	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
1124	struct buffer_head *bh, *head;
1125	struct gfs2_bufdata *bd;
1126
1127	if (!page_has_buffers(page))
1128		return 0;
1129
1130	gfs2_log_lock(sdp);
1131	spin_lock(&sdp->sd_ail_lock);
1132	head = bh = page_buffers(page);
1133	do {
1134		if (atomic_read(&bh->b_count))
1135			goto cannot_release;
1136		bd = bh->b_private;
1137		if (bd && bd->bd_tr)
1138			goto cannot_release;
1139		if (buffer_pinned(bh) || buffer_dirty(bh))
1140			goto not_possible;
1141		bh = bh->b_this_page;
1142	} while(bh != head);
1143	spin_unlock(&sdp->sd_ail_lock);
1144
1145	head = bh = page_buffers(page);
1146	do {
1147		bd = bh->b_private;
1148		if (bd) {
1149			gfs2_assert_warn(sdp, bd->bd_bh == bh);
1150			if (!list_empty(&bd->bd_list))
1151				list_del_init(&bd->bd_list);
1152			bd->bd_bh = NULL;
1153			bh->b_private = NULL;
1154			kmem_cache_free(gfs2_bufdata_cachep, bd);
1155		}
1156
1157		bh = bh->b_this_page;
1158	} while (bh != head);
1159	gfs2_log_unlock(sdp);
1160
1161	return try_to_free_buffers(page);
1162
1163not_possible: /* Should never happen */
1164	WARN_ON(buffer_dirty(bh));
1165	WARN_ON(buffer_pinned(bh));
1166cannot_release:
1167	spin_unlock(&sdp->sd_ail_lock);
1168	gfs2_log_unlock(sdp);
1169	return 0;
1170}
1171
1172static const struct address_space_operations gfs2_writeback_aops = {
1173	.writepage = gfs2_writepage,
1174	.writepages = gfs2_writepages,
1175	.readpage = gfs2_readpage,
1176	.readpages = gfs2_readpages,
1177	.write_begin = gfs2_write_begin,
1178	.write_end = gfs2_write_end,
1179	.bmap = gfs2_bmap,
1180	.invalidatepage = gfs2_invalidatepage,
1181	.releasepage = gfs2_releasepage,
1182	.direct_IO = gfs2_direct_IO,
1183	.migratepage = buffer_migrate_page,
1184	.is_partially_uptodate = block_is_partially_uptodate,
1185	.error_remove_page = generic_error_remove_page,
1186};
1187
1188static const struct address_space_operations gfs2_ordered_aops = {
1189	.writepage = gfs2_writepage,
1190	.writepages = gfs2_writepages,
1191	.readpage = gfs2_readpage,
1192	.readpages = gfs2_readpages,
1193	.write_begin = gfs2_write_begin,
1194	.write_end = gfs2_write_end,
1195	.set_page_dirty = gfs2_set_page_dirty,
1196	.bmap = gfs2_bmap,
1197	.invalidatepage = gfs2_invalidatepage,
1198	.releasepage = gfs2_releasepage,
1199	.direct_IO = gfs2_direct_IO,
1200	.migratepage = buffer_migrate_page,
1201	.is_partially_uptodate = block_is_partially_uptodate,
1202	.error_remove_page = generic_error_remove_page,
1203};
1204
1205static const struct address_space_operations gfs2_jdata_aops = {
1206	.writepage = gfs2_jdata_writepage,
1207	.writepages = gfs2_jdata_writepages,
1208	.readpage = gfs2_readpage,
1209	.readpages = gfs2_readpages,
1210	.write_begin = gfs2_write_begin,
1211	.write_end = gfs2_write_end,
1212	.set_page_dirty = gfs2_set_page_dirty,
1213	.bmap = gfs2_bmap,
1214	.invalidatepage = gfs2_invalidatepage,
1215	.releasepage = gfs2_releasepage,
1216	.is_partially_uptodate = block_is_partially_uptodate,
1217	.error_remove_page = generic_error_remove_page,
1218};
1219
1220void gfs2_set_aops(struct inode *inode)
1221{
1222	struct gfs2_inode *ip = GFS2_I(inode);
1223
1224	if (gfs2_is_writeback(ip))
1225		inode->i_mapping->a_ops = &gfs2_writeback_aops;
1226	else if (gfs2_is_ordered(ip))
1227		inode->i_mapping->a_ops = &gfs2_ordered_aops;
1228	else if (gfs2_is_jdata(ip))
1229		inode->i_mapping->a_ops = &gfs2_jdata_aops;
1230	else
1231		BUG();
1232}
1233
v6.13.7 (fs/gfs2/aops.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  5 */
  6
  7#include <linux/sched.h>
  8#include <linux/slab.h>
  9#include <linux/spinlock.h>
 10#include <linux/completion.h>
 11#include <linux/buffer_head.h>
 12#include <linux/pagemap.h>
 13#include <linux/pagevec.h>
 14#include <linux/mpage.h>
 15#include <linux/fs.h>
 16#include <linux/writeback.h>
 17#include <linux/swap.h>
 18#include <linux/gfs2_ondisk.h>
 19#include <linux/backing-dev.h>
 20#include <linux/uio.h>
 21#include <trace/events/writeback.h>
 22#include <linux/sched/signal.h>
 23
 24#include "gfs2.h"
 25#include "incore.h"
 26#include "bmap.h"
 27#include "glock.h"
 28#include "inode.h"
 29#include "log.h"
 30#include "meta_io.h"
 31#include "quota.h"
 32#include "trans.h"
 33#include "rgrp.h"
 34#include "super.h"
 35#include "util.h"
 36#include "glops.h"
 37#include "aops.h"
 38
 39
 40void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
 41			     size_t from, size_t len)
 42{
 43	struct buffer_head *head = folio_buffers(folio);
 44	unsigned int bsize = head->b_size;
 45	struct buffer_head *bh;
 46	size_t to = from + len;
 47	size_t start, end;
 48
 49	for (bh = head, start = 0; bh != head || !start;
 50	     bh = bh->b_this_page, start = end) {
 51		end = start + bsize;
 52		if (end <= from)
 53			continue;
 54		if (start >= to)
 55			break;
 56		set_buffer_uptodate(bh);
 57		gfs2_trans_add_data(ip->i_gl, bh);
 58	}
 59}
 60
 61/**
 62 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 63 * @inode: The inode
 64 * @lblock: The block number to look up
 65 * @bh_result: The buffer head to return the result in
 66 * @create: Non-zero if we may add block to the file
 67 *
 68 * Returns: errno
 69 */
 70
 71static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 72				  struct buffer_head *bh_result, int create)
 73{
 74	int error;
 75
 76	error = gfs2_block_map(inode, lblock, bh_result, 0);
 77	if (error)
 78		return error;
 79	if (!buffer_mapped(bh_result))
 80		return -ENODATA;
 81	return 0;
 82}
 83
 84/**
 85 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 86 * @folio: The folio to write
 87 * @wbc: The writeback control
 88 *
 89 * This is the same as calling block_write_full_folio, but it also
 90 * writes pages outside of i_size
 91 */
 92static int gfs2_write_jdata_folio(struct folio *folio,
 93				 struct writeback_control *wbc)
 94{
 95	struct inode * const inode = folio->mapping->host;
 96	loff_t i_size = i_size_read(inode);
 97
 98	/*
 99	 * The folio straddles i_size.  It must be zeroed out on each and every
100	 * writepage invocation because it may be mmapped.  "A file is mapped
101	 * in multiples of the page size.  For a file that is not a multiple of
102	 * the page size, the remaining memory is zeroed when mapped, and
103	 * writes to that region are not written out to the file."
104	 */
105	if (folio_pos(folio) < i_size &&
106	    i_size < folio_pos(folio) + folio_size(folio))
107		folio_zero_segment(folio, offset_in_folio(folio, i_size),
108				folio_size(folio));
109
110	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
111			wbc);
112}
113
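A worked example of the straddle test above: with 4096-byte folios and i_size == 5000, the folio at file position 4096 contains EOF, so its tail is zeroed on every writeback because it may be mmapped.

#include <stdio.h>

int main(void)
{
	unsigned long long fpos = 4096, fsize = 4096;	/* assumed folio */
	unsigned long long i_size = 5000;		/* EOF inside it */

	if (fpos < i_size && i_size < fpos + fsize)
		printf("zero folio bytes [%llu, %llu)\n",
		       i_size - fpos, fsize);		/* [904, 4096) */
	return 0;
}
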
114/**
115 * __gfs2_jdata_write_folio - The core of jdata writepage
116 * @folio: The folio to write
117 * @wbc: The writeback control
118 *
119 * Implements the core of write back. If a transaction is required then
120 * the checked flag will have been set and the transaction will have
121 * already been started before this is called.
122 */
123static int __gfs2_jdata_write_folio(struct folio *folio,
124		struct writeback_control *wbc)
125{
126	struct inode *inode = folio->mapping->host;
127	struct gfs2_inode *ip = GFS2_I(inode);
128
129	if (folio_test_checked(folio)) {
130		folio_clear_checked(folio);
131		if (!folio_buffers(folio)) {
132			create_empty_buffers(folio,
133					inode->i_sb->s_blocksize,
134					BIT(BH_Dirty)|BIT(BH_Uptodate));
135		}
136		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
137	}
138	return gfs2_write_jdata_folio(folio, wbc);
139}
140
141/**
142 * gfs2_writepages - Write a bunch of dirty pages back to disk
143 * @mapping: The mapping to write
144 * @wbc: Write-back control
145 *
146 * Used for both ordered and writeback modes.
147 */
148static int gfs2_writepages(struct address_space *mapping,
149			   struct writeback_control *wbc)
150{
151	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
152	struct iomap_writepage_ctx wpc = { };
153	int ret;
154
155	/*
156	 * Even if we didn't write enough pages here, we might still be holding
157	 * dirty pages in the ail. We forcibly flush the ail because we don't
158	 * want balance_dirty_pages() to loop indefinitely trying to write out
159	 * pages held in the ail that it can't find.
160	 */
161	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
162	if (ret == 0 && wbc->nr_to_write > 0)
163		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
164	return ret;
165}
166
167/**
168 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
169 * @mapping: The mapping
170 * @wbc: The writeback control
171 * @fbatch: The batch of folios
172 * @done_index: Page index
173 *
174 * Returns: non-zero if loop should terminate, zero otherwise
175 */
176
177static int gfs2_write_jdata_batch(struct address_space *mapping,
178				    struct writeback_control *wbc,
179				    struct folio_batch *fbatch,
180				    pgoff_t *done_index)
181{
182	struct inode *inode = mapping->host;
183	struct gfs2_sbd *sdp = GFS2_SB(inode);
184	unsigned nrblocks;
185	int i;
186	int ret;
187	size_t size = 0;
188	int nr_folios = folio_batch_count(fbatch);
189
190	for (i = 0; i < nr_folios; i++)
191		size += folio_size(fbatch->folios[i]);
192	nrblocks = size >> inode->i_blkbits;
193
194	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
195	if (ret < 0)
196		return ret;
197
198	for (i = 0; i < nr_folios; i++) {
199		struct folio *folio = fbatch->folios[i];
200
201		*done_index = folio->index;
202
203		folio_lock(folio);
204
205		if (unlikely(folio->mapping != mapping)) {
206continue_unlock:
207			folio_unlock(folio);
208			continue;
209		}
210
211		if (!folio_test_dirty(folio)) {
212			/* someone wrote it for us */
213			goto continue_unlock;
214		}
215
216		if (folio_test_writeback(folio)) {
217			if (wbc->sync_mode != WB_SYNC_NONE)
218				folio_wait_writeback(folio);
219			else
220				goto continue_unlock;
221		}
222
223		BUG_ON(folio_test_writeback(folio));
224		if (!folio_clear_dirty_for_io(folio))
225			goto continue_unlock;
226
227		trace_wbc_writepage(wbc, inode_to_bdi(inode));
228
229		ret = __gfs2_jdata_write_folio(folio, wbc);
230		if (unlikely(ret)) {
231			if (ret == AOP_WRITEPAGE_ACTIVATE) {
232				folio_unlock(folio);
233				ret = 0;
234			} else {
235
236				/*
237				 * done_index is set past this page,
238				 * so media errors will not choke
239				 * background writeout for the entire
240				 * file. This has consequences for
241				 * range_cyclic semantics (ie. it may
242				 * not be suitable for data integrity
243				 * writeout).
244				 */
245				*done_index = folio_next_index(folio);
246				ret = 1;
247				break;
248			}
249		}
250
251		/*
252		 * We stop writing back only if we are not doing
253		 * integrity sync. In case of integrity sync we have to
254		 * keep going until we have written all the pages
255		 * we tagged for writeback prior to entering this loop.
256		 */
257		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
258			ret = 1;
259			break;
260		}
261
262	}
263	gfs2_trans_end(sdp);
264	return ret;
265}
266
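The reservation above sizes the transaction by the batch's total bytes shifted down to filesystem blocks. A worked example, assuming 4 KiB blocks (i_blkbits == 12) and a three-folio batch with one large folio:

#include <stdio.h>

int main(void)
{
	unsigned long folio_sizes[] = { 4096, 16384, 4096 };	/* sample batch */
	unsigned long size = 0;
	int i;

	for (i = 0; i < 3; i++)
		size += folio_sizes[i];
	printf("nrblocks = %lu\n", size >> 12);	/* 24576 >> 12 == 6 */
	return 0;
}
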
267/**
268 * gfs2_write_cache_jdata - Like write_cache_pages but different
269 * @mapping: The mapping to write
270 * @wbc: The writeback control
271 *
272 * The reason that we use our own function here is that we need to
273 * start transactions before we grab page locks. This allows us
274 * to get the ordering right.
275 */
276
277static int gfs2_write_cache_jdata(struct address_space *mapping,
278				  struct writeback_control *wbc)
279{
280	int ret = 0;
281	int done = 0;
282	struct folio_batch fbatch;
283	int nr_folios;
284	pgoff_t writeback_index;
285	pgoff_t index;
286	pgoff_t end;
287	pgoff_t done_index;
288	int cycled;
289	int range_whole = 0;
290	xa_mark_t tag;
291
292	folio_batch_init(&fbatch);
293	if (wbc->range_cyclic) {
294		writeback_index = mapping->writeback_index; /* prev offset */
295		index = writeback_index;
296		if (index == 0)
297			cycled = 1;
298		else
299			cycled = 0;
300		end = -1;
301	} else {
302		index = wbc->range_start >> PAGE_SHIFT;
303		end = wbc->range_end >> PAGE_SHIFT;
304		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
305			range_whole = 1;
306		cycled = 1; /* ignore range_cyclic tests */
307	}
308	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
309		tag = PAGECACHE_TAG_TOWRITE;
310	else
311		tag = PAGECACHE_TAG_DIRTY;
312
313retry:
314	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
315		tag_pages_for_writeback(mapping, index, end);
316	done_index = index;
317	while (!done && (index <= end)) {
318		nr_folios = filemap_get_folios_tag(mapping, &index, end,
319				tag, &fbatch);
320		if (nr_folios == 0)
321			break;
322
323		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
324				&done_index);
325		if (ret)
326			done = 1;
327		if (ret > 0)
328			ret = 0;
329		folio_batch_release(&fbatch);
330		cond_resched();
331	}
332
333	if (!cycled && !done) {
334		/*
335		 * range_cyclic:
336		 * We hit the last page and there is more work to be done: wrap
337		 * back to the start of the file
338		 */
339		cycled = 1;
340		index = 0;
341		end = writeback_index - 1;
342		goto retry;
343	}
344
345	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
346		mapping->writeback_index = done_index;
347
348	return ret;
349}
350
351
352/**
353 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
354 * @mapping: The mapping to write
355 * @wbc: The writeback control
356 * 
357 */
358
359static int gfs2_jdata_writepages(struct address_space *mapping,
360				 struct writeback_control *wbc)
361{
362	struct gfs2_inode *ip = GFS2_I(mapping->host);
363	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
364	int ret;
365
366	ret = gfs2_write_cache_jdata(mapping, wbc);
367	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
368		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
369			       GFS2_LFC_JDATA_WPAGES);
370		ret = gfs2_write_cache_jdata(mapping, wbc);
371	}
372	return ret;
373}
374
375/**
376 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
377 * @ip: the inode
378 * @folio: the folio
379 *
380 * Returns: errno
381 */
382static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
383{
384	struct buffer_head *dibh = NULL;
385	size_t dsize = i_size_read(&ip->i_inode);
386	void *from = NULL;
387	int error = 0;
388
389	/*
390	 * Due to the order of unstuffing files and ->fault(), we can be
391	 * asked for a zero folio in the case of a stuffed file being extended,
392	 * so we need to supply one here. It doesn't happen often.
393	 */
394	if (unlikely(folio->index)) {
395		dsize = 0;
396	} else {
397		error = gfs2_meta_inode_buffer(ip, &dibh);
398		if (error)
399			goto out;
400		from = dibh->b_data + sizeof(struct gfs2_dinode);
401	}
402
403	folio_fill_tail(folio, 0, from, dsize);
404	brelse(dibh);
405out:
406	folio_end_read(folio, error == 0);
407
408	return error;
409}
410
411/**
412 * gfs2_read_folio - read a folio from a file
413 * @file: The file to read
414 * @folio: The folio in the file
415 */
416static int gfs2_read_folio(struct file *file, struct folio *folio)
417{
418	struct inode *inode = folio->mapping->host;
419	struct gfs2_inode *ip = GFS2_I(inode);
420	struct gfs2_sbd *sdp = GFS2_SB(inode);
421	int error;
422
423	if (!gfs2_is_jdata(ip) ||
424	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
425		error = iomap_read_folio(folio, &gfs2_iomap_ops);
426	} else if (gfs2_is_stuffed(ip)) {
427		error = stuffed_read_folio(ip, folio);
428	} else {
429		error = mpage_read_folio(folio, gfs2_block_map);
430	}
431
432	if (gfs2_withdrawing_or_withdrawn(sdp))
433		return -EIO;
434
435	return error;
436}
437
438/**
439 * gfs2_internal_read - read an internal file
440 * @ip: The gfs2 inode
441 * @buf: The buffer to fill
442 * @pos: The file position
443 * @size: The amount to read
444 *
445 */
446
447ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
448			   size_t size)
449{
450	struct address_space *mapping = ip->i_inode.i_mapping;
451	unsigned long index = *pos >> PAGE_SHIFT;
452	size_t copied = 0;
453
454	do {
455		size_t offset, chunk;
456		struct folio *folio;
457
458		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
459		if (IS_ERR(folio)) {
460			if (PTR_ERR(folio) == -EINTR)
461				continue;
462			return PTR_ERR(folio);
463		}
464		offset = *pos + copied - folio_pos(folio);
465		chunk = min(size - copied, folio_size(folio) - offset);
466		memcpy_from_folio(buf + copied, folio, offset, chunk);
467		index = folio_next_index(folio);
468		folio_put(folio);
469		copied += chunk;
470	} while(copied < size);
471	(*pos) += size;
472	return size;
473}
474
475/**
476 * gfs2_readahead - Read a bunch of pages at once
477 * @rac: Read-ahead control structure
478 *
479 * Some notes:
480 * 1. This is only for readahead, so we can simply ignore any things
481 *    which are slightly inconvenient (such as locking conflicts between
 482 *    the page lock and the glock) and return having done no I/O. It's
 483 *    obviously not something we'd want to do on too regular a basis.
 484 *    Any I/O we ignore at this time will be done via readpage later.
 485 * 2. We don't handle stuffed files here; we let readpage do the honours.
486 * 3. mpage_readahead() does most of the heavy lifting in the common case.
487 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
488 */
489
490static void gfs2_readahead(struct readahead_control *rac)
491{
492	struct inode *inode = rac->mapping->host;
493	struct gfs2_inode *ip = GFS2_I(inode);
494
495	if (gfs2_is_stuffed(ip))
496		;
497	else if (gfs2_is_jdata(ip))
498		mpage_readahead(rac, gfs2_block_map);
499	else
500		iomap_readahead(rac, &gfs2_iomap_ops);
501}
502
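Readahead is normally triggered by the kernel's sequential-read detection, but userspace can also request it explicitly; either way the requests arrive at the ->readahead method above. A sketch using posix_fadvise(), with the path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/file", O_RDONLY);
	int err;

	if (fd < 0)
		return 1;
	/* hint that the first 1 MiB will be needed soon */
	err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);
	if (err)
		fprintf(stderr, "posix_fadvise: %d\n", err);
	close(fd);
	return 0;
}
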
503/**
504 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
505 * @inode: the rindex inode
506 */
507void adjust_fs_space(struct inode *inode)
508{
509	struct gfs2_sbd *sdp = GFS2_SB(inode);
510	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
511	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
512	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
513	struct buffer_head *m_bh;
514	u64 fs_total, new_free;
515
516	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
517		return;
518
519	/* Total up the file system space, according to the latest rindex. */
520	fs_total = gfs2_ri_total(sdp);
521	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
522		goto out;
523
524	spin_lock(&sdp->sd_statfs_spin);
525	gfs2_statfs_change_in(m_sc, m_bh->b_data +
526			      sizeof(struct gfs2_dinode));
527	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
528		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
529	else
530		new_free = 0;
531	spin_unlock(&sdp->sd_statfs_spin);
532	fs_warn(sdp, "File system extended by %llu blocks.\n",
533		(unsigned long long)new_free);
534	gfs2_statfs_change(sdp, new_free, new_free, 0);
535
536	update_statfs(sdp, m_bh);
537	brelse(m_bh);
538out:
539	sdp->sd_rindex_uptodate = 0;
540	gfs2_trans_end(sdp);
541}
542
543static bool jdata_dirty_folio(struct address_space *mapping,
544		struct folio *folio)
545{
546	if (current->journal_info)
547		folio_set_checked(folio);
548	return block_dirty_folio(mapping, folio);
549}
550
551/**
552 * gfs2_bmap - Block map function
553 * @mapping: Address space info
554 * @lblock: The block to map
555 *
556 * Returns: The disk address for the block or 0 on hole or error
557 */
558
559static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
560{
561	struct gfs2_inode *ip = GFS2_I(mapping->host);
562	struct gfs2_holder i_gh;
563	sector_t dblock = 0;
564	int error;
565
566	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
567	if (error)
568		return 0;
569
570	if (!gfs2_is_stuffed(ip))
571		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);
572
573	gfs2_glock_dq_uninit(&i_gh);
574
575	return dblock;
576}
577
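The ->bmap method is reached from userspace through the FIBMAP ioctl, which requires CAP_SYS_RAWIO. A sketch, with the path as an assumption; a result of 0 means a hole (or, per the comment above, an error):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FIBMAP */

int main(void)
{
	int fd = open("/mnt/gfs2/file", O_RDONLY);
	int block = 0;		/* logical block in, disk block out */

	if (fd < 0 || ioctl(fd, FIBMAP, &block) < 0)
		return 1;
	printf("logical block 0 -> disk block %d\n", block);
	close(fd);
	return 0;
}
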
578static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
579{
580	struct gfs2_bufdata *bd;
581
582	lock_buffer(bh);
583	gfs2_log_lock(sdp);
584	clear_buffer_dirty(bh);
585	bd = bh->b_private;
586	if (bd) {
587		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
588			list_del_init(&bd->bd_list);
589		else {
590			spin_lock(&sdp->sd_ail_lock);
591			gfs2_remove_from_journal(bh, REMOVE_JDATA);
592			spin_unlock(&sdp->sd_ail_lock);
593		}
594	}
595	bh->b_bdev = NULL;
596	clear_buffer_mapped(bh);
597	clear_buffer_req(bh);
598	clear_buffer_new(bh);
599	gfs2_log_unlock(sdp);
600	unlock_buffer(bh);
601}
602
603static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
604				size_t length)
605{
606	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
607	size_t stop = offset + length;
608	int partial_page = (offset || length < folio_size(folio));
609	struct buffer_head *bh, *head;
610	unsigned long pos = 0;
611
612	BUG_ON(!folio_test_locked(folio));
613	if (!partial_page)
614		folio_clear_checked(folio);
615	head = folio_buffers(folio);
616	if (!head)
617		goto out;
618
619	bh = head;
620	do {
621		if (pos + bh->b_size > stop)
622			return;
623
624		if (offset <= pos)
625			gfs2_discard(sdp, bh);
626		pos += bh->b_size;
627		bh = bh->b_this_page;
628	} while (bh != head);
629out:
630	if (!partial_page)
631		filemap_release_folio(folio, 0);
632}
633
634/**
635 * gfs2_release_folio - free the metadata associated with a folio
636 * @folio: the folio that's being released
637 * @gfp_mask: passed from Linux VFS, ignored by us
638 *
639 * Calls try_to_free_buffers() to free the buffers and put the folio if the
640 * buffers can be released.
641 *
642 * Returns: true if the folio was put or else false
643 */
644
645bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
646{
647	struct address_space *mapping = folio->mapping;
648	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
649	struct buffer_head *bh, *head;
650	struct gfs2_bufdata *bd;
651
652	head = folio_buffers(folio);
653	if (!head)
654		return false;
655
656	/*
657	 * mm accommodates an old ext3 case where clean folios might
658	 * not have had the dirty bit cleared.	Thus, it can send actual
659	 * dirty folios to ->release_folio() via shrink_active_list().
660	 *
661	 * As a workaround, we skip folios that contain dirty buffers
662	 * below.  Once ->release_folio isn't called on dirty folios
663	 * anymore, we can warn on dirty buffers like we used to here
664	 * again.
665	 */
666
667	gfs2_log_lock(sdp);
668	bh = head;
669	do {
670		if (atomic_read(&bh->b_count))
671			goto cannot_release;
672		bd = bh->b_private;
673		if (bd && bd->bd_tr)
674			goto cannot_release;
675		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
676			goto cannot_release;
677		bh = bh->b_this_page;
678	} while (bh != head);
679
680	bh = head;
681	do {
682		bd = bh->b_private;
683		if (bd) {
684			gfs2_assert_warn(sdp, bd->bd_bh == bh);
685			bd->bd_bh = NULL;
686			bh->b_private = NULL;
687			/*
688			 * The bd may still be queued as a revoke, in which
689			 * case we must not dequeue nor free it.
690			 */
691			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
692				list_del_init(&bd->bd_list);
693			if (list_empty(&bd->bd_list))
694				kmem_cache_free(gfs2_bufdata_cachep, bd);
695		}
696
697		bh = bh->b_this_page;
698	} while (bh != head);
699	gfs2_log_unlock(sdp);
700
701	return try_to_free_buffers(folio);
702
703cannot_release:
704	gfs2_log_unlock(sdp);
705	return false;
706}
707
708static const struct address_space_operations gfs2_aops = {
709	.writepages = gfs2_writepages,
710	.read_folio = gfs2_read_folio,
711	.readahead = gfs2_readahead,
712	.dirty_folio = iomap_dirty_folio,
713	.release_folio = iomap_release_folio,
714	.invalidate_folio = iomap_invalidate_folio,
715	.bmap = gfs2_bmap,
716	.migrate_folio = filemap_migrate_folio,
717	.is_partially_uptodate = iomap_is_partially_uptodate,
718	.error_remove_folio = generic_error_remove_folio,
719};
720
721static const struct address_space_operations gfs2_jdata_aops = {
722	.writepages = gfs2_jdata_writepages,
723	.read_folio = gfs2_read_folio,
724	.readahead = gfs2_readahead,
725	.dirty_folio = jdata_dirty_folio,
726	.bmap = gfs2_bmap,
727	.migrate_folio = buffer_migrate_folio,
728	.invalidate_folio = gfs2_invalidate_folio,
729	.release_folio = gfs2_release_folio,
730	.is_partially_uptodate = block_is_partially_uptodate,
731	.error_remove_folio = generic_error_remove_folio,
732};
733
734void gfs2_set_aops(struct inode *inode)
735{
736	if (gfs2_is_jdata(GFS2_I(inode)))
737		inode->i_mapping->a_ops = &gfs2_jdata_aops;
738	else
739		inode->i_mapping->a_ops = &gfs2_aops;
740}