v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include <linux/backing-dev.h>
   8
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_trace.h"
  16#include "xfs_log.h"
  17#include "xfs_log_recover.h"
  18#include "xfs_trans.h"
  19#include "xfs_buf_item.h"
  20#include "xfs_errortag.h"
  21#include "xfs_error.h"
  22
  23static kmem_zone_t *xfs_buf_zone;
  24
  25#define xb_to_gfp(flags) \
  26	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  27
  28/*
  29 * Locking orders
  30 *
  31 * xfs_buf_ioacct_inc:
  32 * xfs_buf_ioacct_dec:
  33 *	b_sema (caller holds)
  34 *	  b_lock
  35 *
  36 * xfs_buf_stale:
  37 *	b_sema (caller holds)
  38 *	  b_lock
  39 *	    lru_lock
  40 *
  41 * xfs_buf_rele:
  42 *	b_lock
  43 *	  pag_buf_lock
  44 *	    lru_lock
  45 *
  46 * xfs_buftarg_wait_rele
  47 *	lru_lock
  48 *	  b_lock (trylock due to inversion)
  49 *
  50 * xfs_buftarg_isolate
  51 *	lru_lock
  52 *	  b_lock (trylock due to inversion)
  53 */
  54
  55static inline int
  56xfs_buf_is_vmapped(
  57	struct xfs_buf	*bp)
  58{
  59	/*
  60	 * Return true if the buffer is vmapped.
  61	 *
  62	 * b_addr is null if the buffer is not mapped, but the code is clever
  63	 * enough to know it doesn't have to map a single page, so the check has
  64	 * to be both for b_addr and bp->b_page_count > 1.
  65	 */
  66	return bp->b_addr && bp->b_page_count > 1;
  67}
  68
  69static inline int
  70xfs_buf_vmap_len(
  71	struct xfs_buf	*bp)
  72{
  73	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  74}
  75
  76/*
  77 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
  78 * this buffer. The count is incremented once per buffer (per hold cycle)
  79 * because the corresponding decrement is deferred to buffer release. Buffers
  80 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
  81 * tracking adds unnecessary overhead. This is used for synchronization purposes
  82 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
  83 * in-flight buffers.
  84 *
  85 * Buffers that are never released (e.g., superblock, iclog buffers) must set
  86 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
  87 * never reaches zero and unmount hangs indefinitely.
  88 */
  89static inline void
  90xfs_buf_ioacct_inc(
  91	struct xfs_buf	*bp)
  92{
  93	if (bp->b_flags & XBF_NO_IOACCT)
  94		return;
  95
  96	ASSERT(bp->b_flags & XBF_ASYNC);
  97	spin_lock(&bp->b_lock);
  98	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
  99		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
 100		percpu_counter_inc(&bp->b_target->bt_io_count);
 101	}
 102	spin_unlock(&bp->b_lock);
 103}
 104
 105/*
 106 * Clear the in-flight state on a buffer about to be released to the LRU or
 107 * freed and unaccount from the buftarg.
 108 */
 109static inline void
 110__xfs_buf_ioacct_dec(
 111	struct xfs_buf	*bp)
 112{
 113	lockdep_assert_held(&bp->b_lock);
 114
 115	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
 116		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
 117		percpu_counter_dec(&bp->b_target->bt_io_count);
 118	}
 119}
 120
 121static inline void
 122xfs_buf_ioacct_dec(
 123	struct xfs_buf	*bp)
 124{
 125	spin_lock(&bp->b_lock);
 126	__xfs_buf_ioacct_dec(bp);
 127	spin_unlock(&bp->b_lock);
 128}
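
/*
 * A brief sketch of the rule described above xfs_buf_ioacct_inc(), assuming a
 * caller that owns a buffer which is never released per-I/O (e.g. one held
 * for the life of the mount): opt out of in-flight accounting before the
 * first submission so that bt_io_count can drain at unmount.
 *
 *	bp->b_flags |= XBF_NO_IOACCT;	// before any xfs_buf_submit() call
 *	...
 *	xfs_buf_submit(bp);
 */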
 129
 130/*
 131 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 132 * b_lru_ref count so that the buffer is freed immediately when the buffer
 133 * reference count falls to zero. If the buffer is already on the LRU, we need
 134 * to remove the reference that LRU holds on the buffer.
 135 *
 136 * This prevents build-up of stale buffers on the LRU.
 137 */
 138void
 139xfs_buf_stale(
 140	struct xfs_buf	*bp)
 141{
 142	ASSERT(xfs_buf_islocked(bp));
 143
 144	bp->b_flags |= XBF_STALE;
 145
 146	/*
 147	 * Clear the delwri status so that a delwri queue walker will not
 148	 * flush this buffer to disk now that it is stale. The delwri queue has
 149	 * a reference to the buffer, so this is safe to do.
 150	 */
 151	bp->b_flags &= ~_XBF_DELWRI_Q;
 152
 153	/*
 154	 * Once the buffer is marked stale and unlocked, a subsequent lookup
 155	 * could reset b_flags. There is no guarantee that the buffer is
 156	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 157	 * status now to preserve accounting consistency.
 158	 */
 159	spin_lock(&bp->b_lock);
 160	__xfs_buf_ioacct_dec(bp);
 161
 162	atomic_set(&bp->b_lru_ref, 0);
 163	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 164	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
 165		atomic_dec(&bp->b_hold);
 166
 167	ASSERT(atomic_read(&bp->b_hold) >= 1);
 168	spin_unlock(&bp->b_lock);
 169}
 170
 171static int
 172xfs_buf_get_maps(
 173	struct xfs_buf		*bp,
 174	int			map_count)
 175{
 176	ASSERT(bp->b_maps == NULL);
 177	bp->b_map_count = map_count;
 178
 179	if (map_count == 1) {
 180		bp->b_maps = &bp->__b_map;
 181		return 0;
 182	}
 183
 184	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
 185				KM_NOFS);
 186	if (!bp->b_maps)
 187		return -ENOMEM;
 188	return 0;
 189}
 190
 191/*
 192 *	Frees b_maps if it was allocated.
 193 */
 194static void
 195xfs_buf_free_maps(
 196	struct xfs_buf	*bp)
 197{
 198	if (bp->b_maps != &bp->__b_map) {
 199		kmem_free(bp->b_maps);
 200		bp->b_maps = NULL;
 201	}
 202}
 203
 204static int
 205_xfs_buf_alloc(
 206	struct xfs_buftarg	*target,
 207	struct xfs_buf_map	*map,
 208	int			nmaps,
 209	xfs_buf_flags_t		flags,
 210	struct xfs_buf		**bpp)
 211{
 212	struct xfs_buf		*bp;
 213	int			error;
 214	int			i;
 215
 216	*bpp = NULL;
 217	bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL);
 218
 219	/*
 220	 * We don't want certain flags to appear in b_flags unless they are
 221	 * specifically set by later operations on the buffer.
 222	 */
 223	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 224
 225	atomic_set(&bp->b_hold, 1);
 226	atomic_set(&bp->b_lru_ref, 1);
 227	init_completion(&bp->b_iowait);
 228	INIT_LIST_HEAD(&bp->b_lru);
 229	INIT_LIST_HEAD(&bp->b_list);
 230	INIT_LIST_HEAD(&bp->b_li_list);
 231	sema_init(&bp->b_sema, 0); /* held, no waiters */
 232	spin_lock_init(&bp->b_lock);
 233	bp->b_target = target;
 234	bp->b_mount = target->bt_mount;
 235	bp->b_flags = flags;
 236
 237	/*
 238	 * Set length and io_length to the same value initially.
 239	 * I/O routines should use io_length, which will be the same in
 240	 * most cases but may be reset (e.g. XFS recovery).
 241	 */
 242	error = xfs_buf_get_maps(bp, nmaps);
 243	if (error)  {
 244		kmem_cache_free(xfs_buf_zone, bp);
 245		return error;
 246	}
 247
 248	bp->b_bn = map[0].bm_bn;
 249	bp->b_length = 0;
 250	for (i = 0; i < nmaps; i++) {
 251		bp->b_maps[i].bm_bn = map[i].bm_bn;
 252		bp->b_maps[i].bm_len = map[i].bm_len;
 253		bp->b_length += map[i].bm_len;
 254	}
 255
 256	atomic_set(&bp->b_pin_count, 0);
 257	init_waitqueue_head(&bp->b_waiters);
 258
 259	XFS_STATS_INC(bp->b_mount, xb_create);
 260	trace_xfs_buf_init(bp, _RET_IP_);
 261
 262	*bpp = bp;
 263	return 0;
 264}
 265
 266/*
 267 *	Allocate a page array capable of holding a specified number
 268 *	of pages, and point the page buf at it.
 269 */
 270STATIC int
 271_xfs_buf_get_pages(
 272	xfs_buf_t		*bp,
 273	int			page_count)
 274{
 275	/* Make sure that we have a page list */
 276	if (bp->b_pages == NULL) {
 277		bp->b_page_count = page_count;
 278		if (page_count <= XB_PAGES) {
 279			bp->b_pages = bp->b_page_array;
 280		} else {
 281			bp->b_pages = kmem_alloc(sizeof(struct page *) *
 282						 page_count, KM_NOFS);
 283			if (bp->b_pages == NULL)
 284				return -ENOMEM;
 285		}
 286		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 287	}
 288	return 0;
 289}
 290
 291/*
 292 *	Frees b_pages if it was allocated.
 293 */
 294STATIC void
 295_xfs_buf_free_pages(
 296	xfs_buf_t	*bp)
 297{
 298	if (bp->b_pages != bp->b_page_array) {
 299		kmem_free(bp->b_pages);
 300		bp->b_pages = NULL;
 301	}
 302}
 303
 304/*
 305 *	Releases the specified buffer.
 306 *
 307 * 	The modification state of any associated pages is left unchanged.
 308 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 309 * 	hashed and refcounted buffers
 310 */
 311static void
 312xfs_buf_free(
 313	xfs_buf_t		*bp)
 314{
 315	trace_xfs_buf_free(bp, _RET_IP_);
 316
 317	ASSERT(list_empty(&bp->b_lru));
 318
 319	if (bp->b_flags & _XBF_PAGES) {
 320		uint		i;
 321
 322		if (xfs_buf_is_vmapped(bp))
 323			vm_unmap_ram(bp->b_addr - bp->b_offset,
 324					bp->b_page_count);
 325
 326		for (i = 0; i < bp->b_page_count; i++) {
 327			struct page	*page = bp->b_pages[i];
 328
 329			__free_page(page);
 330		}
 331		if (current->reclaim_state)
 332			current->reclaim_state->reclaimed_slab +=
 333							bp->b_page_count;
 334	} else if (bp->b_flags & _XBF_KMEM)
 335		kmem_free(bp->b_addr);
 336	_xfs_buf_free_pages(bp);
 337	xfs_buf_free_maps(bp);
 338	kmem_cache_free(xfs_buf_zone, bp);
 339}
 340
 341/*
 342 * Allocates all the pages for the buffer in question and builds its page list.
 343 */
 344STATIC int
 345xfs_buf_allocate_memory(
 346	xfs_buf_t		*bp,
 347	uint			flags)
 348{
 349	size_t			size;
 350	size_t			nbytes, offset;
 351	gfp_t			gfp_mask = xb_to_gfp(flags);
 352	unsigned short		page_count, i;
 353	xfs_off_t		start, end;
 354	int			error;
 355	xfs_km_flags_t		kmflag_mask = 0;
 356
 357	/*
 358	 * assure zeroed buffer for non-read cases.
 359	 */
 360	if (!(flags & XBF_READ)) {
 361		kmflag_mask |= KM_ZERO;
 362		gfp_mask |= __GFP_ZERO;
 363	}
 364
 365	/*
 366	 * for buffers that are contained within a single page, just allocate
 367	 * the memory from the heap - there's no need for the complexity of
 368	 * page arrays to keep allocation down to order 0.
 369	 */
 370	size = BBTOB(bp->b_length);
 371	if (size < PAGE_SIZE) {
 372		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
 373		bp->b_addr = kmem_alloc_io(size, align_mask,
 374					   KM_NOFS | kmflag_mask);
 375		if (!bp->b_addr) {
 376			/* low memory - use alloc_page loop instead */
 377			goto use_alloc_page;
 378		}
 379
 380		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
 381		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
 382			/* b_addr spans two pages - use alloc_page instead */
 383			kmem_free(bp->b_addr);
 384			bp->b_addr = NULL;
 385			goto use_alloc_page;
 386		}
 387		bp->b_offset = offset_in_page(bp->b_addr);
 388		bp->b_pages = bp->b_page_array;
 389		bp->b_pages[0] = kmem_to_page(bp->b_addr);
 390		bp->b_page_count = 1;
 391		bp->b_flags |= _XBF_KMEM;
 392		return 0;
 393	}
 394
 395use_alloc_page:
 396	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 397	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 398								>> PAGE_SHIFT;
 399	page_count = end - start;
 400	error = _xfs_buf_get_pages(bp, page_count);
 401	if (unlikely(error))
 402		return error;
 403
 404	offset = bp->b_offset;
 405	bp->b_flags |= _XBF_PAGES;
 406
 407	for (i = 0; i < bp->b_page_count; i++) {
 408		struct page	*page;
 409		uint		retries = 0;
 410retry:
 411		page = alloc_page(gfp_mask);
 412		if (unlikely(page == NULL)) {
 413			if (flags & XBF_READ_AHEAD) {
 414				bp->b_page_count = i;
 415				error = -ENOMEM;
 416				goto out_free_pages;
 417			}
 418
 419			/*
 420			 * This could deadlock.
 421			 *
 422			 * But until all the XFS lowlevel code is revamped to
 423			 * handle buffer allocation failures we can't do much.
 424			 */
 425			if (!(++retries % 100))
 426				xfs_err(NULL,
 427		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
 428					current->comm, current->pid,
 429					__func__, gfp_mask);
 430
 431			XFS_STATS_INC(bp->b_mount, xb_page_retries);
 432			congestion_wait(BLK_RW_ASYNC, HZ/50);
 433			goto retry;
 434		}
 435
 436		XFS_STATS_INC(bp->b_mount, xb_page_found);
 437
 438		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 439		size -= nbytes;
 440		bp->b_pages[i] = page;
 441		offset = 0;
 442	}
 443	return 0;
 444
 445out_free_pages:
 446	for (i = 0; i < bp->b_page_count; i++)
 447		__free_page(bp->b_pages[i]);
 448	bp->b_flags &= ~_XBF_PAGES;
 449	return error;
 450}
 451
 452/*
 453 *	Map buffer into kernel address-space if necessary.
 454 */
 455STATIC int
 456_xfs_buf_map_pages(
 457	xfs_buf_t		*bp,
 458	uint			flags)
 459{
 460	ASSERT(bp->b_flags & _XBF_PAGES);
 461	if (bp->b_page_count == 1) {
 462		/* A single page buffer is always mappable */
 463		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 464	} else if (flags & XBF_UNMAPPED) {
 465		bp->b_addr = NULL;
 466	} else {
 467		int retried = 0;
 468		unsigned nofs_flag;
 469
 470		/*
 471		 * vm_map_ram() will allocate auxiliary structures (e.g.
 472		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 473		 * GFP_NOFS context here. Hence we need to tell memory reclaim
 474		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
 475		 * memory reclaim re-entering the filesystem here and
 476		 * potentially deadlocking.
 477		 */
 478		nofs_flag = memalloc_nofs_save();
 479		do {
 480			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 481						-1);
 482			if (bp->b_addr)
 483				break;
 484			vm_unmap_aliases();
 485		} while (retried++ <= 1);
 486		memalloc_nofs_restore(nofs_flag);
 487
 488		if (!bp->b_addr)
 489			return -ENOMEM;
 490		bp->b_addr += bp->b_offset;
 491	}
 492
 493	return 0;
 494}
 495
 496/*
 497 *	Finding and Reading Buffers
 498 */
 499static int
 500_xfs_buf_obj_cmp(
 501	struct rhashtable_compare_arg	*arg,
 502	const void			*obj)
 503{
 504	const struct xfs_buf_map	*map = arg->key;
 505	const struct xfs_buf		*bp = obj;
 506
 507	/*
 508	 * The key hashing in the lookup path depends on the key being the
 509	 * first element of the compare_arg, make sure to assert this.
 510	 */
 511	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
 512
 513	if (bp->b_bn != map->bm_bn)
 514		return 1;
 515
 516	if (unlikely(bp->b_length != map->bm_len)) {
 517		/*
 518		 * found a block number match. If the range doesn't
 519		 * match, the only way this is allowed is if the buffer
 520		 * in the cache is stale and the transaction that made
 521		 * it stale has not yet committed. i.e. we are
 522		 * reallocating a busy extent. Skip this buffer and
 523		 * continue searching for an exact match.
 524		 */
 525		ASSERT(bp->b_flags & XBF_STALE);
 526		return 1;
 527	}
 528	return 0;
 529}
 530
 531static const struct rhashtable_params xfs_buf_hash_params = {
 532	.min_size		= 32,	/* empty AGs have minimal footprint */
 533	.nelem_hint		= 16,
 534	.key_len		= sizeof(xfs_daddr_t),
 535	.key_offset		= offsetof(struct xfs_buf, b_bn),
 536	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
 537	.automatic_shrinking	= true,
 538	.obj_cmpfn		= _xfs_buf_obj_cmp,
 539};
 540
 541int
 542xfs_buf_hash_init(
 543	struct xfs_perag	*pag)
 544{
 545	spin_lock_init(&pag->pag_buf_lock);
 546	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
 547}
 548
 549void
 550xfs_buf_hash_destroy(
 551	struct xfs_perag	*pag)
 552{
 553	rhashtable_destroy(&pag->pag_buf_hash);
 554}
 555
 556/*
 557 * Look up a buffer in the buffer cache and return it referenced and locked
 558 * in @found_bp.
 559 *
 560 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
 561 * cache.
 562 *
 563 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
 564 * -EAGAIN if we fail to lock it.
 565 *
 566 * Return values are:
 567 *	-EFSCORRUPTED if have been supplied with an invalid address
 568 *	-EAGAIN on trylock failure
 569 *	-ENOENT if we fail to find a match and @new_bp was NULL
 570 *	0, with @found_bp:
 571 *		- @new_bp if we inserted it into the cache
 572 *		- the buffer we found and locked.
 573 */
 574static int
 575xfs_buf_find(
 576	struct xfs_buftarg	*btp,
 577	struct xfs_buf_map	*map,
 578	int			nmaps,
 579	xfs_buf_flags_t		flags,
 580	struct xfs_buf		*new_bp,
 581	struct xfs_buf		**found_bp)
 582{
 583	struct xfs_perag	*pag;
 584	xfs_buf_t		*bp;
 585	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 586	xfs_daddr_t		eofs;
 587	int			i;
 588
 589	*found_bp = NULL;
 590
 591	for (i = 0; i < nmaps; i++)
 592		cmap.bm_len += map[i].bm_len;
 593
 594	/* Check for IOs smaller than the sector size / not sector aligned */
 595	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
 596	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
 597
 598	/*
 599	 * Corrupted block numbers can get through to here, unfortunately, so we
 600	 * have to check that the buffer falls within the filesystem bounds.
 601	 */
 602	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 603	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
 604		xfs_alert(btp->bt_mount,
 605			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
 606			  __func__, cmap.bm_bn, eofs);
 607		WARN_ON(1);
 608		return -EFSCORRUPTED;
 609	}
 610
 611	pag = xfs_perag_get(btp->bt_mount,
 612			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 613
 614	spin_lock(&pag->pag_buf_lock);
 615	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
 616				    xfs_buf_hash_params);
 617	if (bp) {
 618		atomic_inc(&bp->b_hold);
 619		goto found;
 620	}
 621
 622	/* No match found */
 623	if (!new_bp) {
 624		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 625		spin_unlock(&pag->pag_buf_lock);
 626		xfs_perag_put(pag);
 627		return -ENOENT;
 628	}
 629
 630	/* the buffer keeps the perag reference until it is freed */
 631	new_bp->b_pag = pag;
 632	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
 633			       xfs_buf_hash_params);
 634	spin_unlock(&pag->pag_buf_lock);
 635	*found_bp = new_bp;
 636	return 0;
 637
 638found:
 639	spin_unlock(&pag->pag_buf_lock);
 640	xfs_perag_put(pag);
 641
 642	if (!xfs_buf_trylock(bp)) {
 643		if (flags & XBF_TRYLOCK) {
 644			xfs_buf_rele(bp);
 645			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
 646			return -EAGAIN;
 647		}
 648		xfs_buf_lock(bp);
 649		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
 650	}
 651
 652	/*
 653	 * if the buffer is stale, clear all the external state associated with
 654	 * it. We need to keep flags such as how we allocated the buffer memory
 655	 * intact here.
 656	 */
 657	if (bp->b_flags & XBF_STALE) {
 658		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 659		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
 660		bp->b_ops = NULL;
 661	}
 662
 663	trace_xfs_buf_find(bp, flags, _RET_IP_);
 664	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
 665	*found_bp = bp;
 666	return 0;
 667}
 668
 669struct xfs_buf *
 670xfs_buf_incore(
 671	struct xfs_buftarg	*target,
 672	xfs_daddr_t		blkno,
 673	size_t			numblks,
 674	xfs_buf_flags_t		flags)
 675{
 676	struct xfs_buf		*bp;
 677	int			error;
 678	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 679
 680	error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
 681	if (error)
 682		return NULL;
 683	return bp;
 684}
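
/*
 * A minimal usage sketch (@mp, @blkno and @numblks are illustrative): peek at
 * the cache without blocking, and drop the reference again if the buffer was
 * found.
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_incore(mp->m_ddev_targp, blkno, numblks, XBF_TRYLOCK);
 *	if (bp) {
 *		// bp is locked and referenced here
 *		xfs_buf_relse(bp);
 *	}
 */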
 685
 686/*
 687 * Assembles a buffer covering the specified range. The code is optimised for
 688 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 689 * more hits than misses.
 690 */
 691int
 692xfs_buf_get_map(
 693	struct xfs_buftarg	*target,
 694	struct xfs_buf_map	*map,
 695	int			nmaps,
 696	xfs_buf_flags_t		flags,
 697	struct xfs_buf		**bpp)
 698{
 699	struct xfs_buf		*bp;
 700	struct xfs_buf		*new_bp;
 701	int			error = 0;
 702
 703	*bpp = NULL;
 704	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
 705	if (!error)
 706		goto found;
 707	if (error != -ENOENT)
 708		return error;
 709
 710	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
 711	if (error)
 712		return error;
 713
 714	error = xfs_buf_allocate_memory(new_bp, flags);
 715	if (error) {
 716		xfs_buf_free(new_bp);
 717		return error;
 718	}
 719
 720	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
 721	if (error) {
 722		xfs_buf_free(new_bp);
 723		return error;
 724	}
 725
 726	if (bp != new_bp)
 727		xfs_buf_free(new_bp);
 728
 729found:
 730	if (!bp->b_addr) {
 731		error = _xfs_buf_map_pages(bp, flags);
 732		if (unlikely(error)) {
 733			xfs_warn_ratelimited(target->bt_mount,
 734				"%s: failed to map %u pages", __func__,
 735				bp->b_page_count);
 736			xfs_buf_relse(bp);
 737			return error;
 738		}
 739	}
 740
 741	/*
 742	 * Clear b_error if this is a lookup from a caller that doesn't expect
 743	 * valid data to be found in the buffer.
 744	 */
 745	if (!(flags & XBF_READ))
 746		xfs_buf_ioerror(bp, 0);
 747
 748	XFS_STATS_INC(target->bt_mount, xb_get);
 749	trace_xfs_buf_get(bp, flags, _RET_IP_);
 750	*bpp = bp;
 751	return 0;
 752}
 753
 754STATIC int
 755_xfs_buf_read(
 756	xfs_buf_t		*bp,
 757	xfs_buf_flags_t		flags)
 758{
 759	ASSERT(!(flags & XBF_WRITE));
 760	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 761
 762	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 763	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 764
 765	return xfs_buf_submit(bp);
 766}
 767
 768/*
 769 * Reverify a buffer found in cache without an attached ->b_ops.
 770 *
 771 * If the caller passed an ops structure and the buffer doesn't have ops
 772 * assigned, set the ops and use it to verify the contents. If verification
 773 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
 774 * already in XBF_DONE state on entry.
 775 *
 776 * Under normal operations, every in-core buffer is verified on read I/O
 777 * completion. There are two scenarios that can lead to in-core buffers without
 778 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
 779 * filesystem, though these buffers are purged at the end of recovery. The
 780 * other is online repair, which intentionally reads with a NULL buffer ops to
 781 * run several verifiers across an in-core buffer in order to establish buffer
 782 * type.  If repair can't establish that, the buffer will be left in memory
 783 * with NULL buffer ops.
 784 */
 785int
 786xfs_buf_reverify(
 787	struct xfs_buf		*bp,
 788	const struct xfs_buf_ops *ops)
 789{
 790	ASSERT(bp->b_flags & XBF_DONE);
 791	ASSERT(bp->b_error == 0);
 792
 793	if (!ops || bp->b_ops)
 794		return 0;
 795
 796	bp->b_ops = ops;
 797	bp->b_ops->verify_read(bp);
 798	if (bp->b_error)
 799		bp->b_flags &= ~XBF_DONE;
 800	return bp->b_error;
 801}
 802
 803int
 804xfs_buf_read_map(
 805	struct xfs_buftarg	*target,
 806	struct xfs_buf_map	*map,
 807	int			nmaps,
 808	xfs_buf_flags_t		flags,
 809	struct xfs_buf		**bpp,
 810	const struct xfs_buf_ops *ops,
 811	xfs_failaddr_t		fa)
 812{
 813	struct xfs_buf		*bp;
 814	int			error;
 815
 816	flags |= XBF_READ;
 817	*bpp = NULL;
 818
 819	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
 820	if (error)
 821		return error;
 822
 823	trace_xfs_buf_read(bp, flags, _RET_IP_);
 824
 825	if (!(bp->b_flags & XBF_DONE)) {
 826		/* Initiate the buffer read and wait. */
 827		XFS_STATS_INC(target->bt_mount, xb_get_read);
 828		bp->b_ops = ops;
 829		error = _xfs_buf_read(bp, flags);
 830
 831		/* Readahead iodone already dropped the buffer, so exit. */
 832		if (flags & XBF_ASYNC)
 833			return 0;
 834	} else {
 835		/* Buffer already read; all we need to do is check it. */
 836		error = xfs_buf_reverify(bp, ops);
 837
 838		/* Readahead already finished; drop the buffer and exit. */
 839		if (flags & XBF_ASYNC) {
 840			xfs_buf_relse(bp);
 841			return 0;
 842		}
 843
 844		/* We do not want read in the flags */
 845		bp->b_flags &= ~XBF_READ;
 846		ASSERT(bp->b_ops != NULL || ops == NULL);
 847	}
 848
 849	/*
 850	 * If we've had a read error, then the contents of the buffer are
 851	 * invalid and should not be used. To ensure that a followup read tries
 852	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
 853	 * mark the buffer stale. This ensures that anyone who has a current
 854	 * reference to the buffer will interpret its contents correctly and
 855	 * future cache lookups will also treat it as an empty, uninitialised
 856	 * buffer.
 857	 */
 858	if (error) {
 859		if (!XFS_FORCED_SHUTDOWN(target->bt_mount))
 860			xfs_buf_ioerror_alert(bp, fa);
 861
 862		bp->b_flags &= ~XBF_DONE;
 863		xfs_buf_stale(bp);
 864		xfs_buf_relse(bp);
 865
 866		/* bad CRC means corrupted metadata */
 867		if (error == -EFSBADCRC)
 868			error = -EFSCORRUPTED;
 869		return error;
 870	}
 871
 872	*bpp = bp;
 873	return 0;
 874}
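
/*
 * A minimal sketch of a synchronous read through this interface; the buftarg,
 * daddr, length and verifier below are illustrative (xfs_sb_buf_ops is only
 * one possible choice of ops):
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_map(mp->m_ddev_targp, &map, 1, 0, &bp,
 *				 &xfs_sb_buf_ops, __this_address);
 *	if (error)
 *		return error;
 *	// bp is locked, referenced and verified here; use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */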
 875
 876/*
 877 *	If we are not low on memory then do the readahead in a deadlock
 878 *	safe manner.
 879 */
 880void
 881xfs_buf_readahead_map(
 882	struct xfs_buftarg	*target,
 883	struct xfs_buf_map	*map,
 884	int			nmaps,
 885	const struct xfs_buf_ops *ops)
 886{
 887	struct xfs_buf		*bp;
 888
 889	if (bdi_read_congested(target->bt_bdev->bd_bdi))
 890		return;
 891
 892	xfs_buf_read_map(target, map, nmaps,
 893		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
 894		     __this_address);
 895}
 896
 897/*
 898 * Read an uncached buffer from disk. Allocates and returns a locked
 899 * buffer containing the disk contents or nothing.
 900 */
 901int
 902xfs_buf_read_uncached(
 903	struct xfs_buftarg	*target,
 904	xfs_daddr_t		daddr,
 905	size_t			numblks,
 906	int			flags,
 907	struct xfs_buf		**bpp,
 908	const struct xfs_buf_ops *ops)
 909{
 910	struct xfs_buf		*bp;
 911	int			error;
 912
 913	*bpp = NULL;
 914
 915	error = xfs_buf_get_uncached(target, numblks, flags, &bp);
 916	if (error)
 917		return error;
 918
 919	/* set up the buffer for a read IO */
 920	ASSERT(bp->b_map_count == 1);
 921	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
 922	bp->b_maps[0].bm_bn = daddr;
 923	bp->b_flags |= XBF_READ;
 924	bp->b_ops = ops;
 925
 926	xfs_buf_submit(bp);
 927	if (bp->b_error) {
 928		error = bp->b_error;
 929		xfs_buf_relse(bp);
 930		return error;
 931	}
 932
 933	*bpp = bp;
 934	return 0;
 935}
 936
 937int
 938xfs_buf_get_uncached(
 939	struct xfs_buftarg	*target,
 940	size_t			numblks,
 941	int			flags,
 942	struct xfs_buf		**bpp)
 943{
 944	unsigned long		page_count;
 945	int			error, i;
 946	struct xfs_buf		*bp;
 947	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 948
 949	*bpp = NULL;
 950
 951	/* flags might contain irrelevant bits, pass only what we care about */
 952	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
 953	if (error)
 954		goto fail;
 955
 956	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
 957	error = _xfs_buf_get_pages(bp, page_count);
 958	if (error)
 959		goto fail_free_buf;
 960
 961	for (i = 0; i < page_count; i++) {
 962		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 963		if (!bp->b_pages[i]) {
 964			error = -ENOMEM;
 965			goto fail_free_mem;
 966		}
 967	}
 968	bp->b_flags |= _XBF_PAGES;
 969
 970	error = _xfs_buf_map_pages(bp, 0);
 971	if (unlikely(error)) {
 972		xfs_warn(target->bt_mount,
 973			"%s: failed to map pages", __func__);
 974		goto fail_free_mem;
 975	}
 976
 977	trace_xfs_buf_get_uncached(bp, _RET_IP_);
 978	*bpp = bp;
 979	return 0;
 980
 981 fail_free_mem:
 982	while (--i >= 0)
 983		__free_page(bp->b_pages[i]);
 984	_xfs_buf_free_pages(bp);
 985 fail_free_buf:
 986	xfs_buf_free_maps(bp);
 987	kmem_cache_free(xfs_buf_zone, bp);
 988 fail:
 989	return error;
 990}
 991
 992/*
 993 *	Increment reference count on buffer, to hold the buffer concurrently
 994 *	with another thread which may release (free) the buffer asynchronously.
 995 *	Must hold the buffer already to call this function.
 996 */
 997void
 998xfs_buf_hold(
 999	xfs_buf_t		*bp)
1000{
1001	trace_xfs_buf_hold(bp, _RET_IP_);
1002	atomic_inc(&bp->b_hold);
1003}
1004
1005/*
1006 * Release a hold on the specified buffer. If the hold count is 1, the buffer
1007 * is placed on the LRU or freed (depending on b_lru_ref).
1008 */
1009void
1010xfs_buf_rele(
1011	xfs_buf_t		*bp)
1012{
1013	struct xfs_perag	*pag = bp->b_pag;
1014	bool			release;
1015	bool			freebuf = false;
1016
1017	trace_xfs_buf_rele(bp, _RET_IP_);
1018
1019	if (!pag) {
1020		ASSERT(list_empty(&bp->b_lru));
1021		if (atomic_dec_and_test(&bp->b_hold)) {
1022			xfs_buf_ioacct_dec(bp);
1023			xfs_buf_free(bp);
1024		}
1025		return;
1026	}
1027
1028	ASSERT(atomic_read(&bp->b_hold) > 0);
1029
1030	/*
1031	 * We grab the b_lock here first to serialise racing xfs_buf_rele()
1032	 * calls. The pag_buf_lock being taken on the last reference only
1033	 * serialises against racing lookups in xfs_buf_find(). IOWs, the second
1034	 * to last reference we drop here is not serialised against the last
1035	 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1036	 * first, the last "release" reference can win the race to the lock and
1037	 * free the buffer before the second-to-last reference is processed,
1038	 * leading to a use-after-free scenario.
1039	 */
1040	spin_lock(&bp->b_lock);
1041	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
1042	if (!release) {
1043		/*
1044		 * Drop the in-flight state if the buffer is already on the LRU
1045		 * and it holds the only reference. This is racy because we
1046	 * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
1047		 * ensures the decrement occurs only once per-buf.
1048		 */
1049		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1050			__xfs_buf_ioacct_dec(bp);
1051		goto out_unlock;
1052	}
1053
1054	/* the last reference has been dropped ... */
1055	__xfs_buf_ioacct_dec(bp);
1056	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1057		/*
1058		 * If the buffer is added to the LRU take a new reference to the
1059		 * buffer for the LRU and clear the (now stale) dispose list
1060		 * state flag
1061		 */
1062		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
1063			bp->b_state &= ~XFS_BSTATE_DISPOSE;
1064			atomic_inc(&bp->b_hold);
1065		}
1066		spin_unlock(&pag->pag_buf_lock);
1067	} else {
1068		/*
1069		 * most of the time buffers will already be removed from the
1070		 * LRU, so optimise that case by checking for the
1071		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
1072		 * was on was the disposal list
1073		 */
1074		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1075			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
1076		} else {
1077			ASSERT(list_empty(&bp->b_lru));
1078		}
1079
1080		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1081		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
1082				       xfs_buf_hash_params);
1083		spin_unlock(&pag->pag_buf_lock);
1084		xfs_perag_put(pag);
1085		freebuf = true;
1086	}
1087
1088out_unlock:
1089	spin_unlock(&bp->b_lock);
1090
1091	if (freebuf)
1092		xfs_buf_free(bp);
1093}
1094
1095
1096/*
1097 *	Lock a buffer object, if it is not already locked.
1098 *
1099 *	If we come across a stale, pinned, locked buffer, we know that we are
1100 *	being asked to lock a buffer that has been reallocated. Because it is
1101 *	pinned, we know that the log has not been pushed to disk and hence it
1102 *	will still be locked.  Rather than continuing to have trylock attempts
1103 *	fail until someone else pushes the log, push it ourselves before
1104 *	returning.  This means that the xfsaild will not get stuck trying
1105 *	to push on stale inode buffers.
1106 */
1107int
1108xfs_buf_trylock(
1109	struct xfs_buf		*bp)
1110{
1111	int			locked;
1112
1113	locked = down_trylock(&bp->b_sema) == 0;
1114	if (locked)
1115		trace_xfs_buf_trylock(bp, _RET_IP_);
1116	else
1117		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1118	return locked;
1119}
1120
1121/*
1122 *	Lock a buffer object.
1123 *
1124 *	If we come across a stale, pinned, locked buffer, we know that we
1125 *	are being asked to lock a buffer that has been reallocated. Because
1126 *	it is pinned, we know that the log has not been pushed to disk and
1127 *	hence it will still be locked. Rather than sleeping until someone
1128 *	else pushes the log, push it ourselves before trying to get the lock.
1129 */
1130void
1131xfs_buf_lock(
1132	struct xfs_buf		*bp)
1133{
1134	trace_xfs_buf_lock(bp, _RET_IP_);
1135
1136	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1137		xfs_log_force(bp->b_mount, 0);
1138	down(&bp->b_sema);
1139
1140	trace_xfs_buf_lock_done(bp, _RET_IP_);
1141}
1142
1143void
1144xfs_buf_unlock(
1145	struct xfs_buf		*bp)
1146{
1147	ASSERT(xfs_buf_islocked(bp));
1148
1149	up(&bp->b_sema);
1150	trace_xfs_buf_unlock(bp, _RET_IP_);
1151}
1152
1153STATIC void
1154xfs_buf_wait_unpin(
1155	xfs_buf_t		*bp)
1156{
1157	DECLARE_WAITQUEUE	(wait, current);
1158
1159	if (atomic_read(&bp->b_pin_count) == 0)
1160		return;
1161
1162	add_wait_queue(&bp->b_waiters, &wait);
1163	for (;;) {
1164		set_current_state(TASK_UNINTERRUPTIBLE);
1165		if (atomic_read(&bp->b_pin_count) == 0)
1166			break;
1167		io_schedule();
1168	}
1169	remove_wait_queue(&bp->b_waiters, &wait);
1170	set_current_state(TASK_RUNNING);
1171}
1172
1173/*
1174 *	Buffer Utility Routines
1175 */
1176
1177void
1178xfs_buf_ioend(
1179	struct xfs_buf	*bp)
1180{
1181	bool		read = bp->b_flags & XBF_READ;
1182
1183	trace_xfs_buf_iodone(bp, _RET_IP_);
1184
1185	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1186
1187	/*
1188	 * Pull in IO completion errors now. We are guaranteed to be running
1189	 * single threaded, so we don't need the lock to read b_io_error.
1190	 */
1191	if (!bp->b_error && bp->b_io_error)
1192		xfs_buf_ioerror(bp, bp->b_io_error);
1193
1194	if (read) {
1195		if (!bp->b_error && bp->b_ops)
1196			bp->b_ops->verify_read(bp);
1197		if (!bp->b_error)
1198			bp->b_flags |= XBF_DONE;
1199		xfs_buf_ioend_finish(bp);
1200		return;
1201	}
1202
1203	if (!bp->b_error) {
1204		bp->b_flags &= ~XBF_WRITE_FAIL;
1205		bp->b_flags |= XBF_DONE;
1206	}
1207
1208	/*
1209	 * If this is a log recovery buffer, we aren't doing transactional IO
1210	 * yet so we need to let it handle IO completions.
1211	 */
1212	if (bp->b_flags & _XBF_LOGRECOVERY) {
1213		xlog_recover_iodone(bp);
1214		return;
1215	}
1216
1217	if (bp->b_flags & _XBF_INODES) {
1218		xfs_buf_inode_iodone(bp);
1219		return;
1220	}
1221
1222	if (bp->b_flags & _XBF_DQUOTS) {
1223		xfs_buf_dquot_iodone(bp);
1224		return;
1225	}
1226	xfs_buf_iodone(bp);
1227}
1228
1229static void
1230xfs_buf_ioend_work(
1231	struct work_struct	*work)
1232{
1233	struct xfs_buf		*bp =
1234		container_of(work, xfs_buf_t, b_ioend_work);
1235
1236	xfs_buf_ioend(bp);
1237}
1238
1239static void
1240xfs_buf_ioend_async(
1241	struct xfs_buf	*bp)
1242{
1243	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1244	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1245}
1246
1247void
1248__xfs_buf_ioerror(
1249	xfs_buf_t		*bp,
1250	int			error,
1251	xfs_failaddr_t		failaddr)
1252{
1253	ASSERT(error <= 0 && error >= -1000);
1254	bp->b_error = error;
1255	trace_xfs_buf_ioerror(bp, error, failaddr);
1256}
1257
1258void
1259xfs_buf_ioerror_alert(
1260	struct xfs_buf		*bp,
1261	xfs_failaddr_t		func)
1262{
1263	xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1264		"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
1265				  func, (uint64_t)XFS_BUF_ADDR(bp),
1266				  bp->b_length, -bp->b_error);
1267}
1268
1269/*
1270 * To simulate an I/O failure, the buffer must be locked and held with at least
1271 * three references. The LRU reference is dropped by the stale call. The buf
1272 * item reference is dropped via ioend processing. The third reference is owned
1273 * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
1274 */
1275void
1276xfs_buf_ioend_fail(
1277	struct xfs_buf	*bp)
1278{
1279	bp->b_flags &= ~XBF_DONE;
1280	xfs_buf_stale(bp);
1281	xfs_buf_ioerror(bp, -EIO);
1282	xfs_buf_ioend(bp);
1283}
1284
1285int
1286xfs_bwrite(
1287	struct xfs_buf		*bp)
1288{
1289	int			error;
1290
1291	ASSERT(xfs_buf_islocked(bp));
1292
1293	bp->b_flags |= XBF_WRITE;
1294	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1295			 XBF_DONE);
1296
1297	error = xfs_buf_submit(bp);
1298	if (error)
1299		xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1300	return error;
1301}
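
/*
 * Typical usage is a sketch like the following (assuming @bp is locked and
 * dirty): any error returned here has already shut the filesystem down, so
 * the caller only needs to release the buffer and propagate the error.
 *
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *	if (error)
 *		return error;
 */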
1302
1303static void
1304xfs_buf_bio_end_io(
1305	struct bio		*bio)
1306{
1307	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
1308
1309	if (!bio->bi_status &&
1310	    (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1311	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1312		bio->bi_status = BLK_STS_IOERR;
1313
1314	/*
1315	 * don't overwrite existing errors - otherwise we can lose errors on
1316	 * buffers that require multiple bios to complete.
1317	 */
1318	if (bio->bi_status) {
1319		int error = blk_status_to_errno(bio->bi_status);
1320
1321		cmpxchg(&bp->b_io_error, 0, error);
1322	}
1323
1324	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1325		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1326
1327	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1328		xfs_buf_ioend_async(bp);
1329	bio_put(bio);
1330}
1331
1332static void
1333xfs_buf_ioapply_map(
1334	struct xfs_buf	*bp,
1335	int		map,
1336	int		*buf_offset,
1337	int		*count,
1338	int		op)
1339{
1340	int		page_index;
1341	int		total_nr_pages = bp->b_page_count;
1342	int		nr_pages;
1343	struct bio	*bio;
1344	sector_t	sector =  bp->b_maps[map].bm_bn;
1345	int		size;
1346	int		offset;
1347
1348	/* skip the pages in the buffer before the start offset */
1349	page_index = 0;
1350	offset = *buf_offset;
1351	while (offset >= PAGE_SIZE) {
1352		page_index++;
1353		offset -= PAGE_SIZE;
1354	}
1355
1356	/*
1357	 * Limit the IO size to the length of the current vector, and update the
1358	 * remaining IO count for the next time around.
1359	 */
1360	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1361	*count -= size;
1362	*buf_offset += size;
1363
1364next_chunk:
1365	atomic_inc(&bp->b_io_remaining);
1366	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
1367
1368	bio = bio_alloc(GFP_NOIO, nr_pages);
1369	bio_set_dev(bio, bp->b_target->bt_bdev);
1370	bio->bi_iter.bi_sector = sector;
1371	bio->bi_end_io = xfs_buf_bio_end_io;
1372	bio->bi_private = bp;
1373	bio->bi_opf = op;
1374
1375	for (; size && nr_pages; nr_pages--, page_index++) {
1376		int	rbytes, nbytes = PAGE_SIZE - offset;
1377
1378		if (nbytes > size)
1379			nbytes = size;
1380
1381		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1382				      offset);
1383		if (rbytes < nbytes)
1384			break;
1385
1386		offset = 0;
1387		sector += BTOBB(nbytes);
1388		size -= nbytes;
1389		total_nr_pages--;
1390	}
1391
1392	if (likely(bio->bi_iter.bi_size)) {
1393		if (xfs_buf_is_vmapped(bp)) {
1394			flush_kernel_vmap_range(bp->b_addr,
1395						xfs_buf_vmap_len(bp));
1396		}
1397		submit_bio(bio);
1398		if (size)
1399			goto next_chunk;
1400	} else {
1401		/*
1402		 * This is guaranteed not to be the last io reference count
1403		 * because the caller (xfs_buf_submit) holds a count itself.
1404		 */
1405		atomic_dec(&bp->b_io_remaining);
1406		xfs_buf_ioerror(bp, -EIO);
1407		bio_put(bio);
1408	}
1409
1410}
1411
1412STATIC void
1413_xfs_buf_ioapply(
1414	struct xfs_buf	*bp)
1415{
1416	struct blk_plug	plug;
1417	int		op;
1418	int		offset;
1419	int		size;
1420	int		i;
1421
1422	/*
1423	 * Make sure we capture only current IO errors rather than stale errors
1424	 * left over from previous use of the buffer (e.g. failed readahead).
1425	 */
1426	bp->b_error = 0;
1427
1428	if (bp->b_flags & XBF_WRITE) {
1429		op = REQ_OP_WRITE;
1430
1431		/*
1432		 * Run the write verifier callback function if it exists. If
1433		 * this function fails it will mark the buffer with an error and
1434		 * the IO should not be dispatched.
1435		 */
1436		if (bp->b_ops) {
1437			bp->b_ops->verify_write(bp);
1438			if (bp->b_error) {
1439				xfs_force_shutdown(bp->b_mount,
1440						   SHUTDOWN_CORRUPT_INCORE);
1441				return;
1442			}
1443		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1444			struct xfs_mount *mp = bp->b_mount;
1445
1446			/*
1447			 * non-crc filesystems don't attach verifiers during
1448			 * log recovery, so don't warn for such filesystems.
1449			 */
1450			if (xfs_sb_version_hascrc(&mp->m_sb)) {
1451				xfs_warn(mp,
1452					"%s: no buf ops on daddr 0x%llx len %d",
1453					__func__, bp->b_bn, bp->b_length);
1454				xfs_hex_dump(bp->b_addr,
1455						XFS_CORRUPTION_DUMP_LEN);
1456				dump_stack();
1457			}
1458		}
1459	} else {
1460		op = REQ_OP_READ;
1461		if (bp->b_flags & XBF_READ_AHEAD)
1462			op |= REQ_RAHEAD;
1463	}
1464
1465	/* we only use the buffer cache for meta-data */
1466	op |= REQ_META;
1467
1468	/*
1469	 * Walk all the vectors issuing IO on them. Set up the initial offset
1470	 * into the buffer and the desired IO size before we start -
1471	 * xfs_buf_ioapply_map() will modify them appropriately for each
1472	 * subsequent call.
1473	 */
1474	offset = bp->b_offset;
1475	size = BBTOB(bp->b_length);
1476	blk_start_plug(&plug);
1477	for (i = 0; i < bp->b_map_count; i++) {
1478		xfs_buf_ioapply_map(bp, i, &offset, &size, op);
1479		if (bp->b_error)
1480			break;
1481		if (size <= 0)
1482			break;	/* all done */
1483	}
1484	blk_finish_plug(&plug);
1485}
1486
1487/*
1488 * Wait for I/O completion of a sync buffer and return the I/O error code.
1489 */
1490static int
1491xfs_buf_iowait(
1492	struct xfs_buf	*bp)
1493{
1494	ASSERT(!(bp->b_flags & XBF_ASYNC));
1495
1496	trace_xfs_buf_iowait(bp, _RET_IP_);
1497	wait_for_completion(&bp->b_iowait);
1498	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1499
1500	return bp->b_error;
1501}
1502
1503/*
1504 * Buffer I/O submission path, read or write. Asynchronous submission transfers
1505 * the buffer lock ownership and the current reference to the IO. It is not
1506 * safe to reference the buffer after a call to this function unless the caller
1507 * holds an additional reference itself.
1508 */
1509int
1510__xfs_buf_submit(
1511	struct xfs_buf	*bp,
1512	bool		wait)
1513{
1514	int		error = 0;
1515
1516	trace_xfs_buf_submit(bp, _RET_IP_);
1517
1518	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1519
1520	/* on shutdown we stale and complete the buffer immediately */
1521	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1522		xfs_buf_ioend_fail(bp);
1523		return -EIO;
1524	}
1525
1526	/*
1527	 * Grab a reference so the buffer does not go away underneath us. For
1528	 * async buffers, I/O completion drops the callers reference, which
1529	 * could occur before submission returns.
1530	 */
1531	xfs_buf_hold(bp);
1532
1533	if (bp->b_flags & XBF_WRITE)
1534		xfs_buf_wait_unpin(bp);
1535
1536	/* clear the internal error state to avoid spurious errors */
1537	bp->b_io_error = 0;
1538
1539	/*
1540	 * Set the count to 1 initially; this will stop an I/O completion
1541	 * callout which happens before we have started all the I/O from calling
1542	 * xfs_buf_ioend too early.
1543	 */
1544	atomic_set(&bp->b_io_remaining, 1);
1545	if (bp->b_flags & XBF_ASYNC)
1546		xfs_buf_ioacct_inc(bp);
1547	_xfs_buf_ioapply(bp);
1548
1549	/*
1550	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1551	 * reference we took above. If we drop it to zero, run completion so
1552	 * that we don't return to the caller with completion still pending.
1553	 */
1554	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1555		if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1556			xfs_buf_ioend(bp);
1557		else
1558			xfs_buf_ioend_async(bp);
1559	}
1560
1561	if (wait)
1562		error = xfs_buf_iowait(bp);
1563
1564	/*
1565	 * Release the hold that keeps the buffer referenced for the entire
1566	 * I/O. Note that if the buffer is async, it is not safe to reference
1567	 * after this release.
1568	 */
1569	xfs_buf_rele(bp);
1570	return error;
1571}
1572
1573void *
1574xfs_buf_offset(
1575	struct xfs_buf		*bp,
1576	size_t			offset)
1577{
1578	struct page		*page;
1579
1580	if (bp->b_addr)
1581		return bp->b_addr + offset;
1582
1583	offset += bp->b_offset;
1584	page = bp->b_pages[offset >> PAGE_SHIFT];
1585	return page_address(page) + (offset & (PAGE_SIZE-1));
1586}
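
/*
 * For example, inode cluster buffers are usually kept unmapped (XBF_UNMAPPED),
 * so callers locate an individual inode by byte offset instead of touching
 * b_addr directly; @imap here is an assumed struct xfs_imap from the caller:
 *
 *	struct xfs_dinode	*dip = xfs_buf_offset(bp, imap->im_boffset);
 */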
1587
1588void
1589xfs_buf_zero(
1590	struct xfs_buf		*bp,
1591	size_t			boff,
1592	size_t			bsize)
1593{
1594	size_t			bend;
1595
1596	bend = boff + bsize;
1597	while (boff < bend) {
1598		struct page	*page;
1599		int		page_index, page_offset, csize;
1600
1601		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1602		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1603		page = bp->b_pages[page_index];
1604		csize = min_t(size_t, PAGE_SIZE - page_offset,
1605				      BBTOB(bp->b_length) - boff);
1606
1607		ASSERT((csize + page_offset) <= PAGE_SIZE);
1608
1609		memset(page_address(page) + page_offset, 0, csize);
1610
1611		boff += csize;
1612	}
1613}
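
/*
 * For example, zeroing the entire data area of a buffer (b_length is in basic
 * blocks, hence the BBTOB() conversion to bytes):
 *
 *	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
 */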
1614
1615/*
1616 * Log a message about and stale a buffer that a caller has decided is corrupt.
1617 *
1618 * This function should be called for the kinds of metadata corruption that
1619 * cannot be detected by a verifier, such as incorrect inter-block relationship
1620 * data.  Do /not/ call this function from a verifier function.
1621 *
1622 * The buffer must be XBF_DONE prior to the call.  Afterwards, the buffer will
1623 * be marked stale, but b_error will not be set.  The caller is responsible for
1624 * releasing the buffer or fixing it.
1625 */
1626void
1627__xfs_buf_mark_corrupt(
1628	struct xfs_buf		*bp,
1629	xfs_failaddr_t		fa)
1630{
1631	ASSERT(bp->b_flags & XBF_DONE);
1632
1633	xfs_buf_corruption_error(bp, fa);
1634	xfs_buf_stale(bp);
1635}
1636
1637/*
1638 *	Handling of buffer targets (buftargs).
1639 */
1640
1641/*
1642 * Wait for any bufs with callbacks that have been submitted but have not yet
1643 * returned. These buffers will have an elevated hold count, so wait on those
1644 * while freeing all the buffers only held by the LRU.
1645 */
1646static enum lru_status
1647xfs_buftarg_wait_rele(
1648	struct list_head	*item,
1649	struct list_lru_one	*lru,
1650	spinlock_t		*lru_lock,
1651	void			*arg)
1652
1653{
1654	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1655	struct list_head	*dispose = arg;
1656
1657	if (atomic_read(&bp->b_hold) > 1) {
1658		/* need to wait, so skip it this pass */
1659		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1660		return LRU_SKIP;
1661	}
1662	if (!spin_trylock(&bp->b_lock))
1663		return LRU_SKIP;
1664
1665	/*
1666	 * clear the LRU reference count so the buffer doesn't get
1667	 * ignored in xfs_buf_rele().
1668	 */
1669	atomic_set(&bp->b_lru_ref, 0);
1670	bp->b_state |= XFS_BSTATE_DISPOSE;
1671	list_lru_isolate_move(lru, item, dispose);
1672	spin_unlock(&bp->b_lock);
1673	return LRU_REMOVED;
1674}
1675
1676void
1677xfs_wait_buftarg(
1678	struct xfs_buftarg	*btp)
1679{
1680	LIST_HEAD(dispose);
1681	int			loop = 0;
1682	bool			write_fail = false;
1683
1684	/*
1685	 * First wait on the buftarg I/O count for all in-flight buffers to be
1686	 * released. This is critical as new buffers do not make the LRU until
1687	 * they are released.
1688	 *
1689	 * Next, flush the buffer workqueue to ensure all completion processing
1690	 * has finished. Just waiting on buffer locks is not sufficient for
1691	 * async IO as the reference count held over IO is not released until
1692	 * after the buffer lock is dropped. Hence we need to ensure here that
1693	 * all reference counts have been dropped before we start walking the
1694	 * LRU list.
1695	 */
1696	while (percpu_counter_sum(&btp->bt_io_count))
1697		delay(100);
1698	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1699
1700	/* loop until there is nothing left on the lru list. */
1701	while (list_lru_count(&btp->bt_lru)) {
1702		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1703			      &dispose, LONG_MAX);
1704
1705		while (!list_empty(&dispose)) {
1706			struct xfs_buf *bp;
1707			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1708			list_del_init(&bp->b_lru);
1709			if (bp->b_flags & XBF_WRITE_FAIL) {
1710				write_fail = true;
1711				xfs_buf_alert_ratelimited(bp,
1712					"XFS: Corruption Alert",
1713"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1714					(long long)bp->b_bn);
1715			}
1716			xfs_buf_rele(bp);
1717		}
1718		if (loop++ != 0)
1719			delay(100);
1720	}
1721
1722	/*
1723	 * If one or more failed buffers were freed, that means dirty metadata
1724	 * was thrown away. This should only ever happen after I/O completion
1725	 * handling has elevated I/O error(s) to permanent failures and shuts
1726	 * down the fs.
1727	 */
1728	if (write_fail) {
1729		ASSERT(XFS_FORCED_SHUTDOWN(btp->bt_mount));
1730		xfs_alert(btp->bt_mount,
1731	      "Please run xfs_repair to determine the extent of the problem.");
1732	}
1733}
1734
1735static enum lru_status
1736xfs_buftarg_isolate(
1737	struct list_head	*item,
1738	struct list_lru_one	*lru,
1739	spinlock_t		*lru_lock,
1740	void			*arg)
1741{
1742	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1743	struct list_head	*dispose = arg;
1744
1745	/*
1746	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1747	 * If we fail to get the lock, just skip it.
1748	 */
1749	if (!spin_trylock(&bp->b_lock))
1750		return LRU_SKIP;
1751	/*
1752	 * Decrement the b_lru_ref count unless the value is already
1753	 * zero. If the value is already zero, we need to reclaim the
1754	 * buffer, otherwise it gets another trip through the LRU.
1755	 */
1756	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1757		spin_unlock(&bp->b_lock);
1758		return LRU_ROTATE;
1759	}
1760
1761	bp->b_state |= XFS_BSTATE_DISPOSE;
1762	list_lru_isolate_move(lru, item, dispose);
1763	spin_unlock(&bp->b_lock);
1764	return LRU_REMOVED;
1765}
1766
1767static unsigned long
1768xfs_buftarg_shrink_scan(
1769	struct shrinker		*shrink,
1770	struct shrink_control	*sc)
1771{
1772	struct xfs_buftarg	*btp = container_of(shrink,
1773					struct xfs_buftarg, bt_shrinker);
1774	LIST_HEAD(dispose);
1775	unsigned long		freed;
1776
1777	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1778				     xfs_buftarg_isolate, &dispose);
1779
1780	while (!list_empty(&dispose)) {
1781		struct xfs_buf *bp;
1782		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1783		list_del_init(&bp->b_lru);
1784		xfs_buf_rele(bp);
1785	}
1786
1787	return freed;
1788}
1789
1790static unsigned long
1791xfs_buftarg_shrink_count(
1792	struct shrinker		*shrink,
1793	struct shrink_control	*sc)
1794{
1795	struct xfs_buftarg	*btp = container_of(shrink,
1796					struct xfs_buftarg, bt_shrinker);
1797	return list_lru_shrink_count(&btp->bt_lru, sc);
1798}
1799
1800void
1801xfs_free_buftarg(
1802	struct xfs_buftarg	*btp)
1803{
1804	unregister_shrinker(&btp->bt_shrinker);
1805	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1806	percpu_counter_destroy(&btp->bt_io_count);
1807	list_lru_destroy(&btp->bt_lru);
1808
1809	xfs_blkdev_issue_flush(btp);
1810
1811	kmem_free(btp);
1812}
1813
1814int
1815xfs_setsize_buftarg(
1816	xfs_buftarg_t		*btp,
1817	unsigned int		sectorsize)
1818{
1819	/* Set up metadata sector size info */
1820	btp->bt_meta_sectorsize = sectorsize;
1821	btp->bt_meta_sectormask = sectorsize - 1;
1822
1823	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1824		xfs_warn(btp->bt_mount,
1825			"Cannot set_blocksize to %u on device %pg",
1826			sectorsize, btp->bt_bdev);
1827		return -EINVAL;
1828	}
1829
1830	/* Set up device logical sector size mask */
1831	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1832	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1833
1834	return 0;
1835}
1836
1837/*
1838 * When allocating the initial buffer target we have not yet
1839 * read in the superblock, so don't know what sized sectors
1840 * are being used at this early stage.  Play safe.
1841 */
1842STATIC int
1843xfs_setsize_buftarg_early(
1844	xfs_buftarg_t		*btp,
1845	struct block_device	*bdev)
1846{
1847	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1848}
1849
1850xfs_buftarg_t *
1851xfs_alloc_buftarg(
1852	struct xfs_mount	*mp,
1853	struct block_device	*bdev,
1854	struct dax_device	*dax_dev)
1855{
1856	xfs_buftarg_t		*btp;
1857
1858	btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
1859
1860	btp->bt_mount = mp;
1861	btp->bt_dev =  bdev->bd_dev;
1862	btp->bt_bdev = bdev;
1863	btp->bt_daxdev = dax_dev;
1864
1865	/*
1866	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
1867	 * per 30 seconds so as to not spam logs too much on repeated errors.
1868	 */
1869	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
1870			     DEFAULT_RATELIMIT_BURST);
1871
1872	if (xfs_setsize_buftarg_early(btp, bdev))
1873		goto error_free;
1874
1875	if (list_lru_init(&btp->bt_lru))
1876		goto error_free;
1877
1878	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1879		goto error_lru;
1880
1881	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1882	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1883	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1884	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1885	if (register_shrinker(&btp->bt_shrinker))
1886		goto error_pcpu;
1887	return btp;
1888
1889error_pcpu:
1890	percpu_counter_destroy(&btp->bt_io_count);
1891error_lru:
1892	list_lru_destroy(&btp->bt_lru);
1893error_free:
1894	kmem_free(btp);
1895	return NULL;
1896}
1897
1898/*
1899 * Cancel a delayed write list.
1900 *
1901 * Remove each buffer from the list, clear the delwri queue flag and drop the
1902 * associated buffer reference.
1903 */
1904void
1905xfs_buf_delwri_cancel(
1906	struct list_head	*list)
1907{
1908	struct xfs_buf		*bp;
1909
1910	while (!list_empty(list)) {
1911		bp = list_first_entry(list, struct xfs_buf, b_list);
1912
1913		xfs_buf_lock(bp);
1914		bp->b_flags &= ~_XBF_DELWRI_Q;
1915		list_del_init(&bp->b_list);
1916		xfs_buf_relse(bp);
1917	}
1918}
1919
1920/*
1921 * Add a buffer to the delayed write list.
1922 *
1923 * This queues a buffer for writeout if it hasn't already been.  Note that
1924 * neither this routine nor the buffer list submission functions perform
1925 * any internal synchronization.  It is expected that the lists are thread-local
1926 * to the callers.
1927 *
1928 * Returns true if we queued up the buffer, or false if it already had
1929 * been on the buffer list.
1930 */
1931bool
1932xfs_buf_delwri_queue(
1933	struct xfs_buf		*bp,
1934	struct list_head	*list)
1935{
1936	ASSERT(xfs_buf_islocked(bp));
1937	ASSERT(!(bp->b_flags & XBF_READ));
1938
1939	/*
1940	 * If the buffer is already marked delwri it already is queued up
1941	 * by someone else for immediate writeout.  Just ignore it in that
1942	 * case.
1943	 */
1944	if (bp->b_flags & _XBF_DELWRI_Q) {
1945		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1946		return false;
1947	}
1948
1949	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1950
1951	/*
1952	 * If a buffer gets written out synchronously or marked stale while it
1953	 * is on a delwri list we lazily remove it. To do this, the other party
1954	 * clears the  _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1955	 * It remains referenced and on the list.  In a rare corner case it
1956	 * might get re-added to a delwri list after the synchronous writeout, in
1957	 * which case we just need to re-add the flag here.
1958	 */
1959	bp->b_flags |= _XBF_DELWRI_Q;
1960	if (list_empty(&bp->b_list)) {
1961		atomic_inc(&bp->b_hold);
1962		list_add_tail(&bp->b_list, list);
1963	}
1964
1965	return true;
1966}
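
/*
 * A minimal sketch of the delayed write pattern (the list is thread-local to
 * the caller, per the note above): queue buffers while they are locked,
 * unlock them, and submit the whole list in one pass later.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// bp is locked here
 *	xfs_buf_unlock(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */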
1967
1968/*
1969 * Compare function is more complex than it needs to be because
1970 * the return value is only 32 bits and we are doing comparisons
1971 * on 64 bit values
1972 */
1973static int
1974xfs_buf_cmp(
1975	void		*priv,
1976	struct list_head *a,
1977	struct list_head *b)
1978{
1979	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
1980	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
1981	xfs_daddr_t		diff;
1982
1983	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1984	if (diff < 0)
1985		return -1;
1986	if (diff > 0)
1987		return 1;
1988	return 0;
1989}
1990
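/*
 * Editor's note (not part of the original file): the comparison above cannot
 * simply return the raw difference because the list_sort() callback returns
 * an int. For two buffers whose block numbers differ by a multiple of 2^32,
 * e.g. bm_bn values of 0x100000000 and 0x0, truncating the 64-bit difference
 * to 32 bits would yield 0 and the sort would treat them as equal.
 */
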
1991/*
1992 * Submit buffers for write. If wait_list is specified, the buffers are
1993 * submitted using sync I/O and placed on the wait list such that the caller can
1994 * iowait each buffer. Otherwise async I/O is used and the buffers are released
1995 * at I/O completion time. In either case, buffers remain locked until I/O
1996 * completes and the buffer is released from the queue.
1997 */
1998static int
1999xfs_buf_delwri_submit_buffers(
2000	struct list_head	*buffer_list,
2001	struct list_head	*wait_list)
2002{
2003	struct xfs_buf		*bp, *n;
2004	int			pinned = 0;
2005	struct blk_plug		plug;
2006
2007	list_sort(NULL, buffer_list, xfs_buf_cmp);
2008
2009	blk_start_plug(&plug);
2010	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2011		if (!wait_list) {
2012			if (xfs_buf_ispinned(bp)) {
2013				pinned++;
2014				continue;
2015			}
2016			if (!xfs_buf_trylock(bp))
2017				continue;
2018		} else {
2019			xfs_buf_lock(bp);
2020		}
2021
2022		/*
2023		 * Someone else might have written the buffer synchronously or
2024		 * marked it stale in the meantime.  In that case only the
2025		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
2026		 * reference and remove it from the list here.
2027		 */
2028		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2029			list_del_init(&bp->b_list);
2030			xfs_buf_relse(bp);
2031			continue;
2032		}
2033
2034		trace_xfs_buf_delwri_split(bp, _RET_IP_);
2035
2036		/*
2037		 * If we have a wait list, each buffer (and associated delwri
2038		 * queue reference) transfers to it and is submitted
2039		 * synchronously. Otherwise, drop the buffer from the delwri
2040		 * queue and submit async.
2041		 */
2042		bp->b_flags &= ~_XBF_DELWRI_Q;
2043		bp->b_flags |= XBF_WRITE;
2044		if (wait_list) {
2045			bp->b_flags &= ~XBF_ASYNC;
2046			list_move_tail(&bp->b_list, wait_list);
2047		} else {
2048			bp->b_flags |= XBF_ASYNC;
2049			list_del_init(&bp->b_list);
2050		}
2051		__xfs_buf_submit(bp, false);
2052	}
2053	blk_finish_plug(&plug);
2054
2055	return pinned;
2056}
2057
2058/*
2059 * Write out a buffer list asynchronously.
2060 *
2061 * This will take the @buffer_list, write all non-locked and non-pinned buffers
2062 * out and not wait for I/O completion on any of the buffers.  This interface
2063	 * is only safely usable for callers that can track I/O completion by higher
2064 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2065 * function.
2066 *
2067 * Note: this function will skip buffers it would block on, and in doing so
2068 * leaves them on @buffer_list so they can be retried on a later pass. As such,
2069 * it is up to the caller to ensure that the buffer list is fully submitted or
2070 * cancelled appropriately when they are finished with the list. Failure to
2071 * cancel or resubmit the list until it is empty will result in leaked buffers
2072 * at unmount time.
2073 */
2074int
2075xfs_buf_delwri_submit_nowait(
2076	struct list_head	*buffer_list)
2077{
2078	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
2079}
2080
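/*
 * Editor's example (not part of the original file): one way a caller might
 * honour the contract described above. Buffers that were skipped stay on the
 * list, so the caller either resubmits until the list drains or cancels the
 * remainder before giving up. The helper name xfs_example_push is
 * hypothetical.
 */
static void
xfs_example_push(
	struct list_head	*buffer_list,
	bool			stopping)
{
	xfs_buf_delwri_submit_nowait(buffer_list);

	/* anything still queued must eventually be resubmitted or cancelled */
	if (stopping && !list_empty(buffer_list))
		xfs_buf_delwri_cancel(buffer_list);
}
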
2081/*
2082 * Write out a buffer list synchronously.
2083 *
2084 * This will take the @buffer_list, write all buffers out and wait for I/O
2085 * completion on all of the buffers. @buffer_list is consumed by the function,
2086 * so callers must have some other way of tracking buffers if they require such
2087 * functionality.
2088 */
2089int
2090xfs_buf_delwri_submit(
2091	struct list_head	*buffer_list)
2092{
2093	LIST_HEAD		(wait_list);
2094	int			error = 0, error2;
2095	struct xfs_buf		*bp;
2096
2097	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
2098
2099	/* Wait for IO to complete. */
2100	while (!list_empty(&wait_list)) {
2101		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2102
2103		list_del_init(&bp->b_list);
2104
2105		/*
2106		 * Wait on the locked buffer, check for errors and unlock and
2107		 * release the delwri queue reference.
2108		 */
2109		error2 = xfs_buf_iowait(bp);
2110		xfs_buf_relse(bp);
2111		if (!error)
2112			error = error2;
2113	}
2114
2115	return error;
2116}
2117
2118/*
2119 * Push a single buffer on a delwri queue.
2120 *
2121	 * The purpose of this function is to submit a single buffer from a delwri
2122	 * queue and return with the buffer still on the original queue.
2123	 * The waiting delwri buffer submission infrastructure guarantees transfer of
2124	 * the delwri queue buffer reference to a temporary wait list. We reuse this
2125	 * infrastructure to transfer the buffer back to the original queue.
2126 *
2127	 * Note the buffer transitions from the queued state to the submitted and
2128	 * wait-listed state, and back to the queued state during this call. The buffer
2129 * locking and queue management logic between _delwri_pushbuf() and
2130 * _delwri_queue() guarantee that the buffer cannot be queued to another list
2131 * before returning.
2132 */
2133int
2134xfs_buf_delwri_pushbuf(
2135	struct xfs_buf		*bp,
2136	struct list_head	*buffer_list)
2137{
2138	LIST_HEAD		(submit_list);
2139	int			error;
2140
2141	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2142
2143	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2144
2145	/*
2146	 * Isolate the buffer to a new local list so we can submit it for I/O
2147	 * independently from the rest of the original list.
2148	 */
2149	xfs_buf_lock(bp);
2150	list_move(&bp->b_list, &submit_list);
2151	xfs_buf_unlock(bp);
2152
2153	/*
2154	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2155	 * the buffer on the wait list with the original reference. Rather than
2156	 * bounce the buffer from a local wait list back to the original list
2157	 * after I/O completion, reuse the original list as the wait list.
2158	 */
2159	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2160
2161	/*
2162	 * The buffer is now locked, under I/O and wait listed on the original
2163	 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
2164	 * return with the buffer unlocked and on the original queue.
2165	 */
2166	error = xfs_buf_iowait(bp);
2167	bp->b_flags |= _XBF_DELWRI_Q;
2168	xfs_buf_unlock(bp);
2169
2170	return error;
2171}
2172
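/*
 * Editor's example (not part of the original file): the caller-visible
 * contract of xfs_buf_delwri_pushbuf(). On return the buffer has been
 * submitted and waited on, is unlocked, and sits on @buffer_list again with
 * _XBF_DELWRI_Q set. The helper name xfs_example_force_one is hypothetical.
 */
static int
xfs_example_force_one(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	return xfs_buf_delwri_pushbuf(bp, buffer_list);
}
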
2173int __init
2174xfs_buf_init(void)
2175{
2176	xfs_buf_zone = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2177					 SLAB_HWCACHE_ALIGN |
2178					 SLAB_RECLAIM_ACCOUNT |
2179					 SLAB_MEM_SPREAD,
2180					 NULL);
2181	if (!xfs_buf_zone)
2182		goto out;
2183
2184	return 0;
2185
2186 out:
2187	return -ENOMEM;
2188}
2189
2190void
2191xfs_buf_terminate(void)
2192{
2193	kmem_cache_destroy(xfs_buf_zone);
2194}
2195
2196void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2197{
2198	/*
2199	 * Set the lru reference count to 0 based on the error injection tag.
2200	 * This allows userspace to disrupt buffer caching for debug/testing
2201	 * purposes.
2202	 */
2203	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2204		lru_ref = 0;
2205
2206	atomic_set(&bp->b_lru_ref, lru_ref);
2207}
2208
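/*
 * Editor's example (not part of the original file): frequently reused
 * metadata can be given extra trips around the buffer LRU before the shrinker
 * frees it. The reference value of 3 is arbitrary here; real callers pass
 * per-buffer-type constants.
 */
static void
xfs_example_keep_cached(
	struct xfs_buf		*bp)
{
	xfs_buf_set_ref(bp, 3);
}
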
2209/*
2210 * Verify an on-disk magic value against the magic value specified in the
2211 * verifier structure. The verifier magic is in disk byte order so the caller is
2212 * expected to pass the value directly from disk.
2213 */
2214bool
2215xfs_verify_magic(
2216	struct xfs_buf		*bp,
2217	__be32			dmagic)
2218{
2219	struct xfs_mount	*mp = bp->b_mount;
2220	int			idx;
2221
2222	idx = xfs_sb_version_hascrc(&mp->m_sb);
2223	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2224		return false;
2225	return dmagic == bp->b_ops->magic[idx];
2226}
2227/*
2228 * Verify an on-disk magic value against the magic value specified in the
2229 * verifier structure. The verifier magic is in disk byte order so the caller is
2230 * expected to pass the value directly from disk.
2231 */
2232bool
2233xfs_verify_magic16(
2234	struct xfs_buf		*bp,
2235	__be16			dmagic)
2236{
2237	struct xfs_mount	*mp = bp->b_mount;
2238	int			idx;
2239
2240	idx = xfs_sb_version_hascrc(&mp->m_sb);
2241	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2242		return false;
2243	return dmagic == bp->b_ops->magic16[idx];
2244}
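
/*
 * Editor's example (not part of the original file): a hypothetical verifier
 * showing how the magic arrays consumed by xfs_verify_magic() are wired up.
 * Index 0 carries the pre-CRC (v4) magic and index 1 the CRC-enabled (v5)
 * magic, matching the xfs_sb_version_hascrc() index used above. The magic
 * value, ops structure and on-disk layout below are invented for illustration
 * and assume the magic is the first field of the block.
 */
#define XFS_EXAMPLE_MAGIC	0x58455021	/* "XEP!", hypothetical */

static void
xfs_example_read_verify(
	struct xfs_buf		*bp)
{
	if (!xfs_verify_magic(bp, *(__be32 *)bp->b_addr))
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}

const struct xfs_buf_ops xfs_example_buf_ops = {
	.name		= "xfs_example",
	.magic		= { cpu_to_be32(XFS_EXAMPLE_MAGIC),
			    cpu_to_be32(XFS_EXAMPLE_MAGIC) },
	.verify_read	= xfs_example_read_verify,
};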